author     Anthony G. Basile <blueness@gentoo.org>  2012-06-08 07:12:34 -0400
committer  Anthony G. Basile <blueness@gentoo.org>  2012-06-08 07:12:34 -0400
commit     883a1200b83b47545ad4e0c2ec36172ad2766e69 (patch)
tree       888921d2953f0295515486578ef79a4719a9a62a
parent     RSBAC: bump to 3.3.8 plus pax patches (diff)
RSBAC: bump to 3.4.1 plus pax patches
-rw-r--r--  3.4.1/0000_README                              |     12
-rw-r--r--  3.4.1/4500_patch-linux-3.4.1-rsbac-1.4.6.diff  | 137566
-rw-r--r--  3.4.1/4520_pax-linux-3.4-test7.patch           |  72723
3 files changed, 210301 insertions, 0 deletions
diff --git a/3.4.1/0000_README b/3.4.1/0000_README
new file mode 100644
index 0000000..60c2332
--- /dev/null
+++ b/3.4.1/0000_README
@@ -0,0 +1,12 @@
+README
+-----------------------------------------------------------------------------
+
+Individual Patch Descriptions:
+-----------------------------------------------------------------------------
+Patch: 4500_patch-linux-3.4.1-rsbac-1.4.6.diff
+From: Amon Ott <ao@rsbac.org>
+Desc: RSBAC patch from http://www.rsbac.org/
+
+Patch: 4520_pax-linux-3.4-test7.patch
+From: pipacs <pageexec@freemail.hu>
+Desc: http://grsecurity.net/test.php
diff --git a/3.4.1/4500_patch-linux-3.4.1-rsbac-1.4.6.diff b/3.4.1/4500_patch-linux-3.4.1-rsbac-1.4.6.diff
new file mode 100644
index 0000000..b3e6270
--- /dev/null
+++ b/3.4.1/4500_patch-linux-3.4.1-rsbac-1.4.6.diff
@@ -0,0 +1,137566 @@
+diff --git a/Documentation/rsbac/COPYING b/Documentation/rsbac/COPYING
+new file mode 100644
+index 0000000..7e08278
+--- /dev/null
++++ b/Documentation/rsbac/COPYING
+@@ -0,0 +1,19 @@
++Copyright Notice
++----------------
++
++All RSBAC code is copyrighted by me (Amon Ott) unless specified otherwise,
++and published under the restrictions of the GNU General Public Licence
++as to be read in file COPYING in the main directory of the kernel source tree.
++All statements therein apply fully to all RSBAC sources.
++
++RSBAC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 2 of the License, or
++(at your option) any later version.
++
++This software is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++Amon Ott <ao@rsbac.org>
+diff --git a/Documentation/rsbac/Changes b/Documentation/rsbac/Changes
+new file mode 100644
+index 0000000..8217276
+--- /dev/null
++++ b/Documentation/rsbac/Changes
+@@ -0,0 +1,683 @@
++RSBAC Changes
++-------------
++1.4.6:
++ - Port everything to kernel 3.1.5
++ - Show process name and parent when logging PROCESS target accesses
++ - Add RSBAC syscalls to get and set UM password history size per
++ user.
++ - Do not allow setting attributes for FD targets with
++ sys_rsbac_set_attr()
++
++1.4.5:
++ - Fix symlink's stat() call to return the real symlink size
++ Fixes program that would assert on stat->s_size being the same size as when using readlink()
++ - Remove use of task_capability_lock.
++ - Fixes FS object hidding for most cases (still experimental)
++ - Backport fixes and internal features from 1.5:
++ - Add generic list option to use a separate kmem cache / slab per list.
++ - Use that option for several lists, general ACI, RC, ACL, UM.
++ - rkmem.h: define RSBAC_MAX_KMALLOC KMALLOC_MAX_SIZE
++ - Do not create our kmem caches with SLAB_DESTROY_BY_RCU, not needed.
++ - Big cleanup of DAZ code.
++ - Make DAZ path subselection by scanners optional, default is on in 1.4.
++ - Change DAZ module filename allocation to fixed size slabs.
++ - Remove rsbac_get_full_path_length(), which is not used anywhere any more.
++ - Use fixed DAZ filename size PATH_MAX, defined in include/linux/limits.h.
++ - DAZ: allocate memory with _unlocked functions where possible.
++ - Put device items into own slabs.
++ - Fix memory leak in case of failed list rehashing.
++ - Fix notification for setuid() etc: move after commit_creds().
++ - In Unix send and receive, use remote peercred.pid, if local is not available.
++ - Fix NULL pointer deref, if sk_socket is unset.
++ - Move SEND and RECEIVE for Unix sockets from net/socket.c to net/unix/af_inet.c.
++ - This makes interception code cleaner and more reliable, specially for named
++ sockets.
++ - Fix file_p handling in list read functions.
++ - Use mntget() instead of path_get() in rsbac_read_open().
++
++1.4.4:
++ - Port to 2.6.33.2
++ - Fix RC check for CREATE right on new objects.
++ - Backport rsbac_read_open() and rsbac_read_close() fixes from 1.5.
++
++1.4.3:
++ - Depend CONFIG_RSBAC_RC_LEARN on CONFIG_RSBAC_DEBUG.
++ - Show transaction number in learning info messages.
++ - Add transaction names for human use and set names for learn transactions.
++ - Move CREATE checks in rc_main.c into common function rc_check_create() with lea
++ - Fix proc function return types in reg samples.
++ - Remove rsbac_read_lock(), rsbac_write_lock() etc.
++ - Remove rsbac_vmalloc, rsbac_vkmalloc, rsbac_vfree, rsbac_vkfree.
++ - New kernel config RSBAC_SWITCH_BOOT_OFF: 'Allow to switch modules off with kernel parameter'
++ - Join ta_committing and ta_forgetting to ta_committing.
++ - Fix three small memory leaks.
++ - Show program path in CAP learning messages and use INFO level, not DEBUG.
++ - Allow SCD mlock in PM.
++ - Show program path in AUTH learning messages.
++ - When committing or forgetting, lock per list and make other list functions sleep while committing or forgetting.
++ - Optionally put learning mode results into transactions, one per module.
++ - Add global RC learning mode for role rights to types.
++ - Control CAP learning mode switch in CAP module.
++ - Implement CAP learning mode for user and program max_caps.
++ - Move AUTH auth_program_file kernel-only attribute to GEN program_file.
++ - Fix lockup in rehashing, after failing of new hashed struct memory allocation, forgot to spin_unlock. Alloc unlocked instead, better anyway.
++ - Add notes for locking to generic lists.
++ - Store compare results in variable to avoid doing same compare twice.
++ - Only reset curr pointer, if same as removed item (usually the case).
++ - Show rcu_batches_completed() in proc gen_list_counts.
++ - Store nr_hashes in variable, if used after unlocking - might change.
++ - Fix *remove_count(): calling rcu_free on wrong updated pointer, so store.
++ - Do not use count values for checks, can be wrong. Always use head.
++ - Allow rcu rate >= 1 for testing purposes, config help still says 100.
++ - Reorder syscall case struct by frequency of calls (stats from real systems).
++ - Ifdef syscalls for disabled modules instead of returning error in them.
++ - Make RCU rate limit boot and runtime configurable.
++
++1.4.2:
++ - Change generic lists to use RCU instead of rw spinlocks
++ - Show a total of reads and writes in list statistics in gen_lists_count
++ - Disable rsbac attributes store on fuse
++ - Fix RC dev inheritance: Explicitly set minor to 0 before getting major attr
++ - Use Pseudo also for RC ADF debug messages
++ - Use RCU callbacks with a rate limit of 1000/s, use sync, if exceeded, configurable in kernel config
++
++1.4.1:
++ - Support ANY value 255 for NETLINK protocol
++ - Return -EPERM in sys_rsbac_um_check_account, if user does not exist.
++ - Add config option RSBAC_ENFORCE_CLOSE to really deny close, if decided.
++ - Check CLOSE requests in RC.
++ - Add SCD target videomem and kernel attribute pagenr.
++ - Split SCD kmem into kmem and videomem, split hooks in drivers/char/mem.c.
++ - Allow R_MODIFY_SYSTEM_DATA notifications on SCD in adf_check.c.
++ - ext4 secure delete support
++1.4.0:
++ - Added VUM (Virtual User Management) support
++ - OTP support for UM
++ - Converted the common code to 2.6 only. From now on changes will be 2.6 only as well.
++1.3.5:
++ - Check crypto return codes (2.6) and fixed UM password hashing.
++ - Fix compilation issues for "disable writing to disk". See bug #98.
++ - Safety measures for inheritance in case of null pointers.
++ - Disable debug message "rsbac_get_parent(): oops - d_parent == dentry_p".
++ - Increase string lengths in user and group items significantly.
++ - Add RSBAC memory slab 384 for new user item sizes.
++ - Do not try to write lists, if device is not writable.
++ - Do not sleep in rsbac_get_vfsmount(), can be called while atomic.
++ - Do not write attributes on Oracle Cluster FS 2 (OCFS2).
++ - Complete hook review.
++1.3.4:
++ - No changes :)
++1.3.3:
++ - Change FD cache counters to 64 Bit to avoid wrapping.
++ - Make FD Cache xstats output look nicer.
++ - Make an adf_request permission check when modifying capabilities if the new set >> the old one.
++ - Copy auth_last_auth on CLONE, gets reset on EXECUTE.
++ - Provide pid and process name in some UM debug output.
++ - 2.6 WARNING: sysrq key 'w' is GONE! no more wake up trigger possible
++1.3.2:
++ - mark FS_OBJ_HIDE as EXPERIMENTAL and depends on it
++ - clean compilation warnings, data types and such.
++ - removed double "ready" message in rsbac_do_init()
++ - disable partner process check for unix socks by default.
++ - Show fd cache hits/misses ratio in xstats. Really inline rsbac_is_initialized().
++ - Change fd cache descriptor to get rid of separate compare function in 2.4 kernels.
++ - Add FD inherited attribute value cache. Hash device list. Allow per-list max_items_per_hash.
++ - Change return code in AUTH syscalls to INVALIDATTR, if group or eff/fs support is not compiled in.
++ - port from ex 1.4 trunk: do not intercept PTRACE_DETACH request
++ - rewrite of error handling to be more logical in rsbac_handle_filldir().
++ - Also take partner pid from other on Unix socket stream connects.
++ - Accept syscalls from tools with all versions 1.3.x.
++ - Take partner process on UNIXSOCK CONNECT from other->sk_peercred.
++ - Try to get partner process for UNIXSOCK RECEIVE from other sources, if peercred is not filled.
++ - New error code INVALIDLIST, if list handle is invalid.
++ - New jail flags for syslog jail.
++ - Extra check before reiserfs secure_delete call.
++ - Fix Dazuko device registration in 2.6. Return INVALIDPOINTER for invalid pointers in some syscalls.
++ - lvm/md automount fix
++ - Fix oops on loop umounts: device was auto-mounted, if there were dirty lists. Never auto-mount loop devices.
++1.3.1:
++ - Add xstats counter for get_parent calls.
++ - Fix sort order of network templates.
++ - Add missing DAZ request in vector. Add role number in RC syscall denied log message.
++ - Create bitmasks for each module of which requests it is interested in and only call request_xy and set_attr_xy, if necessary.
++ - small performance tuning: removed usage of rsbac_list_compare_u32 (always use memcmp which is asm-tuned per arch)
++ - Reduce stack usage in list, ACL and RC init.
++ - Optimize list hash functions with bit masks instead of % operation.
++ - make sure that rsbac_list_max_hashes and rsbac_list_lol_max_hashes are always a power of 2 and warn the user at configuration time (127 will round to 64).
++
++1.3.0:
++ - Restarted 1.3 tree from the 1.2.7 release
++ - System call rsbac_version to return numeric version without checking the caller’s version provided to syscall.
++ - JAIL: allow_parent_ipc to allow IPC into parent jail. Useful with Apache mod_jail and others. Needs another process attribute jail_parent
++ - JAIL: add a flag to allow suid/sgid files and dirs.
++ - Optionally check CHANGE_OWNER for PROCESS targets also as CHANGE_OWNER on the new USER. This allows fine grained control also in RC and ACL models.
++ - Change network templates to hold up to 25 ip networks and up to 10 port ranges.
++ - Automatic online resizing of per-list hash table. As list identifiers are pointers to list headers, which must not change, the arrays of list heads are allocated separately and accessed through a pointer.
++ - Change named UNIX sockets to be new filesystem target type T_UNIXSOCK and unnamed to be new IPC type anonunix (like pipes)
++ - RC role def_unixsock_create_type, which overrides the def_(ind_)fd_create_type. Default value use_def_fd.
++ - Change aci, acl and auth devices lists to use RCU on 2.6 kernels
++ - Dazuko udev support
++ - UM password history with configurable length to avoid password reuse.
++ - Update HTML doc in Documentation/rsbac, or point all docs to the website.
++ - Hide dir entries a process has no SEARCH right for
++ - Limit number of items per single list to 50000, so real limit is at 50000 * nr_hashes.
++ - New request type AUTHENTICATE against USER targets. No authentication against RSBAC UM without this right in RC and ACL.
++ - Complete hook review with several small fixes.
++ - More detailed JAIL decision logging for IPC and UNIXSOCK targets with rsbac_debug_adf_jail.
++
++1.2.7:
++ - Use new PaX flags location at current->mm->pax_flags.
++ - Removed remaining non-RSBAC code
++1.2.6:
++ - DAZ Renaming of files from non-scanned to scanned directory
++ now works correctly (does not cache results from non scanned
++ as CLEAN - and/but keep INFECTED status if set when moving file
++ from scanned to non-scanned)
++ - DAZ unscanned files decision is now DO_NOT_CARE instead of
++ GRANTED
++ - Full paths returned by RSBAC do not display double
++ (or more) / with double (or more) mounts anymore.
++ ex: /home//bob => /home/bob
++ This allows DAZ path based scanning to function normally.
++ - Fix setting of RC IPC type with def_ipc_create_type.
++ - Added ptrace hook for m32r architecture.
++ - New kthread notification code.
++ - Fix xstats to include GROUP targets.
++ - Mark lists dirty again, if saving failed.
++ - Fix FF to allow file READ (but not READ_OPEN) even with execute_only.
++ - Stop making SEND and RECEIVE requests for STREAM sockets, if
++ CONFIG_RSBAC_NET_OBJ_RW is not set.
++ - Notify that shm is destroyed only when it really is (thanks rtp).
++ - Minor compile fixes
++
++1.2.5: - AUTH config switch to globally allow setuid to user (real, eff, fs),
++ who started the program. Boot time option and proc setting to enable
++ per uid type.
++ - Show missing Linux caps in JAIL like in CAP.
++ - Change device attribute backup to use a list of attribute objects
++ instead of traversing /dev and possibly missing some.
++ - Device attribute inheritance: Use values at type:major as default for
++ type:major:minor.
++ - Add a generic request directly in sys_ioctl with new request type
++ IOCTL on DEV and NETOBJ target.
++ - Finish ioctl extra interception with GET_STATUS_DATA and
++ MODIFY_SYSTEM_DATA, e.g. for SCSI.
++ - Store remote IP when process accepted its first INET connection as
++ process attribute and pass on to children. Log remote IP in request
++ log.
++ - Symlink redirection based on remote IP.
++ - Optional UM password limits: Min length, non-alpha char required
++ - Fix EINVALIDPOINTER when changing UM password with passwd via
++ pam_rsbac.
++ - Complete system call interception review with additional hooks where
++ necessary. See Interceptions log for details.
++ - Change USER attribute backup to list of known items.
++ - Fix dereference bug related to rsbac_get_parent: set_attr call in
++ do_exec sometimes used file struct after freeing.
++ - Fix 2.6.11 random file not found errors, caused by symlink redirection
++ and ext2/ext3 kernel fs layer violation.
++ - Add CREATE and DELETE notifications in um syscalls.
++ - Make RC apply default_{user|group}_create_type on {USER|GROUP} CREATE.
++ - Configure module switching per module. Only allow switching stateful
++ models on after switching off with extra kernel config switch.
++ - Review all decision modules, whether they decide on all relevant
++ request to target combinations and whether they protect all relevant
++ attributes.
++ - Full review of all interceptions to make them lock-safe
++ - Fix initrd problems showing up with the Adamantix kernel
++
++1.2.4: - Per dir FD type RC default_fd_create_type
++ - Full kernel space user management as a replacement for /etc/passwd,
++ /etc/shadow, /etc/group
++ - Add GROUP target type
++ - Change RC copy_role to be allowed with role in admin_roles
++ - Log full program path, get dentry from mappings for this
++ - Make RSBAC remote logging target configurable at boot or runtime.
++ Suppress remote logging, if address or port is 0.
++ - audit_uid: Default value "unset". Set at CHANGE_OWNER away from a uid
++ != 0, kept, inherited to child processes and logged. Allows to log
++ actions of users who did an su etc. Configurable additional uid per
++ program which works like uid 0, e.g. for SSHD privilege separation
++ (new attr auid_exempt).
++ - AUTH protection for Linux group IDs.
++ - New kernel flag: rsbac_softmode_once: Sets softmode, but denies
++ setting it again during runtime. For those systems that for some
++ reason must start in softmode, disable it and do not want to have it
++ set again later.
++ - New kernel flag: rsbac_softmode_never: Disallows setting softmode
++ during this runtime.
++ - Keep last UM authenticated users in a per-process attribute
++ auth_last_auth. Allow processes with auth_may_set_cap flag to set
++ last_auth.
++ - New kernel flag: rsbac_freeze: Disallows all modifying administration
++ in RSBAC syscalls. Added new switch target FREEZE.
++ - Make PaX default flags configurable.
++ - RC check access to UNIX socket partner process
++ - Transaction support for administration: begin, add a set of desired
++ changes, commit atomically or forget.
++ - Add RC copy_type, to be allowed with ADMIN right.
++ - User Management "exclusive mode": Only users and groups known to
++ RSBAC UM can be reached. Kernel parameter and /proc setting to
++ temporarily disable the restrictions.
++ - Randomize UM password salt better
++ - Optionally randomize transaction numbers
++ - Reduce memory consumption of rsbac_do_init.
++ - Further reduce RSBAC's stack usage to prepare for 4 KB kernel stack
++ size.
++ - Password protection for transaction operations refresh, forget, commit
++ - Add hooks with MODIFY_SYSTEM_DATA on SCD network to queueing
++ administration
++ - Warn explicitly, if CAP max_caps do not get applied because of
++ running in softmode.
++ - Update Dazuko interface to 2.0.5
++ - Update defconfig in all archs
++ - ACLs for Users and Linux groups
++ - Extend AUTH auth_may_setuid flag with values last_auth_only and
++ last_auth_and_gid to allow last authenticated uid to be reached.
++ The second allows all group ids, too, because you cannot auth for
++ them. No longer add process cap at UM authentication, but rather
++ check at CHANGE_OWNER with last_auth process attribute.
++ - Fix severe Oopses when forgetting transactions with lists of lists.
++ - Optionally log all denied calls to capable() function with
++ CONFIG_RSBAC_CAP_LOG_MISSING
++
++1.2.3: - Port to linux kernel 2.6.0-test with LSM
++ - New JAIL flag allow_clock for ntpd encapsulation
++ - Removed LSM support (see http://rsbac.org/documentation/why_rsbac_does_not_use_lsm).
++ - Global AUTH learning mode
++ - AUTH cap inheritance from parent dir (single step only, not
++ accumulated)
++ - RC pretty-print config output
++ - Remove 2.2 kernel support.
++ - Improve AUTH learning mode to use special value for same user
++ - Trigger AUTH learning mode per program
++ - Show type, name and mode of new object in T_DIR/CREATE request log.
++ - Statix PaX support decision module
++ - Faked (root) user ID in ''getuid()'' to make stupid programs with uid
++ checks happy.
++ - Full log separation between syslog and RSBAC log, also for debug
++ messages (except rsbac_debug_aef). RSBAC now really shuts up, if
++ rsbac_nosyslog is set, and sends everything to RSBAC own log only.
++ - ACL learning mode for user rights to filesystem objects, parameter
++ rsbac_acl_learn
++ - New RC syscall to get current role
++ - mac_trusted_for_user with list instead of single user.
++ - Block fchdir outside the jail, if some stupid program opened a dir,
++ called rsbac_jail() and then called fchdir(). Done by simply closing
++ all open dirs after rsbac_jail() called chroot.
++ - Fixed some JAIL bugs found, all relevant chroot items from regression
++ suite solved. Not urgent enough and too many changes to make a 1.2.2
++ bugfix.
++ - Added JAIL Linux Capability restriction
++ - Dazuko integration as fixed module, as replacement for MS module
++ - Dazuko result caching with generic lists (as in old MS module)
++ - AUTH special value for eff and fs uid (thanks to Arnout Engelen)
++ - New optional rsbac_jail parameter max_caps, which limits the Linux
++ capabilities of all processes in the jail
++ - Optionally hide process ids without GET_STATUS_DATA in /proc/
++ dir listing
++ - /proc/rsbac-info/active to get current version and list of active
++ modules: One line each for version, mode: Secure/Softmode/Maintenance,
++ softmode: available/unavailable and one line per module: on/softmode/off
++ - Solve the new "kernel complains about vmalloc with lock" ugliness:
++ removed all vmalloc use in 2.6 kernels, too many workarounds needed.
++ - Protect sysfs objects in 2.6 kernels
++ - Added three real life example REG modules to rsbac/adf/reg,
++ contributed by Michal Purzynski
++ - Changed DEV list descriptor to be compatible between 2.4 and 2.6
++ kernels
++ - Added RC types and compatibility settings for USER targets
++ - Allow to set a different RC boot role than that of user root
++ - Add RC process type for kernel threads
++
++1.2.2: - Added ms_need_scan attribute for selective scanning
++ - MS module support for F-Protd as scanning engine
++ - ms_need_scan FD attribute for selective scanning
++ - JAIL flag allow_inet_localhost to additionally allow to/from
++ local/remote IP 127.0.0.1
++ - RSBAC syscall version numbers
++ - New RES module with minimum and maximum resource settings for
++ users and programs
++ - Moved AUTH module to generic lists with ttl
++ - Added new requests CHANGE_DAC_(EFF|FS)_OWNER on PROCESS targets
++ for seteuid and setfsuid (configurable)
++ - Added caps and checks for effective and fs owner to AUTH module
++ (optional)
++ - Changed behaviour on setuid etc.: Notification is always sent, even
++ if the uid was set to the same value. This allows for restricted RC
++ initial roles with correct role after setuid to root.
++ - New Process Hiding feature in CAP module
++ - Delayed init for initial ramdisks: delay RSBAC init until the first
++ real device mount.
++ - rsbac_init() syscall to trigger init by hand, if not yet
++ initialized - can be used with e.g. rsbac_delayed_root=99:99, which
++ will never trigger init automatically.
++ - MS module support for clamd as scanning engine.
++ - Almost complete reimplementation of the MAC model with many new
++ features.
++ - New system role 'auditor' for most models, which may read and flush
++ RSBAC own log.
++
++1.2.1: - Added support for all other architectures.
++ - Cleaned up rsbac syscall filesystem name lookup and target type
++ checks.
++ - New module JAIL: preconfigured process encapsulation (see kernel
++ config help).
++
++1.2.0: - Moved most lists to generic lists, splitting up between modules on
++ the way (GEN = general for all modules).
++ - DS for each module only included, if module is compiled in.
++ - New Linux Capabilities (CAP) module
++ - Split system_role into mac_role, fc_role, etc. Please do not use
++ old A_system_role attribute any longer.
++ - Changed rsbac_get/set_attr interface to include target module
++ - Added module guessing from attribute into sys_rsbac_get/set_attr,
++ if module is not given (value SW_NONE).
++ - Added user and RC role based symlink redirection
++ - Added network and firewall config protection as SCD network and
++ firewall targets
++ - Added NETDEV, NETTEMP and NETOBJ targets for network access control.
++ - Added network templates for default NETOBJ attribute values
++ - Renamed /rsbac dir to /rsbac.dat to avoid name conflicts.
++ - RC model with unlimited roles and types
++ - Selective dir tree disabling of Linux DAC
++ - Generic list ordering (needed for templates and optimization)
++ - List optimization
++ - Generic time-to-live support in generic lists (new on-disk version)
++ - Support time-to-live for ACL group members and ACL entries
++ - copy_net_temp
++ - Individual module soft mode
++ - Support time-to-live for RC entries
++ - Backport to 2.2.20
++
++1.1.2: - Own RSBAC memory allocation functions. Own RSBAC mem slabs in 2.4
++ kernels.
++ - Generic lists - simply register your list item sizes with filename
++ and persist flag, and a persistent list will be kept for you.
++ - Generic lists of lists, two level version.
++ - Moved pm_data_structures.c to new lists with proc backup files
++ Attention: There is no auto-update from older versions possible!
++ - proc backup files for RC and ACL are now optional
++ - New proc subdir pm, replaces old write_list call
++ - rsbac_pm write_list call removed
++ - New FD aci version with new rc_initial_role and 16 bit ff_flags
++ - New FF flag append_only, which limits all write accesses to
++ APPEND_OPEN and WRITE
++ - Fix for rename hole: rename could replace and thus delete an
++ existing file without DELETE check. Also performs secure_delete, if
++ necessary
++ - New rsbac_mount hook in change_root for initial ramdisk
++ - Fixed missing Linux check in bad_signal
++ - Added optional switch rsbac_dac_disable to disable Linux filesystem
++ access control
++ - Added count support for multiple mounts
++ - Added optional switch rsbac_nosyslog to temporarily disable logging
++ to syslog
++ - Added config option for DEBUG code
++
++1.1.1: - New target type FIFO, with a lot of cleanup, e.g. IPC type fifo
++ removed
++ - MAC module reworked, including MAC-Light option
++ - Several bugfixes
++ - Port to 2.4.0, 2.4.1 and 2.4.2
++ - New Makefiles with lists for 2.4 and without for 2.2 kernels
++ (Thanks to Edward Brocklesby for samples)
++ - init process default ACI now partly depends on root's ACI
++ - Optional interception of sys_read and sys_write.
++ Attention: you might have to add READ and WRITE rights to files,
++ fifos, dirs and sockets first, if upgrading from an older version
++ - REG overhaul. Now you can register syscall functions, everything is
++ kept in unlimited lists instead of arrays and registering is
++ versioned to allow for binary module shipping with REG version
++ checks.
++ - Inheritance is now fixed, except for MAC model
++ - MAC: optional inheritance, new option Smart Inheritance that tries
++ to avoid new attribute objects (see config help)
++ - New soft mode option: all decisions and logging are performed, but
++ DO_NOT_CARE is returned to enforcement. Off by default. See config
++ help for details.
++ - Optional initialization in extra rsbac_initd thread.
++
++1.1.0: - Port to 2.4.0-test11
++ - Interception of sys_mmap and sys_mprotect added. Now execution of
++ library code requires EXECUTE privilege on the library file, and
++ setting non-mmapped memory to EXEC mode requires EXECUTE on target
++ NONE.
++ - MAC Light option by Stanislav Ievlev added. See kernel config help or
++ modules.htm.
++
++1.0.9c:
++ - Port to 2.4.0-test{[789]|10}, this means major changes to the lookup and
++ inheritance code - of course #ifdef'd
++ - Change string declarations to kmalloc. On the way moved
++ MAX_PATH_LEN restriction from 1999 to max_kmalloc - 256
++ (>127K).
++ - Renamed several PM xy.class to xy.object_class for C++
++ compatibility
++ - Added SCD type ST_kmem
++ - Changed rc_force_role default to rc_role_inherit_parent,
++ terminated at root dir with old default rc_role_inherit_mixed.
++ This makes it much easier to keep a dir of force-roled binaries.
++1.0.9b:
++ - Port to 2.3.42 - 2.3.99-pre3
++ - Port to 2.2.14 - 2.2.16
++ - 32 Bit Uid/Gid with new attribute versions
++ - User and program based logging
++ - AUTH capability ranges
++ - Made write to MSDOS fs a config option, so use it at your own risk
++ (see config help)
++ - MAC levels 0-252
++ - Added config option for ioport access (X support)
++
++1.0.9a:
++ - Added group management to ACL module.
++ - Removed CONFIG_RSBAC_SYNC option.
++ - Added module hints to logging
++ - Added RC separation of duty (see models.htm)
++ - Added RC force role inherit_up_mixed and made it default setting
++
++1.0.9: - Added registration of additional decision modules (REG)
++ - Wrote decision module examples (see README-reg and reg_samples dir)
++ - Port to 2.2.8, 2.2.9, 2.2.10, 2.2.11, 2.2.12 (pre versions)
++ - Heavily changed RC model: Now it has a distinguished role-to-type
++ compatibility setting for each request type, instead of one setting
++ for all request types. This allows for much finer grained access
++ control.
++ Unfortunately there was no way to update existing role settings,
++ so those have to be reentered by hand. Still, the types entries are
++ kept.
++ - Set all MSDOS based file systems to read-only, because inode
++ numbers are likely to change between boots.
++ - Added Access Control List module. ACLs are kept on FILE, DIR,
++ DEV, IPC, SCD and PROCESS targets (IPC and PROCESS have only
++ one default ACL each). Each entry contains subject type (user,
++ rc_role, group), subject id and the rights this subject has. Also,
++ rights are inherited from parents and from a target specific default
++ ACL.
++ See html/models.htm for details.
++ - Added optional full path logging.
++
++1.0.8a:
++ - Port to 2.2.7
++ - File Flag no_execute added to prevent execution, e.g. of user
++ binaries under /home tree. Can be circumvented by scripts via
++ 'interpreter scriptname'.
++
++1.0.8: - Port to 2.2.1
++ - Added /proc/rsbac-info/backup to provide an easier means of backup
++ for not device dependent stuff. To be extended.
++ - Added new Role Compatibility (RC) module.
++ - New on-disk binary layout, auto update from all versioned data
++ (1.0.5 upwards).
++ - AUTH module added to support proper authentication by enforcing
++ externally granted CHANGE_OWNER capabilities.
++ - Save to disk inconsistency in PM sets fixed.
++ - MAC categories added, but limited to a fixed number of 64. Apart
++ from that, the MAC module categories are as proposed in the
++ Bell-LaPadula model.
++ - Port to 2.2.2
++ - Port to 2.2.3 with minor changes
++ - Port to 2.2.4
++ - Port to 2.2.5
++
++1.0.7a:
++ - Added alpha support (with Shaun Savage). Has different storage sizes,
++ so default useraci does not work and you need a maint kernel.
++ - Added new error detection features for file/dir entries.
++ - Increasing NR_FD_LISTS is now handled differently for error
++ detection reasons. See README-nrlists.
++ - Marked init functions as __init - though saving a few KB doesn't
++ make such a big difference while using RSBAC... ;)
++ - Fixed memory leaks in write_*_list while introducing vmalloc for
++ large lists. The number of file/dir lists is now only a matter of
++ performance and available memory.
++ - Added two flags to File Flags
++ - Port to 2.2.0-pre6
++ - Added secure deletion/truncation, needs a config switch to be
++ enabled. If on, all files marked with (inheritable) FF-flag
++ secure_delete and all files marked as PM-personal data are zeroed on
++ deletion and truncation - if the regarding modules are switched on.
++
++1.0.7: - Port to 2.1.131
++ - Added more fs types to non-writable: smbfs, ncpfs, codafs - so
++ there should be no writing on network mounts (unfortunately there
++ is no afs SUPER_MAGIC)
++ - Added configuration option NO_DECISION_ON_NETMOUNTS, which
++ additionally turns off all decisions for all these fs, so that
++ they are completely ignored
++ - Added attribute inheritance: Some attributes for files and dirs
++ have a special value 'inherit'. If this is set, the value of the
++ parent dir's attribute is used instead. This mechanism ends on
++ fs boundaries - each fs root dir gets old style standard values,
++ if attribute is set to 'inherit'.
++ Currently security_level, object_category and data_type are
++ inheritable.
++ - Added configuration option DEF_INHERIT. If set, default values for
++ inheritable attributes are *inherit, rather than the old default.
++ This option setting should not differ between different RSBAC
++ kernels to avoid deeper confusion for administrators and
++ rsbac_check().
++ - To support inheritance, added parameter inherit to both get_attr
++ system calls. If on, the effective (possibly inherited) value is
++ returned, if off, the real value is returned.
++ - Corrected a security hole in receiving from / sending via datagram
++ sockets (thanks to Simone). Now a read/append open and a close
++ request are done for every datagram (if net support is configured,
++ as usual).
++ Attention: Programs that open an UDP socket as one user (e.g. root)
++ and then setuid to another (e.g. bin) may not be able
++ to access that socket, if the new user has insufficient
++ rights! (see config help)
++ Checking of net access can as before be turned on/off via
++ CONFIG_RSBAC_NET.
++ - Worked on rsbac_check(). Is more stable now, but should only be
++ called under moderate load at most.
++
++1.0.6: - Moved to 2.1.128
++ - Cleaned up old includes in syscalls.c
++ - Added RSBAC own logging in /proc/rsbac-info/rmsg, to be accessed
++ by modified klogd or sys_rsbac_log, restricted by most modules to
++ security officers.
++ Additionally, logging to standard syslog can be turned off to hide
++ security relevant log from all but those with explicit access.
++ - Added module File Flags with attribute ff_flags for FILE/DIR
++ targets
++ - Added auto-update of last version attributes (only FD changed
++ though)
++ - Changed ms_trusted from boolean to tristate: non-trusted, read,
++ full
++ - Fixed rm -r hang bug
++ - Added consistency check for RSBAC items, which can remove items for
++ deleted inodes (ext2 only) and entries containing only default
++ values (FILE/DIR targets only). It also recalculates item counts.
++ - Added sys_rsbac_check to trigger this check.
++
++1.0.5:
++ - Rewrote most of attribute saving to disk. Now disk writing is never
++ done with a spinlock held, increasing stability significantly
++ (is this a taboo? if yes, where is it documented?)
++ - Changed write-to-disk behaviour: The old immediate write is no
++ longer default, but optional (CONFIG_RSBAC_SYNC_WRITE). Instead,
++ sys_rsbac_write can be used from user space or a kernel daemon can
++ be activated to write changes automatically every n seconds
++ (CONFIG_RSBAC_AUTO_WRITE)
++ - Added kernel param rsbac_debug_auto for the daemon - gives a good
++ overview of attribute change rate
++ - Added proc interface for statistics and many RSBAC settings
++ - Added rsbac_adf_request calls MODIFY_SYSTEM_DATA to sysctl.c
++ - Wrote man pages for all RSBAC syscalls (in Documentation/rsbac/man)
++ - Added version information and check for all file/dir/dev aci and
++ for log_levels
++ - Added some more scan strings to Malware Scan module, had to change
++ string representation to a more general way
++
++1.0.4:
++ - Port via 2.1.115 and 2.1.124 to 2.1.125
++ - IPC targets: changed ids for sockets from pid/fd combination to
++ pointer to sock structure, including (many) changes in the
++ handling.
++ - Added socket level scanning (tcp and udp) to module Malware Scan.
++ This feature can stop malware while still being transferred to
++ your system. Added new attributes for IPC, process and file/dir
++ targets to manage socket scan.
++ - Reordered configuration options
++ - Added CONFIG_RSBAC_NO_WRITE to totally disable writing to disk for
++ testing purposes and kernel parameter rsbac_debug_no_write to
++ temporarily disable disk writing
++ - Added CONFIG_RSBAC_*_ROLE_PROTection for all role-dependent
++ modules: Now change-owner (setuid etc.) can be restricted between
++ users with special roles - see configuration help for details
++ - Some more bugfixes, mostly to decision modules
++
++1.0.4-pre2:
++ - Port to 2.1.111
++ - Attribute mac_trusted_for_user added to FILE aci. Value meanings:
++ RSBAC_NO_USER (-3): program is not MAC-trusted
++ RSBAC_ALL_USERS (-4): program is MAC-trusted for all users
++ other user-ID: program is MAC-trusted, if invoked by this user
++ Especially the last is useful for daemon programs that can be
++ started by all users.
++ Init process is checked, too, but is MAC-trusted by default.
++ - Syscalls rsbac_mac_set/get_max_seclevel added. Now a process can
++ reduce its own maximum security level. Useful for wrapper daemons
++ like inetd after forking and before invoking another program.
++ - Object dependent logging #ifdef'd with configuration option.
++ - Configuration option 'Maintenance Kernel' added. Disables all other
++ options.
++ - removed CONFIG_RSBAC_ADMIN and rsbac_admin() stuff - now we have
++ capabilities, and there is no suser() anymore to extend
++ - changed locking for Data Structures component from semaphores to
++ read/write spinlocks
++ - added (U)MOUNT requests for target DEV to sys_(u)mount. Now both
++ target dir and device are checked for access (MAC: dir: read-write,
++ dev: depending on mount mode read or read-write). Note: After
++ mount, all file/dir accesses on this device are checked as usual.
++ - Moved checks for valid request/target combinations from MAC module
++ to extra functions in rsbac/adf/check.c.
++
++1.0.3: - Target DEV added. Now devices can get their own attributes based
++ on major/minor numbers. Attributes based on their file representations
++ in /dev are no longer used for open, but still for all other calls.
++ MAC decisions on open requests for devices must be explicitly enabled
++ by mac_check to keep the system bootable.
++ Short rule: DEV attributes apply only if the contents are accessed.
++ - Attribute object_type removed, was not used anyway and maintained in
++ linux structures.
++ - Attributes log_array_low and log_array_high for FILE/DIR/DEV added,
++ providing individual request-based logging for those objects.
++ - PM module: if DEV is personal_data, necessary access is checked
++ for new class DEV (can be changed to custom class)
++ - A couple of minor bugfixes done
++
++1.0.2A: - Port to 2.0.34
++ - A few #ifdef CONFIG_RSBAC_USE_RSBAC_OWNER were missing, causing
++ error messages "rsbac_set/get_attr returned error" -> added
++
++
++13/Jun/2001
++Amon Ott <ao@rsbac.org>
+diff --git a/Documentation/rsbac/Credits b/Documentation/rsbac/Credits
+new file mode 100644
+index 0000000..7f5921a
+--- /dev/null
++++ b/Documentation/rsbac/Credits
+@@ -0,0 +1,18 @@
++Credits
++-------
++
++You can find information about the RSBAC team on: <http://rsbac.org/contact>
++
++Credits go to the many people who sent patches, reported bugs, suggested
++fixes and contributed lots of good ideas. We cannot recall them all, so only
++a few are listed here; if you are missing from this list, let us know :)
++
++ - 'Our Russian RSBAC Team',
++ - AltLinux with their AltLinux Castle distribution.
++ - Adamantix
++ - rtp, addobie, and the people from #rsbac[-dev]
++
++Also, there are several people doing lots of helpful promotion, too many to
++name here. Their contribution cannot be overstated.
++
++The RSBAC team.
+diff --git a/Documentation/rsbac/INSTALL b/Documentation/rsbac/INSTALL
+new file mode 100644
+index 0000000..6c6f9fe
+--- /dev/null
++++ b/Documentation/rsbac/INSTALL
+@@ -0,0 +1,18 @@
++RSBAC INSTALL
++=============
++
++Installation from a RSBAC tarball
++---------------------------------
++
++You probably already untar'ed the rsbac-x.y.z.tar.gz archive. You can safely
++untar this archive into your kernel main directory, or copy all files there
++- no file should be overwritten.
++
++To get RSBAC working, you must then patch the kernel using an RSBAC kernel
++patch patch-x.y.z-va.b.c.bz2 that matches your kernel version. In the kernel
++main dir, call:
++bzip2 -dc patch-x.y.z-va.b.c.bz2 | patch -p1 &>perr
++After patching, everything should be in place and a log should be in perr.
++
++If your kernel version is not supported, check at
++<http://www.rsbac.org/download> for newer patch files.
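
A minimal shell sketch of the INSTALL steps above, assuming a hypothetical
kernel 3.4.1 tree and hypothetical archive/patch file names (substitute the
exact names of the files you actually downloaded):

    # unpack the RSBAC archive into the kernel source tree (no kernel files are overwritten)
    cd /usr/src/linux-3.4.1
    tar xzf /path/to/rsbac-1.4.6.tar.gz

    # apply the matching RSBAC kernel patch, keeping the patch output in 'perr'
    bzip2 -dc /path/to/patch-3.4.1-v1.4.6.bz2 | patch -p1 &>perr

    # a clean run leaves no rejected hunks; check the log to be sure
    grep -i 'rej\|fail' perr
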
+diff --git a/Documentation/rsbac/Interceptions-2.4 b/Documentation/rsbac/Interceptions-2.4
+new file mode 100644
+index 0000000..e279865
+--- /dev/null
++++ b/Documentation/rsbac/Interceptions-2.4
+@@ -0,0 +1,97 @@
++Interceptions for access decisions (AEF) in RSBAC 1.2.5 for 2.4.30:
++(ordered as in asm-i386/unistd.h)
++
++Not necessary:
++sys_waitpid, sys_time, sys_lseek, sys_getpid, sys_alarm, sys_pause,
++sys_sync, sys_getuid, sys_alarm, sys_ftime, sys_dup, sys_times, sys_brk,
++sys_getgid, sys_signal, sys_geteuid, sys_getegid, sys_olduname, sys_umask,
++sys_ustat, sys_dup2, sys_getppid, sys_getpgrp, sys_setsid, sys_sigaction,
++sys_sgetmask, sys_ssetmask, sys_sigsuspend, sys_sigpending, sys_getrlimit,
++sys_getrusage, sys_gettimeofday, sys_getgroups, sys_select, sys_munmap,
++sys_getpriority, sys_setitimer, sys_getitimer, sys_uname, sys_vhangup,
++sys_vm86old, sys_wait4, sys_sysinfo, sys_fsync, sys_sigreturn, sys_newuname,
++sys_modify_ldt, sys_sigprocmask, sys_get_kernel_syms(? - see discussion),
++sys_sysfs, sys_personality, sys__llseek, sys_newselect, sys_flock,
++sys_msync, sys_fdatasync, sys_mlock, sys_munlock, sys_mlockall,
++sys_munlockall, sys_sched_getparam, sys_sched_getscheduler, sys_sched_yield,
++sys_sched_get_priority_max, sys_sched_get_priority_min,
++sys_sched_rr_get_interval, sys_nanosleep, sys_mremap, sys_getresuid,
++sys_vm86, sys_poll, sys_getresgid, sys_prctl, sys_rt_sigreturn,
++sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending,
++sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_getcwd,
++sys_sigaltstack, sys_ugetrlimit, sys_getuid32, sys_getgid32, sys_geteuid32,
++sys_getegid32, sys_getgroups32, sys_getresuid32, sys_getresgid32,
++sys_mincore, sys_madvise, sys_gettid, sys_readahead, sys_setxattr,
++sys_munlockl, sys_munlockall
++
++Not implemented in this kernel:
++sys_ftime, sys_break, sys_stty, sys_gtty, sys_prof, sys_lock, sys_mpx,
++sys_ulimit, sys_profil, sys_idle, sys_afs_syscall, sys_getpmsg,
++sys_putpmsg, sys_security (used by RSBAC where available)
++
++Intercepted:
++sys_exit, sys_fork, sys_read, sys_write, sys_open, sys_close, sys_creat,
++sys_link, sys_unlink, sys_execve, sys_chdir, sys_mknod, sys_chmod,
++sys_lchmod, sys_oldstat, sys_mount, sys_umount (= oldumount), sys_setuid,
++sys_stime, sys_ptrace, sys_oldfstat, sys_utime, sys_access, sys_nice (if
++priority is raised), sys_rename, sys_mkdir, sys_rmdir, sys_setgid, sys_acct
++(APPEND_OPEN on file), sys_umount2 (= umount), sys_ioctl (socket_ioctl,
++tty_ioctl (tiocsti)), sys_fcntl (except locking - see discussion),
++sys_setpgid, sys_chroot, sys_setreuid, sys_setregid, sys_sethostname,
++sys_setrlimit, sys_settimeofday, sys_setgroups, sys_symlink, sys_oldlstat,
++sys_readlink, sys_uselib, sys_swapon, sys_reboot, sys_readdir,
++sys_mmap (for MAP_EXEC), sys_truncate, sys_ftruncate, sys_fchmod,
++sys_fchown, sys_setpriority, sys_statfs, sys_fstatfs, sys_ioperm,
++sys_socketcall, sys_stat, sys_lstat, sys_fstat, sys_iopl, sys_swapoff,
++sys_ipc, sys_clone, sys_setdomainname, sys_adjtimex, sys_mprotect,
++sys_create_module, sys_init_module, sys_delete_module,
++sys_getpgid, sys_fchdir, sys_setfsuid, sys_setfsgid, sys_vfsreaddir,
++sys_readv, sys_writev, sys_getsid, sys_sysctl, sys_setresuid,
++sys_setresgid, sys_pread, sys_pwrite, sys_chown, sys_capget, sys_capset,
++sys_vfork, sys_mmap2, sys_truncate64, sys_ftruncate64, sys_stat64,
++sys_lstat64, sys_fstat64, sys_lchown32, sys_setreuid32, sys_setregid32,
++sys_setgroups32, sys_fchown, sys_setresuid32, sys_setresgid32,
++sys_chown32, sys_setuid32, sys_setgid32, sys_setfsuid32, sys_setfsgid32,
++sys_pivot_root, sys_getdents64, sys_fcntl64, sys_tkill, sys_sendfile64,
++
++
++Found missing in 1.2.4-bf5 and intercepted in 1.2.5-pre:
++ptrace (on sparc and sparc64), ioctl: BRCTL* (bridging control),
++sys_reboot: add reboot_cmd parameter to see command in log,
++sys_mount: lookback mounts in do_loopback(),
++sys_mount: move mounts in do_move_mounts(),
++sys_socketcall (sys_socketpair, sys_setsockopt, sys_getsockopt,
++sys_getsockname, sys_getpeername),
++sys_getxattr, sys_lgetxattr, sys_fgetxattr, sys_setxattr, sys_lsetxattr,
++sys_fsetxattr, sys_listxattr, sys_llistxattr, sys_flistxattr,
++sys_removexattr, sys_lremovexattr, sys_fremovexattr,
++sys_quotactl (new SCD quota), sys_bdflush, sys_sched_setparam,
++sys_sched_setscheduler, sys_query_module, sys_nfsservctl, sys_sendfile,
++NETLINK sockets (additional IP addresses, routing, firewall, tcpdiag, rules),
++sys_ioctl: fs/ext[3]2/ioctl.c:ext[23]_ioctl(), sys_pipe,
++sys_ioctl: drivers/ide/ide.c:ide_ioctl(),
++sys_ioctl: tty_ioctl,
++sys_fcntl: fs/fcntl.c:do_fcntl(): Control file locking,
++sys_flock: fs/locks.c:sys_flock(): Control file advisory locking,
++sys_get_kernel_syms: kernel/module.c: SCD ksyms, same for /proc/ksyms,
++sys_mlock, sys_mlockall: SCD mlock,
++sys_swapon: Also check access to the device as ADD_TO_KERNEL and
++REMOVE_FROM_KERNEL
++
++Not yet intercepted, for discussion:
++
++- netlink-sockets: iptables_ULOG, decnet, ipv6
++
++Other notes:
++- Added SCD target sysctl, now used by sys_sysctl and /proc/sys instead of
++ non-intuitive ST_other
++
++- Added SCD target nfsd for kernel NFS server control
++
++- Added IPC type anonpipe, used by anonymous T_FIFO (if inode on PIPEFS)
++
++- Added requests GET_STATUS_DATA, GET_PERMISSIONS_DATA, MODIFY_PERMISSIONS_DATA,
++ SEND for target type DEV
++
++- JAIL module: generally deny read, write, GET_STATUS_DATA and MODIFY_SYSTEM_DATA
++ accesses to devices, flags to allow
+diff --git a/Documentation/rsbac/Interceptions-2.6 b/Documentation/rsbac/Interceptions-2.6
+new file mode 100644
+index 0000000..bac1409
+--- /dev/null
++++ b/Documentation/rsbac/Interceptions-2.6
+@@ -0,0 +1,330 @@
++Interceptions for access decisions (AEF) in RSBAC for the kernel 2.6 as of 2.6.23.13:
++to check against LSM interceptions:
++egrep "static inline (.*) security_" include/linux/security.h |cut -d ' ' -f4|cut -d '(' -f1
++
++#define __NR_restart_syscall 0
++#define __NR_exit 1 *
++#define __NR_fork 2 *
++#define __NR_read 3 *
++#define __NR_write 4 *
++#define __NR_open 5 *
++#define __NR_close 6 *
++#define __NR_waitpid 7 -
++#define __NR_creat 8 *
++#define __NR_link 9 * [we do not need to check for T_DIR - hardlinks are not allowed for dirs anyway]
++#define __NR_unlink 10 *
++#define __NR_execve 11 *
++#define __NR_chdir 12 *
++#define __NR_time 13 *
++#define __NR_mknod 14 *
++#define __NR_chmod 15 *
++#define __NR_lchown 16 *
++#define __NR_break 17
++#define __NR_oldstat 18
++#define __NR_lseek 19 -
++#define __NR_getpid 20 -
++#define __NR_mount 21 !
++[do_loopback(), do_move_mount(), do_remount() are missing. we should take care of them, also reading rsbac attributes dirs when necessary]
++#define __NR_umount 22 *
++#define __NR_setuid 23 *
++#define __NR_getuid 24 -
++#define __NR_stime 25 *
++#define __NR_ptrace 26 *
++#define __NR_alarm 27 -
++#define __NR_oldfstat 28
++#define __NR_pause 29 -
++#define __NR_utime 30 *
++#define __NR_stty 31
++#define __NR_gtty 32
++#define __NR_access 33 *
++#define __NR_nice 34 *
++#define __NR_ftime 35
++#define __NR_sync 36 -
++#define __NR_kill 37 *
++#define __NR_rename 38 *
++#define __NR_mkdir 39 *
++#define __NR_rmdir 40 *
++#define __NR_dup 41 -
++#define __NR_pipe 42 ! [see Interceptions-2.4]
++#define __NR_times 43 -
++#define __NR_prof 44
++#define __NR_brk 45 -
++#define __NR_setgid 46 *
++#define __NR_getgid 47 -
++#define __NR_signal 48 -
++#define __NR_geteuid 49 -
++#define __NR_getegid 50 -
++#define __NR_acct 51 ! [missing in 2.6, _and_ also on intercepted on 2.4 !!]
++#define __NR_umount2 52
++#define __NR_lock 53
++#define __NR_ioctl 54 ! [missing interception - should be fine grained]
++#define __NR_fcntl 55 ! [missing interception - should be fine grained]
++#define __NR_mpx 56
++#define __NR_setpgid 57 *
++#define __NR_ulimit 58
++#define __NR_oldolduname 59
++#define __NR_umask 60 -
++#define __NR_chroot 61 *
++#define __NR_ustat 62 -
++#define __NR_dup2 63 -
++#define __NR_getppid 64 -
++#define __NR_getpgrp 65 -
++#define __NR_setsid 66 -
++#define __NR_sigaction 67 -
++#define __NR_sgetmask 68 -
++#define __NR_ssetmask 69 -
++#define __NR_setreuid 70 *
++#define __NR_setregid 71 *
++#define __NR_sigsuspend 72 -
++#define __NR_sigpending 73 -
++#define __NR_sethostname 74 *
++#define __NR_setrlimit 75 *
++#define __NR_getrlimit 76 -
++#define __NR_getrusage 77 -
++#define __NR_gettimeofday 78 -
++#define __NR_settimeofday 79 *
++#define __NR_getgroups 80 -
++#define __NR_setgroups 81 *
++#define __NR_select 82 -
++#define __NR_symlink 83 *
++#define __NR_oldlstat 84
++#define __NR_readlink 85 *
++#define __NR_uselib 86 *
++#define __NR_swapon 87 *
++#define __NR_reboot 88 *
++#define __NR_readdir 89
++#define __NR_mmap 90 *
++#define __NR_munmap 91 -
++#define __NR_truncate 92 *
++#define __NR_ftruncate 93 *
++#define __NR_fchmod 94 *
++#define __NR_fchown 95 *
++#define __NR_getpriority 96 -
++#define __NR_setpriority 97 *
++#define __NR_profil 98
++#define __NR_statfs 99 *
++#define __NR_fstatfs 100 *
++#define __NR_ioperm 101 *
++#define __NR_socketcall 102 ! [missing interception]
++#define __NR_syslog 103 *
++#define __NR_setitimer 104 -
++#define __NR_getitimer 105 -
++#define __NR_stat 106 *
++#define __NR_lstat 107 *
++#define __NR_fstat 108 *
++#define __NR_olduname 109 -
++#define __NR_iopl 110 *
++#define __NR_vhangup 111
++#define __NR_idle 112
++#define __NR_vm86old 113
++#define __NR_wait4 114 -
++#define __NR_swapoff 115 *
++#define __NR_sysinfo 116 -
++#define __NR_ipc 117 ! [missing interception]
++#define __NR_fsync 118 -
++#define __NR_sigreturn 119 -
++#define __NR_clone 120 *
++#define __NR_setdomainname 121 *
++#define __NR_uname 122 -
++#define __NR_modify_ldt 123
++#define __NR_adjtimex 124 *
++#define __NR_mprotect 125 *
++#define __NR_sigprocmask 126 -
++#define __NR_create_module 127
++#define __NR_init_module 128 *
++#define __NR_delete_module 129
++#define __NR_get_kernel_syms 130
++#define __NR_quotactl 131 ! [missing interception]
++#define __NR_getpgid 132 -
++#define __NR_fchdir 133 *
++#define __NR_bdflush 134 -
++#define __NR_sysfs 135 -
++#define __NR_personality 136 ? [what about it]
++#define __NR_afs_syscall 137
++#define __NR_setfsuid 138 *
++#define __NR_setfsgid 139 *
++#define __NR__llseek 140 -
++#define __NR_getdents 141 ! [missing interception - should be in vfs_readdir]
++#define __NR__newselect 142 -
++#define __NR_flock 143 -
++#define __NR_msync 144 -
++#define __NR_readv 145 *
++#define __NR_writev 146 *
++#define __NR_getsid 147 *
++#define __NR_fdatasync 148 -
++#define __NR__sysctl 149 ! [missing interception]
++#define __NR_mlock 150 ? [to care or not to care. this is a question !]
++#define __NR_munlock 151 ? [see above]
++#define __NR_mlockall 152 ? [see above]
++#define __NR_munlockall 153 ? [see above]
++#define __NR_sched_setparam 154 ! [missing interception]
++#define __NR_sched_getparam 155 -
++#define __NR_sched_setscheduler 156 ! [missing interception]
++#define __NR_sched_getscheduler 157 -
++#define __NR_sched_yield 158 -
++#define __NR_sched_get_priority_max 159 -
++#define __NR_sched_get_priority_min 160 -
++#define __NR_sched_rr_get_interval 161 -
++#define __NR_nanosleep 162 -
++#define __NR_mremap 163 -
++#define __NR_setresuid 164 *
++#define __NR_getresuid 165 -
++#define __NR_vm86 166
++#define __NR_query_module 167 ! [intercepted on 2.4, not found on 2.6]
++#define __NR_poll 168
++#define __NR_nfsservctl 169 ! [missing interception]
++#define __NR_setresgid 170 *
++#define __NR_getresgid 171 -
++#define __NR_prctl 172 -
++#define __NR_rt_sigreturn 173 -
++#define __NR_rt_sigaction 174 -
++#define __NR_rt_sigprocmask 175 -
++#define __NR_rt_sigpending 176 -
++#define __NR_rt_sigtimedwait 177 -
++#define __NR_rt_sigqueueinfo 178 -
++#define __NR_rt_sigsuspend 179 -
++#define __NR_pread64 180 *
++#define __NR_pwrite64 181 *
++#define __NR_chown 182 *
++#define __NR_getcwd 183 -
++#define __NR_capget 184 *
++#define __NR_capset 185 *
++#define __NR_sigaltstack 186 -
++#define __NR_sendfile 187 *
++#define __NR_getpmsg 188
++#define __NR_putpmsg 189
++#define __NR_vfork 190 *
++#define __NR_ugetrlimit 191 -
++#define __NR_mmap2 192 *
++#define __NR_truncate64 193 *
++#define __NR_ftruncate64 194 *
++#define __NR_stat64 195 * [vfs_stat() seems to be ok, but there is also cp_new_stat()/cp_new_stat64()]
++#define __NR_lstat64 196 *
++#define __NR_fstat64 197 *
++#define __NR_lchown32 198 *
++#define __NR_getuid32 199 -
++#define __NR_getgid32 200 -
++#define __NR_geteuid32 201 -
++#define __NR_getegid32 202 -
++#define __NR_setreuid32 203
++#define __NR_setregid32 204
++#define __NR_getgroups32 205 -
++#define __NR_setgroups32 206
++#define __NR_fchown32 207
++#define __NR_setresuid32 208
++#define __NR_getresuid32 209 -
++#define __NR_setresgid32 210
++#define __NR_getresgid32 211 -
++#define __NR_chown32 212
++#define __NR_setuid32 213
++#define __NR_setgid32 214
++#define __NR_setfsuid32 215
++#define __NR_setfsgid32 216
++#define __NR_pivot_root 217 *
++#define __NR_mincore 218 ? [not intercepted - i do not think it is necessary though]
++#define __NR_madvise 219 ? [not intercepted - maybe this one should be]
++#define __NR_madvise1 219
++#define __NR_getdents64 220 ! [same as sys_getdents()]
++#define __NR_fcntl64 221 ! [same as sys_fcntl()]
++/* 223 is unused */
++#define __NR_gettid 224 ? [not intercepted, rather no need to]
++#define __NR_readahead 225 ? [not intercepted - shall we ?]
++#define __NR_setxattr 226 ? [do we care about xattr ?]
++#define __NR_lsetxattr 227 ? [see above]
++#define __NR_fsetxattr 228 ? [see above]
++#define __NR_getxattr 229 ? [see above]
++#define __NR_lgetxattr 230 ? [see above]
++#define __NR_fgetxattr 231 ? [see above]
++#define __NR_listxattr 232 ? [see above]
++#define __NR_llistxattr 233 ? [see above]
++#define __NR_flistxattr 234 ? [see above]
++#define __NR_removexattr 235 ? [see above]
++#define __NR_lremovexattr 236 ? [see above]
++#define __NR_fremovexattr 237 ? [see above]
++#define __NR_tkill 238 *
++#define __NR_sendfile64 239 *
++#define __NR_futex 240 ! [multiplexer - check it out]
++#define __NR_sched_setaffinity 241 ! [not intercepted - shall we ?]
++#define __NR_sched_getaffinity 242 ! [not intercepted, there is no need to]
++#define __NR_set_thread_area 243
++#define __NR_get_thread_area 244
++#define __NR_io_setup 245 ? [not intercepted - wtf is it ?]
++#define __NR_io_destroy 246 [see above]
++#define __NR_io_getevents 247 [see above]
++#define __NR_io_submit 248 [see above]
++#define __NR_io_cancel 249 [see above]
++#define __NR_fadvise64 250 ? [not intercepted - don't ask me what the heck it is]
++
++#define __NR_exit_group 252 ? [not intercepted - should be treated like sys_exit() ?]
++#define __NR_lookup_dcookie 253 ? [not intercepted - not necessary]
++#define __NR_epoll_create 254 ? [do we care about epoll ?]
++#define __NR_epoll_ctl 255 ? [see above]
++#define __NR_epoll_wait 256 ? [see above]
++#define __NR_remap_file_pages 257 ? [not intercepted - may be dangerous ?]
++#define __NR_set_tid_address 258 ? [not intercepted - no need to]
++#define __NR_timer_create 259 ? [not intercepted - no need to ?]
++#define __NR_timer_settime ? [see above]
++#define __NR_timer_gettime ? [see above]
++#define __NR_timer_getoverrun ? [see above]
++#define __NR_timer_delete ? [see above]
++#define __NR_clock_settime *
++#define __NR_clock_gettime -
++#define __NR_clock_getres -
++#define __NR_clock_nanosleep -
++#define __NR_statfs64 268 *
++#define __NR_fstatfs64 269 *
++#define __NR_tgkill 270 *
++#define __NR_utimes 271 *
++#define __NR_fadvise64_64 272 ? [not intercepted - look fadvise above]
++#define __NR_vserver 273 - [reservation for friendly project]
++#define __NR_mbind 274 ? [not intercepted - i am boring, but will ask once more - what the heck ? (memory allocation policy stuff ?)]
++#define __NR_get_mempolicy 275 ? [see above]
++#define __NR_set_mempolicy 276 ? [see above]
++#define __NR_mq_open 277 ? [message queues - i think that may be security relevant]
++#define __NR_mq_unlink ? [see above]
++#define __NR_mq_timedsend ? [see above]
++#define __NR_mq_timedreceive ? [see above]
++#define __NR_mq_notify ? [see above]
++#define __NR_mq_getsetattr ? [see above]
++#define __NR_sys_kexec_load 283 ! [not found, but kexec is very dangerous - should immediately be taken care of]
++#define __NR_waitid 284 ? [not intercepted, probably no need to]
++/* #define __NR_sys_setaltroot 285 ? [if it ever comes out of the deep shadows we will be playing with it hard] */
++#define __NR_add_key 286 ? [key infrastructure recently hit kernels, another redhat (read: stupid, wastefull and security risky) idea ?]
++#define __NR_request_key 287 ? [see above]
++#define __NR_keyctl 288 ? [see above]
++#define __NR_ioprio_set 289 *
++#define __NR_ioprio_get 290 -
++#define __NR_inotify_init 291
++#define __NR_inotify_add_watch 292
++#define __NR_inotify_rm_watch 293
++#define __NR_migrate_pages 294
++#define __NR_openat 295
++#define __NR_mkdirat 296
++#define __NR_mknodat 297
++#define __NR_fchownat 298
++#define __NR_futimesat 299
++#define __NR_fstatat64 300
++#define __NR_unlinkat 301
++#define __NR_renameat 302
++#define __NR_linkat 303
++#define __NR_symlinkat 304
++#define __NR_readlinkat 305
++#define __NR_fchmodat 306
++#define __NR_faccessat 307
++#define __NR_pselect6 308
++#define __NR_ppoll 309
++#define __NR_unshare 310
++#define __NR_set_robust_list 311
++#define __NR_get_robust_list 312
++#define __NR_splice 313
++#define __NR_sync_file_range 314
++#define __NR_tee 315
++#define __NR_vmsplice 316
++#define __NR_move_pages 317
++#define __NR_getcpu 318
++#define __NR_epoll_pwait 319
++#define __NR_utimensat 320
++#define __NR_signalfd 321
++#define __NR_timerfd 322
++#define __NR_eventfd 323
++#define __NR_fallocate 324 *
+diff --git a/Documentation/rsbac/README b/Documentation/rsbac/README
+new file mode 100644
+index 0000000..fd676e9
+--- /dev/null
++++ b/Documentation/rsbac/README
+@@ -0,0 +1,49 @@
++RSBAC README
++------------
++
++Documentation in this section is here for information only.
++
++The full RSBAC documentation is available online, at:
++<http://www.rsbac.org/documentation>
++
++New RSBAC versions as well as support for other kernel versions and bugfixes
++can be downloaded from: <http://www.rsbac.org>
++
++For installation instructions read INSTALL.
++
++Files description:
++------------------
++
++README-kernparam:
++Describes the various kernel parameters that can be used with RSBAC.
++
++README-proc:
++Describes the proc interface to RSBAC.
++
++(Quite old) man pages for many RSBAC syscalls are in the man directory.
++Russian versions can be found in man-rus.
++
++README-nrlists:
++For large systems (very many files per partition) you might consider
++increasing RSBAC_NR_FD_LISTS in include/rsbac/aci_data_structures.h before
++compiling. Please read README-nrlists first.
++There is also now a kernel configuration option.
++
++README-patching:
++If you patched against a kernel version other than the one stated in the patch
++filename, it is important to work through README-patching.
++
++README-reg:
++Information about the registration of additional RSBAC decision modules
++(option CONFIG_RSBAC_REG); see also html/reg.htm.
++
++
++If you run into problems or have questions, please write to the RSBAC
++mailing list at rsbac@rsbac.org (send commands like 'subscribe rsbac' as a
++single line in the message body to majordomo@rsbac.org), or see:
++<http://www.rsbac.org/contact>
++
++The RSBAC team.
+diff --git a/Documentation/rsbac/README-kernparam b/Documentation/rsbac/README-kernparam
+new file mode 100644
+index 0000000..55c70df
+--- /dev/null
++++ b/Documentation/rsbac/README-kernparam
+@@ -0,0 +1,93 @@
++RSBAC README for the kernel parameters.
++---------------------------------------
++
++Also see: <http://rsbac.org/documentation/kernel_parameters>
++
++The RSBAC system accepts the following parameters:
++ - rsbac_debug_ds: Debug messages from the Data Structures component.
++ - rsbac_debug_aef: Debug messages from the enforcement component (AEF).
++ - rsbac_debug_no_adf: Set default log level value for all request
++ types to 0: Do not log.
++ - rsbac_debug_adf (default, so obsolete): Set default log level value for all
++ request types to 1: Logging messages
++ from the decision component (ADF) for all requests that were denied (highly
++ recommended for testing, even in normal use). If provided, pseudonyms of
++ users are used.
++ - rsbac_debug_adf_all: Set default log level value for all request types to 2:
++ log messages from the decision component (ADF) for all requests. If
++ provided, pseudonyms of users are used. This produces a very large amount of
++ log output - never try it if checking of sys_syslog is turned on and log
++ levels have not yet been saved to make them permanent.
++ - rsbac_debug_ds_pm: Debug messages from the Data Structures component,
++ on access to privacy model data.
++ - rsbac_debug_aef_pm: Debug messages for privacy model specific system
++ calls.
++ - rsbac_debug_adf_pm: Debug messages for access control in privacy module.
++ - rsbac_debug_pm: Sets rsbac_debug_ds_pm, rsbac_debug_aef_pm,
++ rsbac_debug_adf_pm (recommended for testing privacy model).
++ - rsbac_debug_adf_ms: Debug messages for access control in Malware Scan.
++ - rsbac_debug_ds_rc: Debug messages from the Data Structures component,
++ on access to Role Compatibility model data.
++ - rsbac_debug_aef_rc: Debug messages for Role Compatibility model specific
++ system calls.
++ - rsbac_debug_adf_rc: Debug messages for access control in RC module.
++ - rsbac_debug_rc: Sets rsbac_debug_ds_rc, rsbac_debug_aef_rc,
++ rsbac_debug_adf_rc.
++ - rsbac_debug_ds_auth: Debug messages from the Data Structures component,
++ on access to AUTH model data.
++ - rsbac_debug_aef_auth: Debug messages for AUTH model specific system calls.
++ - rsbac_debug_adf_auth: Debug messages for access control in AUTH module.
++ - rsbac_debug_auth: Sets rsbac_debug_ds_auth, rsbac_debug_aef_auth,
++ rsbac_debug_adf_auth.
++ - rsbac_debug_ds_acl: Debug messages from the Data Structures component,
++ on access to Access Control Lists (ACL) model data.
++ - rsbac_debug_aef_acl: Debug messages for ACL model specific
++ system calls.
++ - rsbac_debug_adf_acl: Debug messages for access control in ACL module.
++ - rsbac_debug_acl: Sets rsbac_debug_ds_acl, rsbac_debug_aef_acl,
++ rsbac_debug_adf_acl.
++ - rsbac_debug_all: Sets all debug options - in fact turns on a huge amount
++ of logging. Beware of a fast growing system log. Hardly ever recommended.
++ - rsbac_debug_no_write: Turn writing to disk off for this
++ single boot time. For testing.
++ - rsbac_debug_auto: Debug messages from auto-write / rsbacd. Recommended
++ for a good disk saving overview.
++ - rsbac_debug_write: Debug messages from all attribute writing related
++ procedures.
++ - rsbac_no_defaults: suppress creation of default settings, useful when
++ restoring from an existing backup. Warning: An unconfigured system will only
++ come up in softmode or maint mode, and softmode will produce loads of
++ logging (see the rsbac_nosyslog option).
++ - rsbac_auth_enable_login: Sets auth_may_setuid for /bin/login, if AUTH
++ module is on. A good emergency helper, if you cannot login anymore.
++ - rsbac_softmode (only if enabled in kernel config): switch to softmode
++ - rsbac_softmode_once (only if enabled in kernel config): switch to softmode
++ and disallow switching it on again later
++ - rsbac_softmode_never (only if softmode enabled in kernel config):
++ disallow switching softmode on during this runtime
++ - rsbac_softmode_<mod> (module name in lowercase, e.g. rc, only if enabled):
++ switch individual model softmode to on
++ - rsbac_freeze (only, if enabled in kernel config): Disallow RSBAC
++ administration for this runtime.
++ - rsbac_dac_disable (only, if enabled in kernel config): disable Linux DAC
++ - rsbac_nosyslog: do not log to syslog for this boot time
++ - rsbac_no_init_delay: disable delayed init for this single boot (if
++ init delay is enabled in kernel config)
++ - rsbac_delayed_root=major[:minor]: initialize when this device gets
++ mounted. Omit minor or set it to 00 to match all devices with this major
++ number. Delayed init must be enabled in kernel config.
++ - rsbac_auth_learn (only, if enabled in kernel config): enable AUTH
++ learning mode, where AUTH module adds all missing capabilities
++ automatically instead of denying the request.
++ - rsbac_acl_learn and rsbac_acl_learn_fd (only, if enabled in kernel
++ config): enable ACL learning mode for user rights to filesystem objects
++ - rsbac_log_remote_addr=a.b.c.d: Set remote logging address to a.b.c.d
++ - rsbac_log_remote_port=n: Set remote logging port to n. Remote logging
++ must be enabled in kernel config.
++ - rsbac_um_no_excl: Disable exclusive user management for this uptime.
++ - rsbac_daz_ttl=n: Set DAZ cache item ttl to n seconds for this boot.
++ - rsbac_cap_log_missing: Log all calls to capable() for caps, which are
++ not in the process set of effective Linux capabilities, i.e., failed
++ capable() checks.
++
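++For example, a boot loader entry could enable soft mode, the AUTH login helper
++and rsbacd debugging by appending the documented parameters to the kernel
++command line (bootloader syntax varies; image path and root device below are
++placeholders):
++
++ kernel /boot/vmlinuz root=/dev/sda1 rsbac_softmode rsbac_auth_enable_login rsbac_debug_auto
++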
++Last updated: 28/Jan/2005
+diff --git a/Documentation/rsbac/README-nrlists b/Documentation/rsbac/README-nrlists
+new file mode 100644
+index 0000000..496aad4
+--- /dev/null
++++ b/Documentation/rsbac/README-nrlists
+@@ -0,0 +1,28 @@
++RSBAC README-nrlists
++--------------------
++
++For large systems (very many files per partition) you should increase
++RSBAC_NR_FD_LISTS in include/rsbac/aci_data_structures.h before compiling.
++
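++A minimal sketch of the change (the value 400 below is purely illustrative;
++pick what fits your system):
++
++ /* in include/rsbac/aci_data_structures.h */
++ #define RSBAC_NR_FD_LISTS 400
++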
++You should seriously consider increasing it if you get warning messages like
++"write_fd_list(): list n too large (m bytes), calling partial_write_fd_list()!"
++(this does not lead to data loss - it only decreases stability a bit). This
++should not happen any longer though - if it does, please send a note to the
++RSBAC mailing list containing your /proc/rsbac-info/stats output and the
++output of free at the time the messages appear, for examination.
++
++CAUTION:
++- When restarting with a larger number of lists for the first time, you *must*
++ use the kernel parameter rsbac_change_nr! Only then are old attributes
++ sorted into the now correct lists; otherwise they are lost for good.
++- Please remember to mount read-write all partitions used by RSBAC so far
++ while rsbac_change_nr is still active.
++- There is definitely no way back to a smaller number. All following RSBAC
++ versions must be set to the same value, and rebooting with an older kernel
++ can result in unnoticed attribute losses.
++
++To test this feature, you can use rsbac_debug_no_write. This prevents
++attribute saving and thus attribute loss from previous runs. Those
++partitions that are not mounted rw at boot time can be tested by mounting
++read-only.
+diff --git a/Documentation/rsbac/README-patching b/Documentation/rsbac/README-patching
+new file mode 100644
+index 0000000..2cf1691
+--- /dev/null
++++ b/Documentation/rsbac/README-patching
+@@ -0,0 +1,27 @@
++RSBAC README for patching against other versions.
++-------------------------------------------------
++
++To make my point clear: I do not recommend patching against kernel versions
++other than the one stated in the patch filename. Rather, check the RSBAC
++homepage for new versions or send a note to the RSBAC mailing list (see README).
++
++If you had to patch against another version, you will have to do the following:
++ - Make sure you understand how rsbac_adf_request() and rsbac_adf_set_attr()
++ calls work
++ - Patch in all rejects by hand.
++ - Edit fs/namei.c:
++ rsbac_lookup_one_len/hash must be lookup_one_len/hash minus
++ checks (permission(), rsbac_adf_request()).
++ Please do not forget to change the call to lookup_hash in
++ rsbac_lookup_one_len into rsbac_lookup_hash.
++ - arch/i386/kernel/entry.S must have the RSBAC syscall entry added,
++ wrapped in #ifdef CONFIG_RSBAC (a sketch follows at the end of this file).
++ You may have to adjust syscall numbers there and in
++ include/rsbac/unistd-i386.h. After that, make sure you recompile the
++ admin tools.
++ - Same for all other archs
++ - Check in rsbac/data_structures/aci_data_structures.c whether file opening
++ and closing are done correctly (rsbac_read_open, rsbac_write_open,
++ rsbac_read_close, rsbac_write_close).
++ - Check in rsbac/help/debug.c whether the logging in rsbac_log() is
++ implemented correctly - see sys_syslog() in kernel/printk.c
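++
++As an illustration, the added syscall table entry follows the same pattern as
++the architecture hunks elsewhere in this patch (table syntax and the actual
++slot numbers differ per architecture and kernel version):
++
++ #ifdef CONFIG_RSBAC
++ .long sys_rsbac /* reserved for sys_security */
++ #else
++ .long sys_ni_syscall /* reserved for sys_security */
++ #endif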
+diff --git a/Documentation/rsbac/README-proc b/Documentation/rsbac/README-proc
+new file mode 100644
+index 0000000..168516e
+--- /dev/null
++++ b/Documentation/rsbac/README-proc
+@@ -0,0 +1,93 @@
++RSBAC README for the proc interface.
++------------------------------------
++
++Also see: <http://rsbac.org/documentation/proc_interface>
++
++If enabled in the kernel configuration, RSBAC adds one directory to the
++main proc dir: rsbac-info. Since proc is treated as a normal read-only fs,
++rsbac could not be used.
++
++All successful write accesses are logged via syslog at KERN_INFO level.
++The rsbac-info dir contains the following entries:
++
++ - stats: shows rsbac status, same contents as sys_rsbac_stats writes into
++ syslog
++
++ - active: short summary of version, mode and module states, good for scripts
++
++ - stats_pm (if PM is enabled): shows PM status, same contents as
++ sys_rsbac_stats_pm writes into syslog
++
++ - stats_rc (if RC is enabled): shows RC status
++
++ - stats_auth (if AUTH is enabled): shows AUTH status
++
++ - stats_acl (if ACL is enabled): shows ACL status
++
++ - xstats (if extended status is enabled): shows extended status, e.g. table
++ of call counts for requests and targets
++
++ - devices: shows all rsbac-mounted devices in n:m notation and their
++ no_write status (no_write is set on fd-list read, if wrong version).
++ No_write status can be changed by calling
++ echo "devices no_write n:m k" >devices
++ where n:m is the device in major:minor notation and k is 0 or 1.
++
++ - acl_devices, auth_devices: same for ACL and AUTH data structures
++
++ - debug: shows all RSBAC debug settings, softmode, dac_disable and nosyslog.
++ Levels can be changed by calling
++ echo "debug name n" >debug
++ Valid names are ds, aef, no_write, ds_pm, aef_pm, adf_pm, adf_ms, ds_rc,
++ aef_rc, adf_rc, ds_acl, aef_acl, adf_acl, auto, softmode, dac_disable and
++ nosyslog, but only if shown when reading this file. Valid levels are 0
++ and 1.
++ Debug levels can be preset to 1 by kernel parameters with the same name as
++ the variable name shown, e.g. rsbac_debug_ds or rsbac_softmode.
++ Individual model softmode can be switched by calling
++ echo "debug ind_softmode <modname> n" >debug
++ Remote logging address and port can be changed with
++ echo "debug log_remote_addr a.b.c.d" >debug
++ echo "debug log_remote_port n" >debug
++ DAZ cache ttl is set via
++ echo "debug daz_ttl n" >debug
++
++ - log_levels: shows adf log levels for all requests. Log levels can be
++ changed by calling
++ echo "log_levels request n" >log_levels
++ where request is the request name (e.g. WRITE) and n is the level.
++
++ - auto_write (if auto-write is enabled): shows auto write status, currently
++ auto interval in jiffies and auto debug level only.
++ Auto interval can be changed by calling
++ echo "auto interval n" >auto_write
++ with n = number of jiffies, debug level (0 or 1) by calling
++ echo "auto debug n" >auto_write
++
++ - versions: shows aci versions for dev and user list and adf request array
++ version for log_level array and the no_write status of each (set on boot,
++ if a wrong version is encountered on read). No_write status can be changed by
++ calling
++ echo "no_write listname n" >versions
++ where listname is one of dev, user or log_levels and n is 0 or 1.
++
++ - rmsg (if own logging is enabled): similar to kmsg in main proc dir, logging
++ of RSBAC requests. This file can be used by programs like klogd.
++
++ - auth_caplist (if AUTH is enabled): shows all AUTH capabilities currently
++ set.
++
++ - reg_modules (if REG is enabled): shows currently registered additional
++ decision modules and syscalls.
++
++ - acl_acllist (if ACL is enabled): Detailed listing of all ACL entries and
++ masks in the system.
++
++ - backup subdir: It contains backups of what would be
++ current aci data files. You can use cp for backups of system independent aci
++ data structures, e.g. rc_roles, rc_types, and the admin backup tools for
++ system dependent ones, e.g. file/dir attributes or AUTH file capabilities.
++ Using the backup_all script or single lines from it is however strongly
++ recommended.
++
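++A typical session with this interface could look like the following (the
++values are only an illustration; which names are accepted depends on your
++kernel configuration, as described above):
++
++ cat /proc/rsbac-info/active
++ echo "debug softmode 1" >/proc/rsbac-info/debug
++ echo "log_levels WRITE 1" >/proc/rsbac-info/log_levels
++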
++Last updated: 18/Jan/2005
+diff --git a/Documentation/rsbac/README-reg b/Documentation/rsbac/README-reg
+new file mode 100644
+index 0000000..10d6b8b
+--- /dev/null
++++ b/Documentation/rsbac/README-reg
+@@ -0,0 +1,37 @@
++RSBAC README for the REG facility.
++----------------------------------
++
++Also see: <http://rsbac.org/documentation/write_your_decision_module>
++
++If enabled in the kernel configuration, RSBAC REG allows the registration
++and unregistration of additional decision modules at runtime, usually from
++a kernel module.
++
++These modules register with a name and a chosen magic handle, which can be
++used for switching on/off and for unregistration.
++
++At registration, a request function (the decision itself), a set_attr
++function (called after successful syscall completion) and a need_overwrite
++function (called to determine whether a file needs to be securely
++deleted/truncated) can be installed.
++
++Apart from these decision functions some support routines can be registered.
++Currently these are write (signal asynchronous attribute writing to disk,
++called regularly by rsbacd), mount and umount (a device has been (u)mounted).
++
++However, each of these function pointers can be set to NULL if
++no call of this type is wanted.
++
++All functions are *additional* to the existing functions from builtin
++modules, e.g. MAC or RC. This way, they can only further restrict access,
++but not grant anything denied by other models.
++
++Also, you can now register system calls and generic lists.
++
++For examples of builtin real decision modules and their functions see
++subdirs below rsbac/adf/.
++
++Working example modules with simple call counters and a proc pseudo file
++for counter display can be found in the examples/reg/ directory of the
++rsbac-admin tools. These are basically the same modules that are built if
++you enabled building of sample modules in kernel config.
+diff --git a/MAINTAINERS b/MAINTAINERS
+index b362709..abdb025 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -5680,6 +5680,13 @@ F: include/linux/rose.h
+ F: include/net/rose.h
+ F: net/rose/
+
++RSBAC
++P: Amon Ott
++M: ao@rsbac.org
++L: rsbac@rsbac.org
++W: http://www.rsbac.org
++S: Maintained
++
+ RTL8180 WIRELESS DRIVER
+ M: "John W. Linville" <linville@tuxdriver.com>
+ L: linux-wireless@vger.kernel.org
+diff --git a/Makefile b/Makefile
+index 0bd1554..f71c4ce 100644
+--- a/Makefile
++++ b/Makefile
+@@ -681,6 +681,13 @@ export KBUILD_IMAGE ?= vmlinux
+ export INSTALL_PATH ?= /boot
+
+ #
++
++# Add RSBAC version
++ifeq ($(CONFIG_RSBAC),y)
++EXTRAVERSION:=$(EXTRAVERSION)-rsbac
++core-y += rsbac/
++endif
++
+ # INSTALL_MOD_PATH specifies a prefix to MODLIB for module directory
+ # relocations required by build roots. This is not defined in the
+ # makefile but the argument can be passed to make if needed.
+diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
+index 2207fc6..6982eac 100644
+--- a/arch/alpha/include/asm/unistd.h
++++ b/arch/alpha/include/asm/unistd.h
+@@ -332,7 +332,11 @@
+ #define __NR_getdents64 377
+ #define __NR_gettid 378
+ #define __NR_readahead 379
++#ifdef CONFIG_RSBAC
++#define __NR_rsbac 380
++#else
+ /* 380 is unused */
++#endif
+ #define __NR_tkill 381
+ #define __NR_setxattr 382
+ #define __NR_lsetxattr 383
+diff --git a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c
+index 6ff8886..658ecc4 100644
+--- a/arch/alpha/kernel/asm-offsets.c
++++ b/arch/alpha/kernel/asm-offsets.c
+@@ -35,6 +35,9 @@ void foo(void)
+ DEFINE(PT_PTRACED, PT_PTRACED);
+ DEFINE(CLONE_VM, CLONE_VM);
+ DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
++#ifdef CONFIG_RSBAC
++ DEFINE(CLONE_KTHREAD, CLONE_KTHREAD);
++#endif
+ DEFINE(SIGCHLD, SIGCHLD);
+ BLANK();
+
+diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
+index 6d159ce..8df575e 100644
+--- a/arch/alpha/kernel/entry.S
++++ b/arch/alpha/kernel/entry.S
+@@ -653,7 +653,11 @@ kernel_thread:
+ stq $2, 152($sp) /* HAE */
+
+ /* Shuffle FLAGS to the front; add CLONE_VM. */
++#ifdef CONFIG_RSBAC
++ ldi $1, CLONE_VM|CLONE_UNTRACED | CLONE_KTHREAD;
++#else
+ ldi $1, CLONE_VM|CLONE_UNTRACED
++#endif
+ or $18, $1, $16
+ bsr $26, sys_clone
+
+diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
+index 54616f4..ac263d0 100644
+--- a/arch/alpha/kernel/ptrace.c
++++ b/arch/alpha/kernel/ptrace.c
+@@ -20,6 +20,8 @@
+
+ #include "proto.h"
+
++#include <rsbac/hooks.h>
++
+ #define DEBUG DBG_MEM
+ #undef DEBUG
+
+@@ -274,6 +276,26 @@ long arch_ptrace(struct task_struct *child, long request,
+ unsigned long tmp;
+ size_t copied;
+ long ret;
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rcu_read_lock();
++ rsbac_target_id.process = task_pid(child);
++ rsbac_attribute_value.trace_request = request;
++ if (!rsbac_adf_request(R_TRACE,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_trace_request,
++ rsbac_attribute_value))
++ {
++ rcu_read_unlock();
++ return -EPERM;
++ }
++ rcu_read_unlock();
++#endif
+
+ switch (request) {
+ /* When I and D space are separate, these will need to be fixed. */
+diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
+index 512cd14..14c0752 100644
+--- a/arch/arm/include/asm/unistd.h
++++ b/arch/arm/include/asm/unistd.h
+@@ -248,7 +248,12 @@
+ #define __NR_madvise (__NR_SYSCALL_BASE+220)
+ #define __NR_fcntl64 (__NR_SYSCALL_BASE+221)
+ /* 222 for tux */
++/* RSBAC - we use 223, the old sys_security */
++#ifdef CONFIG_RSBAC
++#define __NR_rsbac (__NR_SYSCALL_BASE+223)
++#else
+ /* 223 is unused */
++#endif
+ #define __NR_gettid (__NR_SYSCALL_BASE+224)
+ #define __NR_readahead (__NR_SYSCALL_BASE+225)
+ #define __NR_setxattr (__NR_SYSCALL_BASE+226)
+diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
+index 463ff4a..09aae70 100644
+--- a/arch/arm/kernel/calls.S
++++ b/arch/arm/kernel/calls.S
+@@ -232,7 +232,11 @@
+ /* 220 */ CALL(sys_madvise)
+ CALL(ABI(sys_fcntl64, sys_oabi_fcntl64))
+ CALL(sys_ni_syscall) /* TUX */
++#ifdef CONFIG_RSBAC
++ CALL(sys_rsbac)
++#else
+ CALL(sys_ni_syscall)
++#endif
+ CALL(sys_gettid)
+ /* 225 */ CALL(ABI(sys_readahead, sys_oabi_readahead))
+ CALL(sys_setxattr)
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index 2b7b017..0b2d0b1 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -485,6 +485,10 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+ {
+ struct pt_regs regs;
+
++#ifdef CONFIG_RSBAC
++ int rsbac_retval;
++#endif
++
+ memset(&regs, 0, sizeof(regs));
+
+ regs.ARM_r4 = (unsigned long)arg;
+@@ -494,7 +498,12 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+ regs.ARM_pc = (unsigned long)kernel_thread_helper;
+ regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT;
+
++#ifdef CONFIG_RSBAC
++ rsbac_retval = do_fork(flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD, 0, &regs, 0, NULL, NULL);
++ return rsbac_retval;
++#else
+ return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
++#endif
+ }
+ EXPORT_SYMBOL(kernel_thread);
+
+diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
+index 92c5af9..34d630f 100644
+--- a/arch/avr32/kernel/process.c
++++ b/arch/avr32/kernel/process.c
+@@ -101,8 +101,13 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+ regs.pc = (unsigned long)kernel_thread_helper;
+ regs.sr = MODE_SUPERVISOR;
+
++#ifdef CONFIG_RSBAC
++ return do_fork(flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD,
++ 0, &regs, 0, NULL, NULL);
++#else
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
+ 0, &regs, 0, NULL, NULL);
++#endif
+ }
+ EXPORT_SYMBOL(kernel_thread);
+
+diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
+index c0f4fe2..4ce63a8 100644
+--- a/arch/blackfin/kernel/process.c
++++ b/arch/blackfin/kernel/process.c
+@@ -128,8 +128,13 @@ pid_t kernel_thread(int (*fn) (void *), void *arg, unsigned long flags)
+ mode. */
+ regs.ipend = 0x8002;
+ __asm__ __volatile__("%0 = syscfg;":"=da"(regs.syscfg):);
++#ifdef CONFIG_RSBAC
++ return do_fork(flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD, 0, &regs, 0, NULL,
++ NULL);
++#else
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
+ NULL);
++#endif
+ }
+ EXPORT_SYMBOL(kernel_thread);
+
+diff --git a/arch/cris/arch-v10/kernel/entry.S b/arch/cris/arch-v10/kernel/entry.S
+index 592fbe9..0360b7f 100644
+--- a/arch/cris/arch-v10/kernel/entry.S
++++ b/arch/cris/arch-v10/kernel/entry.S
+@@ -825,7 +825,11 @@ sys_call_table:
+ .long sys_getdents64 /* 220 */
+ .long sys_fcntl64
+ .long sys_ni_syscall /* reserved for TUX */
+- .long sys_ni_syscall
++#ifdef CONFIG_RSBAC
++ .long sys_rsbac /* reserved for sys_security */
++#else
++ .long sys_ni_syscall /* reserved for sys_security */
++#endif
+ .long sys_gettid
+ .long sys_readahead /* 225 */
+ .long sys_setxattr
+diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c
+index bee8df4..cc471c6 100644
+--- a/arch/cris/arch-v10/kernel/process.c
++++ b/arch/cris/arch-v10/kernel/process.c
+@@ -94,6 +94,10 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+ {
+ struct pt_regs regs;
+
++#ifdef CONFIG_RSBAC
++ int rsbac_retval;
++#endif
++
+ memset(&regs, 0, sizeof(regs));
+
+ /* Don't use r10 since that is set to 0 in copy_thread */
+@@ -103,7 +107,12 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+ regs.dccr = 1 << I_DCCR_BITNR;
+
+ /* Ok, create the new process.. */
++#ifdef CONFIG_RSBAC
++ rsbac_retval = do_fork(flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD, 0, &regs, 0, NULL, NULL);
++ return rsbac_retval;
++#else
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
++#endif
+ }
+
+ /* setup the child's kernel stack with a pt_regs and switch_stack on it.
+diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c
+index 0570e8c..ef6dfa2 100644
+--- a/arch/cris/arch-v32/kernel/process.c
++++ b/arch/cris/arch-v32/kernel/process.c
+@@ -116,7 +116,11 @@ kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+ regs.ccs = 1 << (I_CCS_BITNR + CCS_SHIFT);
+
+ /* Create the new process. */
++#ifdef CONFIG_RSBAC
++ return do_fork(flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD, 0, &regs, 0, NULL, NULL);
++#else
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
++#endif
+ }
+
+ /*
+diff --git a/arch/frv/kernel/kernel_thread.S b/arch/frv/kernel/kernel_thread.S
+index 4531c83..e97275e7 100644
+--- a/arch/frv/kernel/kernel_thread.S
++++ b/arch/frv/kernel/kernel_thread.S
+@@ -13,6 +13,10 @@
+ #include <asm/unistd.h>
+
+ #define CLONE_VM 0x00000100 /* set if VM shared between processes */
++#ifdef CONFIG_RSBAC
++#define CLONE_KTHREAD 0x100000000ULL /* kernel thread */
++#define CLONE_KT (CLONE_VM | CLONE_KTHREAD) /* kernel thread flags */
++#endif
+ #define KERN_ERR "<3>"
+
+ .section .rodata
+@@ -37,7 +41,11 @@ kernel_thread:
+
+ # start by forking the current process, but with shared VM
+ setlos.p #__NR_clone,gr7 ; syscall number
++#ifdef CONFIG_RSBAC
++ ori gr10,#CLONE_KT,gr8 ; first syscall arg [clone_flags]
++#else
+ ori gr10,#CLONE_VM,gr8 ; first syscall arg [clone_flags]
++#endif
+ sethi.p #0xe4e4,gr9 ; second syscall arg [newsp]
+ setlo #0xe4e4,gr9
+ setlos.p #0,gr10 ; third syscall arg [parent_tidptr]
+diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
+index 0e9c315..beb85b2 100644
+--- a/arch/h8300/kernel/process.c
++++ b/arch/h8300/kernel/process.c
+@@ -130,7 +130,11 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+
+ fs = get_fs();
+ set_fs (KERNEL_DS);
++#ifdef CONFIG_RSBAC
++ clone_arg = flags | CLONE_VM | CLONE_KTHREAD;
++#else
+ clone_arg = flags | CLONE_VM;
++#endif
+ __asm__("mov.l sp,er3\n\t"
+ "sub.l er2,er2\n\t"
+ "mov.l %2,er1\n\t"
+@@ -149,6 +153,12 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+ :"i"(__NR_clone),"g"(clone_arg),"g"(fn),"g"(arg),"i"(__NR_exit)
+ :"er0","er1","er2","er3");
+ set_fs (fs);
++
++#ifdef CONFIG_RSBAC
++ if (retval > 0)
++ rsbac_kthread_notify(find_pid_ns(retval, &init_pid_ns));
++#endif
++
+ return retval;
+ }
+
+diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
+index 7a3bd25..7e37d15 100644
+--- a/arch/ia64/include/asm/unistd.h
++++ b/arch/ia64/include/asm/unistd.h
+@@ -325,10 +325,18 @@
+ #define __NR_process_vm_writev 1333
+ #define __NR_accept4 1334
+
+-#ifdef __KERNEL__
++#ifdef CONFIG_RSBAC
++#define __NR_rsbac 1335
++#endif
++
+
++#ifdef __KERNEL__
+
++#ifdef CONFIG_RSBAC
++#define NR_syscalls 312
++#else
+ #define NR_syscalls 311 /* length of syscall table */
++#endif
+
+ /*
+ * The following defines stop scripts/checksyscalls.sh from complaining about
+diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
+index a48bd9a..ac016ee 100644
+--- a/arch/ia64/kernel/asm-offsets.c
++++ b/arch/ia64/kernel/asm-offsets.c
+@@ -205,6 +205,9 @@ void foo(void)
+ /* for assembly files which can't include sched.h: */
+ DEFINE(IA64_CLONE_VFORK, CLONE_VFORK);
+ DEFINE(IA64_CLONE_VM, CLONE_VM);
++#ifdef CONFIG_RSBAC
++ DEFINE(IA64_CLONE_KTHREAD, CLONE_KTHREAD);
++#endif
+
+ BLANK();
+ DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET,
+diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
+index 1ccbe12..24e8de1 100644
+--- a/arch/ia64/kernel/entry.S
++++ b/arch/ia64/kernel/entry.S
+@@ -1758,6 +1758,9 @@ sys_call_table:
+ data8 sys_timerfd_create // 1310
+ data8 sys_timerfd_settime
+ data8 sys_timerfd_gettime
++#ifdef CONFIG_RSBAC
++ data8 sys_rsbac
++#endif
+ data8 sys_signalfd4
+ data8 sys_eventfd2
+ data8 sys_epoll_create1 // 1315
+diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
+index ce74e14..70577fb 100644
+--- a/arch/ia64/kernel/process.c
++++ b/arch/ia64/kernel/process.c
+@@ -656,6 +656,10 @@ kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
+ struct pt_regs pt;
+ } regs;
+
++#ifdef CONFIG_RSBAC
++ int rsbac_retval;
++#endif
++
+ memset(&regs, 0, sizeof(regs));
+ regs.pt.cr_iip = helper_fptr[0]; /* set entry point (IP) */
+ regs.pt.r1 = helper_fptr[1]; /* set GP */
+@@ -667,7 +671,12 @@ kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
+ regs.sw.ar_fpsr = regs.pt.ar_fpsr = ia64_getreg(_IA64_REG_AR_FPSR);
+ regs.sw.ar_bspstore = (unsigned long) current + IA64_RBS_OFFSET;
+ regs.sw.pr = (1 << PRED_KERNEL_STACK);
++#ifdef CONFIG_RSBAC
++ rsbac_retval = do_fork(flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD, 0, &regs.pt, 0, NULL, NULL);
++ return rsbac_retval;
++#else
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs.pt, 0, NULL, NULL);
++#endif
+ }
+ EXPORT_SYMBOL(kernel_thread);
+
+diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
+index 3a4a32b..0b59287 100644
+--- a/arch/m32r/kernel/process.c
++++ b/arch/m32r/kernel/process.c
+@@ -192,7 +192,11 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+ regs.psw = M32R_PSW_BIE;
+
+ /* Ok, create the new process. */
++#ifdef CONFIG_RSBAC
++ return do_fork(flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD, 0, &regs, 0, NULL,
++#else
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
++#endif
+ NULL);
+ }
+
+diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
+index 4c03361..c803612 100644
+--- a/arch/m32r/kernel/ptrace.c
++++ b/arch/m32r/kernel/ptrace.c
+@@ -32,6 +32,8 @@
+ #include <asm/processor.h>
+ #include <asm/mmu_context.h>
+
++#include <rsbac/hooks.h>
++
+ /*
+ * This routine will get a word off of the process kernel stack.
+ */
+@@ -625,6 +627,25 @@ arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+ {
+ int ret;
++
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.process = task_pid(child);
++ rsbac_attribute_value.trace_request = request;
++ if (!rsbac_adf_request(R_TRACE,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_trace_request,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ unsigned long __user *datap = (unsigned long __user *) data;
+
+ switch (request) {
+diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
+index ea0b502..27843be 100644
+--- a/arch/m68k/include/asm/unistd.h
++++ b/arch/m68k/include/asm/unistd.h
+@@ -353,9 +353,17 @@
+ #define __NR_process_vm_readv 345
+ #define __NR_process_vm_writev 346
+
++#ifdef CONFIG_RSBAC
++#define __NR_rsbac 347
++#endif
++
+ #ifdef __KERNEL__
+
++#ifdef CONFIG_RSBAC
++#define NR_syscalls 348
++#else
+ #define NR_syscalls 347
++#endif
+
+ #define __ARCH_WANT_IPC_PARSE_VERSION
+ #define __ARCH_WANT_OLD_READDIR
+diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
+index b8daf64..16b00e7 100644
+--- a/arch/m68k/kernel/entry.S
++++ b/arch/m68k/kernel/entry.S
+@@ -3,3 +3,10 @@
+ #else
+ #include "entry_no.S"
+ #endif
++#ifdef CONFIG_RSBAC
++ /* we use 400, until sys_security gets defined here */
++ .rept 399-343
++ .long sys_ni_syscall
++ .endr
++ .long sys_rsbac
++#endif
+diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
+index c488e3c..c50c670 100644
+--- a/arch/m68k/kernel/process.c
++++ b/arch/m68k/kernel/process.c
+@@ -133,7 +133,11 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+
+ {
+ register long retval __asm__ ("d0");
++#ifdef CONFIG_RSBAC
++ register long clone_arg __asm__ ("d1") = flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD;
++#else
+ register long clone_arg __asm__ ("d1") = flags | CLONE_VM | CLONE_UNTRACED;
++#endif
+
+ retval = __NR_clone;
+ __asm__ __volatile__
+@@ -161,6 +165,12 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+ }
+
+ set_fs (fs);
++
++#ifdef CONFIG_RSBAC
++ if (pid > 0)
++ rsbac_kthread_notify(find_pid_ns(retval, &init_pid_ns));
++#endif
++
+ return pid;
+ }
+ EXPORT_SYMBOL(kernel_thread);
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index e9a5fd7..e90842c 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -232,6 +232,7 @@ static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *))
+ long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+ {
+ struct pt_regs regs;
++ int retval;
+
+ memset(&regs, 0, sizeof(regs));
+
+@@ -247,7 +248,12 @@ long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+ #endif
+
+ /* Ok, create the new process.. */
+- return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
++#ifdef CONFIG_RSBAC
++ retval = do_fork(flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD, 0, &regs, 0, NULL, NULL);
++#else
++ retval = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
++#endif
++ return retval;
+ }
+
+ /*
+diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
+index a632bc1..3e527cd 100644
+--- a/arch/mips/kernel/scall32-o32.S
++++ b/arch/mips/kernel/scall32-o32.S
+@@ -456,7 +456,11 @@ einval: li v0, -ENOSYS
+ sys sys_madvise 3
+ sys sys_getdents64 3
+ sys sys_fcntl64 3 /* 4220 */
++#ifdef CONFIG_RSBAC
++ sys sys_rsbac 2 /* Security */
++#else
+ sys sys_ni_syscall 0
++#endif
+ sys sys_gettid 0
+ sys sys_readahead 5
+ sys sys_setxattr 5
+diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
+index 3b5a5e9..45b0b3e 100644
+--- a/arch/mips/kernel/scall64-64.S
++++ b/arch/mips/kernel/scall64-64.S
+@@ -303,7 +303,11 @@ sys_call_table:
+ PTR sys_ni_syscall /* res. for getpmsg */
+ PTR sys_ni_syscall /* 5175 for putpmsg */
+ PTR sys_ni_syscall /* res. for afs_syscall */
++#ifdef CONFIG_RSBAC
++ PTR sys_rsbac /* Security */
++#else
+ PTR sys_ni_syscall /* res. for security */
++#endif
+ PTR sys_gettid
+ PTR sys_readahead
+ PTR sys_setxattr /* 5180 */
+diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
+index 5422855..3fd990a 100644
+--- a/arch/mips/kernel/scall64-o32.S
++++ b/arch/mips/kernel/scall64-o32.S
+@@ -424,7 +424,11 @@ sys_call_table:
+ PTR sys_madvise
+ PTR sys_getdents64
+ PTR compat_sys_fcntl64 /* 4220 */
++#ifdef CONFIG_RSBAC
++ PTR sys_rsbac /* Security */
++#else
+ PTR sys_ni_syscall
++#endif
+ PTR sys_gettid
+ PTR sys32_readahead
+ PTR sys_setxattr
+diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
+index 14707f2..b7eaba1 100644
+--- a/arch/mn10300/kernel/process.c
++++ b/arch/mn10300/kernel/process.c
+@@ -177,8 +177,13 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+ regs.epsw |= EPSW_IE | EPSW_IM_7;
+
+ /* Ok, create the new process.. */
++#ifdef CONFIG_RSBAC
++ return do_fork(flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD, 0, &regs, 0,
++ NULL, NULL);
++#else
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0,
+ NULL, NULL);
++#endif
+ }
+ EXPORT_SYMBOL(kernel_thread);
+
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index d4b94b3..200b255 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -174,7 +174,12 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+ * kernel_thread can become a #define.
+ */
+
++ /* Ok, create the new process.. */
++#ifdef CONFIG_RSBAC
++ return __kernel_thread(fn, arg, flags | CLONE_KTHREAD);
++#else
+ return __kernel_thread(fn, arg, flags);
++#endif
+ }
+ EXPORT_SYMBOL(kernel_thread);
+
+diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
+index 3735abd..97347de 100644
+--- a/arch/parisc/kernel/syscall_table.S
++++ b/arch/parisc/kernel/syscall_table.S
+@@ -407,6 +407,9 @@
+ ENTRY_SAME(timerfd_create)
+ ENTRY_COMP(timerfd_settime)
+ ENTRY_COMP(timerfd_gettime)
++#ifdef CONFIG_RSBAC
++ ENTRY_SAME(rsbac)
++#endif
+ ENTRY_COMP(signalfd4)
+ ENTRY_SAME(eventfd2) /* 310 */
+ ENTRY_SAME(epoll_create1)
+diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
+index 559ae1e..46d9105 100644
+--- a/arch/powerpc/include/asm/systbl.h
++++ b/arch/powerpc/include/asm/systbl.h
+@@ -227,7 +227,11 @@ SYSCALL_SPU(fremovexattr)
+ COMPAT_SYS_SPU(futex)
+ COMPAT_SYS_SPU(sched_setaffinity)
+ COMPAT_SYS_SPU(sched_getaffinity)
++#ifdef CONFIG_RSBAC
++SYSCALL(rsbac)
++#else
+ SYSCALL(ni_syscall)
++#endif
+ SYSCALL(ni_syscall)
+ SYS32ONLY(sendfile64)
+ COMPAT_SYS_SPU(io_setup)
+diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
+index d3d1b5e..70c3f22 100644
+--- a/arch/powerpc/include/asm/unistd.h
++++ b/arch/powerpc/include/asm/unistd.h
+@@ -238,6 +238,10 @@
+ #define __NR_futex 221
+ #define __NR_sched_setaffinity 222
+ #define __NR_sched_getaffinity 223
++/* RSBAC - we use 224, the old sys_security */
++#ifdef CONFIG_RSBAC
++#define __NR_rsbac 224
++#endif
+ /* 224 currently unused */
+ #define __NR_tuxcall 225
+ #ifndef __powerpc64__
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index 34b8afe9..8e2fa2d 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -315,6 +315,9 @@ int main(void)
+ #endif
+ DEFINE(CLONE_VM, CLONE_VM);
+ DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
++#ifdef CONFIG_RSBAC
++ DEFINE(CLONE_KTHREAD, CLONE_KTHREAD);
++#endif
+
+ #ifndef CONFIG_PPC64
+ DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
+diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
+index 7cd07b4..ff5bc66 100644
+--- a/arch/powerpc/kernel/misc_32.S
++++ b/arch/powerpc/kernel/misc_32.S
+@@ -674,7 +674,11 @@ _GLOBAL(kernel_thread)
+ mr r30,r3 /* function */
+ mr r31,r4 /* argument */
+ ori r3,r5,CLONE_VM /* flags */
++#ifdef CONFIG_RSBAC
++ oris r3,r3,(CLONE_UNTRACED|CLONE_KTHREAD)>>16
++#else
+ oris r3,r3,CLONE_UNTRACED>>16
++#endif
+ li r4,0 /* new sp (unused) */
+ li r0,__NR_clone
+ sc
+diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
+index 616921e..9f8aeb4 100644
+--- a/arch/powerpc/kernel/misc_64.S
++++ b/arch/powerpc/kernel/misc_64.S
+@@ -422,7 +422,11 @@ _GLOBAL(kernel_thread)
+ mr r29,r3
+ mr r30,r4
+ ori r3,r5,CLONE_VM /* flags */
++#ifdef CONFIG_RSBAC
++ oris r3,r3,(CLONE_UNTRACED|CLONE_KTHREAD)>>16
++#else
+ oris r3,r3,(CLONE_UNTRACED>>16)
++#endif
+ li r4,0 /* new sp (unused) */
+ li r0,__NR_clone
+ sc
+diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
+index 60055ce..5a8886e 100644
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -112,6 +112,10 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+ {
+ struct pt_regs regs;
+
++#ifdef CONFIG_RSBAC
++ int rsbac_retval;
++#endif
++
+ memset(&regs, 0, sizeof(regs));
+ regs.psw.mask = psw_kernel_bits |
+ PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
+@@ -122,8 +126,13 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+ regs.orig_gpr2 = -1;
+
+ /* Ok, create the new process.. */
++#ifdef CONFIG_RSBAC
++ rsbac_retval = do_fork(flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD, 0, &regs, 0, NULL, NULL);
++ return rsbac_retval;
++#else
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
+ 0, &regs, 0, NULL, NULL);
++#endif
+ }
+ EXPORT_SYMBOL(kernel_thread);
+
+diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
+index 02f300f..8517494 100644
+--- a/arch/s390/kernel/ptrace.c
++++ b/arch/s390/kernel/ptrace.c
+@@ -35,6 +35,8 @@
+ #include "compat_ptrace.h"
+ #endif
+
++#include <rsbac/hooks.h>
++
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/syscalls.h>
+
+diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
+index 72fd1e0..83acdbd 100644
+--- a/arch/sh/include/asm/unistd_32.h
++++ b/arch/sh/include/asm/unistd_32.h
+@@ -232,7 +232,11 @@
+ #define __NR_getdents64 220
+ #define __NR_fcntl64 221
+ /* 222 is reserved for tux */
++#ifdef CONFIG_RSBAC
++#define __NR_rsbac 223
++#else
+ /* 223 is unused */
++#endif
+ #define __NR_gettid 224
+ #define __NR_readahead 225
+ #define __NR_setxattr 226
+diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
+index a28edc3..53fa3ee 100644
+--- a/arch/sh/include/asm/unistd_64.h
++++ b/arch/sh/include/asm/unistd_64.h
+@@ -270,7 +270,11 @@
+ #define __NR_getdents64 248
+ #define __NR_fcntl64 249
+ /* 250 is reserved for tux */
++#ifdef CONFIG_RSBAC
++#define __NR_rsbac 251
++#else
+ /* 251 is unused */
++#endif
+ #define __NR_gettid 252
+ #define __NR_readahead 253
+ #define __NR_setxattr 254
+diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
+index 94273aa..9a0b076 100644
+--- a/arch/sh/kernel/process_32.c
++++ b/arch/sh/kernel/process_32.c
+@@ -92,10 +92,16 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+ #endif
+
+ /* Ok, create the new process.. */
++#ifdef CONFIG_RSBAC
++ pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD,
++ 0, &regs, 0, NULL, NULL);
++#else
+ pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
+ &regs, 0, NULL, NULL);
++#endif
+
+ return pid;
+ }
+ EXPORT_SYMBOL(kernel_thread);
+
+diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
+index 4264583..0021fb2 100644
+--- a/arch/sh/kernel/process_64.c
++++ b/arch/sh/kernel/process_64.c
+@@ -311,8 +311,12 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+ regs.sr = (1 << 30);
+
+ /* Ok, create the new process.. */
++#ifdef CONFIG_RSBAC
++ return do_fork(flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD, 0, &regs, 0, NULL, NULL);
++#else
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
+ &regs, 0, NULL, NULL);
++#endif
+ }
+ EXPORT_SYMBOL(kernel_thread);
+
+diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
+index 4b68f0f..74d9a75 100644
+--- a/arch/sh/kernel/syscalls_32.S
++++ b/arch/sh/kernel/syscalls_32.S
+@@ -239,7 +239,11 @@ ENTRY(sys_call_table)
+ .long sys_getdents64 /* 220 */
+ .long sys_fcntl64
+ .long sys_ni_syscall /* reserved for TUX */
++#ifdef CONFIG_RSBAC
++ .long sys_rsbac
++#else
+ .long sys_ni_syscall /* Reserved for Security */
++#endif
+ .long sys_gettid
+ .long sys_readahead /* 225 */
+ .long sys_setxattr
+diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
+index 0956345..0c11a59 100644
+--- a/arch/sh/kernel/syscalls_64.S
++++ b/arch/sh/kernel/syscalls_64.S
+@@ -276,7 +276,11 @@ sys_call_table:
+ .long sys_getdents64
+ .long sys_fcntl64
+ .long sys_ni_syscall /* 250 reserved for TUX */
++#ifdef CONFIG_RSBAC
++ .long sys_rsbac
++#else
+ .long sys_ni_syscall /* Reserved for Security */
++#endif
+ .long sys_gettid
+ .long sys_readahead
+ .long sys_setxattr
+diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
+index c7cb0af..0cf7434 100644
+--- a/arch/sparc/include/asm/unistd.h
++++ b/arch/sparc/include/asm/unistd.h
+@@ -409,7 +409,12 @@
+ #define __NR_process_vm_readv 338
+ #define __NR_process_vm_writev 339
+
++#ifdef CONFIG_RSBAC
++#define __NR_rsbac 340
++#define NR_syscalls 341
++#else
+ #define NR_syscalls 340
++#endif
+
+ #ifdef __32bit_syscall_numbers__
+ /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
+diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
+index efa0754..5523e08 100644
+--- a/arch/sparc/kernel/process_32.c
++++ b/arch/sparc/kernel/process_32.c
+@@ -674,9 +674,14 @@ pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+ /* Notreached by child. */
+ "1: mov %%o0, %0\n\t" :
+ "=r" (retval) :
++#ifdef CONFIG_RSBAC
++ "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD),
++#else
+ "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
++#endif
+ "i" (__NR_exit), "r" (fn), "r" (arg) :
+ "g1", "g2", "g3", "o0", "o1", "memory", "cc");
++
+ return retval;
+ }
+ EXPORT_SYMBOL(kernel_thread);
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index aff0c72..a632ebd 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -642,7 +642,11 @@ pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+ /* Notreached by child. */
+ "1:" :
+ "=r" (retval) :
++#ifdef CONFIG_RSBAC
++ "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD),
++#else
+ "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
++#endif
+ "i" (__NR_exit), "r" (fn), "r" (arg) :
+ "g1", "g2", "g3", "o0", "o1", "memory", "cc");
+ return retval;
+diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
+index 896ba7c..91e0dcd 100644
+--- a/arch/sparc/kernel/ptrace_32.c
++++ b/arch/sparc/kernel/ptrace_32.c
+@@ -26,6 +26,8 @@
+ #include <asm/uaccess.h>
+ #include <asm/cacheflush.h>
+
++#include <rsbac/hooks.h>
++
+ /* #define ALLOW_INIT_TRACING */
+
+ /*
+@@ -213,6 +215,28 @@ static int fpregs32_get(struct task_struct *target,
+ const unsigned long *fpregs = target->thread.float_regs;
+ int ret = 0;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rcu_read_lock();
++ rsbac_target_id.process = find_pid_ns(pid, &init_pid_ns);
++ rsbac_attribute_value.trace_request = request;
++ if (!rsbac_adf_request(R_TRACE,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_trace_request,
++ rsbac_attribute_value))
++ {
++ rcu_read_unlock();
++ return -EPERM;
++ }
++ rcu_read_unlock();
++#endif
++
++
+ #if 0
+ if (target == current)
+ save_and_clear_fpu();
+diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
+index 6f97c07..1085e3f 100644
+--- a/arch/sparc/kernel/ptrace_64.c
++++ b/arch/sparc/kernel/ptrace_64.c
+@@ -37,6 +37,8 @@
+ #include <asm/cpudata.h>
+ #include <asm/cacheflush.h>
+
++#include <rsbac/hooks.h>
++
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/syscalls.h>
+
+@@ -869,9 +871,32 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ unsigned long data = cdata;
+ int ret;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ pregs = (struct pt_regs32 __user *) addr;
+ fps = (struct compat_fps __user *) addr;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rcu_read_lock();
++ rsbac_target_id.process = task_pid(child);
++ rsbac_attribute_value.trace_request = request;
++ if (!rsbac_adf_request(R_TRACE,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_trace_request,
++ rsbac_attribute_value))
++ {
++ rcu_read_unlock();
++ return -EPERM;
++ }
++ rcu_read_unlock();
++#endif
++
+ switch (request) {
+ case PTRACE_PEEKUSR:
+ ret = (addr != 0) ? -EIO : 0;
+diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
+index 63402f9..ede75f0 100644
+--- a/arch/sparc/kernel/systbls_32.S
++++ b/arch/sparc/kernel/systbls_32.S
+@@ -48,7 +48,11 @@ sys_call_table:
+ /*145*/ .long sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
+ /*150*/ .long sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
+ /*155*/ .long sys_fcntl64, sys_inotify_rm_watch, sys_statfs, sys_fstatfs, sys_oldumount
++#ifdef CONFIG_RSBAC /* we use 164, which seems to be unused */
++/*160*/ .long sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_rsbac
++#else
+ /*160*/ .long sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall
++#endif
+ /*165*/ .long sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr
+ /*170*/ .long sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
+ /*175*/ .long sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
+diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
+index 3a58e0d..4798576 100644
+--- a/arch/sparc/kernel/systbls_64.S
++++ b/arch/sparc/kernel/systbls_64.S
+@@ -50,7 +50,11 @@ sys_call_table32:
+ .word compat_sys_setrlimit, sys_pivot_root, sys32_prctl, sys_pciconfig_read, sys_pciconfig_write
+ /*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
+ .word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
++#ifdef CONFIG_RSBAC /* we use 164, which seems to be unused */
++/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_rsbac
++#else
+ /*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall
++#endif
+ .word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys32_setxattr
+ /*170*/ .word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
+ .word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+index 2b73ded..72cc26e 100644
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -74,7 +74,11 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+
+ current->thread.request.u.thread.proc = fn;
+ current->thread.request.u.thread.arg = arg;
++#ifdef CONFIG_RSBAC
++ pid = do_fork(CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD | flags, 0,
++#else
+ pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
++#endif
+ &current->thread.regs, 0, NULL, NULL);
+ return pid;
+ }
+diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
+index 8c96897..524e039 100644
+--- a/arch/x86/kernel/ioport.c
++++ b/arch/x86/kernel/ioport.c
+@@ -17,6 +17,8 @@
+ #include <linux/bitmap.h>
+ #include <asm/syscalls.h>
+
++#include <rsbac/hooks.h>
++
+ /*
+ * this changes the io permissions bitmap in the current task.
+ */
+@@ -26,11 +28,31 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+ struct tss_struct *tss;
+ unsigned int i, max_long, bytes, bytes_updated;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
+ return -EINVAL;
+ if (turn_on && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_ioports;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_PERMISSIONS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ /*
+ * If it's the first ioperm() call in this thread's lifetime, set the
+ * IO bitmap up. ioperm() is much less timing critical than clone(),
+@@ -98,6 +120,11 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
+ unsigned int old = (regs->flags >> 12) & 3;
+ struct thread_struct *t = &current->thread;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (level > 3)
+ return -EINVAL;
+ /* Trying to gain more privileges? */
+@@ -105,6 +132,22 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ }
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_ioports;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_PERMISSIONS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
+ t->iopl = level << 12;
+ set_iopl_mask(t->iopl);
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 1d92a5a..e996fb3 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -297,6 +297,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+ {
+ struct pt_regs regs;
+
++#ifdef CONFIG_RSBAC
++ long rsbac_retval;
++#endif
++
+ memset(&regs, 0, sizeof(regs));
+
+ regs.si = (unsigned long) fn;
+@@ -317,7 +321,12 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+ regs.flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
+
+ /* Ok, create the new process.. */
++#ifdef CONFIG_RSBAC
++ rsbac_retval = do_fork(flags | CLONE_VM | CLONE_UNTRACED | CLONE_KTHREAD, 0, &regs, 0, NULL, NULL);
++ return rsbac_retval;
++#else
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
++#endif
+ }
+ EXPORT_SYMBOL(kernel_thread);
+
+diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
+index 29f9f05..5f172fd 100644
+--- a/arch/x86/syscalls/syscall_32.tbl
++++ b/arch/x86/syscalls/syscall_32.tbl
+@@ -229,7 +229,7 @@
+ 220 i386 getdents64 sys_getdents64 compat_sys_getdents64
+ 221 i386 fcntl64 sys_fcntl64 compat_sys_fcntl64
+ # 222 is unused
+-# 223 is unused
++223 i386 rsbac sys_rsbac
+ 224 i386 gettid sys_gettid
+ 225 i386 readahead sys_readahead sys32_readahead
+ 226 i386 setxattr sys_setxattr
+diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
+index dd29a9e..fdf3b05 100644
+--- a/arch/x86/syscalls/syscall_64.tbl
++++ b/arch/x86/syscalls/syscall_64.tbl
+@@ -318,6 +318,7 @@
+ 309 common getcpu sys_getcpu
+ 310 64 process_vm_readv sys_process_vm_readv
+ 311 64 process_vm_writev sys_process_vm_writev
++312 common rsbac sys_rsbac
+ #
+ # x32-specific system call numbers start at 512 to avoid cache impact
+ # for native 64-bit operation.
+diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
+index 6223f33..2f27c3d 100644
+--- a/arch/xtensa/kernel/entry.S
++++ b/arch/xtensa/kernel/entry.S
+@@ -1845,7 +1845,11 @@ ENTRY(kernel_thread)
+ mov a5, a2 # preserve fn over syscall
+ mov a7, a3 # preserve args over syscall
+
++#ifdef CONFIG_RSBAC
++ movi a3, _CLONE_VM | _CLONE_UNTRACED | _CLONE_KTHREAD
++#else
+ movi a3, _CLONE_VM | _CLONE_UNTRACED
++#endif
+ movi a2, __NR_clone
+ or a6, a4, a3 # arg0: flags
+ mov a3, a1 # arg1: sp
+diff --git a/block/ioctl.c b/block/ioctl.c
+index ba15b2d..274ab17 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -9,6 +9,12 @@
+ #include <linux/blktrace_api.h>
+ #include <asm/uaccess.h>
+
++#ifdef CONFIG_RSBAC
++#include <rsbac/adf.h>
++#include <linux/hdreg.h>
++#endif
++
++
+ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
+ {
+ struct block_device *bdevp;
+@@ -210,6 +216,61 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
+ loff_t size;
+ int ret, n;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_adf_request_t rsbac_request;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ rsbac_pr_debug(aef, "blkdev_ioctl(): calling ADF\n");
++
++ /* values taken from include/linux/fs.h and hdreg.h */
++ switch (cmd) {
++ case BLKGETSIZE: /* Return device size */
++ case BLKGETSIZE64:
++ case BLKROGET:
++ case BLKRAGET:
++ case BLKFRAGET:
++ case BLKSECTGET:
++ case BLKSSZGET:
++ case BLKBSZGET:
++ case HDIO_GETGEO:
++ case HDIO_OBSOLETE_IDENTITY:
++ case HDIO_GET_UNMASKINTR:
++ case HDIO_GET_IDENTITY:
++ case HDIO_GET_NICE:
++ case HDIO_GET_BUSSTATE:
++ case HDIO_GET_QDMA:
++ case HDIO_GET_MULTCOUNT:
++ case HDIO_GET_KEEPSETTINGS:
++ case HDIO_GET_32BIT:
++ case HDIO_GET_NOWERR:
++ case HDIO_GET_DMA:
++ case HDIO_GET_WCACHE:
++ case HDIO_GET_ACOUSTIC:
++ case HDIO_GET_ADDRESS:
++ rsbac_request = R_GET_STATUS_DATA;
++ break;
++
++ default:
++ rsbac_request = R_MODIFY_SYSTEM_DATA;
++ }
++
++ rsbac_target_id.dev.type = D_block;
++ rsbac_target_id.dev.major = RSBAC_MAJOR(bdev->bd_dev);
++ rsbac_target_id.dev.minor = RSBAC_MINOR(bdev->bd_dev);
++
++ rsbac_attribute_value.ioctl_cmd = cmd;
++ if (!rsbac_adf_request(rsbac_request,
++ task_pid(current),
++ T_DEV,
++ rsbac_target_id,
++ A_ioctl_cmd,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ switch(cmd) {
+ case BLKFLSBUF:
+ if (!capable(CAP_SYS_ADMIN))
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index bbca966..7a751ca 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -79,6 +79,8 @@
+
+ #include <asm/uaccess.h>
+
++#include <rsbac/hooks.h>
++
+ static DEFINE_IDR(loop_index_idr);
+ static DEFINE_MUTEX(loop_index_mutex);
+
+@@ -815,6 +817,12 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
+ int error;
+ loff_t size;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ /* This is safe, since we have a reference from open(). */
+ __module_get(THIS_MODULE);
+
+@@ -864,6 +872,46 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
+
+ error = 0;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[lo_ioctl()]: calling ADF for FILE/DEV\n");
++ if (S_ISREG(inode->i_mode)) {
++ rsbac_target = T_FILE;
++ rsbac_target_id.dir.device = file->f_dentry->d_sb->s_dev;
++ rsbac_target_id.dir.inode = inode->i_ino;
++ rsbac_target_id.dir.dentry_p = file->f_dentry;
++ }
++ else { /* must be block */
++ rsbac_target = T_DEV;
++ rsbac_target_id.dev.type = D_block;
++ rsbac_target_id.dev.major = RSBAC_MAJOR(inode->i_rdev);
++ rsbac_target_id.dev.minor = RSBAC_MINOR(inode->i_rdev);
++ }
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MOUNT,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ error = -EPERM;
++ goto out_putf;
++ }
++ rsbac_pr_debug(aef, "[lo_ioctl()]: calling ADF for DEV\n");
++ rsbac_target_id.dev.type = D_block;
++ rsbac_target_id.dev.major = RSBAC_MAJOR(bdev->bd_dev);
++ rsbac_target_id.dev.minor = RSBAC_MINOR(bdev->bd_dev);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MOUNT,
++ task_pid(current),
++ T_DEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ error = -EPERM;
++ goto out_putf;
++ }
++#endif
++
+ set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
+
+ lo->lo_blocksize = lo_blocksize;
+@@ -973,6 +1021,12 @@ static int loop_clr_fd(struct loop_device *lo)
+ gfp_t gfp = lo->old_gfp_mask;
+ struct block_device *bdev = lo->lo_device;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (lo->lo_state != Lo_bound)
+ return -ENXIO;
+
+@@ -982,6 +1036,44 @@ static int loop_clr_fd(struct loop_device *lo)
+ if (filp == NULL)
+ return -EINVAL;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[lo_ioctl()]: calling ADF for FILE/DEV\n");
++ if (S_ISREG(filp->f_dentry->d_inode->i_mode)) {
++ rsbac_target = T_FILE;
++ rsbac_target_id.dir.device = filp->f_dentry->d_sb->s_dev;
++ rsbac_target_id.dir.inode = filp->f_dentry->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = filp->f_dentry;
++ }
++ else { /* must be block dev */
++ rsbac_target = T_DEV;
++ rsbac_target_id.dev.type = D_block;
++ rsbac_target_id.dev.major = RSBAC_MAJOR(filp->f_dentry->d_inode->i_rdev);
++ rsbac_target_id.dev.minor = RSBAC_MINOR(filp->f_dentry->d_inode->i_rdev);
++ }
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_UMOUNT,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++ rsbac_pr_debug(aef, "[lo_ioctl()]: calling ADF for DEV\n");
++ rsbac_target_id.dev.type = D_block;
++ rsbac_target_id.dev.major = LOOP_MAJOR;
++ rsbac_target_id.dev.minor = lo->lo_number;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_UMOUNT,
++ task_pid(current),
++ T_DEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ spin_lock_irq(&lo->lo_lock);
+ lo->lo_state = Lo_rundown;
+ spin_unlock_irq(&lo->lo_lock);
+diff --git a/drivers/char/mem.c b/drivers/char/mem.c
+index d6e9d08..2f3e719 100644
+--- a/drivers/char/mem.c
++++ b/drivers/char/mem.c
+@@ -35,6 +35,8 @@
+ # include <linux/efi.h>
+ #endif
+
++#include <rsbac/hooks.h>
++
+ static inline unsigned long size_inside_page(unsigned long start,
+ unsigned long size)
+ {
+@@ -98,6 +100,11 @@ static ssize_t read_mem(struct file *file, char __user *buf,
+ ssize_t read, sz;
+ char *ptr;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!valid_phys_addr_range(p, count))
+ return -EFAULT;
+ read = 0;
+@@ -124,6 +131,25 @@ static ssize_t read_mem(struct file *file, char __user *buf,
+ if (!range_is_allowed(p >> PAGE_SHIFT, count))
+ return -EPERM;
+
++#ifdef CONFIG_RSBAC
++ rsbac_attribute_value.pagenr = p >> PAGE_SHIFT;
++ if (rsbac_is_videomem(rsbac_attribute_value.pagenr, count))
++ rsbac_target_id.scd = ST_videomem;
++ else
++ rsbac_target_id.scd = ST_kmem;
++ rsbac_pr_debug(aef, "calling ADF\n");
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_pagenr,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_INFO "read_mem(): RSBAC denied read access to kernel mem page %u, size %u\n",
++ rsbac_attribute_value.pagenr, count);
++ return -EPERM;
++ }
++#endif
++
+ /*
+ * On ia64 if a page has been mapped somewhere as uncached, then
+ * it must also be accessed uncached by the kernel or data
+@@ -156,6 +182,11 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
+ unsigned long copied;
+ void *ptr;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!valid_phys_addr_range(p, count))
+ return -EFAULT;
+
+@@ -179,6 +210,25 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
+ if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+ return -EPERM;
+
++#ifdef CONFIG_RSBAC
++ rsbac_attribute_value.pagenr = p >> PAGE_SHIFT;
++ if (rsbac_is_videomem(rsbac_attribute_value.pagenr, sz))
++ rsbac_target_id.scd = ST_videomem;
++ else
++ rsbac_target_id.scd = ST_kmem;
++ rsbac_pr_debug(aef, "calling ADF\n");
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_pagenr,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_INFO "write_mem(): RSBAC denied write access to kernel mem page %u, size %u\n",
++ rsbac_attribute_value.pagenr, sz);
++ return -EPERM;
++ }
++#endif
++
+ /*
+ * On ia64 if a page has been mapped somewhere as uncached, then
+ * it must also be accessed uncached by the kernel or data
+@@ -301,6 +351,11 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
+ {
+ size_t size = vma->vm_end - vma->vm_start;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
+ return -EINVAL;
+
+@@ -314,6 +369,25 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
+ &vma->vm_page_prot))
+ return -EINVAL;
+
++#ifdef CONFIG_RSBAC
++ rsbac_attribute_value.pagenr = vma->vm_pgoff;
++ if (rsbac_is_videomem(rsbac_attribute_value.pagenr, size))
++ rsbac_target_id.scd = ST_videomem;
++ else
++ rsbac_target_id.scd = ST_kmem;
++ rsbac_pr_debug(aef, "calling ADF\n");
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_pagenr,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_INFO "mmap_mem(): RSBAC denied mmap access to kernel mem page %u, size %u\n",
++ rsbac_attribute_value.pagenr, size);
++ return -EPERM;
++ }
++#endif
++
+ vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
+ size,
+ vma->vm_page_prot);
+diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
+index 4d19eb9..60918d4 100644
+--- a/drivers/ide/ide-ioctls.c
++++ b/drivers/ide/ide-ioctls.c
+@@ -6,6 +6,7 @@
+ #include <linux/hdreg.h>
+ #include <linux/ide.h>
+ #include <linux/slab.h>
++#include <rsbac/hooks.h>
+
+ static const struct ide_ioctl_devset ide_ioctl_settings[] = {
+ { HDIO_GET_32BIT, HDIO_SET_32BIT, &ide_devset_io_32bit },
+@@ -236,6 +237,58 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
+ {
+ int err;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_adf_request_t rsbac_request;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ /* values taken from include/linux/fs.h and hdreg.h */
++ switch (cmd) {
++ case BLKGETSIZE: /* Return device size */
++ case BLKGETSIZE64:
++ case BLKROGET:
++ case BLKRAGET:
++ case BLKFRAGET:
++ case BLKSECTGET:
++ case BLKSSZGET:
++ case BLKBSZGET:
++ case HDIO_GETGEO:
++ case HDIO_OBSOLETE_IDENTITY:
++ case HDIO_GET_UNMASKINTR:
++ case HDIO_GET_IDENTITY:
++ case HDIO_GET_NICE:
++ case HDIO_GET_BUSSTATE:
++ case HDIO_GET_QDMA:
++ case HDIO_GET_MULTCOUNT:
++ case HDIO_GET_KEEPSETTINGS:
++ case HDIO_GET_32BIT:
++ case HDIO_GET_NOWERR:
++ case HDIO_GET_DMA:
++ case HDIO_GET_WCACHE:
++ case HDIO_GET_ACOUSTIC:
++ case HDIO_GET_ADDRESS:
++ rsbac_request = R_GET_STATUS_DATA;
++ break;
++
++ default:
++ rsbac_request = R_MODIFY_SYSTEM_DATA;
++ }
++ rsbac_target_id.dev.type = D_block;
++ rsbac_target_id.dev.major = RSBAC_MAJOR(bdev->bd_dev);
++ rsbac_target_id.dev.minor = RSBAC_MINOR(bdev->bd_dev);
++ rsbac_attribute_value.ioctl_cmd = cmd;
++ if (!rsbac_adf_request(rsbac_request,
++ task_pid(current),
++ T_DEV,
++ rsbac_target_id,
++ A_ioctl_cmd,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_ioctl_settings);
+ if (err != -EOPNOTSUPP)
+ return err;
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index 05728894..6f0da92 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -45,6 +45,11 @@
+ #include <asm/ptrace.h>
+ #include <asm/irq_regs.h>
+
++#ifdef CONFIG_RSBAC
++#include <rsbac/types.h>
++#include <rsbac/debug.h>
++#endif
++
+ /* Whether we react on sysrq keys or just ignore them */
+ static int __read_mostly sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
+ static bool __read_mostly sysrq_always_enabled;
+@@ -184,6 +189,24 @@ static struct sysrq_key_op sysrq_mountro_op = {
+ .enable_mask = SYSRQ_ENABLE_REMOUNT,
+ };
+
++#ifdef CONFIG_RSBAC_SOFTMODE_SYSRQ
++static void sysrq_handle_rsbac_softmode(int key) {
++ if (rsbac_softmode) {
++ rsbac_printk(KERN_WARNING "Soft mode disabled via SysRq!\n");
++ rsbac_softmode = 0;
++ }
++ else {
++ rsbac_printk(KERN_WARNING "Soft mode enabled via SysRq!\n");
++ rsbac_softmode = 1;
++ }
++}
++static struct sysrq_key_op sysrq_rsbac_softmode_op = {
++ handler: sysrq_handle_rsbac_softmode,
++ help_msg: "rsbac_toggle_softmode_X",
++ action_msg: "RSBAC toggle softmode\n",
++};
++#endif
++
+ #ifdef CONFIG_LOCKDEP
+ static void sysrq_handle_showlocks(int key)
+ {
+@@ -452,7 +475,11 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
+ NULL, /* v */
+ &sysrq_showstate_blocked_op, /* w */
+ /* x: May be registered on ppc/powerpc for xmon */
++#ifdef CONFIG_RSBAC_SOFTMODE_SYSRQ
++ &sysrq_rsbac_softmode_op, /* x */
++#else
+ NULL, /* x */
++#endif
+ /* y: May be registered on sparc64 for global register dump */
+ NULL, /* y */
+ &sysrq_ftrace_dump_op, /* z */
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index d939bd7..c893e12 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -105,6 +105,8 @@
+ #include <linux/kmod.h>
+ #include <linux/nsproxy.h>
+
++#include <rsbac/hooks.h>
++
+ #undef TTY_DEBUG_HANGUP
+
+ #define TTY_PARANOIA_CHECK 1
+@@ -2118,10 +2120,33 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
+ char ch, mbz = 0;
+ struct tty_ldisc *ld;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (get_user(ch, p))
+ return -EFAULT;
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.dev.type = D_char;
++ rsbac_target_id.dev.major = tty->driver->major;
++ rsbac_target_id.dev.minor = tty->driver->minor_start + tty->index;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_SEND,
++ task_pid(current),
++ T_DEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ tty_audit_tiocsti(tty, ch);
+ ld = tty_ldisc_ref_wait(tty);
+ ld->ops->receive_buf(tty, &ch, &mbz, 1);
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index a1b9a2f..b2b14d8 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -24,6 +24,8 @@
+ #include <asm/io.h>
+ #include <asm/uaccess.h>
+
++#include <rsbac/hooks.h>
++
+ #undef TTY_DEBUG_WAIT_UNTIL_SENT
+
+ #undef DEBUG
+@@ -951,14 +953,57 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
+ int ret = 0;
+ struct ktermios kterm;
+
+- BUG_ON(file == NULL);
++#ifdef CONFIG_RSBAC
++ enum rsbac_adf_request_t rsbac_request;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
+
++ BUG_ON(file == NULL);
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER)
+ real_tty = tty->link;
+ else
+ real_tty = tty;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ switch (cmd) {
++#ifdef TIOCGETP
++ case TIOCGETP:
++#endif
++#ifdef TIOCGETC
++ case TIOCGETC:
++#endif
++#ifdef TIOCGLTC
++ case TIOCGLTC:
++#endif
++ case TCGETS:
++ case TCGETA:
++ case TIOCOUTQ:
++ case TIOCINQ:
++ case TIOCGLCKTRMIOS:
++ case TIOCGSOFTCAR:
++ rsbac_request = R_GET_PERMISSIONS_DATA;
++ break;
++ default:
++ rsbac_request = R_MODIFY_PERMISSIONS_DATA;
++ }
++ rsbac_target_id.dev.type = D_char;
++ rsbac_target_id.dev.major = tty->driver->major;
++ rsbac_target_id.dev.minor = tty->driver->minor_start + tty->index;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(rsbac_request,
++ task_pid(current),
++ T_DEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ switch (cmd) {
+ #ifdef TIOCGETP
+ case TIOCGETP:
+diff --git a/fs/exec.c b/fs/exec.c
+index b1fd202..68fb564 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -66,6 +66,8 @@
+
+ #include <trace/events/sched.h>
+
++#include <rsbac/hooks.h>
++
+ int core_uses_pid;
+ char core_pattern[CORENAME_MAX_SIZE] = "core";
+ unsigned int core_pipe_limit;
+@@ -124,6 +126,11 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
+ .intent = LOOKUP_OPEN
+ };
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (IS_ERR(tmp))
+ goto out;
+
+@@ -137,10 +144,36 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
+ if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
+ goto exit;
+
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_PART
++ if (rsbac_dac_part_disabled(file->f_path.dentry))
++ error = 0;
++ else
++#endif
+ error = -EACCES;
+ if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
+ goto exit;
+
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.file.device = file->f_path.dentry->d_inode->i_sb->s_dev;
++ rsbac_target_id.file.inode = file->f_path.dentry->d_inode->i_ino;
++ rsbac_target_id.file.dentry_p = file->f_path.dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MAP_EXEC,
++ task_pid(current),
++ T_FILE,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ rsbac_pr_debug(aef, "request not granted, my PID: %i\n",
++ task_pid(current));
++ error = -EPERM;
++ goto exit;
++ }
++#endif
++
+ fsnotify_open(file);
+
+ error = -ENOEXEC;
+@@ -163,6 +196,29 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
+ read_unlock(&binfmt_lock);
+ }
+ exit:
++
++ /* RSBAC: notify ADF of mapped segment */
++#ifdef CONFIG_RSBAC
++ if (!error) {
++ union rsbac_target_id_t rsbac_new_target_id;
++
++ rsbac_pr_debug(aef, "calling ADF_set_attr\n");
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_MAP_EXEC,
++ task_pid(current),
++ T_FILE,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_uselib(): rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++
+ fput(file);
+ out:
+ return error;
+@@ -785,6 +841,13 @@ struct file *open_exec(const char *name)
+
+ fsnotify_open(file);
+
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_PART
++ if (rsbac_dac_part_disabled(file->f_path.dentry))
++ err = 0;
++ else
++#endif
++
++
+ err = deny_write_access(file);
+ if (err)
+ goto exit;
+@@ -1466,6 +1529,12 @@ static int do_execve_common(const char *filename,
+ int retval;
+ const struct cred *cred = current_cred();
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ /*
+ * We move the actual failure in case of RLIMIT_NPROC excess from
+ * set*uid() to execve() because too many poorly written programs
+@@ -1524,6 +1593,26 @@ static int do_execve_common(const char *filename,
+ if ((retval = bprm->envc) < 0)
+ goto out;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_execve()]: calling ADF\n");
++ rsbac_target_id.file.device = file->f_dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = file->f_dentry->d_inode->i_ino;
++ rsbac_target_id.file.dentry_p = file->f_dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_EXECUTE,
++ task_pid(current),
++ T_FILE,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ rsbac_pr_debug(aef, "[sys_execve()]: request not granted, my PID: %i\n",
++ task_pid(current));
++ retval = -EPERM;
++ goto out;
++ }
++#endif
++
+ retval = prepare_binprm(bprm);
+ if (retval < 0)
+ goto out;
+@@ -1549,6 +1638,25 @@ static int do_execve_common(const char *filename,
+ current->fs->in_exec = 0;
+ current->in_execve = 0;
+ acct_update_integrals(current);
++/* RSBAC: notify ADF of changed program in this process
++ * Most structures are already filled
++ */
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_execve()]: calling ADF_set_attr\n");
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_EXECUTE,
++ task_pid(current),
++ T_FILE,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ rsbac_printk(KERN_WARNING
++ "do_execve() [sys_execve]: rsbac_adf_set_attr() returned error\n");
++ }
++#endif
+ free_bprm(bprm);
+ if (displaced)
+ put_files_struct(displaced);
+diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
+index 2de655f..120f886 100644
+--- a/fs/ext2/ioctl.c
++++ b/fs/ext2/ioctl.c
+@@ -16,6 +16,10 @@
+ #include <asm/current.h>
+ #include <asm/uaccess.h>
+
++#ifdef CONFIG_RSBAC
++#include <net/sock.h>
++#include <rsbac/hooks.h>
++#endif
+
+ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+@@ -25,6 +29,73 @@ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ unsigned short rsv_window_size;
+ int ret;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_adf_request_t rsbac_request;
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ switch (cmd) {
++ case EXT2_IOC_GETFLAGS:
++ case EXT2_IOC_GETVERSION:
++ rsbac_request = R_GET_PERMISSIONS_DATA;
++ break;
++ case EXT2_IOC_SETFLAGS:
++ case EXT2_IOC_SETVERSION:
++ rsbac_request = R_MODIFY_PERMISSIONS_DATA;
++ break;
++ default:
++ rsbac_request = R_NONE;
++ }
++ if(S_ISSOCK(inode->i_mode)) {
++ if(SOCKET_I(inode)->ops
++ && (SOCKET_I(inode)->ops->family == AF_UNIX)) {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = filp->f_dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = filp->f_dentry;
++ }
++#ifdef CONFIG_RSBAC_NET_OBJ
++ else {
++ rsbac_target = T_NETOBJ;
++ rsbac_target_id.netobj.sock_p
++ = SOCKET_I(inode);
++ rsbac_target_id.netobj.local_addr = NULL;
++ rsbac_target_id.netobj.local_len = 0;
++ rsbac_target_id.netobj.remote_addr = NULL;
++ rsbac_target_id.netobj.remote_len = 0;
++ }
++#endif
++ }
++ else {
++ if (S_ISDIR(inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO(inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK(inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else
++ rsbac_target = T_FILE;
++ rsbac_target_id.file.device = filp->f_dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = inode->i_ino;
++ rsbac_target_id.file.dentry_p = filp->f_dentry;
++ }
++ rsbac_attribute_value.ioctl_cmd = cmd;
++ if( (rsbac_request != R_NONE)
++ && !rsbac_adf_request(rsbac_request,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_ioctl_cmd,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ ext2_debug ("cmd = %u, arg = %lu\n", cmd, arg);
+
+ switch (cmd) {
+diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
+index dffb865..f6c7a70 100644
+--- a/fs/ext2/namei.c
++++ b/fs/ext2/namei.c
+@@ -37,6 +37,8 @@
+ #include "acl.h"
+ #include "xip.h"
+
++#include <rsbac/hooks.h>
++
+ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
+ {
+ int err = ext2_add_link(dentry, inode);
+@@ -274,6 +276,11 @@ static int ext2_unlink(struct inode * dir, struct dentry *dentry)
+ if (err)
+ goto out;
+
++#ifdef CONFIG_RSBAC_SECDEL
++ if (inode->i_nlink == 1)
++ rsbac_sec_del(dentry, TRUE);
++#endif
++
+ inode->i_ctime = dir->i_ctime;
+ inode_dec_link_count(inode);
+ err = 0;
+@@ -334,6 +341,12 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
+ new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page);
+ if (!new_de)
+ goto out_dir;
++
++#ifdef CONFIG_RSBAC_SECDEL
++ if (new_inode->i_nlink == 1)
++ rsbac_sec_del(new_dentry, TRUE);
++#endif
++
+ ext2_set_link(new_dir, new_de, new_page, old_inode, 1);
+ new_inode->i_ctime = CURRENT_TIME_SEC;
+ if (dir_de)
+diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
+index 677a5c2..ddfb96f 100644
+--- a/fs/ext3/ioctl.c
++++ b/fs/ext3/ioctl.c
+@@ -12,6 +12,11 @@
+ #include <asm/uaccess.h>
+ #include "ext3.h"
+
++#ifdef CONFIG_RSBAC
++#include <net/sock.h>
++#include <rsbac/hooks.h>
++#endif
++
+ long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ struct inode *inode = filp->f_dentry->d_inode;
+@@ -19,6 +24,83 @@ long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ unsigned int flags;
+ unsigned short rsv_window_size;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_adf_request_t rsbac_request;
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ switch (cmd) {
++#ifdef CONFIG_JBD_DEBUG
++ case EXT3_IOC_WAIT_FOR_READONLY:
++#endif
++ case EXT3_IOC_GETFLAGS:
++ case EXT3_IOC_GETVERSION:
++ case EXT3_IOC_GETVERSION_OLD:
++ case EXT3_IOC_GETRSVSZ:
++ rsbac_request = R_GET_PERMISSIONS_DATA;
++ break;
++ case EXT3_IOC_SETFLAGS:
++ case EXT3_IOC_SETVERSION:
++ case EXT3_IOC_SETVERSION_OLD:
++ case EXT3_IOC_SETRSVSZ:
++ case EXT3_IOC_GROUP_EXTEND:
++ case EXT3_IOC_GROUP_ADD:
++ rsbac_request = R_MODIFY_PERMISSIONS_DATA;
++ break;
++ default:
++ rsbac_request = R_NONE;
++ }
++ if(S_ISSOCK(inode->i_mode)) {
++ if(SOCKET_I(inode)->ops
++ && (SOCKET_I(inode)->ops->family == AF_UNIX)) {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = filp->f_dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = filp->f_dentry;
++
++ }
++#ifdef CONFIG_RSBAC_NET_OBJ
++ else {
++ rsbac_target = T_NETOBJ;
++ rsbac_target_id.netobj.sock_p
++ = SOCKET_I(inode);
++ rsbac_target_id.netobj.local_addr = NULL;
++ rsbac_target_id.netobj.local_len = 0;
++ rsbac_target_id.netobj.remote_addr = NULL;
++ rsbac_target_id.netobj.remote_len = 0;
++ }
++#endif
++ }
++ else {
++ if (S_ISDIR(inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO(inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK(inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else
++ rsbac_target = T_FILE;
++ rsbac_target_id.file.device = filp->f_dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = inode->i_ino;
++ rsbac_target_id.file.dentry_p = filp->f_dentry;
++ }
++ rsbac_attribute_value.ioctl_cmd = cmd;
++ if( (rsbac_request != R_NONE)
++ && !rsbac_adf_request(rsbac_request,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_ioctl_cmd,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ ext3_debug ("cmd = %u, arg = %lu\n", cmd, arg);
+
+ switch (cmd) {
+diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
+index d7940b2..8bff772 100644
+--- a/fs/ext3/namei.c
++++ b/fs/ext3/namei.c
+@@ -30,6 +30,8 @@
+ #include "xattr.h"
+ #include "acl.h"
+
++#include <rsbac/hooks.h>
++
+ /*
+ * define how far ahead to read directories while searching them.
+ */
+@@ -2153,6 +2155,19 @@ static int ext3_unlink(struct inode * dir, struct dentry *dentry)
+
+ inode = dentry->d_inode;
+
++#ifdef CONFIG_RSBAC_SECDEL
++ if(inode->i_nlink == 1) {
++ ext3_journal_stop(handle);
++ rsbac_sec_del(dentry, TRUE);
++ handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
++
++ if (IS_DIRSYNC(dir))
++ handle->h_sync = 1;
++ }
++#endif
++
+ retval = -EIO;
+ if (le32_to_cpu(de->inode) != inode->i_ino)
+ goto end_unlink;
+@@ -2403,6 +2418,22 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
+ if (retval)
+ goto end_rename;
+ } else {
++
++#ifdef CONFIG_RSBAC_SECDEL
++ if (new_inode->i_nlink == 1) {
++ ext3_journal_stop(handle);
++ rsbac_sec_del(new_dentry, TRUE);
++ handle = ext3_journal_start(old_dir, 2 *
++ EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) +
++ EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
++
++ if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
++ handle->h_sync = 1;
++ }
++#endif
++
+ BUFFER_TRACE(new_bh, "get write access");
+ retval = ext3_journal_get_write_access(handle, new_bh);
+ if (retval)
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index 6eee255..9404733 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -20,6 +20,11 @@
+
+ #define MAX_32_NUM ((((unsigned long long) 1) << 32) - 1)
+
++#ifdef CONFIG_RSBAC
++#include <net/sock.h>
++#endif
++#include <rsbac/hooks.h>
++
+ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ struct inode *inode = filp->f_dentry->d_inode;
+@@ -27,6 +32,77 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned int flags;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_adf_request_t rsbac_request;
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ switch (cmd) {
++ case EXT4_IOC_GETFLAGS:
++ case EXT4_IOC_GETVERSION:
++ case EXT4_IOC_GETVERSION_OLD:
++ case EXT4_IOC_GETRSVSZ:
++ rsbac_request = R_GET_PERMISSIONS_DATA;
++ break;
++ case EXT4_IOC_SETFLAGS:
++ case EXT4_IOC_SETVERSION:
++ case EXT4_IOC_SETVERSION_OLD:
++ case EXT4_IOC_SETRSVSZ:
++ case EXT4_IOC_GROUP_EXTEND:
++ case EXT4_IOC_GROUP_ADD:
++ case EXT4_IOC_MIGRATE:
++ rsbac_request = R_MODIFY_PERMISSIONS_DATA;
++ break;
++ default:
++ rsbac_request = R_NONE;
++ }
++ if(S_ISSOCK(inode->i_mode)) {
++ if(SOCKET_I(inode)->ops
++ && (SOCKET_I(inode)->ops->family == AF_UNIX)) {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = filp->f_dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = filp->f_dentry;
++ }
++#ifdef CONFIG_RSBAC_NET_OBJ
++ else {
++ rsbac_target = T_NETOBJ;
++ rsbac_target_id.netobj.sock_p
++ = SOCKET_I(inode);
++ rsbac_target_id.netobj.local_addr = NULL;
++ rsbac_target_id.netobj.local_len = 0;
++ rsbac_target_id.netobj.remote_addr = NULL;
++ rsbac_target_id.netobj.remote_len = 0;
++ }
++#endif
++ }
++ else {
++ if (S_ISDIR(inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO(inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK(inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else
++ rsbac_target = T_FILE;
++ rsbac_target_id.file.device = filp->f_dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = inode->i_ino;
++ rsbac_target_id.file.dentry_p = filp->f_dentry;
++ }
++ rsbac_attribute_value.ioctl_cmd = cmd;
++ if( (rsbac_request != R_NONE)
++ && !rsbac_adf_request(rsbac_request,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_ioctl_cmd,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ ext4_debug("cmd = %u, arg = %lu\n", cmd, arg);
+
+ switch (cmd) {
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 349d7b3..38a9eb7 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -41,6 +41,9 @@
+ #include "acl.h"
+
+ #include <trace/events/ext4.h>
++
++#include <rsbac/hooks.h>
++
+ /*
+ * define how far ahead to read directories while searching them.
+ */
+@@ -2205,6 +2208,19 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
+
+ inode = dentry->d_inode;
+
++#ifdef CONFIG_RSBAC_SECDEL
++ if(inode->i_nlink == 1) {
++ ext4_journal_stop(handle);
++ rsbac_sec_del(dentry, TRUE);
++ handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
++
++ if (IS_DIRSYNC(dir))
++ handle->h_sync = 1;
++ }
++#endif
++
+ retval = -EIO;
+ if (le32_to_cpu(de->inode) != inode->i_ino)
+ goto end_unlink;
+@@ -2462,6 +2478,21 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
+ if (retval)
+ goto end_rename;
+ } else {
++
++#ifdef CONFIG_RSBAC_SECDEL
++ if(new_inode->i_nlink == 1) {
++ ext4_journal_stop(handle);
++ rsbac_sec_del(new_dentry, TRUE);
++ handle = ext4_journal_start(old_dir,
++ EXT4_DELETE_TRANS_BLOCKS(old_dir->i_sb));
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
++
++ if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
++ handle->h_sync = 1;
++ }
++#endif
++
+ BUFFER_TRACE(new_bh, "get write access");
+ retval = ext4_journal_get_write_access(handle, new_bh);
+ if (retval)
+diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
+index c5938c9..12119f1 100644
+--- a/fs/fat/namei_msdos.c
++++ b/fs/fat/namei_msdos.c
+@@ -9,6 +9,7 @@
+ #include <linux/module.h>
+ #include <linux/time.h>
+ #include <linux/buffer_head.h>
++#include <rsbac/hooks.h>
+ #include "fat.h"
+
+ /* Characters that are undesirable in an MS-DOS file name */
+@@ -423,6 +424,9 @@ static int msdos_unlink(struct inode *dir, struct dentry *dentry)
+ clear_nlink(inode);
+ inode->i_ctime = CURRENT_TIME_SEC;
+ fat_detach(inode);
++#ifdef CONFIG_RSBAC_SECDEL
++ rsbac_sec_del(dentry, TRUE);
++#endif
+ out:
+ unlock_super(sb);
+ if (!err)
+@@ -516,6 +520,11 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
+ }
+ new_dir->i_version++;
+
++#ifdef CONFIG_RSBAC_SECDEL
++ if (new_inode && (new_inode->i_nlink == 1))
++ rsbac_sec_del(new_dentry, TRUE);
++#endif
++
+ fat_detach(old_inode);
+ fat_attach(old_inode, new_i_pos);
+ if (is_hid)
+diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
+index 98ae804..b3d42d7 100644
+--- a/fs/fat/namei_vfat.c
++++ b/fs/fat/namei_vfat.c
+@@ -21,6 +21,7 @@
+ #include <linux/slab.h>
+ #include <linux/buffer_head.h>
+ #include <linux/namei.h>
++#include <rsbac/hooks.h>
+ #include "fat.h"
+
+ /*
+@@ -848,6 +849,10 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
+ if (err)
+ goto out;
+
++#ifdef CONFIG_RSBAC_SECDEL
++ rsbac_sec_del(dentry, TRUE);
++#endif
++
+ err = fat_remove_entries(dir, &sinfo); /* and releases bh */
+ if (err)
+ goto out;
+@@ -944,6 +949,11 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
+ if (err)
+ goto out;
+ }
++#ifdef CONFIG_RSBAC_SECDEL
++ else
++ if(new_inode->i_nlink == 1)
++ rsbac_sec_del(new_dentry, TRUE);
++#endif
+ new_i_pos = MSDOS_I(new_inode)->i_pos;
+ fat_detach(new_inode);
+ } else {
+diff --git a/fs/ioctl.c b/fs/ioctl.c
+index 29167be..f33d430 100644
+--- a/fs/ioctl.c
++++ b/fs/ioctl.c
+@@ -18,6 +18,11 @@
+
+ #include <asm/ioctls.h>
+
++#ifdef CONFIG_RSBAC_IOCTL
++#include <net/sock.h>
++#endif
++#include <rsbac/hooks.h>
++
+ /* So that the fiemap access checks can't overflow on 32 bit machines. */
+ #define FIEMAP_MAX_EXTENTS (UINT_MAX / sizeof(struct fiemap_extent))
+
+@@ -37,9 +42,78 @@ static long vfs_ioctl(struct file *filp, unsigned int cmd,
+ {
+ int error = -ENOTTY;
+
++#ifdef CONFIG_RSBAC_IOCTL
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!filp->f_op || !filp->f_op->unlocked_ioctl)
+ goto out;
+
++#ifdef CONFIG_RSBAC_IOCTL
++ if (S_ISBLK(filp->f_dentry->d_inode->i_mode)) {
++ rsbac_target = T_DEV;
++ rsbac_target_id.dev.type = D_block;
++ rsbac_target_id.dev.major = RSBAC_MAJOR(filp->f_dentry->d_inode->i_rdev);
++ rsbac_target_id.dev.minor = RSBAC_MINOR(filp->f_dentry->d_inode->i_rdev);
++ }
++ else
++ if (S_ISCHR(filp->f_dentry->d_inode->i_mode)) {
++ rsbac_target = T_DEV;
++ rsbac_target_id.dev.type = D_char;
++ rsbac_target_id.dev.major = RSBAC_MAJOR(filp->f_dentry->d_inode->i_rdev);
++ rsbac_target_id.dev.minor = RSBAC_MINOR(filp->f_dentry->d_inode->i_rdev);
++ }
++ else
++ if (S_ISSOCK(filp->f_dentry->d_inode->i_mode)) {
++ if ( SOCKET_I(filp->f_dentry->d_inode)->ops
++ && (SOCKET_I(filp->f_dentry->d_inode)->ops->family == AF_UNIX)
++ ) {
++ if (filp->f_dentry->d_sb->s_magic == SOCKFS_MAGIC) {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = filp->f_dentry->d_inode->i_ino;
++ }
++ else {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = filp->f_dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = filp->f_dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = filp->f_dentry;
++ }
++ }
++ else {
++#ifdef CONFIG_RSBAC_NET_OBJ
++ rsbac_target = T_NETOBJ;
++ rsbac_target_id.netobj.sock_p
++ = SOCKET_I(filp->f_dentry->d_inode);
++ rsbac_target_id.netobj.local_addr = NULL;
++ rsbac_target_id.netobj.local_len = 0;
++ rsbac_target_id.netobj.remote_addr = NULL;
++ rsbac_target_id.netobj.remote_len = 0;
++#else
++ rsbac_target = T_NONE;
++#endif
++ }
++ }
++ else
++ rsbac_target = T_NONE;
++ if (rsbac_target != T_NONE) {
++ rsbac_pr_debug(aef, "[sys_ioctl()]: calling ADF\n");
++ rsbac_attribute_value.ioctl_cmd = cmd;
++ if (!rsbac_adf_request(R_IOCTL,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_ioctl_cmd,
++ rsbac_attribute_value))
++ {
++ error = -EPERM;
++ goto out;
++ }
++ }
++#endif
++
+ error = filp->f_op->unlocked_ioctl(filp, cmd, arg);
+ if (error == -ENOIOCTLCMD)
+ error = -ENOTTY;
+diff --git a/fs/ioprio.c b/fs/ioprio.c
+index 0f1b951..6783b45 100644
+--- a/fs/ioprio.c
++++ b/fs/ioprio.c
+@@ -27,6 +27,7 @@
+ #include <linux/capability.h>
+ #include <linux/syscalls.h>
+ #include <linux/security.h>
++#include <rsbac/hooks.h>
+ #include <linux/pid_namespace.h>
+
+ int set_task_ioprio(struct task_struct *task, int ioprio)
+@@ -67,6 +68,26 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
+ struct pid *pgrp;
+ int ret;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_priority;
++ rsbac_attribute_value.priority = ioprio;
++
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_priority,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ switch (class) {
+ case IOPRIO_CLASS_RT:
+ if (!capable(CAP_SYS_ADMIN))
+@@ -141,6 +162,26 @@ static int get_task_ioprio(struct task_struct *p)
+ {
+ int ret;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_priority;
++ rsbac_attribute_value.dummy = 0;
++
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ ret = security_task_getioprio(p);
+ if (ret)
+ goto out;
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index ddcd354..453b654 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -2200,6 +2200,11 @@ int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
+ if (is_handle_aborted(handle))
+ return -EIO;
+
++#ifdef CONFIG_RSBAC
++ if (!jinode)
++ return 0;
++#endif
++
+ jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
+ transaction->t_tid);
+
+diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
+index 07c91ca..ec6abe9 100644
+--- a/fs/jfs/namei.c
++++ b/fs/jfs/namei.c
+@@ -33,6 +33,8 @@
+ #include "jfs_acl.h"
+ #include "jfs_debug.h"
+
++#include <rsbac/hooks.h>
++
+ /*
+ * forward references
+ */
+@@ -489,6 +491,12 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
+ if ((rc = get_UCSname(&dname, dentry)))
+ goto out;
+
++ /* RSBAC jfs_unlink */
++#ifdef CONFIG_RSBAC_SECDEL
++ if(dentry->d_inode->i_nlink == 1)
++ rsbac_sec_del(dentry, TRUE);
++#endif
++
+ IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
+
+ tid = txBegin(dip->i_sb, 0);
+@@ -1131,6 +1139,10 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ }
+ }
+ } else if (new_ip) {
++#ifdef CONFIG_RSBAC_SECDEL
++ if (new_ip->i_nlink == 1)
++ rsbac_sec_del(new_dentry, TRUE);
++#endif
+ IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL);
+ /* Init inode for quota operations. */
+ dquot_initialize(new_ip);
+diff --git a/fs/locks.c b/fs/locks.c
+index 0d68f1f..eddc3d9 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -129,6 +129,8 @@
+
+ #include <asm/uaccess.h>
+
++#include <rsbac/hooks.h>
++
+ #define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
+ #define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
+ #define IS_LEASE(fl) (fl->fl_flags & FL_LEASE)
+@@ -1640,6 +1642,12 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
+ int can_sleep, unlock;
+ int error;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ error = -EBADF;
+ filp = fget(fd);
+ if (!filp)
+@@ -1653,6 +1661,39 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
+ !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
+ goto out_putf;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target = T_FILE;
++ rsbac_target_id.file.device = filp->f_dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = filp->f_dentry->d_inode->i_ino;
++ rsbac_target_id.file.dentry_p = filp->f_dentry;
++ if (S_ISDIR(filp->f_dentry->d_inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO(filp->f_dentry->d_inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK(filp->f_dentry->d_inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else if (S_ISSOCK(filp->f_dentry->d_inode->i_mode)) {
++ if(filp->f_dentry->d_sb->s_magic == SOCKFS_MAGIC) {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = filp->f_dentry->d_inode->i_ino;
++ } else
++ rsbac_target = T_UNIXSOCK;
++ }
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_LOCK,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ error = -EPERM;
++ goto out_putf;
++ }
++#endif
++
+ error = flock_make_lock(filp, &lock, cmd);
+ if (error)
+ goto out_putf;
+@@ -1738,6 +1779,12 @@ int fcntl_getlk(struct file *filp, struct flock __user *l)
+ struct flock flock;
+ int error;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ error = -EFAULT;
+ if (copy_from_user(&flock, l, sizeof(flock)))
+ goto out;
+@@ -1749,6 +1796,33 @@ int fcntl_getlk(struct file *filp, struct flock __user *l)
+ if (error)
+ goto out;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_fcntl()]: calling ADF\n");
++ rsbac_target = T_FILE;
++ if (S_ISDIR(filp->f_dentry->d_inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO(filp->f_dentry->d_inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK(filp->f_dentry->d_inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else if (S_ISSOCK(filp->f_dentry->d_inode->i_mode))
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.file.device = filp->f_dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = filp->f_dentry->d_inode->i_ino;
++ rsbac_target_id.file.dentry_p = filp->f_dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ error = -EPERM;
++ goto out;
++ }
++#endif
++
+ error = vfs_test_lock(filp, &file_lock);
+ if (error)
+ goto out;
+@@ -1844,6 +1918,12 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
+ struct file *f;
+ int error;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (file_lock == NULL)
+ return -ENOLCK;
+
+@@ -1872,6 +1952,39 @@ again:
+ file_lock->fl_flags |= FL_SLEEP;
+ }
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_fcntl()]: calling ADF\n");
++ rsbac_target = T_FILE;
++ rsbac_target_id.file.device = filp->f_dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = inode->i_ino;
++ rsbac_target_id.file.dentry_p = filp->f_dentry;
++ if (S_ISDIR(inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO(inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK(inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else if (S_ISSOCK(filp->f_dentry->d_inode->i_mode)) {
++ if(filp->f_dentry->d_sb->s_magic == SOCKFS_MAGIC) {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = filp->f_dentry->d_inode->i_ino;
++ } else
++ rsbac_target = T_UNIXSOCK;
++ }
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_LOCK,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ error = -EPERM;
++ goto out;
++ }
++#endif
++
+ error = -EBADF;
+ switch (flock.l_type) {
+ case F_RDLCK:
+@@ -1923,6 +2036,12 @@ int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
+ struct flock64 flock;
+ int error;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ error = -EFAULT;
+ if (copy_from_user(&flock, l, sizeof(flock)))
+ goto out;
+@@ -1934,6 +2053,33 @@ int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
+ if (error)
+ goto out;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_fcntl()]: calling ADF\n");
++ rsbac_target = T_FILE;
++ if (S_ISDIR(filp->f_dentry->d_inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO(filp->f_dentry->d_inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK(filp->f_dentry->d_inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else if (S_ISSOCK(filp->f_dentry->d_inode->i_mode))
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.file.device = filp->f_dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = filp->f_dentry->d_inode->i_ino;
++ rsbac_target_id.file.dentry_p = filp->f_dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ error = -EPERM;
++ goto out;
++ }
++#endif
++
+ error = vfs_test_lock(filp, &file_lock);
+ if (error)
+ goto out;
+@@ -1962,6 +2108,12 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
+ struct file *f;
+ int error;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (file_lock == NULL)
+ return -ENOLCK;
+
+@@ -2007,6 +2159,39 @@ again:
+ goto out;
+ }
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_fcntl()]: calling ADF\n");
++ rsbac_target = T_FILE;
++ rsbac_target_id.file.device = filp->f_dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = inode->i_ino;
++ rsbac_target_id.file.dentry_p = filp->f_dentry;
++ if (S_ISDIR(inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO(inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK(inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else if (S_ISSOCK(filp->f_dentry->d_inode->i_mode)) {
++ if(filp->f_dentry->d_sb->s_magic == SOCKFS_MAGIC) {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = filp->f_dentry->d_inode->i_ino;
++ } else
++ rsbac_target = T_UNIXSOCK;
++ }
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_LOCK,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ error = -EPERM;
++ goto out;
++ }
++#endif
++
+ error = do_lock_file_wait(filp, cmd, file_lock);
+
+ /*
+diff --git a/fs/minix/namei.c b/fs/minix/namei.c
+index 2d0ee17..ba641d6 100644
+--- a/fs/minix/namei.c
++++ b/fs/minix/namei.c
+@@ -6,6 +6,8 @@
+
+ #include "minix.h"
+
++#include <rsbac/hooks.h>
++
+ static int add_nondir(struct dentry *dentry, struct inode *inode)
+ {
+ int err = minix_add_link(dentry, inode);
+@@ -151,6 +153,11 @@ static int minix_unlink(struct inode * dir, struct dentry *dentry)
+ if (err)
+ goto end_unlink;
+
++#ifdef CONFIG_RSBAC_SECDEL
++ if (inode->i_nlink == 1)
++ rsbac_sec_del(dentry, TRUE);
++#endif
++
+ inode->i_ctime = dir->i_ctime;
+ inode_dec_link_count(inode);
+ end_unlink:
+@@ -206,6 +213,12 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
+ new_de = minix_find_entry(new_dentry, &new_page);
+ if (!new_de)
+ goto out_dir;
++
++#ifdef CONFIG_RSBAC_SECDEL
++ if (new_inode->i_nlink == 1)
++ rsbac_sec_del(new_dentry, TRUE);
++#endif
++
+ minix_set_link(new_de, new_page, old_inode);
+ new_inode->i_ctime = CURRENT_TIME_SEC;
+ if (dir_de)
+diff --git a/fs/namei.c b/fs/namei.c
+index c427919..ac15adb 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -34,6 +34,10 @@
+ #include <linux/fs_struct.h>
+ #include <linux/posix_acl.h>
+ #include <asm/uaccess.h>
++#ifdef CONFIG_RSBAC
++#include <rsbac/hooks.h>
++#include <linux/magic.h>
++#endif
+
+ #include "internal.h"
+ #include "mount.h"
+@@ -343,6 +347,11 @@ int inode_permission(struct inode *inode, int mask)
+ {
+ int retval;
+
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_FULL
++ if (rsbac_dac_disable)
++ return 0;
++#endif
++
+ if (unlikely(mask & MAY_WRITE)) {
+ umode_t mode = inode->i_mode;
+
+@@ -629,6 +638,11 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
+ int error;
+ struct dentry *dentry = link->dentry;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ BUG_ON(nd->flags & LOOKUP_RCU);
+
+ if (link->mnt == nd->path.mnt)
+@@ -652,14 +666,49 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
+ return error;
+ }
+
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.dir.device = link->dentry->d_sb->s_dev;
++ rsbac_target_id.dir.inode = link->dentry->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = link->dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_SEARCH,
++ task_pid(current),
++ T_SYMLINK,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++#ifdef CONFIG_RSBAC_FSOBJ_HIDE
++ error = -ENOENT;
++#else
++ error = -EPERM;
++#endif
++ *p = ERR_PTR(error); /* no ->put_link(), please */
++ path_put(&nd->path);
++ return error;
++ }
++#endif
++
+ nd->last_type = LAST_BIND;
+ *p = dentry->d_inode->i_op->follow_link(dentry, nd);
+ error = PTR_ERR(*p);
+ if (!IS_ERR(*p)) {
+ char *s = nd_get_link(nd);
+ error = 0;
+- if (s)
++ if (s) {
++#ifdef CONFIG_RSBAC_SYM_REDIR
++ char * rsbac_name;
++
++ rsbac_name = rsbac_symlink_redirect(dentry->d_inode, s, PAGE_SIZE);
++ if (rsbac_name) {
++ error = __vfs_follow_link(nd, rsbac_name);
++ kfree(rsbac_name);
++ }
++ else
++#endif
+ error = __vfs_follow_link(nd, s);
++ }
++
+ else if (nd->last_type == LAST_BIND) {
+ nd->flags |= LOOKUP_JUMPED;
+ nd->inode = nd->path.dentry->d_inode;
+@@ -1147,6 +1196,11 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
+ int status = 1;
+ int err;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ /*
+ * Rename seqlock is not required here because in the off chance
+ * of a false negative due to a concurrent rename, we're going to
+@@ -1211,6 +1265,31 @@ unlazy:
+ done:
+ path->mnt = mnt;
+ path->dentry = dentry;
++
++#ifdef CONFIG_RSBAC
++ if ( path->dentry
++ && path->dentry->d_inode
++ ) {
++ rsbac_target_id.dir.device = path->dentry->d_inode->i_sb->s_dev;
++ rsbac_target_id.dir.inode = path->dentry->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = path->dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_SEARCH,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ path_put_conditional(path, nd);
++#ifdef CONFIG_RSBAC_FSOBJ_HIDE
++ return -ENOENT;
++#else
++ return -EPERM;
++#endif
++ }
++ }
++#endif
++
+ err = follow_managed(path, nd->flags);
+ if (unlikely(err < 0)) {
+ path_put_conditional(path, nd);
+@@ -1518,6 +1597,11 @@ static int link_path_walk(const char *name, struct nameidata *nd)
+ struct path next;
+ int err;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ while (*name=='/')
+ name++;
+ if (!*name)
+@@ -1530,9 +1614,34 @@ static int link_path_walk(const char *name, struct nameidata *nd)
+ int type;
+
+ err = may_lookup(nd);
+- if (err)
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_PART
++ if (rsbac_dac_part_disabled(nd->path.dentry))
++ err = 0;
++ else
++#endif
++ if (err)
+ break;
+
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.dir.device = nd->path.dentry->d_inode->i_sb->s_dev;
++ rsbac_target_id.dir.inode = nd->path.dentry->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = nd->path.dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_SEARCH,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++#ifdef CONFIG_RSBAC_FSOBJ_HIDE
++ err = -ENOENT;
++#else
++ err = -EPERM;
++#endif
++ break;
++ }
++#endif
++
+ len = hash_name(name, &this.hash);
+ this.name = name;
+ this.len = len;
+@@ -1593,6 +1702,24 @@ last_component:
+ return 0;
+ }
+ terminate_walk(nd);
++
++#ifdef CONFIG_RSBAC_FSOBJ_HIDE
++ rsbac_target_id.dir.device = nd->path.dentry->d_inode->i_sb->s_dev;
++ rsbac_target_id.dir.inode = nd->path.dentry->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = nd->path.dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_SEARCH,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ path_put(&nd->path);
++ return -ENOENT;
++ }
++#endif
++
+ return err;
+ }
+
+@@ -1883,6 +2010,47 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
+ return __lookup_hash(&this, base, NULL);
+ }
+
++/* RSBAC
++ * I hate to put new functions into this file, but even more I hate removing
++ * all statics from all the lookup helpers in here...
++ * Still, I need some form of RSBAC bypass for internal file access.
++ * Amon Ott <ao@rsbac.org>
++ */
++#ifdef CONFIG_RSBAC
++struct dentry *rsbac_lookup_one_len(const char *name, struct dentry *base, int len)
++{
++ struct qstr this;
++ unsigned int c;
++
++ WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));
++
++ this.name = name;
++ this.len = len;
++ this.hash = full_name_hash(name, len);
++ if (!len)
++ return ERR_PTR(-EACCES);
++
++ while (len--) {
++ c = *(const unsigned char *)name++;
++ if (c == '/' || c == '\0')
++ return ERR_PTR(-EACCES);
++ }
++ /*
++ * See if the low-level filesystem might want
++ * to use its own hash..
++ */
++ if (base->d_flags & DCACHE_OP_HASH) {
++ int err = base->d_op->d_hash(base, base->d_inode, &this);
++ if (err < 0)
++ return ERR_PTR(err);
++ }
++
++ return __lookup_hash(&this, base, NULL);
++}
++
++EXPORT_SYMBOL(rsbac_lookup_one_len);
++#endif
++
+ int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
+ struct path *path, int *empty)
+ {
+@@ -1975,6 +2143,11 @@ static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
+ BUG_ON(victim->d_parent->d_inode != dir);
+ audit_inode_child(victim, dir);
+
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_PART
++ if (rsbac_dac_part_disabled(victim))
++ error = 0;
++ else
++#endif
+ error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
+ if (error)
+ return error;
+@@ -2011,6 +2184,12 @@ static inline int may_create(struct inode *dir, struct dentry *child)
+ return -EEXIST;
+ if (IS_DEADDIR(dir))
+ return -ENOENT;
++
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_PART
++ if (rsbac_dac_part_disabled(child->d_parent))
++ return 0;
++ else
++#endif
+ return inode_permission(dir, MAY_WRITE | MAY_EXEC);
+ }
+
+@@ -2061,6 +2240,14 @@ int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ {
+ int error = may_create(dir, dentry);
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ enum rsbac_target_t rsbac_new_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (error)
+ return error;
+
+@@ -2071,9 +2258,50 @@ int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ error = security_inode_create(dir, dentry, mode);
+ if (error)
+ return error;
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[open_namei() [filp_open() [do_open() [sys_open()]]]]: calling ADF\n");
++ rsbac_target = T_DIR;
++ rsbac_target_id.dir.device = dentry->d_sb->s_dev;
++ rsbac_target_id.dir.inode = dir->i_ino;
++ rsbac_target_id.dir.dentry_p = dentry->d_parent;
++ rsbac_attribute_value.create_data.target = T_FILE;
++ rsbac_attribute_value.create_data.dentry_p = dentry;
++ rsbac_attribute_value.create_data.mode = mode;
++ rsbac_attribute_value.create_data.device = 0;
++ if (!rsbac_adf_request(R_CREATE,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_create_data,
++ rsbac_attribute_value))
++ return -EPERM;
++#endif
++
+ error = dir->i_op->create(dir, dentry, mode, nd);
+- if (!error)
++ if (!error) {
+ fsnotify_create(dir, dentry);
++
++ /* RSBAC: notify ADF of new file */
++#ifdef CONFIG_RSBAC
++ rsbac_new_target = T_FILE;
++ rsbac_new_target_id.file.device = dentry->d_sb->s_dev;
++ rsbac_new_target_id.file.inode = dentry->d_inode->i_ino;
++ rsbac_new_target_id.file.dentry_p = dentry;
++ if (rsbac_adf_set_attr(R_CREATE,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ rsbac_new_target,
++ rsbac_new_target_id,
++ A_create_data,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++				       "vfs_create() [open_namei() [filp_open() [do_open() [sys_open()]]]]: rsbac_adf_set_attr() returned error\n");
++ }
++#endif
++
++ }
+ return error;
+ }
+
+@@ -2108,6 +2336,12 @@ static int may_open(struct path *path, int acc_mode, int flag)
+ break;
+ }
+
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_PART
++ if (rsbac_dac_part_disabled(dentry))
++ error = 0;
++ else
++#endif
++
+ error = inode_permission(inode, acc_mode);
+ if (error)
+ return error;
+@@ -2173,6 +2407,14 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+ struct file *filp;
+ int error;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_adf_request_t rsbac_adf_req = R_NONE;
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ nd->flags &= ~LOOKUP_PARENT;
+ nd->flags |= op->intent;
+
+@@ -2334,6 +2576,68 @@ common:
+ error = may_open(&nd->path, acc_mode, open_flag);
+ if (error)
+ goto exit;
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "do_last() [sys_open()]: calling ADF\n");
++ /* get target type and id clear */
++ if (S_ISBLK(nd->path.dentry->d_inode->i_mode) || S_ISCHR(nd->path.dentry->d_inode->i_mode)){
++ rsbac_target = T_DEV;
++ if (S_ISBLK(nd->path.dentry->d_inode->i_mode)) {
++ rsbac_target_id.dev.type = D_block;
++ }
++ else {
++ rsbac_target_id.dev.type = D_char;
++ }
++ rsbac_target_id.dev.major = RSBAC_MAJOR(nd->path.dentry->d_inode->i_rdev);
++ rsbac_target_id.dev.minor = RSBAC_MINOR(nd->path.dentry->d_inode->i_rdev);
++ }
++ else { /* must be file, dir or fifo */
++ if (S_ISDIR(nd->path.dentry->d_inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISSOCK(nd->path.dentry->d_inode->i_mode))
++ rsbac_target = T_UNIXSOCK;
++ else if (S_ISFIFO(nd->path.dentry->d_inode->i_mode)) {
++ if (nd->path.dentry->d_inode->i_sb->s_magic != PIPEFS_MAGIC)
++ rsbac_target = T_FIFO;
++ else
++ rsbac_target = T_NONE;
++ }
++ else
++ rsbac_target = T_FILE;
++
++ rsbac_target_id.file.device = nd->path.dentry->d_inode->i_sb->s_dev;
++ rsbac_target_id.file.inode = nd->path.dentry->d_inode->i_ino;
++ rsbac_target_id.file.dentry_p = nd->path.dentry;
++ }
++ /* determine request type */
++ rsbac_adf_req = R_NONE;
++ if (open_flag & O_APPEND)
++ rsbac_adf_req = R_APPEND_OPEN;
++ else
++ if ((open_flag & O_RDWR) || ((open_flag & O_WRONLY) && (open_flag & O_RDONLY)))
++ rsbac_adf_req = R_READ_WRITE_OPEN;
++ else
++ if (open_flag & O_WRONLY)
++ rsbac_adf_req = R_WRITE_OPEN;
++ else
++ if (rsbac_target == T_DIR)
++ rsbac_adf_req = R_READ;
++ else
++ rsbac_adf_req = R_READ_OPEN;
++ if ((rsbac_adf_req != R_NONE) && (rsbac_target != T_NONE)) {
++ rsbac_attribute_value.open_flag = open_flag;
++ if (!rsbac_adf_request(rsbac_adf_req,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_open_flag,
++ rsbac_attribute_value)) {
++ error = -EPERM;
++ goto exit;
++ }
++ }
++#endif
++
+ filp = nameidata_to_filp(nd);
+ if (!IS_ERR(filp)) {
+ error = ima_file_check(filp, op->acc_mode);
+@@ -2354,6 +2658,24 @@ common:
+ out:
+ if (want_write)
+ mnt_drop_write(nd->path.mnt);
++
++#ifdef CONFIG_RSBAC
++ if (!IS_ERR(filp) && (rsbac_adf_req != R_NONE) && (rsbac_target != T_NONE)) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(rsbac_adf_req,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_open_flag,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "do_last() [sys_open()]: rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++
+ path_put(&nd->path);
+ return filp;
+
+@@ -2373,6 +2695,10 @@ static struct file *path_openat(int dfd, const char *pathname,
+ struct file *filp;
+ struct path path;
+ int error;
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
+
+ filp = get_empty_filp();
+ if (!filp)
+@@ -2401,6 +2727,25 @@ static struct file *path_openat(int dfd, const char *pathname,
+ path_put(&nd->path);
+ filp = ERR_PTR(-ELOOP);
+ break;
++
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.dir.device = path.dentry->d_sb->s_dev;
++ rsbac_target_id.dir.inode = path.dentry->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = path.dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_SEARCH,
++ task_pid(current),
++ T_SYMLINK,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ path_put_conditional(&path, nd);
++ path_put(&nd->path);
++ filp = ERR_PTR(-EPERM);
++ break;
++ }
++#endif
++
+ }
+ nd->flags |= LOOKUP_PARENT;
+ nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
+@@ -2528,6 +2873,13 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
+ {
+ int error = may_create(dir, dentry);
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ enum rsbac_target_t rsbac_new_target;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (error)
+ return error;
+
+@@ -2546,9 +2898,56 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
+ if (error)
+ return error;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_mknod()]: calling ADF\n");
++ rsbac_target_id.dir.device = dentry->d_sb->s_dev;
++ rsbac_target_id.dir.inode = dir->i_ino;
++ rsbac_target_id.dir.dentry_p = dentry->d_parent;
++ rsbac_attribute_value.create_data.target = T_FILE;
++ rsbac_attribute_value.create_data.dentry_p = dentry;
++ rsbac_attribute_value.create_data.mode = mode;
++ rsbac_attribute_value.create_data.device = dev;
++ if (!rsbac_adf_request(R_CREATE,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_create_data,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ error = dir->i_op->mknod(dir, dentry, mode, dev);
+- if (!error)
++ if (!error) {
+ fsnotify_create(dir, dentry);
++
++#ifdef CONFIG_RSBAC
++ if (S_ISFIFO(dentry->d_inode->i_mode))
++ rsbac_new_target = T_FIFO;
++ else
++ if (S_ISLNK(dentry->d_inode->i_mode))
++ rsbac_new_target = T_SYMLINK;
++ else
++ if (S_ISSOCK(dentry->d_inode->i_mode))
++ rsbac_new_target = T_UNIXSOCK;
++ else
++ rsbac_new_target = T_FILE;
++ rsbac_new_target_id.dir.device = dentry->d_sb->s_dev;
++ rsbac_new_target_id.dir.inode = dentry->d_inode->i_ino;
++ rsbac_new_target_id.dir.dentry_p = dentry;
++ if (rsbac_adf_set_attr(R_CREATE,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ rsbac_new_target,
++ rsbac_new_target_id,
++ A_create_data,
++ rsbac_attribute_value))
++ {
++ rsbac_pr_debug(aef, "[do_mknod(), sys_mknod()]: rsbac_adf_set_attr() returned error\n");
++ }
++#endif
++ }
+ return error;
+ }
+
+@@ -2626,6 +3025,12 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ int error = may_create(dir, dentry);
+ unsigned max_links = dir->i_sb->s_max_links;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (error)
+ return error;
+
+@@ -2640,9 +3045,47 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ if (max_links && dir->i_nlink >= max_links)
+ return -EMLINK;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_mkdir()]: calling ADF\n");
++ rsbac_target_id.dir.device = dentry->d_sb->s_dev;
++ rsbac_target_id.dir.inode = dir->i_ino;
++ rsbac_target_id.dir.dentry_p = dentry->d_parent;
++ rsbac_attribute_value.create_data.target = T_DIR;
++ rsbac_attribute_value.create_data.dentry_p = dentry;
++ rsbac_attribute_value.create_data.mode = mode;
++ rsbac_attribute_value.create_data.device = 0;
++ if (!rsbac_adf_request(R_CREATE,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_create_data,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ error = dir->i_op->mkdir(dir, dentry, mode);
+- if (!error)
++ if (!error) {
+ fsnotify_mkdir(dir, dentry);
++
++#ifdef CONFIG_RSBAC
++ rsbac_new_target_id.dir.device = dentry->d_sb->s_dev;
++ rsbac_new_target_id.dir.inode = dentry->d_inode->i_ino;
++ rsbac_new_target_id.dir.dentry_p = dentry;
++ if (rsbac_adf_set_attr(R_CREATE,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ T_DIR,
++ rsbac_new_target_id,
++ A_create_data,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "vfs_mkdir() [sys_mkdir()]: rsbac_adf_set_attr() returned error\n");
++ }
++#endif
++ }
++
+ return error;
+ }
+
+@@ -2707,6 +3150,12 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
+ {
+ int error = may_delete(dir, dentry, 1);
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (error)
+ return error;
+
+@@ -2714,6 +3163,24 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
+ return -EPERM;
+
+ dget(dentry);
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[do_rmdir() [sys_rmdir()]]: calling ADF\n");
++ rsbac_target_id.dir.device = dentry->d_sb->s_dev;
++ rsbac_target_id.dir.inode = dentry->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_DELETE,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ dput(dentry);
++ return -EPERM;
++ }
++#endif
++
+ mutex_lock(&dentry->d_inode->i_mutex);
+
+ error = -EBUSY;
+@@ -2734,6 +3201,24 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
+
+ out:
+ mutex_unlock(&dentry->d_inode->i_mutex);
++
++#ifdef CONFIG_RSBAC
++ if (!error) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_DELETE,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "vfs_rmdir() [do_rmdir() [sys_rmdir()]]: rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++
+ dput(dentry);
+ if (!error)
+ d_delete(dentry);
+@@ -2802,6 +3287,13 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
+ {
+ int error = may_delete(dir, dentry, 0);
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (error)
+ return error;
+
+@@ -2814,9 +3306,56 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
+ else {
+ error = security_inode_unlink(dir, dentry);
+ if (!error) {
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[do_unlink() [sys_unlink()]]: calling ADF\n");
++ if (S_ISDIR(dentry->d_inode->i_mode))
++ rsbac_target = T_DIR;
++ else
++ if (S_ISFIFO(dentry->d_inode->i_mode))
++ rsbac_target = T_FIFO;
++ else
++ if (S_ISLNK(dentry->d_inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else
++ if (S_ISSOCK(dentry->d_inode->i_mode))
++ rsbac_target = T_UNIXSOCK;
++ else
++ rsbac_target = T_FILE;
++ rsbac_target_id.file.device = dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = dentry->d_inode->i_ino;
++ rsbac_target_id.file.dentry_p = dentry;
++ rsbac_attribute_value.nlink = dentry->d_inode->i_nlink;
++ if (!rsbac_adf_request(R_DELETE,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_nlink,
++ rsbac_attribute_value)) {
++ mutex_unlock(&dentry->d_inode->i_mutex);
++ return -EPERM;
++ }
++#endif
++
+ error = dir->i_op->unlink(dir, dentry);
+ if (!error)
+ dont_mount(dentry);
++
++#ifdef CONFIG_RSBAC
++ if (!error) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_DELETE,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_nlink,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "vfs_unlink() [do_unlink() [sys_unlink()]]: rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
+ }
+ }
+ mutex_unlock(&dentry->d_inode->i_mutex);
+@@ -2911,6 +3450,12 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
+ {
+ int error = may_create(dir, dentry);
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (error)
+ return error;
+
+@@ -2921,9 +3466,46 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
+ if (error)
+ return error;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[do_symlink(), sys_symlink()]: calling ADF\n");
++ rsbac_target_id.dir.device = dentry->d_sb->s_dev;
++ rsbac_target_id.dir.inode = dir->i_ino;
++ rsbac_target_id.dir.dentry_p = dentry->d_parent;
++ rsbac_attribute_value.create_data.target = T_SYMLINK;
++ rsbac_attribute_value.create_data.dentry_p = dentry;
++ rsbac_attribute_value.create_data.mode = 0;
++ rsbac_attribute_value.create_data.device = 0;
++ if (!rsbac_adf_request(R_CREATE,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_create_data,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ error = dir->i_op->symlink(dir, dentry, oldname);
+- if (!error)
++ if (!error) {
+ fsnotify_create(dir, dentry);
++
++#ifdef CONFIG_RSBAC
++ rsbac_new_target_id.file.device = dentry->d_sb->s_dev;
++ rsbac_new_target_id.file.inode = dentry->d_inode->i_ino;
++ rsbac_new_target_id.file.dentry_p = dentry;
++ if (rsbac_adf_set_attr(R_CREATE,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ T_SYMLINK,
++ rsbac_new_target_id,
++ A_create_data,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "vfs_symlink() [do_symlink(), sys_symlink()]: rsbac_adf_set_attr() returned error\n");
++ }
++#endif
++ }
+ return error;
+ }
+
+@@ -2973,6 +3555,12 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
+ unsigned max_links = dir->i_sb->s_max_links;
+ int error;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!inode)
+ return -ENOENT;
+
+@@ -2997,6 +3585,31 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
+ if (error)
+ return error;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[do_link() [sys_link()]]: calling ADF\n");
++ rsbac_target = T_FILE;
++ if (S_ISDIR (old_dentry->d_inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO (old_dentry->d_inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK (old_dentry->d_inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else if (S_ISSOCK (old_dentry->d_inode->i_mode))
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.dir.device = old_dentry->d_sb->s_dev;
++ rsbac_target_id.dir.inode = old_dentry->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = old_dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_LINK_HARD,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ mutex_lock(&inode->i_mutex);
+ /* Make sure we don't allow creating hardlink to an unlinked file */
+ if (inode->i_nlink == 0)
+@@ -3114,11 +3727,27 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *target = new_dentry->d_inode;
+ unsigned max_links = new_dir->i_sb->s_max_links;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ enum rsbac_target_t rsbac_target2 = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_target_id2;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ union rsbac_attribute_value_t rsbac_attribute_value2;
++ rsbac_boolean_t target_exists = FALSE;
++#endif
++
+ /*
+ * If we are going to change the parent - check write permissions,
+ * we'll need to flip '..'.
+ */
+ if (new_dir != old_dir) {
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_PART
++ if (rsbac_dac_part_disabled(old_dentry))
++ error = 0;
++ else
++#endif
+ error = inode_permission(old_dentry->d_inode, MAY_WRITE);
+ if (error)
+ return error;
+@@ -3128,6 +3757,72 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
+ if (error)
+ return error;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[vfs_rename_dir()]: calling ADF\n");
++ rsbac_target = T_FILE;
++ if (S_ISDIR(old_dentry->d_inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO (old_dentry->d_inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK (old_dentry->d_inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else if (S_ISSOCK (old_dentry->d_inode->i_mode))
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.file.device = old_dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = old_dentry->d_inode->i_ino;
++ rsbac_target_id.file.dentry_p = old_dentry;
++ rsbac_attribute_value.new_dir_dentry_p = new_dentry->d_parent;
++ if (!rsbac_adf_request(R_RENAME,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_new_dir_dentry_p,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++ if (new_dir != old_dir) {
++ rsbac_pr_debug(aef, "[vfs_rename_dir()]: calling ADF for WRITE on new_dir\n");
++ rsbac_target_id2.dir.device = new_dentry->d_sb->s_dev;
++ rsbac_target_id2.dir.inode = new_dir->i_ino;
++ rsbac_target_id2.dir.dentry_p = new_dentry->d_parent;
++ rsbac_attribute_value2.dummy = 0;
++ if (!rsbac_adf_request(R_WRITE,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id2,
++ A_none,
++ rsbac_attribute_value2)) {
++ return -EPERM;
++ }
++ }
++ if(new_dentry->d_inode)
++ {
++ target_exists = TRUE;
++ rsbac_pr_debug(aef, "[vfs_rename_dir()]: calling ADF for DELETE on existing target\n");
++ rsbac_target2 = T_FILE;
++ if (S_ISDIR(new_dentry->d_inode->i_mode))
++ rsbac_target2 = T_DIR;
++ else if (S_ISFIFO (new_dentry->d_inode->i_mode))
++ rsbac_target2 = T_FIFO;
++ else if (S_ISLNK (new_dentry->d_inode->i_mode))
++ rsbac_target2 = T_SYMLINK;
++ else if (S_ISSOCK (new_dentry->d_inode->i_mode))
++ rsbac_target2 = T_UNIXSOCK;
++ rsbac_target_id2.file.device = new_dentry->d_sb->s_dev;
++ rsbac_target_id2.file.inode = new_dentry->d_inode->i_ino;
++ rsbac_target_id2.file.dentry_p = new_dentry;
++ rsbac_attribute_value2.nlink = new_dentry->d_inode->i_nlink;
++ if (!rsbac_adf_request(R_DELETE,
++ task_pid(current),
++ rsbac_target2,
++ rsbac_target_id2,
++ A_nlink,
++ rsbac_attribute_value2)) {
++ return -EPERM;
++ }
++ }
++#endif
++
+ dget(new_dentry);
+ if (target)
+ mutex_lock(&target->i_mutex);
+@@ -3154,24 +3849,140 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
+ out:
+ if (target)
+ mutex_unlock(&target->i_mutex);
++
++#ifdef CONFIG_RSBAC
++ if (!error && target_exists) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_DELETE,
++ task_pid(current),
++ rsbac_target2,
++ rsbac_target_id2,
++ T_NONE,
++ rsbac_new_target_id,
++ A_nlink,
++ rsbac_attribute_value2)) {
++ rsbac_printk(KERN_WARNING
++ "do_rename() [sys_rename()]: rsbac_adf_set_attr() for DELETE returned error\n");
++ }
++ }
++#endif
++
+ dput(new_dentry);
+- if (!error)
++ if (!error) {
+ if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
+ d_move(old_dentry,new_dentry);
++
++#ifdef CONFIG_RSBAC
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_RENAME,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_new_dir_dentry_p,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "do_rename() [sys_rename()]: rsbac_adf_set_attr() for RENAME returned error\n");
++ }
++#endif
++
++ }
+ return error;
+ }
+
+ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+ {
+- struct inode *target = new_dentry->d_inode;
++ struct inode *target;
+ int error;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ enum rsbac_target_t rsbac_target2 = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_target_id2;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ union rsbac_attribute_value_t rsbac_attribute_value2;
++ rsbac_boolean_t target_exists = FALSE;
++#endif
++
+ error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
+ if (error)
+ return error;
+
+ dget(new_dentry);
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_rename()]: calling ADF\n");
++ rsbac_target = T_FILE;
++ if (S_ISDIR(old_dentry->d_inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO (old_dentry->d_inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK (old_dentry->d_inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else if (S_ISSOCK (old_dentry->d_inode->i_mode))
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.file.device = old_dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = old_dentry->d_inode->i_ino;
++ rsbac_target_id.file.dentry_p = old_dentry;
++ rsbac_attribute_value.new_dir_dentry_p = new_dentry->d_parent;
++ if (!rsbac_adf_request(R_RENAME,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_new_dir_dentry_p,
++ rsbac_attribute_value)) {
++ dput(new_dentry);
++ return -EPERM;
++ }
++ if (new_dir != old_dir) {
++ rsbac_pr_debug(aef, "[sys_rename()]: calling ADF for WRITE on new_dir\n");
++ rsbac_target_id2.dir.device = new_dentry->d_sb->s_dev;
++ rsbac_target_id2.dir.inode = new_dir->i_ino;
++ rsbac_target_id2.dir.dentry_p = new_dentry->d_parent;
++ rsbac_attribute_value2.dummy = 0;
++ if (!rsbac_adf_request(R_WRITE,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id2,
++ A_none,
++ rsbac_attribute_value2)) {
++ dput(new_dentry);
++ return -EPERM;
++ }
++ }
++ if(new_dentry->d_inode) {
++ target_exists = TRUE;
++ rsbac_pr_debug(aef, "[sys_rename()]: calling ADF for DELETE on existing target\n");
++ rsbac_target2 = T_FILE;
++ if (S_ISDIR(new_dentry->d_inode->i_mode))
++ rsbac_target2 = T_DIR;
++ else if (S_ISFIFO (new_dentry->d_inode->i_mode))
++ rsbac_target2 = T_FIFO;
++ else if (S_ISLNK (new_dentry->d_inode->i_mode))
++ rsbac_target2 = T_SYMLINK;
++ else if (S_ISSOCK (new_dentry->d_inode->i_mode))
++ rsbac_target2 = T_UNIXSOCK;
++ rsbac_target_id2.file.device = new_dentry->d_sb->s_dev;
++ rsbac_target_id2.file.inode = new_dentry->d_inode->i_ino;
++ rsbac_target_id2.file.dentry_p = new_dentry;
++ rsbac_attribute_value2.nlink = new_dentry->d_inode->i_nlink;
++ if (!rsbac_adf_request(R_DELETE,
++ task_pid(current),
++ rsbac_target2,
++ rsbac_target_id2,
++ A_nlink,
++ rsbac_attribute_value2)) {
++ dput(new_dentry);
++ return -EPERM;
++ }
++ }
++#endif
++
++ target = new_dentry->d_inode;
+ if (target)
+ mutex_lock(&target->i_mutex);
+
+@@ -3187,6 +3998,35 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
+ dont_mount(new_dentry);
+ if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
+ d_move(old_dentry, new_dentry);
++
++#ifdef CONFIG_RSBAC
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_RENAME,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_new_dir_dentry_p,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "do_rename() [sys_rename()]: rsbac_adf_set_attr() returned error\n");
++ }
++ if (target_exists) {
++ if (rsbac_adf_set_attr(R_DELETE,
++ task_pid(current),
++ rsbac_target2,
++ rsbac_target_id2,
++ T_NONE,
++ rsbac_new_target_id,
++ A_nlink,
++ rsbac_attribute_value2)) {
++ rsbac_printk(KERN_WARNING
++ "do_rename() [sys_rename()]: rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++
+ out:
+ if (target)
+ mutex_unlock(&target->i_mutex);
+@@ -3334,6 +4174,9 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
+ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
+ {
+ int len;
++#ifdef CONFIG_RSBAC_SYM_REDIR
++ char * rsbac_name;
++#endif
+
+ len = PTR_ERR(link);
+ if (IS_ERR(link))
+@@ -3342,6 +4185,17 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
+ len = strlen(link);
+ if (len > (unsigned) buflen)
+ len = buflen;
++
++#ifdef CONFIG_RSBAC_SYM_REDIR
++ rsbac_name = rsbac_symlink_redirect(dentry->d_inode, link, buflen);
++ if (rsbac_name) {
++ len = strlen(rsbac_name);
++ if (copy_to_user(buffer, rsbac_name, len))
++ len = -EFAULT;
++ kfree(rsbac_name);
++ }
++ else
++#endif
+ if (copy_to_user(buffer, link, len))
+ len = -EFAULT;
+ out:
+diff --git a/fs/namespace.c b/fs/namespace.c
+index e608199..85a9067 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -20,6 +20,9 @@
+ #include <linux/fs_struct.h> /* get_fs_root et.al. */
+ #include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */
+ #include <linux/uaccess.h>
++
++#include <rsbac/hooks.h>
++
+ #include "pnode.h"
+ #include "internal.h"
+
+@@ -231,6 +234,13 @@ out_free_cache:
+ */
+ int __mnt_is_readonly(struct vfsmount *mnt)
+ {
++#ifdef CONFIG_RSBAC
++ /* HACK - Remove me when switching to full 2.6, pass over the vfsmount
++ * in init_private_file() instead
++ */
++ if(!mnt)
++ return 0;
++#endif
+ if (mnt->mnt_flags & MNT_READONLY)
+ return 1;
+ if (mnt->mnt_sb->s_flags & MS_RDONLY)
+@@ -305,6 +315,14 @@ int mnt_want_write(struct vfsmount *m)
+ struct mount *mnt = real_mount(m);
+ int ret = 0;
+
++#ifdef CONFIG_RSBAC
++ /* HACK - Remove me when switching to full 2.6, pass over the vfsmount
++ * in init_private_file() instead
++ */
++ if(!mnt)
++ return 0;
++#endif
++
+ preempt_disable();
+ mnt_inc_writers(mnt);
+ /*
+@@ -381,6 +399,13 @@ EXPORT_SYMBOL_GPL(mnt_want_write_file);
+ */
+ void mnt_drop_write(struct vfsmount *mnt)
+ {
++#ifdef CONFIG_RSBAC
++ /* HACK - Remove me when switching to full 2.6, pass over the vfsmount
++ * in init_private_file() instead
++ */
++ if(!mnt)
++ return;
++#endif
+ preempt_disable();
+ mnt_dec_writers(real_mount(mnt));
+ preempt_enable();
+@@ -717,6 +742,11 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
+ br_write_lock(vfsmount_lock);
+ list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
+ br_write_unlock(vfsmount_lock);
++
++#ifdef CONFIG_RSBAC
++ rsbac_mount(&mnt->mnt);
++#endif
++
+ return &mnt->mnt;
+ }
+ EXPORT_SYMBOL_GPL(vfs_kern_mount);
+@@ -1093,6 +1123,11 @@ static int do_umount(struct mount *mnt, int flags)
+ int retval;
+ LIST_HEAD(umount_list);
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ retval = security_sb_umount(&mnt->mnt, flags);
+ if (retval)
+ return retval;
+@@ -1123,6 +1158,44 @@ static int do_umount(struct mount *mnt, int flags)
+ return -EAGAIN;
+ }
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF for DIR\n");
++ rsbac_target_id.dir.device = sb->s_root->d_sb->s_dev;
++ rsbac_target_id.dir.inode = sb->s_root->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = sb->s_root;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_UMOUNT,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++ rsbac_pr_debug(aef, "calling ADF for dev\n");
++ rsbac_target_id.dev.type = D_block;
++ rsbac_target_id.dev.major = RSBAC_MAJOR(sb->s_dev);
++ rsbac_target_id.dev.minor = RSBAC_MINOR(sb->s_dev);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_UMOUNT,
++ task_pid(current),
++ T_DEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
++ /* RSBAC: removing data structures for this fs from memory (not /) */
++#ifdef CONFIG_RSBAC
++ if ((&mnt->mnt != current->fs->root.mnt) || (flags & MNT_DETACH)) {
++ rsbac_pr_debug(ds, "[sys_umount()]: calling rsbac_umount for Device %02u:%02u\n",
++ MAJOR(sb->s_dev), MINOR(sb->s_dev));
++ rsbac_umount(&mnt->mnt);
++ }
++#endif
++
+ /*
+ * If we may have to abort operations to get out of this
+ * mount, and they will themselves hold resources we must
+@@ -1172,6 +1245,17 @@ static int do_umount(struct mount *mnt, int flags)
+ retval = 0;
+ }
+ br_write_unlock(vfsmount_lock);
++
++#ifdef CONFIG_RSBAC
++ /* RSBAC: umount failed, so reread data structures for this fs from disk */
++ if(retval) {
++ rsbac_printk(KERN_WARNING
++ "do_umount() [sys_umount()]: umount failed -> calling rsbac_mount for Device %02u:%02u\n",
++ MAJOR(mnt->mnt.mnt_sb->s_dev),MINOR(mnt->mnt.mnt_sb->s_dev));
++ rsbac_mount(&mnt->mnt);
++ }
++#endif
++
+ up_write(&namespace_sem);
+ release_mounts(&umount_list);
+ return retval;
+@@ -1585,6 +1669,13 @@ static int do_loopback(struct path *path, char *old_name,
+ struct path old_path;
+ struct mount *mnt = NULL, *old;
+ int err = mount_is_safe(path);
++
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (err)
+ return err;
+ if (!old_name || !*old_name)
+@@ -1599,6 +1690,57 @@ static int do_loopback(struct path *path, char *old_name,
+
+ old = real_mount(old_path.mnt);
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[do_loopback() [sys_mount()]]: calling ADF for DIR\n");
++ rsbac_target_id.dir.device = old_path.dentry->d_sb->s_dev;
++ rsbac_target_id.dir.inode = old_path.dentry->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = old_path.dentry;
++ rsbac_attribute_value.mode = recurse;
++ if (!rsbac_adf_request(R_MOUNT,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_mode,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out2;
++ }
++ rsbac_pr_debug(aef, "[do_mount() [sys_mount()]]: calling ADF for DEV\n");
++ if(S_ISBLK(old_path.dentry->d_inode->i_mode))
++ {
++ rsbac_target = T_DEV;
++ rsbac_target_id.dev.type = D_block;
++ rsbac_target_id.dev.major = RSBAC_MAJOR(old_path.dentry->d_sb->s_dev);
++ rsbac_target_id.dev.minor = RSBAC_MINOR(old_path.dentry->d_sb->s_dev);
++ }
++ else
++ if(S_ISDIR(old_path.dentry->d_inode->i_mode))
++ {
++ rsbac_target = T_DIR;
++ rsbac_target_id.dir.device = old_path.dentry->d_sb->s_dev;
++ rsbac_target_id.dir.inode = old_path.dentry->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = old_path.dentry;
++ }
++ else
++ {
++ rsbac_target = T_FILE;
++ rsbac_target_id.file.device = old_path.dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = old_path.dentry->d_inode->i_ino;
++ rsbac_target_id.file.dentry_p = old_path.dentry;
++ }
++ if (!rsbac_adf_request(R_MOUNT,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_mode,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out2;
++ }
++#endif
++
+ err = -EINVAL;
+ if (IS_MNT_UNBINDABLE(old))
+ goto out2;
+@@ -1626,6 +1768,12 @@ out2:
+ release_mounts(&umount_list);
+ out:
+ path_put(&old_path);
++
++#ifdef CONFIG_RSBAC
++ if (!err)
++ rsbac_mount(&mnt->mnt);
++#endif
++
+ return err;
+ }
+
+@@ -1658,6 +1806,11 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
+ struct super_block *sb = path->mnt->mnt_sb;
+ struct mount *mnt = real_mount(path->mnt);
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+@@ -1671,6 +1824,34 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
+ if (err)
+ return err;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[do_mount() [sys_mount()]]: calling ADF for DIR\n");
++ rsbac_target_id.dir.device = path->dentry->d_sb->s_dev;
++ rsbac_target_id.dir.inode = path->dentry->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = path->dentry;
++ rsbac_attribute_value.mode = flags;
++ if (!rsbac_adf_request(R_MOUNT,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_mode,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++ rsbac_pr_debug(aef, "[do_mount() [sys_mount()]]: calling ADF for DEV\n");
++ rsbac_target_id.dev.type = D_block;
++ rsbac_target_id.dev.major = RSBAC_MAJOR(sb->s_dev);
++ rsbac_target_id.dev.minor = RSBAC_MINOR(sb->s_dev);
++ if (!rsbac_adf_request(R_MOUNT,
++ task_pid(current),
++ T_DEV,
++ rsbac_target_id,
++ A_mode,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ down_write(&sb->s_umount);
+ if (flags & MS_BIND)
+ err = change_mount_flags(path->mnt, flags);
+@@ -1707,6 +1888,12 @@ static int do_move_mount(struct path *path, char *old_name)
+ struct mount *p;
+ struct mount *old;
+ int err = 0;
++
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (!old_name || !*old_name)
+@@ -1722,6 +1909,61 @@ static int do_move_mount(struct path *path, char *old_name)
+ old = real_mount(old_path.mnt);
+ p = real_mount(path->mnt);
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[do_mount() [sys_mount()]]: calling ADF for UMOUNT on old DIR\n");
++ rsbac_target_id.dir.device = old_path.dentry->d_sb->s_dev;
++ rsbac_target_id.dir.inode = old_path.dentry->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = old_path.dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_UMOUNT,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out1;
++ }
++ rsbac_pr_debug(aef, "[do_mount() [sys_mount()]]: calling ADF for MOUNT on new DIR\n");
++ rsbac_target_id.dir.device = path->dentry->d_sb->s_dev;
++ rsbac_target_id.dir.inode = path->dentry->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = path->dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MOUNT,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out1;
++ }
++ rsbac_pr_debug(aef, "[do_mount() [sys_mount()]]: calling ADF for UMOUNT on DEV\n");
++ rsbac_target_id.dev.type = D_block;
++ rsbac_target_id.dev.major = RSBAC_MAJOR(old_path.dentry->d_sb->s_dev);
++ rsbac_target_id.dev.minor = RSBAC_MINOR(old_path.dentry->d_sb->s_dev);
++ if (!rsbac_adf_request(R_UMOUNT,
++ task_pid(current),
++ T_DEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out1;
++ }
++ rsbac_pr_debug(aef, "[do_mount() [sys_mount()]]: calling ADF for MOUNT on DEV\n");
++ if (!rsbac_adf_request(R_MOUNT,
++ task_pid(current),
++ T_DEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out1;
++ }
++#endif
++
++
+ err = -EINVAL;
+ if (!check_mnt(p) || !check_mnt(old))
+ goto out1;
+@@ -1759,6 +2001,10 @@ static int do_move_mount(struct path *path, char *old_name)
+ if (err)
+ goto out1;
+
++#ifdef CONFIG_RSBAC
++ rsbac_mount(path->mnt);
++#endif
++
+ /* if the mount is moved, it should no longer be expire
+ * automatically */
+ list_del_init(&old->mnt_expire);
+@@ -1816,12 +2062,50 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
+ {
+ int err;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL);
+
+ err = lock_mount(path);
+ if (err)
+ return err;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[do_mount() [sys_mount()]]: calling ADF for DIR\n");
++ rsbac_target_id.dir.device = path->dentry->d_sb->s_dev;
++ rsbac_target_id.dir.inode = path->dentry->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = path->dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MOUNT,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto unlock;
++ }
++ rsbac_pr_debug(aef, "[do_mount() [sys_mount()]]: calling ADF for DEV\n");
++ rsbac_target_id.dev.type = D_block;
++ rsbac_target_id.dev.major = RSBAC_MAJOR(newmnt->mnt.mnt_sb->s_dev);
++ rsbac_target_id.dev.minor = RSBAC_MINOR(newmnt->mnt.mnt_sb->s_dev);
++ rsbac_attribute_value.mode = mnt_flags;
++ if (!rsbac_adf_request(R_MOUNT,
++ task_pid(current),
++ T_DEV,
++ rsbac_target_id,
++ A_mode,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto unlock;
++ }
++#endif
++
+ err = -EINVAL;
+ if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(real_mount(path->mnt)))
+ goto unlock;
+@@ -2455,6 +2739,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+ struct mount *new_mnt, *root_mnt;
+ int error;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+@@ -2475,6 +2764,42 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+ if (error)
+ goto out3;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF for MOUNT on put_old\n");
++ rsbac_target_id.dir.device = old.dentry->d_sb->s_dev;
++ rsbac_target_id.dir.inode = old.dentry->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = old.dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MOUNT,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ error = -EPERM;
++ goto out4;
++ }
++ rsbac_pr_debug(aef, "calling ADF for MOUNT on root DIR\n");
++ rsbac_target_id.dir.device = current->fs->root.mnt->mnt_sb->s_dev;
++ rsbac_target_id.dir.inode = current->fs->root.dentry->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = current->fs->root.dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MOUNT,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ error = -EPERM;
++ goto out4;
++ }
++
++ /* Make the new root's cached rsbac.dat dentry be put to free the old root's dcache */
++ rsbac_free_dat_dentries();
++#endif
++
+ error = -EINVAL;
+ new_mnt = real_mount(new.mnt);
+ root_mnt = real_mount(root.mnt);
+diff --git a/fs/open.c b/fs/open.c
+index 5720854..94623f4 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -33,16 +33,51 @@
+
+ #include "internal.h"
+
++#ifdef CONFIG_RSBAC
++#include <net/sock.h>
++#endif
++#include <rsbac/hooks.h>
++
+ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
+ struct file *filp)
+ {
+ int ret;
+ struct iattr newattrs;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#ifdef CONFIG_RSBAC_SECDEL
++ loff_t old_len = dentry->d_inode->i_size;
++#endif
++#endif
++
+ /* Not pretty: "inode->i_size" shouldn't really be signed. But it is. */
+ if (length < 0)
+ return -EINVAL;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[open_namei(), do_sys_truncate() [sys_truncate()]]: calling ADF\n");
++ rsbac_target_id.file.device = dentry->d_inode->i_sb->s_dev;
++ rsbac_target_id.file.inode = dentry->d_inode->i_ino;
++ rsbac_target_id.file.dentry_p = dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_TRUNCATE,
++ task_pid(current),
++ T_FILE,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
++ /* RSBAC: Overwrite truncated part, if asked by flag */
++#ifdef CONFIG_RSBAC_SECDEL
++ rsbac_sec_trunc(dentry, length, old_len);
++#endif
++
+ newattrs.ia_size = length;
+ newattrs.ia_valid = ATTR_SIZE | time_attrs;
+ if (filp) {
+@@ -58,6 +93,25 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
+ mutex_lock(&dentry->d_inode->i_mutex);
+ ret = notify_change(dentry, &newattrs);
+ mutex_unlock(&dentry->d_inode->i_mutex);
++
++#ifdef CONFIG_RSBAC
++ if (!ret) {
++ rsbac_pr_debug(aef, "[open_namei(), do_sys_truncate() [sys_truncate()]]: notifying ADF\n");
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_TRUNCATE,
++ task_pid(current),
++ T_FILE,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "do_truncate() [open_namei(), do_sys_truncate() [sys_truncate()]]: rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++
+ return ret;
+ }
+
+@@ -85,6 +139,11 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
+ if (!S_ISREG(inode->i_mode))
+ goto dput_and_out;
+
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_PART
++ if (rsbac_dac_part_disabled(path.dentry))
++ error = 0;
++ else
++#endif
+ error = mnt_want_write(path.mnt);
+ if (error)
+ goto dput_and_out;
+@@ -218,6 +277,10 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ {
+ struct inode *inode = file->f_path.dentry->d_inode;
+ long ret;
++#ifdef CONFIG_RSBAC_RW
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
+
+ if (offset < 0 || len <= 0)
+ return -EINVAL;
+@@ -249,6 +312,21 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ if (ret)
+ return ret;
+
++#ifdef CONFIG_RSBAC_RW
++ rsbac_pr_debug(aef, "sys_fallocate(): calling ADF\n");
++ rsbac_target_id.file.device = inode->i_sb->s_dev;
++ rsbac_target_id.file.inode = inode->i_ino;
++ rsbac_target_id.file.dentry_p = file->f_path.dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_WRITE,
++ task_pid(current),
++ T_FILE,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ return -EPERM;
++#endif
++
+ if (S_ISFIFO(inode->i_mode))
+ return -ESPIPE;
+
+@@ -304,6 +382,12 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
+ struct inode *inode;
+ int res;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (mode & ~S_IRWXO) /* where's F_OK, X_OK, W_OK, R_OK? */
+ return -EINVAL;
+
+@@ -316,7 +400,11 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
+
+ if (!issecure(SECURE_NO_SETUID_FIXUP)) {
+ /* Clear the capabilities if we switch to a non-root user */
+- if (override_cred->uid)
++ if (override_cred->uid
++#ifdef CONFIG_RSBAC_FAKE_ROOT_UID
++ && !rsbac_uid_faked()
++#endif
++ )
+ cap_clear(override_cred->cap_effective);
+ else
+ override_cred->cap_effective =
+@@ -329,6 +417,32 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
+ if (res)
+ goto out;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target = T_FILE;
++ if (S_ISDIR(path.dentry->d_inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO(path.dentry->d_inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK(path.dentry->d_inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else if (S_ISSOCK(path.dentry->d_inode->i_mode))
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.file.device = path.dentry->d_inode->i_sb->s_dev;
++ rsbac_target_id.file.inode = path.dentry->d_inode->i_ino;
++ rsbac_target_id.file.dentry_p = path.dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_PERMISSIONS_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ res = -EPERM;
++ goto out_path_release;
++ }
++#endif
++
+ inode = path.dentry->d_inode;
+
+ if ((mode & MAY_EXEC) && S_ISREG(inode->i_mode)) {
+@@ -376,14 +490,41 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
+ struct path path;
+ int error;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ error = user_path_dir(filename, &path);
+ if (error)
+ goto out;
+
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_PART
++ if (rsbac_dac_part_disabled(path.dentry))
++ error = 0;
++ else
++#endif
+ error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
+ if (error)
+ goto dput_and_out;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.dir.device = path.dentry->d_inode->i_sb->s_dev;
++ rsbac_target_id.dir.inode = path.dentry->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = path.dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_CHDIR,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ error = -EPERM;
++ goto dput_and_out;
++ }
++#endif
++
+ set_fs_pwd(current->fs, &path);
+
+ dput_and_out:
+@@ -398,6 +539,11 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
+ struct inode *inode;
+ int error;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ error = -EBADF;
+ file = fget(fd);
+ if (!file)
+@@ -409,7 +555,32 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
+ if (!S_ISDIR(inode->i_mode))
+ goto out_putf;
+
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_PART
++ if (rsbac_dac_part_disabled(file->f_path.dentry))
++ error = 0;
++ else
++#endif
++
+ error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
++
++#ifdef CONFIG_RSBAC
++ if (!error) {
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.dir.device = inode->i_sb->s_dev;
++ rsbac_target_id.dir.inode = inode->i_ino;
++ rsbac_target_id.dir.dentry_p = file->f_path.dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_CHDIR,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ error = -EPERM;
++ }
++ }
++#endif
++
+ if (!error)
+ set_fs_pwd(current->fs, &file->f_path);
+ out_putf:
+@@ -423,10 +594,21 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
+ struct path path;
+ int error;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ error = user_path_dir(filename, &path);
+ if (error)
+ goto out;
+
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_PART
++ if (rsbac_dac_part_disabled(path.dentry))
++ error = 0;
++ else
++#endif
++
+ error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
+ if (error)
+ goto dput_and_out;
+@@ -438,6 +620,23 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
+ if (error)
+ goto dput_and_out;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.dir.device = path.dentry->d_inode->i_sb->s_dev;
++ rsbac_target_id.dir.inode = path.dentry->d_inode->i_ino;
++ rsbac_target_id.dir.dentry_p = path.dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_CHDIR,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ error = -EPERM;
++ goto dput_and_out;
++ }
++#endif
++
+ set_fs_root(current->fs, &path);
+ error = 0;
+ dput_and_out:
+@@ -452,9 +651,50 @@ static int chmod_common(struct path *path, umode_t mode)
+ struct iattr newattrs;
+ int error;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ error = mnt_want_write(path->mnt);
+ if (error)
+ return error;
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target = T_FILE;
++ rsbac_target_id.file.device = inode->i_sb->s_dev;
++ rsbac_target_id.file.inode = inode->i_ino;
++ rsbac_target_id.file.dentry_p = path->dentry;
++ if (S_ISDIR(inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO(inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK(inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else if (S_ISSOCK(inode->i_mode)) {
++ if(inode->i_sb->s_magic == SOCKFS_MAGIC) {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = inode->i_ino;
++ } else {
++ rsbac_target = T_UNIXSOCK;
++ }
++ }
++ rsbac_attribute_value.mode = mode;
++ if (!rsbac_adf_request(R_MODIFY_PERMISSIONS_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_mode,
++ rsbac_attribute_value))
++ {
++ mnt_drop_write(path->mnt);
++ return -EPERM;
++ }
++#endif
++
+ mutex_lock(&inode->i_mutex);
+ error = security_path_chmod(path, mode);
+ if (error)
+@@ -506,6 +746,37 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
+ int error;
+ struct iattr newattrs;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_*chown]: calling ADF\n");
++ rsbac_target = T_FILE;
++ if (S_ISDIR(inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO(inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK(inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else if (S_ISSOCK(inode->i_mode))
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.file.device = inode->i_sb->s_dev;
++ rsbac_target_id.file.inode = inode->i_ino;
++ rsbac_target_id.file.dentry_p = path->dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_CHANGE_OWNER,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ newattrs.ia_valid = ATTR_CTIME;
+ if (user != (uid_t) -1) {
+ newattrs.ia_valid |= ATTR_UID;
+@@ -1042,11 +1313,92 @@ int filp_close(struct file *filp, fl_owner_t id)
+ {
+ int retval = 0;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ enum rsbac_attribute_t rsbac_attribute = A_none;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!file_count(filp)) {
+ printk(KERN_ERR "VFS: Close: file count is 0\n");
+ return 0;
+ }
+
++#ifdef CONFIG_RSBAC
++ if (filp && filp->f_dentry && filp->f_dentry->d_inode) {
++ rsbac_pr_debug(aef, "[sys_close]: calling ADF\n");
++ rsbac_target = T_NONE;
++ if (S_ISBLK(filp->f_dentry->d_inode->i_mode)
++ || S_ISCHR(filp->f_dentry->d_inode->i_mode)) {
++ rsbac_target = T_DEV;
++ if (S_ISBLK(filp->f_dentry->d_inode->i_mode)) {
++ rsbac_target_id.dev.type = D_block;
++ }
++ else {
++ rsbac_target_id.dev.type = D_char;
++ }
++ rsbac_target_id.dev.major = RSBAC_MAJOR(filp->f_dentry->d_inode->i_sb->s_dev);
++ rsbac_target_id.dev.minor = RSBAC_MINOR(filp->f_dentry->d_inode->i_sb->s_dev);
++ rsbac_attribute = A_f_mode;
++ rsbac_attribute_value.f_mode = filp->f_mode;
++ } else
++ if (S_ISSOCK(filp->f_dentry->d_inode->i_mode)) {
++ if (SOCKET_I(filp->f_dentry->d_inode)->ops
++ && (SOCKET_I(filp->f_dentry->d_inode)->ops->family == AF_UNIX)) {
++ if (filp->f_dentry->d_sb->s_magic == SOCKFS_MAGIC) {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = filp->f_dentry->d_inode->i_ino;
++ rsbac_attribute = A_nlink;
++ rsbac_attribute_value.nlink = filp->f_dentry->d_inode->i_nlink;
++ } else {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = filp->f_dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = filp->f_dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = filp->f_dentry;
++ rsbac_attribute = A_f_mode;
++ rsbac_attribute_value.f_mode = filp->f_mode;
++ }
++ } else {
++ rsbac_target = T_NETOBJ;
++ rsbac_target_id.netobj.sock_p
++ = SOCKET_I(filp->f_dentry->d_inode);
++ rsbac_target_id.netobj.local_addr = NULL;
++ rsbac_target_id.netobj.local_len = 0;
++ rsbac_target_id.netobj.remote_addr = NULL;
++ rsbac_target_id.netobj.remote_len = 0;
++ rsbac_attribute = A_f_mode;
++ rsbac_attribute_value.f_mode = filp->f_mode;
++ }
++ } else { /* must be file, fifo or dir */
++ if (S_ISDIR(filp->f_dentry->d_inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO(filp->f_dentry->d_inode->i_mode))
++ rsbac_target = T_FIFO;
++ else
++ rsbac_target = T_FILE;
++ rsbac_target_id.file.device = filp->f_dentry->d_inode->i_sb->s_dev;
++ rsbac_target_id.file.inode = filp->f_dentry->d_inode->i_ino;
++ rsbac_target_id.file.dentry_p = filp->f_dentry;
++ rsbac_attribute = A_f_mode;
++ rsbac_attribute_value.f_mode = filp->f_mode;
++ }
++ if ((rsbac_target != T_NONE) && !rsbac_adf_request(R_CLOSE,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value))
++ {
++#ifdef CONFIG_RSBAC_ENFORCE_CLOSE
++ return -EPERM;
++#endif
++ }
++ }
++#endif
++
+ if (filp->f_op && filp->f_op->flush)
+ retval = filp->f_op->flush(filp, id);
+
+@@ -1054,6 +1406,26 @@ int filp_close(struct file *filp, fl_owner_t id)
+ dnotify_flush(filp, id);
+ locks_remove_posix(filp, id);
+ }
++
++#ifdef CONFIG_RSBAC
++ if (rsbac_target != T_NONE) {
++ rsbac_pr_debug(aef, "[sys_close]: notifying ADF\n");
++ rsbac_new_target_id.dummy = 0;
++ rsbac_attribute_value.f_mode = filp->f_mode;
++ if (rsbac_adf_set_attr(R_CLOSE,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "filp_close() [sys_close]: rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++
+ fput(filp);
+ return retval;
+ }
+diff --git a/fs/pipe.c b/fs/pipe.c
+index fec5e4a..c9718a3 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -25,6 +25,8 @@
+ #include <asm/uaccess.h>
+ #include <asm/ioctls.h>
+
++#include <rsbac/hooks.h>
++
+ /*
+ * The max size that a non-root user is allowed to grow the pipe. Can
+ * be set by root in /proc/sys/fs/pipe-max-size
+@@ -368,11 +370,33 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
+ struct iovec *iov = (struct iovec *)_iov;
+ size_t total_len;
+
++#ifdef CONFIG_RSBAC_RW
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ total_len = iov_length(iov, nr_segs);
+ /* Null read succeeds. */
+ if (unlikely(total_len == 0))
+ return 0;
+
++#ifdef CONFIG_RSBAC_RW
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.ipc.type = I_anonpipe;
++ rsbac_target_id.ipc.id.id_nr = inode->i_ino;
++ rsbac_attribute_value.dummy = 0;
++
++ if (!rsbac_adf_request(R_READ,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ do_wakeup = 0;
+ ret = 0;
+ mutex_lock(&inode->i_mutex);
+@@ -471,8 +495,26 @@ redo:
+ wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
+ kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
+ }
+- if (ret > 0)
++ if (ret > 0) {
+ file_accessed(filp);
++
++#ifdef CONFIG_RSBAC_RW
++ rsbac_new_target_id.dummy = 0;
++
++ if (rsbac_adf_set_attr(R_READ,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "pipe_read(): rsbac_adf_set_attr() returned error\n");
++ }
++#endif
++
++ }
+ return ret;
+ }
+
+@@ -494,11 +536,33 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
+ size_t total_len;
+ ssize_t chars;
+
++#ifdef CONFIG_RSBAC_RW
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ total_len = iov_length(iov, nr_segs);
+ /* Null write succeeds. */
+ if (unlikely(total_len == 0))
+ return 0;
+
++#ifdef CONFIG_RSBAC_RW
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.ipc.type = I_anonpipe;
++ rsbac_target_id.ipc.id.id_nr = inode->i_ino;
++ rsbac_attribute_value.dummy = 0;
++
++ if (!rsbac_adf_request(R_WRITE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ do_wakeup = 0;
+ ret = 0;
+ mutex_lock(&inode->i_mutex);
+@@ -654,8 +718,26 @@ out:
+ wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+ }
+- if (ret > 0)
++ if (ret > 0) {
+ file_update_time(filp);
++
++#ifdef CONFIG_RSBAC_RW
++ rsbac_new_target_id.dummy = 0;
++
++ if (rsbac_adf_set_attr(R_WRITE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "pipe_write(): rsbac_adf_set_attr() returned error\n");
++ }
++#endif
++ }
++
+ return ret;
+ }
+
+@@ -741,7 +823,20 @@ pipe_release(struct inode *inode, int decr, int decw)
+ pipe->writers -= decw;
+
+ if (!pipe->readers && !pipe->writers) {
++
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++#endif
++
+ free_pipe_info(inode);
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ACI remove_target()\n");
++ rsbac_target_id.ipc.type = I_anonpipe;
++ rsbac_target_id.ipc.id.id_nr = inode->i_ino;
++ rsbac_remove_target(T_IPC, rsbac_target_id);
++#endif
++
+ } else {
+ wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+@@ -758,10 +853,47 @@ pipe_read_fasync(int fd, struct file *filp, int on)
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ int retval;
+
++#ifdef CONFIG_RSBAC_RW
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC_RW
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.ipc.type = I_anonpipe;
++ rsbac_target_id.ipc.id.id_nr = inode->i_ino;
++ rsbac_attribute_value.dummy = 0;
++
++ if (!rsbac_adf_request(R_READ,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ mutex_lock(&inode->i_mutex);
+ retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_readers);
+ mutex_unlock(&inode->i_mutex);
+
++#ifdef CONFIG_RSBAC_RW
++ rsbac_new_target_id.dummy = 0;
++
++ if ((retval >= 0) && rsbac_adf_set_attr(R_READ,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING "pipe_read_fasync(): rsbac_adf_set_attr() returned error\n");
++ }
++#endif
++
+ return retval;
+ }
+
+@@ -772,10 +904,47 @@ pipe_write_fasync(int fd, struct file *filp, int on)
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ int retval;
+
++#ifdef CONFIG_RSBAC_RW
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC_RW
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.ipc.type = I_anonpipe;
++ rsbac_target_id.ipc.id.id_nr = inode->i_ino;
++ rsbac_attribute_value.dummy = 0;
++
++ if (!rsbac_adf_request(R_WRITE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ mutex_lock(&inode->i_mutex);
+ retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_writers);
+ mutex_unlock(&inode->i_mutex);
+
++#ifdef CONFIG_RSBAC_RW
++ rsbac_new_target_id.dummy = 0;
++
++ if ((retval >= 0) && rsbac_adf_set_attr(R_WRITE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING "pipe_write_fasync(): rsbac_adf_set_attr() returned error\n");
++ }
++#endif
++
+ return retval;
+ }
+
+@@ -787,6 +956,43 @@ pipe_rdwr_fasync(int fd, struct file *filp, int on)
+ struct pipe_inode_info *pipe = inode->i_pipe;
+ int retval;
+
++#ifdef CONFIG_RSBAC_RW
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC_RW
++ rsbac_pr_debug(aef, "calling ADF for READ\n");
++ rsbac_target_id.ipc.type = I_anonpipe;
++ rsbac_target_id.ipc.id.id_nr = inode->i_ino;
++ rsbac_attribute_value.dummy = 0;
++
++ if (!rsbac_adf_request(R_READ,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ rsbac_pr_debug(aef, "calling ADF for WRITE\n");
++ rsbac_target_id.ipc.type = I_anonpipe;
++ rsbac_target_id.ipc.id.id_nr = inode->i_ino;
++ rsbac_attribute_value.dummy = 0;
++
++ if (!rsbac_adf_request(R_WRITE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
++
+ mutex_lock(&inode->i_mutex);
+ retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
+ if (retval >= 0) {
+@@ -795,6 +1001,36 @@ pipe_rdwr_fasync(int fd, struct file *filp, int on)
+ fasync_helper(-1, filp, 0, &pipe->fasync_readers);
+ }
+ mutex_unlock(&inode->i_mutex);
++
++#ifdef CONFIG_RSBAC_RW
++ if (retval >= 0) {
++ rsbac_new_target_id.dummy = 0;
++
++ if (rsbac_adf_set_attr(R_READ,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++				"pipe_rdwr_fasync(): rsbac_adf_set_attr() for READ returned error\n");
++ }
++ if (rsbac_adf_set_attr(R_WRITE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++				"pipe_rdwr_fasync(): rsbac_adf_set_attr() for WRITE returned error\n");
++ }
++ }
++#endif
++
+ return retval;
+ }
+
+@@ -826,6 +1062,28 @@ pipe_read_open(struct inode *inode, struct file *filp)
+ {
+ int ret = -ENOENT;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.ipc.type = I_anonpipe;
++ rsbac_target_id.ipc.id.id_nr = inode->i_ino;
++ rsbac_attribute_value.dummy = 0;
++
++ if (!rsbac_adf_request(R_READ_OPEN,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ mutex_lock(&inode->i_mutex);
+
+ if (inode->i_pipe) {
+@@ -835,6 +1093,21 @@ pipe_read_open(struct inode *inode, struct file *filp)
+
+ mutex_unlock(&inode->i_mutex);
+
++#ifdef CONFIG_RSBAC
++ rsbac_new_target_id.dummy = 0;
++
++ if (!ret && rsbac_adf_set_attr(R_READ_OPEN,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING "pipe_read_open(): rsbac_adf_set_attr() returned error\n");
++ }
++#endif
++
+ return ret;
+ }
+
+@@ -843,6 +1116,28 @@ pipe_write_open(struct inode *inode, struct file *filp)
+ {
+ int ret = -ENOENT;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.ipc.type = I_anonpipe;
++ rsbac_target_id.ipc.id.id_nr = inode->i_ino;
++ rsbac_attribute_value.dummy = 0;
++
++ if (!rsbac_adf_request(R_WRITE_OPEN,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ mutex_lock(&inode->i_mutex);
+
+ if (inode->i_pipe) {
+@@ -852,6 +1147,21 @@ pipe_write_open(struct inode *inode, struct file *filp)
+
+ mutex_unlock(&inode->i_mutex);
+
++#ifdef CONFIG_RSBAC
++ rsbac_new_target_id.dummy = 0;
++
++ if (!ret && rsbac_adf_set_attr(R_WRITE_OPEN,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING "pipe_write_open(): rsbac_adf_set_attr() returned error\n");
++ }
++#endif
++
+ return ret;
+ }
+
+@@ -860,6 +1170,29 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
+ {
+ int ret = -ENOENT;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.ipc.type = I_anonpipe;
++ rsbac_target_id.ipc.id.id_nr = inode->i_ino;
++ rsbac_attribute_value.dummy = 0;
++
++ if (!rsbac_adf_request(R_READ_WRITE_OPEN,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++#endif
++
+ mutex_lock(&inode->i_mutex);
+
+ if (inode->i_pipe) {
+@@ -872,6 +1205,22 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
+
+ mutex_unlock(&inode->i_mutex);
+
++#ifdef CONFIG_RSBAC
++	/* RSBAC: notify the ADF of read_write_open on pipe */
++ rsbac_new_target_id.dummy = 0;
++
++ if (!ret && rsbac_adf_set_attr(R_READ_WRITE_OPEN,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING "pipe_rdwr_open(): rsbac_adf_set_attr() returned error\n");
++ }
++#endif
++
+ return ret;
+ }
+
+@@ -1021,6 +1370,12 @@ struct file *create_write_pipe(int flags)
+ struct path path;
+ struct qstr name = { .name = "" };
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ err = -ENFILE;
+ inode = get_pipe_inode();
+ if (!inode)
+@@ -1043,6 +1398,23 @@ struct file *create_write_pipe(int flags)
+ f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
+ f->f_version = 0;
+
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.ipc.type = I_anonpipe;
++ rsbac_target_id.ipc.id.id_nr = inode->i_ino;
++ rsbac_new_target_id.dummy = 0;
++ rsbac_attribute_value.dummy = 0;
++ if (rsbac_adf_set_attr(R_CREATE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++			"create_write_pipe() [sys_pipe()]: rsbac_adf_set_attr() returned error\n");
++ }
++#endif
+ return f;
+
+ err_dentry:
+@@ -1084,9 +1456,29 @@ int do_pipe_flags(int *fd, int flags)
+ int error;
+ int fdw, fdr;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
+ return -EINVAL;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_pipe()]: calling ADF\n");
++ rsbac_target_id.ipc.type = I_anonpipe;
++ rsbac_target_id.ipc.id.id_nr = 0;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_CREATE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ fw = create_write_pipe(flags);
+ if (IS_ERR(fw))
+ return PTR_ERR(fw);
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index f9bd395..de2fcbd 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -85,6 +85,7 @@
+ #include <asm/pgtable.h>
+ #include <asm/processor.h>
+ #include "internal.h"
++#include <rsbac/hooks.h>
+
+ static inline void task_name(struct seq_file *m, struct task_struct *p)
+ {
+@@ -340,7 +341,31 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
+ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task)
+ {
+- struct mm_struct *mm = get_task_mm(task);
++ struct mm_struct *mm;
++
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rcu_read_lock();
++ rsbac_target_id.process = task_pid(task);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rcu_read_unlock();
++ return -EPERM;
++ }
++ rcu_read_unlock();
++#endif
++
++ mm = get_task_mm(task);
+
+ task_name(m, task);
+ task_state(m, ns, pid, task);
+@@ -378,6 +403,28 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ char tcomm[sizeof(task->comm)];
+ unsigned long flags;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rcu_read_lock();
++ rsbac_target_id.process = task_pid(task);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rcu_read_unlock();
++ return -EPERM;
++ }
++ rcu_read_unlock();
++#endif
++
+ state = *get_task_state(task);
+ vsize = eip = esp = 0;
+ permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
+@@ -532,9 +579,32 @@ int proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns,
+ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task)
+ {
++
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
+ unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
+- struct mm_struct *mm = get_task_mm(task);
++ struct mm_struct *mm;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rcu_read_lock();
++ rsbac_target_id.process = task_pid(task);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rcu_read_unlock();
++ return -EPERM;
++ }
++ rcu_read_unlock();
++#endif
++
++ mm = get_task_mm(task);
+ if (mm) {
+ size = task_statm(mm, &shared, &text, &data, &resident);
+ mmput(mm);
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 57b8159..15d6c0a 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -89,6 +89,7 @@
+ #endif
+ #include <trace/events/oom.h>
+ #include "internal.h"
++#include <rsbac/hooks.h>
+
+ /* NOTE:
+ * Implementing inode permission operations in /proc is almost
+@@ -174,6 +175,29 @@ static int proc_cwd_link(struct dentry *dentry, struct path *path)
+ struct task_struct *task = get_proc_task(dentry->d_inode);
+ int result = -ENOENT;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if(!task)
++ return result;
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rcu_read_lock();
++ rsbac_target_id.process = task_pid(task);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ put_task_struct(task);
++ rcu_read_unlock();
++ return -EPERM;
++ }
++ rcu_read_unlock();
++#endif
++
+ if (task) {
+ task_lock(task);
+ if (task->fs) {
+@@ -191,6 +215,31 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
+ struct task_struct *task = get_proc_task(dentry->d_inode);
+ int result = -ENOENT;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ if(!task)
++ return result;
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rcu_read_lock();
++ rsbac_target_id.process = task_pid(task);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ put_task_struct(task);
++ rcu_read_unlock();
++ return -EPERM;
++ }
++ rcu_read_unlock();
++#endif
++
+ if (task) {
+ result = get_task_root(task, path);
+ put_task_struct(task);
+@@ -208,11 +257,35 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
+ int res = 0;
+ unsigned int len;
+ struct mm_struct *mm = get_task_mm(task);
++
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!mm)
+ goto out;
+ if (!mm->arg_end)
+ goto out_mm; /* Shh! No looking before we're done */
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rcu_read_lock();
++ rsbac_target_id.process = task_pid(task);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ res = -EPERM;
++ rcu_read_unlock();
++ goto out_mm;
++ }
++ rcu_read_unlock();
++#endif
++
+ len = mm->arg_end - mm->arg_start;
+
+ if (len > PAGE_SIZE)
+@@ -244,6 +317,31 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
+ {
+ struct mm_struct *mm = mm_for_maps(task);
+ int res = PTR_ERR(mm);
++
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rcu_read_lock();
++ rsbac_target_id.process = task_pid(task);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ if (mm && !IS_ERR(mm))
++ mmput(mm);
++ rcu_read_unlock();
++ return -EPERM;
++ }
++ rcu_read_unlock();
++#endif
++
+ if (mm && !IS_ERR(mm)) {
+ unsigned int nwords = 0;
+ do {
+@@ -269,6 +367,28 @@ static int proc_pid_wchan(struct task_struct *task, char *buffer)
+ unsigned long wchan;
+ char symname[KSYM_NAME_LEN];
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rcu_read_lock();
++ rsbac_target_id.process = task_pid(task);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rcu_read_unlock();
++ return -EPERM;
++ }
++ rcu_read_unlock();
++#endif
++
+ wchan = get_wchan(task);
+
+ if (lookup_symbol_name(wchan, symname) < 0)
+@@ -310,6 +430,28 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
+ int err;
+ int i;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rcu_read_lock();
++ rsbac_target_id.process = task_pid(task);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rcu_read_unlock();
++ return -EPERM;
++ }
++ rcu_read_unlock();
++#endif
++
+ entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+@@ -341,6 +483,28 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
+ */
+ static int proc_pid_schedstat(struct task_struct *task, char *buffer)
+ {
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rcu_read_lock();
++ rsbac_target_id.process = task_pid(task);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rcu_read_unlock();
++ return -EPERM;
++ }
++ rcu_read_unlock();
++#endif
++
+ return sprintf(buffer, "%llu %llu %lu\n",
+ (unsigned long long)task->se.sum_exec_runtime,
+ (unsigned long long)task->sched_info.run_delay,
+@@ -355,8 +519,33 @@ static int lstats_show_proc(struct seq_file *m, void *v)
+ struct inode *inode = m->private;
+ struct task_struct *task = get_proc_task(inode);
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++
+ if (!task)
+ return -ESRCH;
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rcu_read_lock();
++ rsbac_target_id.process = task_pid(task);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rcu_read_unlock();
++ put_task_struct(task);
++ return -EPERM;
++ }
++ rcu_read_unlock();
++#endif
++
+ seq_puts(m, "Latency Top version : v0.1\n");
+ for (i = 0; i < 32; i++) {
+ struct latency_record *lr = &task->latency_record[i];
+@@ -412,6 +601,26 @@ static int proc_oom_score(struct task_struct *task, char *buffer)
+ {
+ unsigned long points = 0;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rcu_read_lock();
++ rsbac_target_id.process = task_pid(task);
++ rsbac_attribute_value.dummy = 0;
++ if(!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rcu_read_unlock();
++ return -EPERM;
++ }
++ rcu_read_unlock();
++#endif
++
+ read_lock(&tasklist_lock);
+ if (pid_alive(task))
+ points = oom_badness(task, NULL, NULL,
+@@ -1607,12 +1816,37 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
+ struct task_struct *task;
+ const struct cred *cred;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (nd && nd->flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ inode = dentry->d_inode;
+ task = get_proc_task(inode);
+
++#ifdef CONFIG_RSBAC
++ if(!task)
++ return -EINVAL;
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rcu_read_lock();
++ rsbac_target_id.process = task_pid(task);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ put_task_struct(task);
++ rcu_read_unlock();
++ return -EPERM;
++ }
++ rcu_read_unlock();
++#endif
++
+ if (task) {
+ if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
+ task_dumpable(task)) {
+@@ -1736,6 +1970,30 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
+ struct file *file;
+ int fd = proc_fd(inode);
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rcu_read_lock();
++ rsbac_target_id.process = proc_pid(inode);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ if (task)
++ put_task_struct(task);
++ rcu_read_unlock();
++ return -EPERM;
++ }
++ rcu_read_unlock();
++#endif
++
+ if (task) {
+ files = get_files_struct(task);
+ put_task_struct(task);
+@@ -3177,6 +3435,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
+ unsigned tgid;
+ struct pid_namespace *ns;
+
++#ifdef CONFIG_RSBAC_PROC_HIDE
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ result = proc_base_lookup(dir, dentry);
+ if (!IS_ERR(result) || PTR_ERR(result) != -ENOENT)
+ goto out;
+@@ -3194,6 +3457,21 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
+ if (!task)
+ goto out;
+
++#ifdef CONFIG_RSBAC_PROC_HIDE
++ rsbac_target_id.process = task_pid(task);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ result = ERR_PTR(-ENOENT);
++ put_task_struct(task);
++ goto out;
++ }
++#endif
++
+ result = proc_pid_instantiate(dir, dentry, task, NULL);
+ put_task_struct(task);
+ out:
+@@ -3441,6 +3719,10 @@ static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry
+ struct task_struct *leader = get_proc_task(dir);
+ unsigned tid;
+ struct pid_namespace *ns;
++#ifdef CONFIG_RSBAC_PROC_HIDE
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
+
+ if (!leader)
+ goto out_no_task;
+@@ -3460,6 +3742,19 @@ static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry
+ if (!same_thread_group(leader, task))
+ goto out_drop_task;
+
++#ifdef CONFIG_RSBAC_PROC_HIDE
++ rsbac_target_id.process = task_pid(task);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ goto out_drop_task;
++ }
++#endif
++
+ result = proc_task_instantiate(dir, dentry, task, NULL);
+ out_drop_task:
+ put_task_struct(task);
+diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
+index 86c67ee..2b58f63 100644
+--- a/fs/proc/kcore.c
++++ b/fs/proc/kcore.c
+@@ -27,6 +27,8 @@
+ #include <linux/memory.h>
+ #include <asm/sections.h>
+
++#include <rsbac/hooks.h>
++
+ #define CORE_STR "CORE"
+
+ #ifndef ELF_CORE_EFLAGS
+@@ -544,8 +546,27 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
+
+ static int open_kcore(struct inode *inode, struct file *filp)
+ {
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
++
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.scd = ST_kmem;
++ rsbac_attribute_value.dummy = 0;
++ rsbac_pr_debug(aef, "calling ADF\n");
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ return -EPERM;
++#endif
++
+ if (kcore_need_update)
+ kcore_update_ram();
+ if (i_size_read(inode) != proc_root_kcore->size) {
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 21d836f..4dae4c6 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -10,6 +10,9 @@
+ #include <linux/namei.h>
+ #include <linux/mm.h>
+ #include <linux/module.h>
++
++#include <rsbac/hooks.h>
++
+ #include "internal.h"
+
+ static const struct dentry_operations proc_sys_dentry_operations;
+@@ -384,11 +387,32 @@ static int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int
+ {
+ int mode;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (root->permissions)
+ mode = root->permissions(root, current->nsproxy, table);
+ else
+ mode = table->mode;
+
++#ifdef CONFIG_RSBAC
++ if (op & 002) { /* write access */
++ rsbac_target_id.scd = ST_sysctl;
++ rsbac_attribute_value.mode = mode;
++ rsbac_pr_debug(aef, "[sysctl() etc.]: calling ADF\n");
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_mode,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++ }
++#endif
++
+ return test_perm(mode, op);
+ }
+
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 1030a71..bc12722 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -16,6 +16,7 @@
+ #include <asm/uaccess.h>
+ #include <asm/tlbflush.h>
+ #include "internal.h"
++#include <rsbac/hooks.h>
+
+ void task_mem(struct seq_file *m, struct mm_struct *mm)
+ {
+@@ -195,9 +196,28 @@ static int do_maps_open(struct inode *inode, struct file *file,
+ {
+ struct proc_maps_private *priv;
+ int ret = -ENOMEM;
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (priv) {
+ priv->pid = proc_pid(inode);
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.process = priv->pid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ kfree(priv);
++ return -EPERM;
++ }
++#endif
+ ret = seq_open(file, ops);
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
+index 74fe164..a55aab8 100644
+--- a/fs/proc/task_nommu.c
++++ b/fs/proc/task_nommu.c
+@@ -8,6 +8,7 @@
+ #include <linux/slab.h>
+ #include <linux/seq_file.h>
+ #include "internal.h"
++#include <rsbac/hooks.h>
+
+ /*
+ * Logic: we've got two memory sums for each process, "shared", and
+@@ -278,6 +279,26 @@ static int maps_open(struct inode *inode, struct file *file,
+ struct proc_maps_private *priv;
+ int ret = -ENOMEM;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.process = pid_task(proc_pid(inode), PIDTYPE_PID)->pid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
++
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (priv) {
+ priv->pid = proc_pid(inode);
+diff --git a/fs/quota/quota.c b/fs/quota/quota.c
+index 9a39120..ac32e29 100644
+--- a/fs/quota/quota.c
++++ b/fs/quota/quota.c
+@@ -15,12 +15,18 @@
+ #include <linux/syscalls.h>
+ #include <linux/capability.h>
+ #include <linux/quotaops.h>
++#include <rsbac/hooks.h>
+ #include <linux/types.h>
+ #include <linux/writeback.h>
+
+ static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
+ qid_t id)
+ {
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ switch (cmd) {
+ /* these commands do not require any special privilegues */
+ case Q_GETFMT:
+@@ -28,17 +34,60 @@ static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
+ case Q_GETINFO:
+ case Q_XGETQSTAT:
+ case Q_XQUOTASYNC:
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.scd = ST_quota;
++ rsbac_attribute_value.dummy = 0;
++ rsbac_pr_debug(aef, "[sys_quotactl()]: calling ADF\n");
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
+ break;
+ /* allow to query information for dquots we "own" */
+ case Q_GETQUOTA:
+ case Q_XGETQUOTA:
+ if ((type == USRQUOTA && current_euid() == id) ||
+- (type == GRPQUOTA && in_egroup_p(id)))
++ (type == GRPQUOTA && in_egroup_p(id))) {
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.scd = ST_quota;
++ rsbac_attribute_value.dummy = 0;
++ rsbac_pr_debug(aef, "[sys_quotactl()]: calling ADF\n");
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
+ break;
++ }
+ /*FALLTHROUGH*/
+ default:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
++
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.scd = ST_quota;
++ rsbac_attribute_value.dummy = 0;
++ rsbac_pr_debug(aef, "[sys_quotactl()]: calling ADF\n");
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
+ }
+
+ return security_quotactl(cmd, type, id, sb);
+diff --git a/fs/read_write.c b/fs/read_write.c
+index ffc99d2..d23e4b9 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -17,6 +17,12 @@
+ #include <linux/splice.h>
+ #include "read_write.h"
+
++#ifdef CONFIG_RSBAC_RW
++#include <net/sock.h>
++#include <net/af_unix.h>
++#endif
++#include <rsbac/hooks.h>
++
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+
+@@ -325,7 +331,11 @@ int rw_verify_area(int read_write, struct file *file, loff_t *ppos, size_t count
+ return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
+ }
+
++#ifdef CONFIG_RSBAC
++void wait_on_retry_sync_kiocb(struct kiocb *iocb)
++#else
+ static void wait_on_retry_sync_kiocb(struct kiocb *iocb)
++#endif
+ {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (!kiocbIsKicked(iocb))
+@@ -365,6 +375,10 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
+ {
+ ssize_t ret;
+
++#ifdef CONFIG_RSBAC_RW
++ struct rsbac_rw_req rsbac_rw_req_obj;
++#endif
++
+ if (!(file->f_mode & FMODE_READ))
+ return -EBADF;
+ if (!file->f_op || (!file->f_op->read && !file->f_op->aio_read))
+@@ -375,6 +389,12 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
+ ret = rw_verify_area(READ, file, pos, count);
+ if (ret >= 0) {
+ count = ret;
++#ifdef CONFIG_RSBAC_RW
++ rsbac_rw_req_obj.rsbac_target = T_NONE;
++ rsbac_rw_req_obj.rsbac_request = R_READ;
++ if (!rsbac_handle_rw_req(file, &rsbac_rw_req_obj))
++ return -EPERM;
++#endif
+ if (file->f_op->read)
+ ret = file->f_op->read(file, buf, count, pos);
+ else
+@@ -382,6 +402,9 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
+ if (ret > 0) {
+ fsnotify_access(file);
+ add_rchar(current, ret);
++#ifdef CONFIG_RSBAC_RW
++ rsbac_handle_rw_up(&rsbac_rw_req_obj);
++#endif
+ }
+ inc_syscr(current);
+ }
+@@ -421,6 +444,10 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
+ {
+ ssize_t ret;
+
++#ifdef CONFIG_RSBAC_RW
++ struct rsbac_rw_req rsbac_rw_req_obj;
++#endif
++
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EBADF;
+ if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
+@@ -431,6 +458,12 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
+ ret = rw_verify_area(WRITE, file, pos, count);
+ if (ret >= 0) {
+ count = ret;
++#ifdef CONFIG_RSBAC_RW
++ rsbac_rw_req_obj.rsbac_target = T_NONE;
++ rsbac_rw_req_obj.rsbac_request = R_WRITE;
++ if (!rsbac_handle_rw_req(file, &rsbac_rw_req_obj))
++ return -EPERM;
++#endif
+ if (file->f_op->write)
+ ret = file->f_op->write(file, buf, count, pos);
+ else
+@@ -438,6 +471,10 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
+ if (ret > 0) {
+ fsnotify_modify(file);
+ add_wchar(current, ret);
++
++#ifdef CONFIG_RSBAC_RW
++ rsbac_handle_rw_up(&rsbac_rw_req_obj);
++#endif
+ }
+ inc_syscw(current);
+ }
+@@ -717,6 +754,11 @@ static ssize_t do_readv_writev(int type, struct file *file,
+ io_fn_t fn;
+ iov_fn_t fnv;
+
++#ifdef CONFIG_RSBAC_RW
++ struct rsbac_rw_req rsbac_rw_req_obj;
++ rsbac_rw_req_obj.rsbac_target = T_NONE;
++#endif
++
+ if (!file->f_op) {
+ ret = -EINVAL;
+ goto out;
+@@ -732,6 +774,19 @@ static ssize_t do_readv_writev(int type, struct file *file,
+ if (ret < 0)
+ goto out;
+
++#ifdef CONFIG_RSBAC_RW
++ if (type == READ)
++ rsbac_rw_req_obj.rsbac_request = R_READ;
++ else
++/* if type wouldn't be WRITE here it's going to be funny ;)
++ kernel itself does NOT check on it. */
++ rsbac_rw_req_obj.rsbac_request = R_WRITE;
++ if(!rsbac_handle_rw_req(file, &rsbac_rw_req_obj)) {
++ ret = -EPERM;
++ goto out;
++ }
++#endif
++
+ fnv = NULL;
+ if (type == READ) {
+ fn = file->f_op->read;
+@@ -756,6 +811,12 @@ out:
+ else
+ fsnotify_modify(file);
+ }
++
++#ifdef CONFIG_RSBAC_RW
++ if (ret > 0)
++ rsbac_handle_rw_up(&rsbac_rw_req_obj);
++#endif
++
+ return ret;
+ }
+
+@@ -892,6 +953,15 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
+ ssize_t retval;
+ int fput_needed_in, fput_needed_out, fl;
+
++#ifdef CONFIG_RSBAC_RW
++ struct rsbac_rw_req rsbac_rw_req_obj1;
++ struct rsbac_rw_req rsbac_rw_req_obj2;
++ struct socket * sock1;
++ struct socket * sock2;
++ rsbac_rw_req_obj1.rsbac_target = T_NONE;
++ rsbac_rw_req_obj2.rsbac_target = T_NONE;
++#endif
++
+ /*
+ * Get input file, and verify that it is ok..
+ */
+@@ -912,6 +982,29 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
+ goto fput_in;
+ count = retval;
+
++#ifdef CONFIG_RSBAC_RW
++/* This could be done in fewer lines of code, but this way is much faster, and sendfile is mostly used with network sockets. */
++ if (S_ISSOCK(in_file->f_dentry->d_inode->i_mode)) {
++ sock1 = SOCKET_I(in_file->f_dentry->d_inode);
++ if ((sock1->ops) && (sock1->ops->family != AF_UNIX)) {
++ rsbac_rw_req_obj1.rsbac_target = T_NETOBJ;
++ rsbac_rw_req_obj1.rsbac_target_id.netobj.sock_p = sock1;
++ rsbac_rw_req_obj1.rsbac_target_id.netobj.local_addr = NULL;
++ rsbac_rw_req_obj1.rsbac_target_id.netobj.local_len = 0;
++ rsbac_rw_req_obj1.rsbac_target_id.netobj.remote_addr = NULL;
++ rsbac_rw_req_obj1.rsbac_target_id.netobj.remote_len = 0;
++ rsbac_rw_req_obj1.rsbac_attribute = A_sock_type;
++ rsbac_rw_req_obj1.rsbac_attribute_value.sock_type = sock1->type;
++ }
++ }
++ rsbac_rw_req_obj1.rsbac_request = R_READ;
++ if(!rsbac_handle_rw_req(in_file, &rsbac_rw_req_obj1))
++ {
++ retval = -EPERM;
++ goto fput_in;
++ }
++#endif
++
+ /*
+ * Get output file, and verify that it is ok..
+ */
+@@ -929,6 +1022,28 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
+ goto fput_out;
+ count = retval;
+
++#ifdef CONFIG_RSBAC_RW
++ if (S_ISSOCK(out_file->f_dentry->d_inode->i_mode)) {
++ sock2 = SOCKET_I(out_file->f_dentry->d_inode);
++ if ((sock2->ops) && (sock2->ops->family != AF_UNIX)) {
++ rsbac_rw_req_obj2.rsbac_target = T_NETOBJ;
++ rsbac_rw_req_obj2.rsbac_target_id.netobj.sock_p = sock2;
++ rsbac_rw_req_obj2.rsbac_target_id.netobj.local_addr = NULL;
++ rsbac_rw_req_obj2.rsbac_target_id.netobj.local_len = 0;
++ rsbac_rw_req_obj2.rsbac_target_id.netobj.remote_addr = NULL;
++ rsbac_rw_req_obj2.rsbac_target_id.netobj.remote_len = 0;
++ rsbac_rw_req_obj2.rsbac_attribute = A_sock_type;
++ rsbac_rw_req_obj2.rsbac_attribute_value.sock_type = sock2->type;
++ }
++ }
++ rsbac_rw_req_obj2.rsbac_request = R_WRITE;
++ if(!rsbac_handle_rw_req(out_file, &rsbac_rw_req_obj2))
++ {
++ retval = -EPERM;
++ goto fput_out;
++ }
++#endif
++
+ if (!max)
+ max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);
+
+@@ -963,6 +1078,11 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
+ if (*ppos > max)
+ retval = -EOVERFLOW;
+
++#ifdef CONFIG_RSBAC_RW
++ rsbac_handle_rw_up(&rsbac_rw_req_obj1);
++ rsbac_handle_rw_up(&rsbac_rw_req_obj2);
++#endif
++
+ fput_out:
+ fput_light(out_file, fput_needed_out);
+ fput_in:
+diff --git a/fs/readdir.c b/fs/readdir.c
+index cc0a822..a27286d 100644
+--- a/fs/readdir.c
++++ b/fs/readdir.c
+@@ -20,10 +20,27 @@
+
+ #include <asm/uaccess.h>
+
++#ifdef CONFIG_RSBAC
++#include <net/sock.h>
++#ifdef CONFIG_RSBAC_FSOBJ_HIDE
++#include "hfsplus/hfsplus_fs.h"
++#include "hfsplus/hfsplus_raw.h"
++#endif
++#endif
++
++#include <rsbac/hooks.h>
++#include <linux/namei.h>
++
+ int vfs_readdir(struct file *file, filldir_t filler, void *buf)
+ {
+ struct inode *inode = file->f_path.dentry->d_inode;
+ int res = -ENOTDIR;
++
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!file->f_op || !file->f_op->readdir)
+ goto out;
+
+@@ -31,6 +48,23 @@ int vfs_readdir(struct file *file, filldir_t filler, void *buf)
+ if (res)
+ goto out;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[old_readdir(), sys_getdents()]: calling ADF\n");
++ rsbac_target_id.dir.device = inode->i_sb->s_dev;
++ rsbac_target_id.dir.inode = inode->i_ino;
++ rsbac_target_id.dir.dentry_p = file->f_dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_READ,
++ task_pid(current),
++ T_DIR,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ res = -EPERM;
++ goto out;
++ }
++#endif
++
+ res = mutex_lock_killable(&inode->i_mutex);
+ if (res)
+ goto out;
+@@ -67,6 +101,9 @@ struct old_linux_dirent {
+
+ struct readdir_callback {
+ struct old_linux_dirent __user * dirent;
++#ifdef CONFIG_RSBAC_FSOBJ_HIDE
++ struct file * file;
++#endif
+ int result;
+ };
+
+@@ -79,6 +116,11 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
+
+ if (buf->result)
+ return -EINVAL;
++#ifdef CONFIG_RSBAC_FSOBJ_HIDE
++ if (!rsbac_handle_filldir(buf->file, name, namlen, ino))
++ return 0;
++#endif
++
+ d_ino = ino;
+ if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
+ buf->result = -EOVERFLOW;
+@@ -116,6 +158,9 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
+
+ buf.result = 0;
+ buf.dirent = dirent;
++#ifdef CONFIG_RSBAC_FSOBJ_HIDE
++ buf.file = file;
++#endif
+
+ error = vfs_readdir(file, fillonedir, &buf);
+ if (buf.result)
+@@ -142,6 +187,9 @@ struct linux_dirent {
+ struct getdents_callback {
+ struct linux_dirent __user * current_dir;
+ struct linux_dirent __user * previous;
++#ifdef CONFIG_RSBAC_FSOBJ_HIDE
++ struct file * file;
++#endif
+ int count;
+ int error;
+ };
+@@ -158,6 +206,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
++#ifdef CONFIG_RSBAC_FSOBJ_HIDE
++ if (!rsbac_handle_filldir(buf->file, name, namlen, ino))
++ return 0;
++#endif
+ d_ino = ino;
+ if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
+ buf->error = -EOVERFLOW;
+@@ -208,6 +260,9 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
+
+ buf.current_dir = dirent;
+ buf.previous = NULL;
++#ifdef CONFIG_RSBAC_FSOBJ_HIDE
++ buf.file = file;
++#endif
+ buf.count = count;
+ buf.error = 0;
+
+@@ -231,6 +286,9 @@ struct getdents_callback64 {
+ struct linux_dirent64 __user * previous;
+ int count;
+ int error;
++#ifdef CONFIG_RSBAC_FSOBJ_HIDE
++ struct file * file;
++#endif
+ };
+
+ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
+@@ -244,6 +302,12 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
++
++#ifdef CONFIG_RSBAC_FSOBJ_HIDE
++ if (!rsbac_handle_filldir(buf->file, name, namlen, ino))
++ return 0;
++#endif
++
+ dirent = buf->previous;
+ if (dirent) {
+ if (__put_user(offset, &dirent->d_off))
+@@ -291,6 +355,9 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
+
+ buf.current_dir = dirent;
+ buf.previous = NULL;
++#ifdef CONFIG_RSBAC_FSOBJ_HIDE
++ buf.file = file;
++#endif
+ buf.count = count;
+ buf.error = 0;
+
+diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
+index 84e8a69..aaf5015 100644
+--- a/fs/reiserfs/namei.c
++++ b/fs/reiserfs/namei.c
+@@ -18,6 +18,7 @@
+ #include "acl.h"
+ #include "xattr.h"
+ #include <linux/quotaops.h>
++#include <rsbac/hooks.h>
+
+ #define INC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) { inc_nlink(i); if (i->i_nlink >= REISERFS_LINK_MAX) set_nlink(i, 1); }
+ #define DEC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) drop_nlink(i);
+@@ -975,6 +976,11 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
+ */
+ savelink = inode->i_nlink;
+
++#ifdef CONFIG_RSBAC_SECDEL
++ if (inode->i_nlink == 1)
++ rsbac_sec_del(dentry, TRUE);
++#endif
++
+ retval =
+ reiserfs_cut_from_item(&th, &path, &(de.de_entry_key), dir, NULL,
+ 0);
+@@ -1357,6 +1363,11 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ journal_end(&th, old_dir->i_sb, jbegin_count);
+ reiserfs_write_unlock(old_dir->i_sb);
+ return -EIO;
++#ifdef CONFIG_RSBAC_SECDEL
++ } else {
++ if (new_dentry_inode && (new_dentry_inode->i_nlink == 1))
++ rsbac_sec_del(new_dentry, TRUE);
++#endif
+ }
+
+ copy_item_head(&new_entry_ih, get_ih(&new_entry_path));
+diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
+index 46fc1c2..52944f0 100644
+--- a/fs/reiserfs/xattr.c
++++ b/fs/reiserfs/xattr.c
+@@ -951,6 +951,10 @@ static const struct dentry_operations xattr_lookup_poison_ops = {
+ .d_revalidate = xattr_hide_revalidate,
+ };
+
++#ifdef CONFIG_RSBAC
++struct dentry * rsbac_lookup_one_len(const char * name, struct dentry * base, int len);
++#endif
++
+ int reiserfs_lookup_privroot(struct super_block *s)
+ {
+ struct dentry *dentry;
+@@ -995,8 +999,13 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
+ reiserfs_mutex_lock_safe(&privroot->d_inode->i_mutex, s);
+ if (!REISERFS_SB(s)->xattr_root) {
+ struct dentry *dentry;
++#ifdef CONFIG_RSBAC
++ dentry = rsbac_lookup_one_len(XAROOT_NAME, s->s_root,
++ strlen(XAROOT_NAME));
++#else
+ dentry = lookup_one_len(XAROOT_NAME, privroot,
+ strlen(XAROOT_NAME));
++#endif
+ if (!IS_ERR(dentry))
+ REISERFS_SB(s)->xattr_root = dentry;
+ else
+diff --git a/fs/stat.c b/fs/stat.c
+index c733dc5..1619bd3 100644
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -18,8 +18,19 @@
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+
++#ifdef CONFIG_RSBAC
++#include <net/sock.h>
++#include <rsbac/hooks.h>
++#endif
++
+ void generic_fillattr(struct inode *inode, struct kstat *stat)
+ {
++
++#ifdef CONFIG_RSBAC_SYM_REDIR
++ char *rsbac_name;
++ int len = 0;
++#endif
++
+ stat->dev = inode->i_sb->s_dev;
+ stat->ino = inode->i_ino;
+ stat->mode = inode->i_mode;
+@@ -27,6 +38,19 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
+ stat->uid = inode->i_uid;
+ stat->gid = inode->i_gid;
+ stat->rdev = inode->i_rdev;
++
++#ifdef CONFIG_RSBAC_SYM_REDIR
++ if (S_ISLNK(inode->i_mode)) {
++ rsbac_name = rsbac_symlink_redirect(inode, "", 0);
++ if (rsbac_name) {
++ len = strlen(rsbac_name);
++ kfree(rsbac_name);
++ }
++ stat->size = i_size_read(inode) + len;
++ }
++ else
++#endif
++
+ stat->size = i_size_read(inode);
+ stat->atime = inode->i_atime;
+ stat->mtime = inode->i_mtime;
+@@ -42,10 +66,51 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+ struct inode *inode = dentry->d_inode;
+ int retval;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ retval = security_inode_getattr(mnt, dentry);
+ if (retval)
+ return retval;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_stat() etc.]: calling ADF\n");
++ rsbac_target_id.file.device = inode->i_sb->s_dev;
++ rsbac_target_id.file.inode = inode->i_ino;
++ rsbac_target_id.file.dentry_p = dentry;
++ if (S_ISDIR(inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO(inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK(inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else if (S_ISSOCK(inode->i_mode)) {
++ if (inode->i_sb->s_magic == SOCKFS_MAGIC) {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = inode->i_ino;
++ } else {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = inode->i_sb->s_dev;
++ rsbac_target_id.unixsock.inode = inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = dentry;
++ }
++ } else
++ rsbac_target = T_FILE;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ if (inode->i_op->getattr)
+ return inode->i_op->getattr(mnt, dentry, stat);
+
+@@ -296,6 +361,11 @@ SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
+ int error;
+ int empty = 0;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (bufsiz <= 0)
+ return -EINVAL;
+
+@@ -307,6 +377,24 @@ SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
+ if (inode->i_op->readlink) {
+ error = security_inode_readlink(path.dentry);
+ if (!error) {
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.file.device = path.dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = inode->i_ino;
++ rsbac_target_id.file.dentry_p = path.dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SYMLINK,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ path_put(&path);
++ return -EPERM;
++ }
++#endif
++
+ touch_atime(&path);
+ error = inode->i_op->readlink(path.dentry,
+ buf, bufsiz);
+diff --git a/fs/statfs.c b/fs/statfs.c
+index 43e6b6f..c3b9b0e 100644
+--- a/fs/statfs.c
++++ b/fs/statfs.c
+@@ -9,6 +9,8 @@
+ #include <linux/uaccess.h>
+ #include "internal.h"
+
++#include <rsbac/hooks.h>
++
+ static int flags_by_mnt(int mnt_flags)
+ {
+ int flags = 0;
+@@ -50,9 +52,30 @@ static int statfs_by_dentry(struct dentry *dentry, struct kstatfs *buf)
+ {
+ int retval;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!dentry->d_sb->s_op->statfs)
+ return -ENOSYS;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.dev.type = D_block;
++ rsbac_target_id.dev.major = RSBAC_MAJOR(dentry->d_sb->s_dev);
++ rsbac_target_id.dev.minor = RSBAC_MINOR(dentry->d_sb->s_dev);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_DEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ memset(buf, 0, sizeof(*buf));
+ retval = security_sb_statfs(dentry);
+ if (retval)
+diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
+index 00012e3..9a3209f 100644
+--- a/fs/sysfs/file.c
++++ b/fs/sysfs/file.c
+@@ -24,6 +24,8 @@
+
+ #include "sysfs.h"
+
++#include <rsbac/hooks.h>
++
+ /*
+ * There's one sysfs_buffer for each open file and one
+ * sysfs_open_dirent for each sysfs_dirent with one or more open
+@@ -330,6 +332,11 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
+ struct sysfs_buffer *buffer;
+ const struct sysfs_ops *ops;
+ int error = -EACCES;
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+
+ /* need attr_sd for attr and ops, its parent for kobj */
+ if (!sysfs_get_active(attr_sd))
+@@ -351,6 +358,21 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
+ if (file->f_mode & FMODE_WRITE) {
+ if (!(inode->i_mode & S_IWUGO) || !ops->store)
+ goto err_out;
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sysfs_open_file()]: calling ADF\n");
++ rsbac_target_id.scd = ST_sysfs;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_owner,
++ rsbac_attribute_value))
++ {
++ error = -EPERM;
++ goto err_out;
++ }
++#endif
+ }
+
+ /* File needs read support.
+@@ -360,6 +382,22 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
+ if (file->f_mode & FMODE_READ) {
+ if (!(inode->i_mode & S_IRUGO) || !ops->show)
+ goto err_out;
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sysfs_open_file()]: calling ADF\n");
++ rsbac_target_id.scd = ST_sysfs;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_owner,
++ rsbac_attribute_value))
++ {
++ error = -EPERM;
++ goto err_out;
++ }
++#endif
+ }
+
+ /* No error? Great, allocate a buffer for the file, and store it
+diff --git a/fs/utimes.c b/fs/utimes.c
+index ba653f3..0c8e4d7 100644
+--- a/fs/utimes.c
++++ b/fs/utimes.c
+@@ -10,6 +10,7 @@
+ #include <linux/syscalls.h>
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
++#include <rsbac/hooks.h>
+
+ #ifdef __ARCH_WANT_SYS_UTIME
+
+@@ -54,6 +55,12 @@ static int utimes_common(struct path *path, struct timespec *times)
+ struct iattr newattrs;
+ struct inode *inode = path->dentry->d_inode;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ error = mnt_want_write(path->mnt);
+ if (error)
+ goto out;
+@@ -95,12 +102,43 @@ static int utimes_common(struct path *path, struct timespec *times)
+ if (IS_IMMUTABLE(inode))
+ goto mnt_drop_write_and_out;
+
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_PART
++ if (!rsbac_dac_part_disabled(path->dentry))
++#endif
+ if (!inode_owner_or_capable(inode)) {
+ error = inode_permission(inode, MAY_WRITE);
+ if (error)
+ goto mnt_drop_write_and_out;
+ }
+ }
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target = T_FILE;
++ if (S_ISDIR(inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO(inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK(inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else if (S_ISSOCK(inode->i_mode))
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.file.device = inode->i_sb->s_dev;
++ rsbac_target_id.file.inode = inode->i_ino;
++ rsbac_target_id.file.dentry_p = path->dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_ACCESS_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ error = -EPERM;
++ goto mnt_drop_write_and_out;
++ }
++#endif
++
+ mutex_lock(&inode->i_mutex);
+ error = notify_change(path->dentry, &newattrs);
+ mutex_unlock(&inode->i_mutex);
+diff --git a/fs/xattr.c b/fs/xattr.c
+index 3c8c1cc..def82bf 100644
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -116,6 +116,7 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
+ return error;
+ }
+
++#include <rsbac/hooks.h>
+
+ int
+ vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+@@ -123,6 +124,36 @@ vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ {
+ struct inode *inode = dentry->d_inode;
+ int error;
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_*setxattr()]: calling ADF\n");
++ rsbac_target = T_FILE;
++ if (S_ISDIR(inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO(inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK(inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else if (S_ISSOCK(inode->i_mode))
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.file.device = dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = inode->i_ino;
++ rsbac_target_id.file.dentry_p = dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_PERMISSIONS_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
+
+ error = xattr_permission(inode, name, MAY_WRITE);
+ if (error)
+@@ -232,6 +263,12 @@ vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
+ struct inode *inode = dentry->d_inode;
+ int error;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ error = xattr_permission(inode, name, MAY_READ);
+ if (error)
+ return error;
+@@ -240,6 +277,31 @@ vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
+ if (error)
+ return error;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_*getxattr()]: calling ADF\n");
++ rsbac_target = T_FILE;
++ if (S_ISDIR(inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO(inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK(inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else if (S_ISSOCK(inode->i_mode))
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.file.device = dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = inode->i_ino;
++ rsbac_target_id.file.dentry_p = dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_PERMISSIONS_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ if (!strncmp(name, XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN)) {
+ const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
+@@ -266,10 +328,41 @@ ssize_t
+ vfs_listxattr(struct dentry *d, char *list, size_t size)
+ {
+ ssize_t error;
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
+
+ error = security_inode_listxattr(d);
+ if (error)
+ return error;
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_*listxattr()]: calling ADF\n");
++ rsbac_target = T_FILE;
++ if (S_ISDIR(d->d_inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO(d->d_inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK(d->d_inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else if (S_ISSOCK(d->d_inode->i_mode))
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.file.device = d->d_sb->s_dev;
++ rsbac_target_id.file.inode = d->d_inode->i_ino;
++ rsbac_target_id.file.dentry_p = d;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_PERMISSIONS_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ error = -EOPNOTSUPP;
+ if (d->d_inode->i_op->listxattr) {
+ error = d->d_inode->i_op->listxattr(d, list, size);
+@@ -288,6 +381,12 @@ vfs_removexattr(struct dentry *dentry, const char *name)
+ struct inode *inode = dentry->d_inode;
+ int error;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!inode->i_op->removexattr)
+ return -EOPNOTSUPP;
+
+@@ -299,6 +398,31 @@ vfs_removexattr(struct dentry *dentry, const char *name)
+ if (error)
+ return error;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_*removexattr()]: calling ADF\n");
++ rsbac_target = T_FILE;
++ if (S_ISDIR(inode->i_mode))
++ rsbac_target = T_DIR;
++ else if (S_ISFIFO(inode->i_mode))
++ rsbac_target = T_FIFO;
++ else if (S_ISLNK(inode->i_mode))
++ rsbac_target = T_SYMLINK;
++ else if (S_ISSOCK(inode->i_mode))
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.file.device = dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = inode->i_ino;
++ rsbac_target_id.file.dentry_p = dentry;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_PERMISSIONS_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ mutex_lock(&inode->i_mutex);
+ error = inode->i_op->removexattr(dentry, name);
+ mutex_unlock(&inode->i_mutex);
+diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
+index 3011b87..4d317e0 100644
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -69,6 +69,8 @@ xfs_initxattrs(
+ return error;
+ }
+
++#include <rsbac/hooks.h>
++
+ /*
+ * Hook in SELinux. This is not quite correct yet, what we really need
+ * here (as we do for default ACLs) is a mechanism by which creation of
+@@ -290,6 +292,10 @@ xfs_vn_unlink(
+ struct xfs_name name;
+ int error;
+
++#ifdef CONFIG_RSBAC_SECDEL
++ if (dentry->d_inode->i_nlink == 1)
++ rsbac_sec_del(dentry, FALSE);
++#endif
+ xfs_dentry_to_name(&name, dentry);
+
+ error = -xfs_remove(XFS_I(dir), &name, XFS_I(dentry->d_inode));
+@@ -351,10 +357,34 @@ xfs_vn_rename(
+ struct inode *new_inode = ndentry->d_inode;
+ struct xfs_name oname;
+ struct xfs_name nname;
++#ifdef CONFIG_RSBAC_SECDEL
++ struct xfs_inode *cip;
++#endif
+
+ xfs_dentry_to_name(&oname, odentry);
+ xfs_dentry_to_name(&nname, ndentry);
+
++#ifdef CONFIG_RSBAC_SECDEL
++	/* RSBAC secure delete code. In the event of overwriting an existing
++	 * file with the sec_del flag set, its blocks will be deallocated, so we
++	 * have to overwrite their content first. Since XFS does all the necessary
++	 * checks on the layer below the Linux VFS, operating on vnodes,
++	 * I decided to implement my own set of checks here, so we can see
++	 * if the existing file is being overwritten.
++	 * Inspired by ext2/3/4 and jfs code. michal@rsbac.org
++	 */
++
++ if (new_inode) {
++ if (new_inode->i_nlink == 1) {
++ if (!xfs_lookup(XFS_I(ndir), &nname, &cip, NULL)) {
++ IRELE(cip);
++ if(!S_ISDIR(new_inode->i_mode))
++ rsbac_sec_del(ndentry, TRUE);
++ }
++ }
++ }
++#endif
++
+ return -xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode),
+ XFS_I(ndir), &nname, new_inode ?
+ XFS_I(new_inode) : NULL);
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 81a173c..68a80aa 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -29,6 +29,9 @@
+ #define CLONE_NEWPID 0x20000000 /* New pid namespace */
+ #define CLONE_NEWNET 0x40000000 /* New network namespace */
+ #define CLONE_IO 0x80000000 /* Clone io context */
++#ifdef CONFIG_RSBAC
++#define CLONE_KTHREAD 0x100000000ULL /* clone a kernel thread */
++#endif
+
+ /*
+ * Scheduling policies
+@@ -93,6 +96,10 @@ struct sched_param {
+
+ #include <asm/processor.h>
+
++#if defined(CONFIG_RSBAC_CAP_LOG_MISSING) || defined(CONFIG_RSBAC_JAIL_LOG_MISSING)
++#include <rsbac/log_cap.h>
++#endif
++
+ struct exec_domain;
+ struct futex_pi_state;
+ struct robust_list_head;
+@@ -105,7 +112,11 @@ struct blk_plug;
+ * List of flags we want to share for kernel threads,
+ * if only because they are not used by them anyway.
+ */
++#ifdef CONFIG_RSBAC
++#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_KTHREAD)
++#else
+ #define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
++#endif
+
+ /*
+ * These are the constant used to fake the fixed-point load-average
+@@ -2290,6 +2301,13 @@ static inline void mmdrop(struct mm_struct * mm)
+
+ /* mmput gets rid of the mappings and all user-space */
+ extern void mmput(struct mm_struct *);
++#ifdef CONFIG_RSBAC
++/* mmput_nosleep gets rid of the mappings and all user-space.
++ * Non-sleeping version of mmput. Feeling like we have something in common ;)
++ * michal.
++ */
++extern void mmput_nosleep(struct mm_struct *);
++#endif
+ /* Grab a reference to a task's mm, if it is not already going away */
+ extern struct mm_struct *get_task_mm(struct task_struct *task);
+ /*
+@@ -2323,7 +2341,12 @@ extern int disallow_signal(int);
+ extern int do_execve(const char *,
+ const char __user * const __user *,
+ const char __user * const __user *, struct pt_regs *);
++
++#ifdef CONFIG_RSBAC
++extern long do_fork(unsigned long long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
++#else
+ extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
++#endif
+ struct task_struct *fork_idle(int);
+
+ extern void set_task_comm(struct task_struct *tsk, char *from);
+diff --git a/include/rsbac/aci.h b/include/rsbac/aci.h
+new file mode 100644
+index 0000000..2cc8d17
+--- /dev/null
++++ b/include/rsbac/aci.h
+@@ -0,0 +1,151 @@
++/******************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2012: */
++/* Amon Ott <ao@rsbac.org> */
++/* API: Data structures */
++/* and functions for Access */
++/* Control Information */
++/* Last modified: 19/Apr/2012 */
++/******************************* */
++
++#ifndef __RSBAC_ACI_H
++#define __RSBAC_ACI_H
++
++#include <rsbac/types.h>
++#include <linux/init.h>
++#include <linux/fs.h>
++#include <linux/mount.h>
++
++/***************************************************/
++/* Prototypes */
++/***************************************************/
++
++/* All functions return 0 if no error occurred, and a negative error code */
++/* otherwise. The error codes are defined in rsbac_error.h. */
++
++/****************************************************************************/
++/* Initialization, including ACI restoration for all mounted devices from */
++/* disk. After this call, all ACI is kept in memory for performance reasons,*/
++/* but user and file/dir object ACI are written to disk on every change. */
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++extern int rsbac_init(kdev_t root_dev);
++#else
++extern int rsbac_init(kdev_t root_dev) __init;
++#endif
++
++/* Notify RSBAC of new kernel thread */
++int rsbac_kthread_notify(rsbac_pid_t pid);
++
++/* To turn RSBAC off on umount of root device */
++void rsbac_off(void);
++
++/* For other kernel parts to check, whether RSBAC was initialized correctly */
++extern rsbac_boolean_t rsbac_initialized;
++
++static inline rsbac_boolean_t rsbac_is_initialized(void)
++{
++ return rsbac_initialized;
++}
++
++/* Check if the device exists */
++int rsbac_check_device(kdev_t kdev);
++
++/* Is device writable? */
++rsbac_boolean_t rsbac_writable(struct super_block * sb_p);
++
++/* When mounting a device, its ACI must be read and added to the ACI lists. */
++int rsbac_mount(struct vfsmount * vfsmount_p);
++
++/* When umounting a device, its ACI must be removed from the ACI lists. */
++int rsbac_umount(struct vfsmount * vfsmount_p);
++
++/* On pivot_root, we must unblock the dentry tree of the old root */
++/* by putting all cached rsbac.dat dentries */
++int rsbac_free_dat_dentries(void);
++
++/* Some information about the current status is also available */
++int rsbac_stats(void);
++
++/* Trigger internal consistency check (int: if != 0: correct errors) */
++int rsbac_check(int correct, int check_inode);
++
++/* RSBAC attribute saving to disk can be triggered from outside
++ * param: call lock_kernel() before disk access?
++ */
++#if defined(CONFIG_RSBAC_MAINT) || defined(CONFIG_RSBAC_AUTO_WRITE)
++int rsbac_write(void);
++#endif
++
++/* get the parent of a target
++ * returns -RSBAC_EINVALIDTARGET for non-fs targets
++ * and -RSBAC_ENOTFOUND if no parent is available
++ * In kernels >= 2.4.0, device_p->d_covers is used and the item is properly
++ * locked for reading, so never call with a write lock held on device_p!
++ */
++int rsbac_get_parent(enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_target_t * parent_target_p,
++ union rsbac_target_id_t * parent_tid_p);
++
++/* Invalidate cached attribute values for one or all filesystem objects */
++
++#ifdef CONFIG_RSBAC_FD_CACHE
++int rsbac_fd_cache_invalidate(struct rsbac_fs_file_t * file_p);
++
++int rsbac_fd_cache_invalidate_all(void);
++#endif
++
++/****************************************************************************/
++/* For objects, users and processes all manipulation is encapsulated by the */
++/* function calls rsbac_set_attr, rsbac_get_attr and rsbac_remove_target. */
++
++int rsbac_ta_get_attr(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t * value,
++ rsbac_boolean_t inherit);
++
++#define rsbac_get_attr(module, target, tid, attr, value, inherit) \
++ rsbac_ta_get_attr(0, module, target, tid, attr, value, inherit)
++
++int rsbac_ta_set_attr(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t value);
++
++#define rsbac_set_attr(module, target, tid, attr, value) \
++ rsbac_ta_set_attr(0, module, target, tid, attr, value)
++
++/* All RSBAC targets should be removed, if no longer needed, to prevent */
++/* memory wasting. */
++
++int rsbac_ta_remove_target(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid);
++
++#define rsbac_remove_target(target, tid) \
++ rsbac_ta_remove_target(0, target, tid)
++
++int rsbac_ta_list_all_dev(rsbac_list_ta_number_t ta_number,
++ struct rsbac_dev_desc_t ** id_pp);
++
++int rsbac_ta_list_all_user(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t ** id_pp);
++
++int rsbac_ta_list_all_ipc(rsbac_list_ta_number_t ta_number,
++ struct rsbac_ipc_t ** id_pp);
++
++int rsbac_ta_list_all_group(rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t ** id_pp);
++
++int rsbac_mark_kthread(rsbac_pid_t pid);
++int rsbac_kthreads_init(void);
++#endif
+diff --git a/include/rsbac/aci_data_structures.h b/include/rsbac/aci_data_structures.h
+new file mode 100644
+index 0000000..432a606
+--- /dev/null
++++ b/include/rsbac/aci_data_structures.h
+@@ -0,0 +1,1852 @@
++/**************************************/
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2012: Amon Ott */
++/* Data structures */
++/* Last modified: 19/Apr/2012 */
++/**************************************/
++
++#ifndef __RSBAC_DATA_STRUC_H
++#define __RSBAC_DATA_STRUC_H
++
++#ifdef __KERNEL__ /* only include in kernel code */
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/wait.h>
++#include <linux/interrupt.h>
++#include <linux/semaphore.h>
++#include <linux/sched.h>
++#include <rsbac/types.h>
++#include <linux/spinlock.h>
++#include <rsbac/pm_types.h>
++#include <rsbac/rc_types.h>
++#include <rsbac/aci.h>
++#include <rsbac/debug.h>
++#include <rsbac/lists.h>
++#endif /* __KERNEL__ */
++
++#ifdef __KERNEL__
++
++/* List to keep mounts before init, so that we can rsbac_mount them at init */
++
++struct rsbac_mount_list_t {
++ struct vfsmount * vfsmount_p;
++ struct rsbac_mount_list_t * next;
++};
++
++/* First of all we define dirname and filenames for saving the ACIs to disk. */
++/* The path must be a valid single dir name! Each mounted device gets its */
++/* own file set, residing in 'DEVICE_ROOT/RSBAC_ACI_PATH/'. */
++/* The dynamic data structures for PM, RC and ACL are kept in their own files. */
++/* All user access to these files will be denied. */
++/* Backups are kept in FILENAMEb. */
++
++#define RSBAC_LOG_BUF_LEN (16384)
++
++#define RSBAC_ACI_PATH "rsbac.dat"
++
++#define RSBAC_GEN_FD_NAME "fd_gen"
++#define RSBAC_GEN_OLD_FD_NAME "fd_gen."
++#define RSBAC_MAC_FD_NAME "fd_mac"
++#define RSBAC_MAC_OLD_FD_NAME "fd_mac."
++#define RSBAC_PM_FD_NAME "fd_pm"
++#define RSBAC_PM_OLD_FD_NAME "fd_pm."
++#define RSBAC_DAZ_FD_NAME "fd_dazt"
++#define RSBAC_DAZ_OLD_FD_NAME "fd_dazt."
++#define RSBAC_DAZ_SCANNED_FD_NAME "fd_dazs"
++#define RSBAC_DAZ_SCANNED_OLD_FD_NAME "fd_dazs."
++#define RSBAC_FF_FD_NAME "fd_ff"
++#define RSBAC_FF_OLD_FD_NAME "fd_ff."
++#define RSBAC_RC_FD_NAME "fd_rc"
++#define RSBAC_RC_OLD_FD_NAME "fd_rc."
++#define RSBAC_AUTH_FD_NAME "fd_auth"
++#define RSBAC_AUTH_OLD_FD_NAME "fd_auth."
++#define RSBAC_CAP_FD_NAME "fd_cap"
++#define RSBAC_CAP_OLD_FD_NAME "fd_cap."
++#define RSBAC_PAX_FD_NAME "fd_pax"
++#define RSBAC_PAX_OLD_FD_NAME "fd_pax."
++#define RSBAC_RES_FD_NAME "fd_res"
++#define RSBAC_RES_OLD_FD_NAME "fd_res."
++
++#define RSBAC_ACI_USER_NAME "useraci"
++/* dir creation mode for discretionary access control: no rights */
++#define RSBAC_ACI_DIR_MODE (S_IFDIR)
++/* file creation mode for discretionary access control: rw for user only */
++#define RSBAC_ACI_FILE_MODE (S_IFREG | S_IRUSR | S_IWUSR)
++/* minimal mem chunk size available to try write_partial_fd_list, else defer */
++#define RSBAC_MIN_WRITE_FD_BUF_LEN 32768
++/* max size for write_chunks */
++#define RSBAC_MAX_WRITE_CHUNK ((1 << 15) - 1)
++
++#define RSBAC_GEN_NR_FD_LISTS 2
++#define RSBAC_MAC_NR_FD_LISTS 4
++#define RSBAC_PM_NR_FD_LISTS 2
++#define RSBAC_DAZ_NR_FD_LISTS 2
++#define RSBAC_DAZ_SCANNED_NR_FD_LISTS 4
++#define RSBAC_FF_NR_FD_LISTS 4
++#define RSBAC_RC_NR_FD_LISTS 4
++#define RSBAC_AUTH_NR_FD_LISTS 2
++#define RSBAC_CAP_NR_FD_LISTS 2
++#define RSBAC_PAX_NR_FD_LISTS 2
++#define RSBAC_RES_NR_FD_LISTS 2
++
++#ifdef CONFIG_RSBAC_INIT_THREAD
++/* Check and set init timeout */
++#if CONFIG_RSBAC_MAX_INIT_TIME >= 5
++#define RSBAC_MAX_INIT_TIME CONFIG_RSBAC_MAX_INIT_TIME
++#else
++#define RSBAC_MAX_INIT_TIME 5
++#endif
++#endif /* INIT_THREAD */
++
++#endif /* __KERNEL__ */
++
++/* The following structures provide attributes for all possible targets. */
++/* The data structures are kept in doubly linked lists and are optimized */
++/* by hash functions. */
++
++/* Only ATTRIBUTES are saved in the structures that are written to disk, */
++/* because saving sublists would mean breaking up the structures for every */
++/* single list. */
++/* If a list of policy-dependent items is to be stored, this is done in */
++/* the policy-dependent data structures. Here only an ID as a handle is */
++/* supported. */
++
++/* OK, first we define the file/dir ACI, holding all file/dir information */
++/* the ADF needs for decisions. */
++
++/* Caution: whenever ACI changes, version and old_version should be increased! */
++
++// #define CONFIG_RSBAC_FD_CACHE 1
++
++#ifdef CONFIG_RSBAC_FD_CACHE
++#define RSBAC_FD_CACHE_NAME "fd_cache."
++#define RSBAC_FD_CACHE_VERSION 1
++#define RSBAC_FD_CACHE_KEY 3626114
++//#define RSBAC_FD_CACHE_TTL 3600
++struct rsbac_fd_cache_desc_t {
++ __u32 device;
++ rsbac_inode_nr_t inode;
++};
++#endif
++
++#define RSBAC_GEN_FD_ACI_VERSION 8
++#define RSBAC_GEN_FD_ACI_KEY 1001
++struct rsbac_gen_fd_aci_t {
++ rsbac_log_array_t log_array_low; /* file/dir based logging, */
++ rsbac_log_array_t log_array_high; /* high and low bits */
++ rsbac_request_vector_t log_program_based; /* Program based logging */
++ rsbac_enum_t symlink_add_remote_ip;
++ rsbac_enum_t symlink_add_uid;
++ rsbac_enum_t symlink_add_mac_level;
++ rsbac_enum_t symlink_add_rc_role;
++ rsbac_enum_t linux_dac_disable;
++ rsbac_fake_root_uid_int_t fake_root_uid;
++ rsbac_uid_t auid_exempt;
++ rsbac_um_set_t vset;
++};
++#define DEFAULT_GEN_FD_ACI \
++ { \
++ .log_array_low = -1, \
++ .log_array_high = -1, \
++ .log_program_based = 0, \
++ .symlink_add_uid = FALSE, \
++ .symlink_add_mac_level = FALSE, \
++ .symlink_add_rc_role = FALSE, \
++ .linux_dac_disable = LDD_inherit, \
++ .fake_root_uid = FR_off, \
++ .auid_exempt = RSBAC_NO_USER, \
++ .vset = RSBAC_UM_VIRTUAL_KEEP, \
++ }
++
++#define DEFAULT_GEN_ROOT_DIR_ACI \
++ { \
++ .log_array_low = -1, \
++ .log_array_high = -1, \
++ .log_program_based = 0, \
++ .symlink_add_uid = FALSE, \
++ .symlink_add_mac_level = FALSE, \
++ .symlink_add_rc_role = FALSE, \
++ .linux_dac_disable = LDD_false, \
++ .fake_root_uid = FR_off, \
++ .auid_exempt = RSBAC_NO_USER, \
++ .vset = RSBAC_UM_VIRTUAL_KEEP, \
++ }
++
++#define RSBAC_GEN_FD_OLD_ACI_VERSION 7
++struct rsbac_gen_fd_old_aci_t {
++ rsbac_log_array_t log_array_low; /* file/dir based logging, */
++ rsbac_log_array_t log_array_high; /* high and low bits */
++ rsbac_request_vector_t log_program_based; /* Program based logging */
++ rsbac_enum_t symlink_add_remote_ip;
++ rsbac_enum_t symlink_add_uid;
++ rsbac_enum_t symlink_add_mac_level;
++ rsbac_enum_t symlink_add_rc_role;
++ rsbac_enum_t linux_dac_disable;
++ rsbac_fake_root_uid_int_t fake_root_uid;
++ rsbac_old_uid_t auid_exempt;
++};
++#define RSBAC_GEN_FD_OLD_OLD_ACI_VERSION 6
++struct rsbac_gen_fd_old_old_aci_t {
++ rsbac_log_array_t log_array_low; /* file/dir based logging, */
++ rsbac_log_array_t log_array_high; /* high and low bits */
++ rsbac_request_vector_t log_program_based; /* Program based logging */
++ rsbac_enum_t symlink_add_uid;
++ rsbac_enum_t symlink_add_mac_level;
++ rsbac_enum_t symlink_add_rc_role;
++ rsbac_enum_t linux_dac_disable;
++ rsbac_fake_root_uid_int_t fake_root_uid;
++ rsbac_old_uid_t auid_exempt;
++};
++
++#define RSBAC_GEN_FD_OLD_OLD_OLD_ACI_VERSION 5
++struct rsbac_gen_fd_old_old_old_aci_t {
++ rsbac_log_array_t log_array_low; /* file/dir based logging, */
++ rsbac_log_array_t log_array_high; /* high and low bits */
++ rsbac_request_vector_t log_program_based; /* Program based logging */
++ rsbac_enum_t symlink_add_uid;
++ rsbac_enum_t symlink_add_mac_level;
++ rsbac_enum_t symlink_add_rc_role;
++ rsbac_enum_t linux_dac_disable;
++ rsbac_fake_root_uid_int_t fake_root_uid;
++};
++
++#if defined(CONFIG_RSBAC_MAC)
++#define RSBAC_MAC_FD_ACI_VERSION 5
++#define RSBAC_MAC_FD_ACI_KEY 1001
++struct rsbac_mac_fd_aci_t {
++ rsbac_security_level_t sec_level; /* MAC */
++ rsbac_mac_category_vector_t mac_categories; /* MAC category set */
++ rsbac_mac_auto_int_t mac_auto; /* auto-adjust current level */
++ rsbac_boolean_int_t mac_prop_trusted; /* Keep trusted flag when executing this file */
++ rsbac_mac_file_flags_t mac_file_flags; /* allow write_up, read_up etc. to it */
++};
++
++#define RSBAC_MAC_FD_OLD_ACI_VERSION 4
++struct rsbac_mac_fd_old_aci_t {
++ rsbac_security_level_t sec_level; /* MAC */
++ rsbac_uid_t mac_trusted_for_user; /* MAC (for FILE only) */
++ rsbac_mac_category_vector_t mac_categories; /* MAC category set */
++ rsbac_mac_auto_int_t mac_auto; /* auto-adjust current level */
++ rsbac_boolean_int_t mac_prop_trusted; /* Keep trusted flag when executing this file */
++ rsbac_mac_file_flags_t mac_file_flags; /* allow write_up, read_up etc. to it */
++};
++
++#define RSBAC_MAC_FD_OLD_OLD_ACI_VERSION 3
++struct rsbac_mac_fd_old_old_aci_t {
++ rsbac_security_level_t sec_level; /* MAC */
++ rsbac_uid_t mac_trusted_for_user; /* MAC (for FILE only) */
++ rsbac_mac_category_vector_t mac_categories; /* MAC category set */
++ rsbac_mac_auto_int_t mac_auto; /* auto-adjust current level */
++ rsbac_boolean_int_t mac_prop_trusted; /* Keep trusted flag when executing this file */
++ rsbac_boolean_int_t mac_shared; /* Shared dir, i.e., allow write_up to it */
++};
++
++#define RSBAC_MAC_FD_OLD_OLD_OLD_ACI_VERSION 2
++struct rsbac_mac_fd_old_old_old_aci_t {
++ rsbac_security_level_t sec_level; /* MAC */
++ rsbac_uid_t mac_trusted_for_user; /* MAC (for FILE only) */
++ rsbac_mac_category_vector_t mac_categories; /* MAC category set */
++ rsbac_mac_auto_int_t mac_auto; /* auto-adjust current level */
++};
++
++#define DEFAULT_MAC_FD_ACI_INH \
++ { \
++ .sec_level = SL_inherit, \
++ .mac_categories = RSBAC_MAC_INHERIT_CAT_VECTOR, \
++ .mac_auto = MA_inherit, \
++ .mac_prop_trusted = FALSE, \
++ .mac_file_flags = 0, \
++ }
++#define DEFAULT_MAC_FD_ACI_NO_INH \
++ { \
++ .sec_level = SL_unclassified, \
++ .mac_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ .mac_auto = MA_yes, \
++ .mac_prop_trusted = FALSE, \
++ .mac_file_flags = 0, \
++ }
++
++#ifdef CONFIG_RSBAC_MAC_DEF_INHERIT
++#define DEFAULT_MAC_FD_ACI DEFAULT_MAC_FD_ACI_INH
++#else
++#define DEFAULT_MAC_FD_ACI DEFAULT_MAC_FD_ACI_NO_INH
++#endif /* MAC_DEF_INHERIT */
++
++#define DEFAULT_MAC_ROOT_DIR_ACI \
++ { \
++ .sec_level = SL_unclassified, \
++ .mac_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ .mac_auto = MA_yes, \
++ .mac_prop_trusted = FALSE, \
++ .mac_file_flags = 0, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_PM)
++#define RSBAC_PM_FD_ACI_VERSION 1
++#define RSBAC_PM_FD_ACI_KEY 1001
++struct rsbac_pm_fd_aci_t {
++ rsbac_pm_object_class_id_t pm_object_class; /* PM */
++ rsbac_pm_tp_id_t pm_tp; /* PM (for FILE only) */
++ rsbac_pm_object_type_int_t pm_object_type; /* PM (enum rsbac_pm_object_type_t -> __u8) */
++};
++
++#define DEFAULT_PM_FD_ACI \
++ { \
++ .pm_object_class = 0, \
++ .pm_tp = 0, \
++ .pm_object_type = PO_none, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_DAZ)
++#define RSBAC_DAZ_FD_ACI_VERSION 2
++#define RSBAC_DAZ_FD_OLD_ACI_VERSION 1
++#define RSBAC_DAZ_FD_ACI_KEY 10535
++#define RSBAC_DAZ_CACHE_CLEANUP_INTERVAL 86400
++#define RSBAC_DAZ_SCANNED_FD_ACI_VERSION 1
++struct rsbac_daz_fd_aci_t
++ {
++ rsbac_daz_scanner_t daz_scanner; /* DAZ (for FILE only) */
++ rsbac_daz_do_scan_t daz_do_scan;
++ };
++
++struct rsbac_daz_fd_old_aci_t
++ {
++ rsbac_daz_scanner_t daz_scanner; /* DAZ (for FILE only) (boolean) */
++ };
++
++#define DEFAULT_DAZ_FD_ACI \
++ { \
++ .daz_scanner = FALSE, \
++ .daz_do_scan = DEFAULT_DAZ_FD_DO_SCAN \
++ }
++
++#define DEFAULT_DAZ_ROOT_DIR_ACI \
++ { \
++ .daz_scanner = FALSE, \
++ .daz_do_scan = DEFAULT_DAZ_FD_ROOT_DO_SCAN \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_FF)
++#define RSBAC_FF_FD_ACI_VERSION 1
++#define RSBAC_FF_FD_ACI_KEY 1001
++#endif
++
++#if defined(CONFIG_RSBAC_RC)
++#define RSBAC_RC_FD_ACI_VERSION 1
++#define RSBAC_RC_FD_ACI_KEY 1001
++struct rsbac_rc_fd_aci_t {
++ rsbac_rc_type_id_t rc_type_fd; /* RC */
++ rsbac_rc_role_id_t rc_force_role; /* RC */
++ rsbac_rc_role_id_t rc_initial_role; /* RC */
++};
++
++#define DEFAULT_RC_FD_ACI \
++ { \
++ .rc_type_fd = RC_type_inherit_parent, \
++ .rc_force_role = RC_default_force_role, \
++ .rc_initial_role = RC_default_initial_role, \
++ }
++#define DEFAULT_RC_ROOT_DIR_ACI \
++ { \
++ .rc_type_fd = RSBAC_RC_GENERAL_TYPE, \
++ .rc_force_role = RC_default_root_dir_force_role, \
++ .rc_initial_role = RC_default_root_dir_initial_role, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH)
++#define RSBAC_AUTH_FD_ACI_VERSION 2
++#define RSBAC_AUTH_FD_OLD_ACI_VERSION 1
++#define RSBAC_AUTH_FD_ACI_KEY 1001
++struct rsbac_auth_fd_aci_t {
++ __u8 auth_may_setuid; /* AUTH (enum) */
++ __u8 auth_may_set_cap; /* AUTH (boolean) */
++ __u8 auth_learn; /* AUTH (boolean) */
++};
++
++struct rsbac_auth_fd_old_aci_t {
++ __u8 auth_may_setuid; /* AUTH (boolean) */
++ __u8 auth_may_set_cap; /* AUTH (boolean) */
++};
++
++#define DEFAULT_AUTH_FD_ACI \
++ { \
++ .auth_may_setuid = FALSE, \
++ .auth_may_set_cap = FALSE, \
++ .auth_learn = FALSE, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_CAP)
++#define RSBAC_CAP_FD_ACI_VERSION 3
++#define RSBAC_CAP_FD_OLD_ACI_VERSION 2
++#define RSBAC_CAP_FD_OLD_OLD_ACI_VERSION 1
++#define RSBAC_CAP_FD_ACI_KEY 1001
++struct rsbac_cap_fd_aci_t {
++ rsbac_cap_vector_t min_caps; /* Program forced minimum Linux capabilities */
++ rsbac_cap_vector_t max_caps; /* Program max Linux capabilities */
++ rsbac_cap_ld_env_int_t cap_ld_env;
++};
++
++struct rsbac_cap_fd_old_aci_t {
++ rsbac_cap_old_vector_t min_caps; /* Program forced minimum Linux capabilities */
++ rsbac_cap_old_vector_t max_caps; /* Program max Linux capabilities */
++ rsbac_cap_ld_env_int_t cap_ld_env;
++};
++
++struct rsbac_cap_fd_old_old_aci_t {
++ rsbac_cap_old_vector_t min_caps;
++ rsbac_cap_old_vector_t max_caps;
++};
++
++#define DEFAULT_CAP_FD_ACI \
++ { \
++ .min_caps.cap[0] = RSBAC_CAP_DEFAULT_MIN, \
++ .max_caps.cap[0] = RSBAC_CAP_DEFAULT_MAX, \
++ .min_caps.cap[1] = RSBAC_CAP_DEFAULT_MIN, \
++ .max_caps.cap[1] = RSBAC_CAP_DEFAULT_MAX, \
++ .cap_ld_env = LD_keep, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_PAX)
++#define RSBAC_PAX_FD_ACI_VERSION 1
++#define RSBAC_PAX_FD_ACI_KEY 100112
++#endif
++
++#if defined(CONFIG_RSBAC_RES)
++#define RSBAC_RES_FD_ACI_VERSION 1
++#define RSBAC_RES_FD_ACI_KEY 1002
++struct rsbac_res_fd_aci_t {
++ rsbac_res_array_t res_min;
++ rsbac_res_array_t res_max;
++};
++#define DEFAULT_RES_FD_ACI \
++ { \
++ .res_min = { \
++ RSBAC_RES_UNSET, /* cpu time */ \
++ RSBAC_RES_UNSET, /* file size */ \
++ RSBAC_RES_UNSET, /* process data segment size */ \
++ RSBAC_RES_UNSET, /* stack size */ \
++ RSBAC_RES_UNSET, /* core dump size */ \
++ RSBAC_RES_UNSET, /* resident memory set size */ \
++ RSBAC_RES_UNSET, /* number of processes for this user */ \
++ RSBAC_RES_UNSET, /* number of files */ \
++ RSBAC_RES_UNSET, /* locked-in-memory address space */ \
++ RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \
++ RSBAC_RES_UNSET /* maximum file locks */ \
++ }, \
++ .res_max = { \
++ RSBAC_RES_UNSET, /* cpu time */ \
++ RSBAC_RES_UNSET, /* file size */ \
++ RSBAC_RES_UNSET, /* process data segment size */ \
++ RSBAC_RES_UNSET, /* stack size */ \
++ RSBAC_RES_UNSET, /* core dump size */ \
++ RSBAC_RES_UNSET, /* resident memory set size */ \
++ RSBAC_RES_UNSET, /* number of processes for this user */ \
++ RSBAC_RES_UNSET, /* number of files */ \
++ RSBAC_RES_UNSET, /* locked-in-memory address space */ \
++ RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \
++ RSBAC_RES_UNSET /* maximum file locks */ \
++ } \
++ }
++#endif
++
++#define RSBAC_FD_NR_ATTRIBUTES 34
++#define RSBAC_FD_ATTR_LIST { \
++ A_security_level, \
++ A_mac_categories, \
++ A_mac_auto, \
++ A_mac_prop_trusted, \
++ A_mac_file_flags, \
++ A_pm_object_class, \
++ A_pm_tp, \
++ A_pm_object_type, \
++ A_daz_scanner, \
++ A_ff_flags, \
++ A_rc_type_fd, \
++ A_rc_force_role, \
++ A_rc_initial_role, \
++ A_auth_may_setuid, \
++ A_auth_may_set_cap, \
++ A_auth_learn, \
++ A_log_array_low, \
++ A_log_array_high, \
++ A_log_program_based, \
++ A_symlink_add_remote_ip, \
++ A_symlink_add_uid, \
++ A_symlink_add_mac_level, \
++ A_symlink_add_rc_role, \
++ A_linux_dac_disable, \
++ A_min_caps, \
++ A_max_caps, \
++ A_cap_ld_env, \
++ A_res_min, \
++ A_res_max, \
++ A_pax_flags, \
++ A_fake_root_uid, \
++ A_auid_exempt, \
++ A_daz_do_scan, \
++ A_vset \
++ }
++
++#ifdef __KERNEL__
++struct rsbac_fd_list_handles_t {
++ rsbac_list_handle_t gen;
++#if defined(CONFIG_RSBAC_MAC)
++ rsbac_list_handle_t mac;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ rsbac_list_handle_t pm;
++#endif
++#if defined(CONFIG_RSBAC_DAZ)
++ rsbac_list_handle_t daz;
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++ rsbac_list_handle_t dazs;
++#endif
++#endif
++#if defined(CONFIG_RSBAC_FF)
++ rsbac_list_handle_t ff;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ rsbac_list_handle_t rc;
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ rsbac_list_handle_t auth;
++#endif
++#if defined(CONFIG_RSBAC_CAP)
++ rsbac_list_handle_t cap;
++#endif
++#if defined(CONFIG_RSBAC_PAX)
++ rsbac_list_handle_t pax;
++#endif
++#if defined(CONFIG_RSBAC_RES)
++ rsbac_list_handle_t res;
++#endif
++};
++
++/* The list of devices is also a doubly linked list, so we define list */
++/* items and a list head. */
++
++/* Hash size. Must be power of 2. */
++
++#define RSBAC_NR_DEVICE_LISTS 8
++
++struct rsbac_device_list_item_t {
++ kdev_t id;
++ u_int mount_count;
++ struct rsbac_fd_list_handles_t handles;
++ struct dentry *rsbac_dir_dentry_p;
++ struct vfsmount *vfsmount_p;
++ rsbac_inode_nr_t rsbac_dir_inode;
++ struct rsbac_device_list_item_t *prev;
++ struct rsbac_device_list_item_t *next;
++};
++
++/* To provide consistency we use spinlocks for all list accesses. The */
++/* 'curr' entry is used to avoid repeated lookups for the same item. */
++
++struct rsbac_device_list_head_t {
++ struct rsbac_device_list_item_t *head;
++ struct rsbac_device_list_item_t *tail;
++ struct rsbac_device_list_item_t *curr;
++ u_int count;
++};
++
++#endif /* __KERNEL__ */
++
++/******************************/
++/* OK, now we define the block/char device ACI, holding all dev information */
++/* the ADF needs for decisions. */
++
++#define RSBAC_GEN_ACI_DEV_NAME "dev_gen"
++#define RSBAC_MAC_ACI_DEV_NAME "dev_mac"
++#define RSBAC_PM_ACI_DEV_NAME "dev_pm"
++#define RSBAC_RC_ACI_DEV_MAJOR_NAME "devm_rc"
++#define RSBAC_RC_ACI_DEV_NAME "dev_rc"
++
++/* Caution: whenever ACI changes, version should be increased! */
++
++#define RSBAC_GEN_DEV_ACI_VERSION 2
++#define RSBAC_GEN_DEV_OLD_ACI_VERSION 1
++#define RSBAC_GEN_DEV_ACI_KEY 1001
++
++struct rsbac_gen_dev_aci_t {
++ rsbac_log_array_t log_array_low; /* dev based logging, */
++ rsbac_log_array_t log_array_high; /* high and low bits */
++};
++#define DEFAULT_GEN_DEV_ACI \
++ { \
++ .log_array_low = -1, \
++ .log_array_high = -1, \
++ }
++
++#if defined(CONFIG_RSBAC_MAC)
++#define RSBAC_MAC_DEV_ACI_VERSION 2
++#define RSBAC_MAC_DEV_OLD_ACI_VERSION 1
++#define RSBAC_MAC_DEV_ACI_KEY 1001
++struct rsbac_mac_dev_aci_t {
++ rsbac_security_level_t sec_level; /* MAC */
++ rsbac_mac_category_vector_t mac_categories; /* MAC category set */
++ __u8 mac_check; /* MAC (boolean) */
++};
++#define DEFAULT_MAC_DEV_ACI \
++ { \
++ .sec_level = SL_unclassified, \
++ .mac_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ .mac_check = FALSE, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_PM)
++#define RSBAC_PM_DEV_ACI_VERSION 2
++#define RSBAC_PM_DEV_OLD_ACI_VERSION 1
++#define RSBAC_PM_DEV_ACI_KEY 1001
++struct rsbac_pm_dev_aci_t {
++ rsbac_pm_object_type_int_t pm_object_type; /* PM (enum rsbac_pm_object_type_t) */
++ rsbac_pm_object_class_id_t pm_object_class; /* dev only */
++};
++
++#define DEFAULT_PM_DEV_ACI \
++ { \
++ .pm_object_type = PO_none, \
++ .pm_object_class = 0, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_RC)
++#define RSBAC_RC_DEV_ACI_VERSION 2
++#define RSBAC_RC_DEV_OLD_ACI_VERSION 1
++#define RSBAC_RC_DEV_ACI_KEY 1001
++#endif
++
++#define RSBAC_DEV_NR_ATTRIBUTES 8
++#define RSBAC_DEV_ATTR_LIST { \
++ A_security_level, \
++ A_mac_categories, \
++ A_mac_check, \
++ A_pm_object_type, \
++ A_pm_object_class, \
++ A_rc_type, \
++ A_log_array_low, \
++ A_log_array_high \
++ }
++
++#ifdef __KERNEL__
++struct rsbac_dev_handles_t {
++ rsbac_list_handle_t gen;
++#if defined(CONFIG_RSBAC_MAC)
++ rsbac_list_handle_t mac;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ rsbac_list_handle_t pm;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ rsbac_list_handle_t rc;
++#endif
++};
++#endif /* __KERNEL__ */
++
++/**************************************************************************/
++/* Next we define the ipc ACI, holding all ipc information */
++/* the ADF needs for decisions. */
++
++#define RSBAC_MAC_ACI_IPC_NAME "ipc_mac"
++#define RSBAC_PM_ACI_IPC_NAME "ipc_pm"
++#define RSBAC_RC_ACI_IPC_NAME "ipc_rc"
++#define RSBAC_JAIL_ACI_IPC_NAME "ipc_jai"
++
++#if defined(CONFIG_RSBAC_MAC)
++#define RSBAC_MAC_IPC_ACI_VERSION 1
++#define RSBAC_MAC_IPC_ACI_KEY 1001
++struct rsbac_mac_ipc_aci_t {
++ rsbac_security_level_t sec_level; /* enum old_rsbac_security_level_t / __u8 */
++ rsbac_mac_category_vector_t mac_categories; /* MAC category set */
++};
++#define DEFAULT_MAC_IPC_ACI \
++ { \
++ .sec_level = SL_unclassified, \
++ .mac_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_PM)
++#define RSBAC_PM_IPC_ACI_VERSION 1
++#define RSBAC_PM_IPC_ACI_KEY 1001
++struct rsbac_pm_ipc_aci_t {
++ rsbac_pm_object_class_id_t pm_object_class; /* ipc only */
++ rsbac_pm_purpose_id_t pm_ipc_purpose;
++ rsbac_pm_object_type_int_t pm_object_type; /* enum rsbac_pm_object_type_t */
++};
++#define DEFAULT_PM_IPC_ACI \
++ { \
++ .pm_object_class = RSBAC_PM_IPC_OBJECT_CLASS_ID, \
++ .pm_ipc_purpose = 0, \
++ .pm_object_type = PO_ipc, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_RC)
++#define RSBAC_RC_IPC_ACI_VERSION 1
++#define RSBAC_RC_IPC_ACI_KEY 1001
++#endif
++
++#if defined(CONFIG_RSBAC_JAIL)
++#define RSBAC_JAIL_IPC_ACI_VERSION 1
++#define RSBAC_JAIL_IPC_ACI_KEY 1001
++#endif
++
++#define RSBAC_IPC_NR_ATTRIBUTES 7
++#define RSBAC_IPC_ATTR_LIST { \
++ A_security_level, \
++ A_mac_categories, \
++ A_pm_object_class, \
++ A_pm_ipc_purpose, \
++ A_pm_object_type, \
++ A_rc_type, \
++ A_jail_id \
++ }
++
++#ifdef __KERNEL__
++struct rsbac_ipc_handles_t {
++#if defined(CONFIG_RSBAC_MAC)
++ rsbac_list_handle_t mac;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ rsbac_list_handle_t pm;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ rsbac_list_handle_t rc;
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ rsbac_list_handle_t jail;
++#endif
++};
++#endif /* __KERNEL__ */
++
++/*************************************/
++/* The user ACI holds all user information the ADF needs. */
++
++#define RSBAC_GEN_ACI_USER_NAME "u_gen"
++#define RSBAC_MAC_ACI_USER_NAME "u_mac"
++#define RSBAC_PM_ACI_USER_NAME "u_pm"
++#define RSBAC_DAZ_ACI_USER_NAME "u_daz"
++#define RSBAC_FF_ACI_USER_NAME "u_ff"
++#define RSBAC_RC_ACI_USER_NAME "u_rc"
++#define RSBAC_AUTH_ACI_USER_NAME "u_auth"
++#define RSBAC_CAP_ACI_USER_NAME "u_cap"
++#define RSBAC_JAIL_ACI_USER_NAME "u_jail"
++#define RSBAC_PAX_ACI_USER_NAME "u_pax"
++#define RSBAC_RES_ACI_USER_NAME "u_res"
++
++#define RSBAC_GEN_USER_ACI_VERSION 2
++#define RSBAC_GEN_USER_OLD_ACI_VERSION 1
++#define RSBAC_GEN_USER_ACI_KEY 1001
++struct rsbac_gen_user_aci_t {
++ rsbac_pseudo_t pseudo;
++ rsbac_request_vector_t log_user_based; /* User based logging */
++};
++#define DEFAULT_GEN_U_ACI \
++ { \
++ .pseudo = (rsbac_pseudo_t) 0, \
++ .log_user_based = 0, \
++ }
++
++#if defined(CONFIG_RSBAC_MAC)
++#define RSBAC_MAC_USER_ACI_VERSION 5
++#define RSBAC_MAC_USER_OLD_ACI_VERSION 4
++#define RSBAC_MAC_USER_OLD_OLD_ACI_VERSION 3
++#define RSBAC_MAC_USER_OLD_OLD_OLD_ACI_VERSION 2
++#define RSBAC_MAC_USER_OLD_OLD_OLD_OLD_ACI_VERSION 1
++#define RSBAC_MAC_USER_ACI_KEY 1001
++struct rsbac_mac_user_aci_t {
++ rsbac_security_level_t security_level; /* maximum level */
++ rsbac_security_level_t initial_security_level; /* maximum level */
++ rsbac_security_level_t min_security_level; /* minimum level / __u8 */
++ rsbac_mac_category_vector_t mac_categories; /* MAC max category set */
++ rsbac_mac_category_vector_t mac_initial_categories; /* MAC max category set */
++ rsbac_mac_category_vector_t mac_min_categories; /* MAC min category set */
++ rsbac_system_role_int_t system_role; /* enum rsbac_system_role_t */
++ rsbac_mac_user_flags_t mac_user_flags; /* flags (override, trusted, allow_auto etc.) */
++};
++struct rsbac_mac_user_old_aci_t {
++ rsbac_security_level_t access_appr; /* maximum level */
++ rsbac_security_level_t min_access_appr; /* minimum level / __u8 */
++ rsbac_mac_category_vector_t mac_categories; /* MAC max category set */
++ rsbac_mac_category_vector_t mac_min_categories; /* MAC min category set */
++ rsbac_system_role_int_t system_role; /* enum rsbac_system_role_t */
++ rsbac_boolean_int_t mac_allow_auto; /* allow to auto-adjust current level */
++};
++struct rsbac_mac_user_old_old_aci_t {
++ rsbac_security_level_t access_appr; /* maximum level */
++ rsbac_security_level_t min_access_appr; /* minimum level / __u8 */
++ rsbac_mac_category_vector_t mac_categories; /* MAC max category set */
++ rsbac_mac_category_vector_t mac_min_categories; /* MAC min category set */
++ rsbac_system_role_int_t system_role; /* enum rsbac_system_role_t */
++};
++struct rsbac_mac_user_old_old_old_aci_t {
++ rsbac_security_level_t access_appr; /* enum old_rsbac_security_level_t / __u8 */
++ rsbac_mac_category_vector_t mac_categories; /* MAC category set */
++ rsbac_system_role_int_t system_role; /* enum rsbac_system_role_t */
++};
++#define DEFAULT_MAC_U_ACI \
++ { \
++ .security_level = SL_unclassified, \
++ .initial_security_level = SL_unclassified, \
++ .min_security_level = SL_unclassified, \
++ .mac_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ .mac_initial_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ .mac_min_categories = RSBAC_MAC_MIN_CAT_VECTOR, \
++ .system_role = SR_user, \
++ .mac_user_flags = RSBAC_MAC_DEF_U_FLAGS, \
++ }
++#define DEFAULT_MAC_U_SYSADM_ACI \
++ { \
++ .security_level = SL_unclassified, \
++ .initial_security_level = SL_unclassified, \
++ .min_security_level = SL_unclassified, \
++ .mac_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ .mac_initial_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ .mac_min_categories = RSBAC_MAC_MIN_CAT_VECTOR, \
++ .system_role = SR_administrator, \
++ .mac_user_flags = RSBAC_MAC_DEF_SYSADM_U_FLAGS, \
++ }
++#define DEFAULT_MAC_U_SECOFF_ACI \
++ { \
++ .security_level = SL_unclassified, \
++ .initial_security_level = SL_unclassified, \
++ .min_security_level = SL_unclassified, \
++ .mac_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ .mac_initial_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ .mac_min_categories = RSBAC_MAC_MIN_CAT_VECTOR, \
++ .system_role = SR_security_officer, \
++ .mac_user_flags = RSBAC_MAC_DEF_SECOFF_U_FLAGS, \
++ }
++#define DEFAULT_MAC_U_AUDITOR_ACI \
++ { \
++ .security_level = SL_unclassified, \
++ .initial_security_level = SL_unclassified, \
++ .min_security_level = SL_unclassified, \
++ .mac_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ .mac_initial_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ .mac_min_categories = RSBAC_MAC_MIN_CAT_VECTOR, \
++ .system_role = SR_auditor, \
++ .mac_user_flags = RSBAC_MAC_DEF_U_FLAGS, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_PM)
++#define RSBAC_PM_USER_ACI_VERSION 2
++#define RSBAC_PM_USER_OLD_ACI_VERSION 1
++#define RSBAC_PM_USER_ACI_KEY 1001
++struct rsbac_pm_user_aci_t {
++ rsbac_pm_task_set_id_t pm_task_set;
++ rsbac_pm_role_int_t pm_role; /* enum rsbac_pm_role_t */
++};
++#define DEFAULT_PM_U_ACI \
++ { \
++ .pm_task_set = 0, \
++ .pm_role = PR_user, \
++ }
++#define DEFAULT_PM_U_SYSADM_ACI \
++ { \
++ .pm_task_set = 0, \
++ .pm_role = PR_system_admin, \
++ }
++#define DEFAULT_PM_U_SECOFF_ACI \
++ { \
++ .pm_task_set = 0, \
++ .pm_role = PR_security_officer, \
++ }
++#define DEFAULT_PM_U_DATAPROT_ACI \
++ { \
++ .pm_task_set = 0, \
++ .pm_role = PR_data_protection_officer, \
++ }
++#define DEFAULT_PM_U_TPMAN_ACI \
++ { \
++ .pm_task_set = 0, \
++ .pm_role = PR_tp_manager, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_DAZ)
++#define RSBAC_DAZ_USER_ACI_VERSION 2
++#define RSBAC_DAZ_USER_OLD_ACI_VERSION 1
++#define RSBAC_DAZ_USER_ACI_KEY 1001
++#endif
++
++#if defined(CONFIG_RSBAC_FF)
++#define RSBAC_FF_USER_ACI_VERSION 2
++#define RSBAC_FF_USER_OLD_ACI_VERSION 1
++#define RSBAC_FF_USER_ACI_KEY 1001
++#endif
++
++#if defined(CONFIG_RSBAC_RC)
++#define RSBAC_RC_USER_ACI_VERSION 3
++#define RSBAC_RC_USER_OLD_ACI_VERSION 2
++#define RSBAC_RC_USER_OLD_OLD_ACI_VERSION 1
++#define RSBAC_RC_USER_ACI_KEY 1001
++struct rsbac_rc_user_aci_t {
++ rsbac_rc_role_id_t rc_role;
++ rsbac_rc_type_id_t rc_type;
++};
++#define DEFAULT_RC_U_ACI \
++ { \
++ .rc_role = RSBAC_RC_GENERAL_ROLE, \
++ .rc_type = RSBAC_RC_GENERAL_TYPE, \
++ }
++#define DEFAULT_RC_U_SYSADM_ACI \
++ { \
++ .rc_role = RSBAC_RC_SYSTEM_ADMIN_ROLE, /* rc_role (RC) */ \
++ .rc_type = RSBAC_RC_SYS_TYPE, \
++ }
++#define DEFAULT_RC_U_SECOFF_ACI \
++ { \
++ .rc_role = RSBAC_RC_ROLE_ADMIN_ROLE, /* rc_role (RC) */ \
++ .rc_type = RSBAC_RC_SEC_TYPE, \
++ }
++#define DEFAULT_RC_U_AUDITOR_ACI \
++ { \
++ .rc_role = RSBAC_RC_AUDITOR_ROLE, /* rc_role (RC) */ \
++ .rc_type = RSBAC_RC_SEC_TYPE, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH)
++#define RSBAC_AUTH_USER_ACI_VERSION 2
++#define RSBAC_AUTH_USER_OLD_ACI_VERSION 1
++#define RSBAC_AUTH_USER_ACI_KEY 1001
++
++#endif /* AUTH */
++
++#if defined(CONFIG_RSBAC_CAP)
++#define RSBAC_CAP_USER_ACI_VERSION 4
++#define RSBAC_CAP_USER_OLD_ACI_VERSION 3
++#define RSBAC_CAP_USER_OLD_OLD_ACI_VERSION 2
++#define RSBAC_CAP_USER_OLD_OLD_OLD_ACI_VERSION 1
++#define RSBAC_CAP_USER_ACI_KEY 1001
++struct rsbac_cap_user_aci_t {
++ rsbac_system_role_int_t cap_role; /* System role for CAP administration */
++ rsbac_cap_vector_t min_caps; /* User forced minimum Linux capabilities */
++ rsbac_cap_vector_t max_caps; /* User max Linux capabilities */
++ rsbac_cap_ld_env_int_t cap_ld_env;
++};
++
++struct rsbac_cap_user_old_aci_t {
++ rsbac_system_role_int_t cap_role; /* System role for CAP administration */
++ rsbac_cap_old_vector_t min_caps; /* User forced minimum Linux capabilities */
++ rsbac_cap_old_vector_t max_caps; /* User max Linux capabilities */
++ rsbac_cap_ld_env_int_t cap_ld_env;
++};
++
++struct rsbac_cap_user_old_old_aci_t {
++ rsbac_system_role_int_t cap_role; /* System role for CAP administration */
++ rsbac_cap_old_vector_t min_caps; /* User forced minimum Linux capabilities */
++ rsbac_cap_old_vector_t max_caps; /* User max Linux capabilities */
++ rsbac_cap_ld_env_int_t cap_ld_env;
++};
++
++struct rsbac_cap_user_old_old_old_aci_t {
++ rsbac_system_role_int_t cap_role; /* System role for CAP administration */
++ rsbac_cap_old_vector_t min_caps; /* User forced minimum Linux capabilities */
++ rsbac_cap_old_vector_t max_caps; /* User max Linux capabilities */
++};
++
++#define DEFAULT_CAP_U_ACI \
++ { \
++ .cap_role = SR_user, \
++ .min_caps.cap[0] = RSBAC_CAP_DEFAULT_MIN, \
++ .max_caps.cap[0] = RSBAC_CAP_DEFAULT_MAX, \
++ .min_caps.cap[1] = RSBAC_CAP_DEFAULT_MIN, \
++ .max_caps.cap[1] = RSBAC_CAP_DEFAULT_MAX, \
++ .cap_ld_env = LD_keep, \
++ }
++#define DEFAULT_CAP_U_SYSADM_ACI \
++ { \
++ .cap_role = SR_administrator, \
++ .min_caps.cap[0] = RSBAC_CAP_DEFAULT_MIN, \
++ .max_caps.cap[0] = RSBAC_CAP_DEFAULT_MAX, \
++ .min_caps.cap[1] = RSBAC_CAP_DEFAULT_MIN, \
++ .max_caps.cap[1] = RSBAC_CAP_DEFAULT_MAX, \
++ .cap_ld_env = LD_keep, \
++ }
++#define DEFAULT_CAP_U_SECOFF_ACI \
++ { \
++ .cap_role = SR_security_officer, \
++ .min_caps.cap[0] = RSBAC_CAP_DEFAULT_MIN, \
++ .max_caps.cap[0] = RSBAC_CAP_DEFAULT_MAX, \
++ .min_caps.cap[1] = RSBAC_CAP_DEFAULT_MIN, \
++ .max_caps.cap[1] = RSBAC_CAP_DEFAULT_MAX, \
++ .cap_ld_env = LD_keep, \
++ }
++#define DEFAULT_CAP_U_AUDITOR_ACI \
++ { \
++ .cap_role = SR_auditor, \
++ .min_caps.cap[0] = RSBAC_CAP_DEFAULT_MIN, \
++ .max_caps.cap[0] = RSBAC_CAP_DEFAULT_MAX, \
++ .min_caps.cap[1] = RSBAC_CAP_DEFAULT_MIN, \
++ .max_caps.cap[1] = RSBAC_CAP_DEFAULT_MAX, \
++ .cap_ld_env = LD_keep, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_JAIL)
++#define RSBAC_JAIL_USER_ACI_VERSION 2
++#define RSBAC_JAIL_USER_OLD_ACI_VERSION 1
++#define RSBAC_JAIL_USER_ACI_KEY 1001
++#endif
++
++#if defined(CONFIG_RSBAC_PAX)
++#define RSBAC_PAX_USER_ACI_VERSION 2
++#define RSBAC_PAX_USER_OLD_ACI_VERSION 1
++#define RSBAC_PAX_USER_ACI_KEY 1001221
++#endif
++
++#if defined(CONFIG_RSBAC_RES)
++#define RSBAC_RES_USER_ACI_VERSION 2
++#define RSBAC_RES_USER_OLD_ACI_VERSION 1
++#define RSBAC_RES_USER_ACI_KEY 1002
++struct rsbac_res_user_aci_t {
++ rsbac_system_role_int_t res_role; /* System role for RES administration */
++ rsbac_res_array_t res_min;
++ rsbac_res_array_t res_max;
++};
++#define DEFAULT_RES_U_ACI \
++ { \
++ .res_role = SR_user, \
++ .res_min = { \
++ RSBAC_RES_UNSET, /* cpu time */ \
++ RSBAC_RES_UNSET, /* file size */ \
++ RSBAC_RES_UNSET, /* process data segment size */ \
++ RSBAC_RES_UNSET, /* stack size */ \
++ RSBAC_RES_UNSET, /* core dump size */ \
++ RSBAC_RES_UNSET, /* resident memory set size */ \
++ RSBAC_RES_UNSET, /* number of processes for this user */ \
++ RSBAC_RES_UNSET, /* number of files */ \
++ RSBAC_RES_UNSET, /* locked-in-memory address space */ \
++ RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \
++ RSBAC_RES_UNSET /* maximum file locks */ \
++ }, \
++ .res_max = { \
++ RSBAC_RES_UNSET, /* cpu time */ \
++ RSBAC_RES_UNSET, /* file size */ \
++ RSBAC_RES_UNSET, /* process data segment size */ \
++ RSBAC_RES_UNSET, /* stack size */ \
++ RSBAC_RES_UNSET, /* core dump size */ \
++ RSBAC_RES_UNSET, /* resident memory set size */ \
++ RSBAC_RES_UNSET, /* number of processes for this user */ \
++ RSBAC_RES_UNSET, /* number of files */ \
++ RSBAC_RES_UNSET, /* locked-in-memory address space */ \
++ RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \
++ RSBAC_RES_UNSET /* maximum file locks */ \
++ }, \
++ }
++#define DEFAULT_RES_U_SYSADM_ACI \
++ { \
++ .res_role = SR_administrator, \
++ .res_min = { \
++ RSBAC_RES_UNSET, /* cpu time */ \
++ RSBAC_RES_UNSET, /* file size */ \
++ RSBAC_RES_UNSET, /* process data segment size */ \
++ RSBAC_RES_UNSET, /* stack size */ \
++ RSBAC_RES_UNSET, /* core dump size */ \
++ RSBAC_RES_UNSET, /* resident memory set size */ \
++ RSBAC_RES_UNSET, /* number of processes for this user */ \
++ RSBAC_RES_UNSET, /* number of files */ \
++ RSBAC_RES_UNSET, /* locked-in-memory address space */ \
++ RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \
++ RSBAC_RES_UNSET /* maximum file locks */ \
++ }, \
++ .res_max = { \
++ RSBAC_RES_UNSET, /* cpu time */ \
++ RSBAC_RES_UNSET, /* file size */ \
++ RSBAC_RES_UNSET, /* process data segment size */ \
++ RSBAC_RES_UNSET, /* stack size */ \
++ RSBAC_RES_UNSET, /* core dump size */ \
++ RSBAC_RES_UNSET, /* resident memory set size */ \
++ RSBAC_RES_UNSET, /* number of processes for this user */ \
++ RSBAC_RES_UNSET, /* number of files */ \
++ RSBAC_RES_UNSET, /* locked-in-memory address space */ \
++ RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \
++ RSBAC_RES_UNSET /* maximum file locks */ \
++ } \
++ }
++#define DEFAULT_RES_U_SECOFF_ACI \
++ { \
++ .res_role = SR_security_officer, \
++ .res_min = { \
++ RSBAC_RES_UNSET, /* cpu time */ \
++ RSBAC_RES_UNSET, /* file size */ \
++ RSBAC_RES_UNSET, /* process data segment size */ \
++ RSBAC_RES_UNSET, /* stack size */ \
++ RSBAC_RES_UNSET, /* core dump size */ \
++ RSBAC_RES_UNSET, /* resident memory set size */ \
++ RSBAC_RES_UNSET, /* number of processes for this user */ \
++ RSBAC_RES_UNSET, /* number of files */ \
++ RSBAC_RES_UNSET, /* locked-in-memory address space */ \
++ RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \
++ RSBAC_RES_UNSET /* maximum file locks */ \
++ }, \
++ .res_max = { \
++ RSBAC_RES_UNSET, /* cpu time */ \
++ RSBAC_RES_UNSET, /* file size */ \
++ RSBAC_RES_UNSET, /* process data segment size */ \
++ RSBAC_RES_UNSET, /* stack size */ \
++ RSBAC_RES_UNSET, /* core dump size */ \
++ RSBAC_RES_UNSET, /* resident memory set size */ \
++ RSBAC_RES_UNSET, /* number of processes for this user */ \
++ RSBAC_RES_UNSET, /* number of files */ \
++ RSBAC_RES_UNSET, /* locked-in-memory address space */ \
++ RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \
++ RSBAC_RES_UNSET /* maximum file locks */ \
++ } \
++ }
++#define DEFAULT_RES_U_AUDITOR_ACI \
++ { \
++ .res_role = SR_auditor, \
++ .res_min = { \
++ RSBAC_RES_UNSET, /* cpu time */ \
++ RSBAC_RES_UNSET, /* file size */ \
++ RSBAC_RES_UNSET, /* process data segment size */ \
++ RSBAC_RES_UNSET, /* stack size */ \
++ RSBAC_RES_UNSET, /* core dump size */ \
++ RSBAC_RES_UNSET, /* resident memory set size */ \
++ RSBAC_RES_UNSET, /* number of processes for this user */ \
++ RSBAC_RES_UNSET, /* number of files */ \
++ RSBAC_RES_UNSET, /* locked-in-memory address space */ \
++ RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \
++ RSBAC_RES_UNSET /* maximum file locks */ \
++ }, \
++ .res_max = { \
++ RSBAC_RES_UNSET, /* cpu time */ \
++ RSBAC_RES_UNSET, /* file size */ \
++ RSBAC_RES_UNSET, /* process data segment size */ \
++ RSBAC_RES_UNSET, /* stack size */ \
++ RSBAC_RES_UNSET, /* core dump size */ \
++ RSBAC_RES_UNSET, /* resident memory set size */ \
++ RSBAC_RES_UNSET, /* number of processes for this user */ \
++ RSBAC_RES_UNSET, /* number of files */ \
++ RSBAC_RES_UNSET, /* locked-in-memory address space */ \
++ RSBAC_RES_UNSET, /* address space (virtual memory) limit */ \
++ RSBAC_RES_UNSET /* maximum file locks */ \
++ } \
++ }
++#endif
++
++#define RSBAC_USER_NR_ATTRIBUTES 24
++#define RSBAC_USER_ATTR_LIST { \
++ A_pseudo, \
++ A_log_user_based, \
++ A_security_level, \
++ A_initial_security_level, \
++ A_min_security_level, \
++ A_mac_categories, \
++ A_mac_initial_categories, \
++ A_mac_min_categories, \
++ A_mac_role, \
++ A_mac_user_flags, \
++ A_daz_role, \
++ A_ff_role, \
++ A_auth_role, \
++ A_pm_task_set, \
++ A_pm_role, \
++ A_rc_def_role, \
++ A_rc_type, \
++ A_min_caps, \
++ A_max_caps, \
++ A_cap_role, \
++ A_cap_ld_env, \
++ A_jail_role, \
++ A_res_role, \
++ A_pax_role \
++ }
++
++#ifdef __KERNEL__
++struct rsbac_user_handles_t {
++ rsbac_list_handle_t gen;
++#if defined(CONFIG_RSBAC_MAC)
++ rsbac_list_handle_t mac;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ rsbac_list_handle_t pm;
++#endif
++#if defined(CONFIG_RSBAC_DAZ)
++ rsbac_list_handle_t daz;
++#endif
++#if defined(CONFIG_RSBAC_FF)
++ rsbac_list_handle_t ff;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ rsbac_list_handle_t rc;
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ rsbac_list_handle_t auth;
++#endif
++#if defined(CONFIG_RSBAC_CAP)
++ rsbac_list_handle_t cap;
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ rsbac_list_handle_t jail;
++#endif
++#if defined(CONFIG_RSBAC_PAX)
++ rsbac_list_handle_t pax;
++#endif
++#if defined(CONFIG_RSBAC_RES)
++ rsbac_list_handle_t res;
++#endif
++};
++#endif
++
++/********************************/
++/* Process ACI. */
++
++#define RSBAC_GEN_ACI_PROCESS_NAME "process_gen"
++#define RSBAC_MAC_ACI_PROCESS_NAME "process_mac"
++#define RSBAC_PM_ACI_PROCESS_NAME "process_pm"
++#define RSBAC_DAZ_ACI_PROCESS_NAME "process_daz"
++#define RSBAC_RC_ACI_PROCESS_NAME "process_rc"
++#define RSBAC_AUTH_ACI_PROCESS_NAME "process_auth"
++#define RSBAC_CAP_ACI_PROCESS_NAME "process_cap"
++#define RSBAC_JAIL_ACI_PROCESS_NAME "process_jail"
++
++#define RSBAC_GEN_PROCESS_ACI_VERSION 3
++#define RSBAC_GEN_PROCESS_ACI_KEY 1001
++struct rsbac_gen_process_aci_t {
++ rsbac_request_vector_t log_program_based;
++ rsbac_fake_root_uid_int_t fake_root_uid;
++ rsbac_uid_t audit_uid;
++ rsbac_uid_t auid_exempt;
++ __u32 remote_ip;
++ rsbac_boolean_t kernel_thread;
++ rsbac_um_set_t vset;
++#if defined(CONFIG_RSBAC_AUTH_LEARN) || defined(CONFIG_RSBAC_CAP_LEARN)
++ struct rsbac_fs_file_t program_file;
++#endif
++};
++#if defined(CONFIG_RSBAC_AUTH_LEARN) || defined(CONFIG_RSBAC_CAP_LEARN)
++#define DEFAULT_GEN_P_ACI \
++ { \
++ .log_program_based = 0, \
++ .fake_root_uid = FR_off, \
++ .audit_uid = RSBAC_NO_USER, \
++ .auid_exempt = RSBAC_NO_USER, \
++ .remote_ip = 0, \
++ .kernel_thread = 0, \
++ .vset = 0, \
++ .program_file = { RSBAC_ZERO_DEV, 0, NULL }, \
++ }
++#else
++#define DEFAULT_GEN_P_ACI \
++ { \
++ .log_program_based = 0, \
++ .fake_root_uid = FR_off, \
++ .audit_uid = RSBAC_NO_USER, \
++ .auid_exempt = RSBAC_NO_USER, \
++ .remote_ip = 0, \
++ .kernel_thread = 0, \
++ .vset = 0, \
++ }
++#endif
++
++
++#if defined(CONFIG_RSBAC_MAC) || defined(CONFIG_RSBAC_MAC_MAINT)
++#define RSBAC_MAC_PROCESS_ACI_VERSION 1
++#define RSBAC_MAC_PROCESS_ACI_KEY 1001
++struct rsbac_mac_process_aci_t {
++ rsbac_security_level_t owner_sec_level; /* enum old_rsbac_security_level_t */
++ rsbac_security_level_t owner_initial_sec_level; /* enum old_rsbac_security_level_t */
++ rsbac_security_level_t owner_min_sec_level; /* enum old_rsbac_security_level_t */
++ rsbac_mac_category_vector_t mac_owner_categories; /* MAC category set */
++ rsbac_mac_category_vector_t mac_owner_initial_categories; /* MAC category set */
++ rsbac_mac_category_vector_t mac_owner_min_categories; /* MAC category set */
++ rsbac_security_level_t current_sec_level; /* enum rsbac_security_level_t */
++ rsbac_mac_category_vector_t mac_curr_categories; /* MAC current category set */
++ rsbac_security_level_t min_write_open; /* for *-property, enum rsbac_security_level_t */
++ rsbac_mac_category_vector_t min_write_categories; /* MAC, for *-property */
++ rsbac_security_level_t max_read_open; /* for *-property, enum rsbac_security_level_t */
++ rsbac_mac_category_vector_t max_read_categories; /* MAC, for *-property */
++ rsbac_mac_process_flags_t mac_process_flags; /* flags (override, trusted, auto etc.) */
++};
++#define DEFAULT_MAC_P_ACI \
++ { \
++ .owner_sec_level = SL_unclassified, \
++ .owner_initial_sec_level = SL_unclassified, \
++ .owner_min_sec_level = SL_unclassified, \
++ .mac_owner_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ .mac_owner_initial_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ .mac_owner_min_categories = RSBAC_MAC_MIN_CAT_VECTOR, \
++ .current_sec_level = SL_unclassified, \
++ .mac_curr_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ .min_write_open = SL_max, \
++ .min_write_categories = RSBAC_MAC_MAX_CAT_VECTOR, \
++ .max_read_open = SL_unclassified, \
++ .max_read_categories = RSBAC_MAC_MIN_CAT_VECTOR, \
++ .mac_process_flags = RSBAC_MAC_DEF_P_FLAGS, \
++ }
++#define DEFAULT_MAC_P_INIT_ACI \
++ { \
++ .owner_sec_level = SL_unclassified, \
++ .owner_initial_sec_level = SL_unclassified, \
++ .owner_min_sec_level = SL_unclassified, \
++ .mac_owner_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ .mac_owner_initial_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ .mac_owner_min_categories = RSBAC_MAC_MIN_CAT_VECTOR, \
++ .current_sec_level = SL_unclassified, \
++ .mac_curr_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ .min_write_open = SL_max, \
++ .min_write_categories = RSBAC_MAC_MAX_CAT_VECTOR, \
++ .max_read_open = SL_unclassified, \
++ .max_read_categories = RSBAC_MAC_MIN_CAT_VECTOR, \
++ .mac_process_flags = RSBAC_MAC_DEF_INIT_P_FLAGS, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_PM)
++#define RSBAC_PM_PROCESS_ACI_VERSION 1
++#define RSBAC_PM_PROCESS_ACI_KEY 1001
++struct rsbac_pm_process_aci_t {
++ rsbac_pm_tp_id_t pm_tp;
++ rsbac_pm_task_id_t pm_current_task;
++ rsbac_pm_process_type_int_t pm_process_type; /* enum rsbac_pm_process_type_t */
++};
++#define DEFAULT_PM_P_ACI \
++ { \
++ .pm_tp = 0, \
++ .pm_current_task = 0, \
++ .pm_process_type = PP_none, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_DAZ)
++#define RSBAC_DAZ_PROCESS_ACI_VERSION 1
++#define RSBAC_DAZ_PROCESS_ACI_KEY 1001
++struct rsbac_daz_process_aci_t {
++ rsbac_boolean_int_t daz_scanner; /* DAZ, boolean */
++};
++#define DEFAULT_DAZ_P_ACI \
++ { \
++ .daz_scanner = FALSE, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_RC)
++#define RSBAC_RC_PROCESS_ACI_VERSION 1
++#define RSBAC_RC_PROCESS_ACI_KEY 1001
++struct rsbac_rc_process_aci_t {
++ rsbac_rc_role_id_t rc_role; /* RC */
++ rsbac_rc_type_id_t rc_type; /* RC */
++ rsbac_rc_role_id_t rc_force_role; /* RC */
++ rsbac_rc_type_id_t rc_select_type; /* RC */
++};
++#define DEFAULT_RC_P_ACI \
++ { \
++ .rc_role = RSBAC_RC_GENERAL_ROLE, \
++ .rc_type = RSBAC_RC_GENERAL_TYPE, \
++ .rc_force_role = RC_default_force_role, \
++ .rc_select_type = RC_type_use_fd, \
++ }
++#define DEFAULT_RC_P_INIT_ACI \
++ { \
++ .rc_role = RSBAC_RC_SYSTEM_ADMIN_ROLE, \
++ .rc_type = RSBAC_RC_GENERAL_TYPE, \
++ .rc_force_role = RC_default_force_role, \
++ .rc_select_type = RC_type_use_fd, \
++ }
++#define DEFAULT_RC_P_KERNEL_ACI \
++ { \
++ .rc_role = RSBAC_RC_SYSTEM_ADMIN_ROLE, \
++ .rc_type = CONFIG_RSBAC_RC_KERNEL_PROCESS_TYPE, \
++ .rc_force_role = RC_default_force_role, \
++ .rc_select_type = RC_type_use_fd, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH)
++#define RSBAC_AUTH_PROCESS_ACI_VERSION 1
++#define RSBAC_AUTH_PROCESS_ACI_KEY 1001
++struct rsbac_auth_process_aci_t {
++ __u8 auth_may_setuid; /* AUTH (boolean) */
++ __u8 auth_may_set_cap; /* AUTH (boolean) */
++ rsbac_uid_t auth_last_auth;
++#if defined(CONFIG_RSBAC_AUTH_LEARN)
++ rsbac_uid_t auth_start_uid;
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ rsbac_uid_t auth_start_euid;
++#endif
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ rsbac_gid_t auth_start_gid;
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ rsbac_gid_t auth_start_egid;
++#endif
++#endif
++ __u8 auth_learn; /* AUTH (boolean) */
++#endif
++};
++
++#if defined(CONFIG_RSBAC_AUTH_LEARN)
++#define DEFAULT_AUTH_P_ACI \
++ { \
++ .auth_may_setuid = FALSE, \
++ .auth_may_set_cap = FALSE, \
++ .auth_last_auth = RSBAC_NO_USER, \
++ .auth_start_uid = 0, \
++ .auth_learn = 0, \
++ }
++#else
++#define DEFAULT_AUTH_P_ACI \
++ { \
++ .auth_may_setuid = FALSE, \
++ .auth_may_set_cap = FALSE, \
++ .auth_last_auth = RSBAC_NO_USER, \
++ }
++#endif
++#endif
++
++
++#if defined(CONFIG_RSBAC_CAP)
++#define RSBAC_CAP_PROCESS_ACI_VERSION 2
++#define RSBAC_CAP_PROCESS_ACI_KEY 10013283
++struct rsbac_cap_process_aci_t {
++ rsbac_cap_process_hiding_int_t cap_process_hiding;
++#if defined(CONFIG_RSBAC_CAP_LOG_MISSING) || defined(CONFIG_RSBAC_CAP_LEARN)
++ rsbac_cap_vector_t max_caps_user;
++ rsbac_cap_vector_t max_caps_program;
++#endif
++ rsbac_cap_ld_env_int_t cap_ld_env;
++};
++
++#ifdef CONFIG_RSBAC_CAP_LOG_MISSING
++#define DEFAULT_CAP_P_ACI \
++ { \
++ .cap_process_hiding = PH_off, \
++ .max_caps_user.cap[0] = RSBAC_CAP_DEFAULT_MAX, \
++ .max_caps_user.cap[1] = RSBAC_CAP_DEFAULT_MAX, \
++ .max_caps_program.cap[0] = RSBAC_CAP_DEFAULT_MAX, \
++ .max_caps_program.cap[1] = RSBAC_CAP_DEFAULT_MAX, \
++ .cap_ld_env = LD_allow, \
++ }
++#else
++#define DEFAULT_CAP_P_ACI \
++ { \
++ .cap_process_hiding = PH_off, \
++ .cap_ld_env = LD_allow, \
++ }
++#endif
++#endif
++
++#if defined(CONFIG_RSBAC_JAIL)
++#define RSBAC_JAIL_PROCESS_ACI_VERSION 1
++#define RSBAC_JAIL_PROCESS_ACI_KEY 1001
++struct rsbac_jail_process_aci_t {
++ rsbac_jail_id_t id;
++ rsbac_jail_id_t parent;
++ rsbac_jail_ip_t ip;
++ rsbac_jail_flags_t flags;
++ rsbac_cap_vector_t max_caps; /* Program max Linux capabilities */
++ rsbac_jail_scd_vector_t scd_get; /* SCD targets GET_STATUS_DATA */
++ rsbac_jail_scd_vector_t scd_modify; /* SCD targets MODIFY_SYSTEM_DATA */
++};
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
++#define DEFAULT_JAIL_P_ACI \
++ { \
++ .id = 0, \
++ .parent = 0, \
++ .ip = 0, \
++ .flags = 0, \
++ .max_caps.cap[0] = -1, \
++ .max_caps.cap[1] = -1, \
++ .scd_get = 0, \
++ .scd_modify = 0, \
++ }
++#else
++#define DEFAULT_JAIL_P_ACI \
++ { \
++ .id = 0, \
++ .parent = 0, \
++ .ip = 0, \
++ .flags = 0, \
++ .max_caps = -1, \
++ .scd_get = 0, \
++ .scd_modify = 0, \
++ }
++#endif
++#endif
++
++#define RSBAC_PROCESS_NR_ATTRIBUTES 39
++#define RSBAC_PROCESS_ATTR_LIST { \
++ A_security_level, \
++ A_min_security_level, \
++ A_mac_categories, \
++ A_mac_min_categories, \
++ A_current_sec_level, \
++ A_mac_curr_categories, \
++ A_min_write_open, \
++ A_min_write_categories, \
++ A_max_read_open, \
++ A_max_read_categories, \
++ A_mac_process_flags, \
++ A_pm_tp, \
++ A_pm_current_task, \
++ A_pm_process_type, \
++ A_daz_scanner, \
++ A_rc_role, \
++ A_rc_type, \
++ A_rc_force_role, \
++ A_rc_select_type, \
++ A_auth_may_setuid, \
++ A_auth_may_set_cap, \
++ A_auth_learn, \
++ A_cap_process_hiding, \
++ A_max_caps_user, \
++ A_max_caps_program, \
++ A_cap_ld_env, \
++ A_jail_id, \
++ A_jail_ip, \
++ A_jail_flags, \
++ A_jail_max_caps, \
++ A_jail_scd_get, \
++ A_jail_scd_modify, \
++ A_log_program_based, \
++ A_fake_root_uid, \
++ A_audit_uid, \
++ A_auid_exempt, \
++ A_auth_last_auth, \
++ A_remote_ip, \
++ A_vset \
++ }
++
++#ifdef __KERNEL__
++struct rsbac_process_handles_t {
++ rsbac_list_handle_t gen;
++#if defined(CONFIG_RSBAC_MAC)
++ rsbac_list_handle_t mac;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ rsbac_list_handle_t pm;
++#endif
++#if defined(CONFIG_RSBAC_DAZ)
++ rsbac_list_handle_t daz;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ rsbac_list_handle_t rc;
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ rsbac_list_handle_t auth;
++#endif
++#if defined(CONFIG_RSBAC_CAP)
++ rsbac_list_handle_t cap;
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ rsbac_list_handle_t jail;
++#endif
++};
++#endif /* __KERNEL__ */
++
++
++/******************************/
++/* OK, now we define the UM group ACI, holding all information */
++/* the ADF needs for decisions. */
++
++#define RSBAC_RC_ACI_GROUP_NAME "grouprc"
++
++/* Caution: whenever ACI changes, version should be increased! */
++
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++#define RSBAC_RC_GROUP_ACI_VERSION 1
++#define RSBAC_RC_GROUP_ACI_KEY 13276142
++#endif
++
++#define RSBAC_GROUP_NR_ATTRIBUTES 1
++#define RSBAC_GROUP_ATTR_LIST { \
++ A_rc_type \
++ }
++
++#ifdef __KERNEL__
++struct rsbac_group_handles_t {
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ rsbac_list_handle_t rc;
++#endif
++};
++#endif /* __KERNEL__ */
++
++/********************************/
++/* NETDEV ACI */
++
++#define RSBAC_GEN_ACI_NETDEV_NAME "nd_gen"
++#define RSBAC_RC_ACI_NETDEV_NAME "nd_rc"
++
++#define RSBAC_GEN_NETDEV_ACI_VERSION 1
++#define RSBAC_GEN_NETDEV_ACI_KEY 1001
++struct rsbac_gen_netdev_aci_t {
++ rsbac_log_array_t log_array_low; /* netdev based logging, */
++ rsbac_log_array_t log_array_high; /* high and low bits */
++};
++#define DEFAULT_GEN_NETDEV_ACI \
++ { \
++ .log_array_low = -1, \
++ .log_array_high = -1, \
++ }
++
++#if defined(CONFIG_RSBAC_RC) || defined(CONFIG_RSBAC_RC_MAINT)
++#define RSBAC_RC_NETDEV_ACI_VERSION 1
++#define RSBAC_RC_NETDEV_ACI_KEY 1001
++#endif
++
++#define RSBAC_NETDEV_NR_ATTRIBUTES 3
++#define RSBAC_NETDEV_ATTR_LIST { \
++ A_rc_type, \
++ A_log_array_low, \
++ A_log_array_high \
++ }
++
++#ifdef __KERNEL__
++struct rsbac_netdev_handles_t {
++#if defined(CONFIG_RSBAC_IND_NETDEV_LOG)
++ rsbac_list_handle_t gen;
++#endif
++#if defined(CONFIG_RSBAC_RC) || defined(CONFIG_RSBAC_RC_MAINT)
++ rsbac_list_handle_t rc;
++#endif
++};
++#endif /* __KERNEL__ */
++
++/********************************/
++/* NETTEMP ACI */
++
++#define RSBAC_GEN_ACI_NETTEMP_NAME "nt_gen"
++#define RSBAC_MAC_ACI_NETTEMP_NAME "nt_mac"
++#define RSBAC_PM_ACI_NETTEMP_NAME "nt_pm"
++#define RSBAC_RC_ACI_NETTEMP_NAME "nt_rc"
++
++#define RSBAC_MAC_ACI_LNETOBJ_NAME "lnetobj_mac"
++#define RSBAC_PM_ACI_LNETOBJ_NAME "lnetobj_pm"
++#define RSBAC_RC_ACI_LNETOBJ_NAME "lnetobj_rc"
++#define RSBAC_MAC_ACI_RNETOBJ_NAME "rnetobj_mac"
++#define RSBAC_PM_ACI_RNETOBJ_NAME "rnetobj_pm"
++#define RSBAC_RC_ACI_RNETOBJ_NAME "rnetobj_rc"
++
++#define RSBAC_GEN_NETOBJ_ACI_VERSION 1
++#define RSBAC_GEN_NETOBJ_ACI_KEY 1001
++struct rsbac_gen_netobj_aci_t {
++ rsbac_log_array_t log_array_low; /* nettemp/netobj based logging, */
++ rsbac_log_array_t log_array_high; /* high and low bits */
++};
++#define DEFAULT_GEN_NETOBJ_ACI \
++ { \
++ .log_array_low = -1, \
++ .log_array_high = -1, \
++ }
++
++#if defined(CONFIG_RSBAC_MAC) || defined(CONFIG_RSBAC_MAC_MAINT)
++#define RSBAC_MAC_NETOBJ_ACI_VERSION 1
++#define RSBAC_MAC_NETOBJ_ACI_KEY 1001
++struct rsbac_mac_netobj_aci_t {
++ rsbac_security_level_t sec_level; /* enum old_rsbac_security_level_t / __u8 */
++ rsbac_mac_category_vector_t mac_categories; /* MAC category set */
++};
++#define DEFAULT_MAC_NETOBJ_ACI \
++ { \
++ .sec_level = SL_unclassified, /* security_level (MAC) */ \
++ .mac_categories = RSBAC_MAC_DEF_CAT_VECTOR, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_PM_MAINT)
++#define RSBAC_PM_NETOBJ_ACI_VERSION 1
++#define RSBAC_PM_NETOBJ_ACI_KEY 1001
++struct rsbac_pm_netobj_aci_t {
++ rsbac_pm_object_class_id_t pm_object_class; /* netobj only */
++ rsbac_pm_purpose_id_t pm_ipc_purpose;
++ rsbac_pm_object_type_int_t pm_object_type; /* enum rsbac_pm_object_type_t */
++};
++#define DEFAULT_PM_NETOBJ_ACI \
++ { \
++ .pm_object_class = RSBAC_PM_IPC_OBJECT_CLASS_ID, \
++ .pm_ipc_purpose = 0, \
++ .pm_object_type = PO_ipc, \
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_RC) || defined(CONFIG_RSBAC_RC_MAINT)
++#define RSBAC_RC_NETOBJ_ACI_VERSION 1
++#define RSBAC_RC_NETOBJ_ACI_KEY 1001
++#define RSBAC_RC_NETTEMP_ACI_VERSION 1
++#define RSBAC_RC_NETTEMP_ACI_KEY 1002
++
++struct rsbac_rc_nettemp_aci_t {
++ rsbac_rc_type_id_t netobj_type; /* type inherited by the netobj */
++ rsbac_rc_type_id_t nettemp_type; /* type of this template */
++};
++#define DEFAULT_RC_NETTEMP_ACI \
++ { \
++ .netobj_type = RSBAC_RC_GENERAL_TYPE, \
++ .nettemp_type = RSBAC_RC_GENERAL_TYPE, \
++ }
++#endif
++
++#define RSBAC_NETTEMP_NR_ATTRIBUTES 9
++#define RSBAC_NETTEMP_ATTR_LIST { \
++ A_security_level, \
++ A_mac_categories, \
++ A_pm_object_class, \
++ A_pm_ipc_purpose, \
++ A_pm_object_type, \
++ A_rc_type, \
++ A_rc_type_nt, \
++ A_log_array_low, \
++ A_log_array_high \
++ }
++
++#define RSBAC_NETOBJ_NR_ATTRIBUTES 16
++#define RSBAC_NETOBJ_ATTR_LIST { \
++ A_local_sec_level, \
++ A_remote_sec_level, \
++ A_local_mac_categories, \
++ A_remote_mac_categories, \
++ A_local_pm_object_class, \
++ A_remote_pm_object_class, \
++ A_local_pm_ipc_purpose, \
++ A_remote_pm_ipc_purpose, \
++ A_local_pm_object_type, \
++ A_remote_pm_object_type, \
++ A_local_rc_type, \
++ A_remote_rc_type, \
++ A_local_log_array_low, \
++ A_remote_log_array_low, \
++ A_local_log_array_high, \
++ A_remote_log_array_high \
++ }
++
++#ifdef __KERNEL__
++struct rsbac_nettemp_handles_t {
++#if defined(CONFIG_RSBAC_IND_NETOBJ_LOG)
++ rsbac_list_handle_t gen;
++#endif
++#if defined(CONFIG_RSBAC_MAC) || defined(CONFIG_RSBAC_MAC_MAINT)
++ rsbac_list_handle_t mac;
++#endif
++#if defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_PM_MAINT)
++ rsbac_list_handle_t pm;
++#endif
++#if defined(CONFIG_RSBAC_RC) || defined(CONFIG_RSBAC_RC_MAINT)
++ rsbac_list_handle_t rc;
++#endif
++};
++
++struct rsbac_lnetobj_handles_t {
++#if defined(CONFIG_RSBAC_MAC) || defined(CONFIG_RSBAC_MAC_MAINT)
++ rsbac_list_handle_t mac;
++#endif
++#if defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_PM_MAINT)
++ rsbac_list_handle_t pm;
++#endif
++#if defined(CONFIG_RSBAC_RC) || defined(CONFIG_RSBAC_RC_MAINT)
++ rsbac_list_handle_t rc;
++#endif
++};
++struct rsbac_rnetobj_handles_t {
++#if defined(CONFIG_RSBAC_MAC) || defined(CONFIG_RSBAC_MAC_MAINT)
++ rsbac_list_handle_t mac;
++#endif
++#if defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_PM_MAINT)
++ rsbac_list_handle_t pm;
++#endif
++#if defined(CONFIG_RSBAC_RC) || defined(CONFIG_RSBAC_RC_MAINT)
++ rsbac_list_handle_t rc;
++#endif
++};
++#endif /* __KERNEL__ */
++
++
++/**********************************************/
++/* Declarations */
++/**********************************************/
++
++#ifdef __KERNEL__
++extern kdev_t rsbac_root_dev;
++
++int rsbac_read_open(char *, struct file **, /* file */
++ kdev_t);
++
++int rsbac_write_open(char *, struct file **, /* file */
++ kdev_t);
++
++void rsbac_read_close(struct file *);
++
++void rsbac_write_close(struct file *);
++
++extern struct semaphore rsbac_write_sem;
++
++#endif /* __KERNEL__ */
++
++/**********************************************/
++/* External Declarations */
++/**********************************************/
++
++#ifdef __KERNEL__
++
++static inline struct dentry *lock_parent(struct dentry *dentry)
++{
++ struct dentry *dir = dget(dentry->d_parent);
++
++ mutex_lock(&dir->d_inode->i_mutex);
++ return dir;
++}
++
++static inline void unlock_dir(struct dentry *dir)
++{
++ mutex_unlock(&dir->d_inode->i_mutex);
++ dput(dir);
++}
++
++static inline void double_mutex_lock(struct mutex *m1, struct mutex *m2)
++{
++ if (m1 != m2) {
++ if ((unsigned long) m1 < (unsigned long) m2) {
++ struct mutex *tmp = m2;
++ m2 = m1;
++ m1 = tmp;
++ }
++ mutex_lock(m1);
++ }
++ mutex_lock(m2);
++}
++
++static inline void double_mutex_unlock(struct mutex *m1, struct mutex *m2)
++{
++ mutex_unlock(m1);
++ if (m1 != m2)
++ mutex_unlock(m2);
++}
++
++static inline void double_lock(struct dentry *d1, struct dentry *d2)
++{
++ double_mutex_lock(&d1->d_inode->i_mutex, &d2->d_inode->i_mutex);
++}
++
++static inline void double_unlock(struct dentry *d1, struct dentry *d2)
++{
++ double_mutex_unlock(&d1->d_inode->i_mutex, &d2->d_inode->i_mutex);
++ dput(d1);
++ dput(d2);
++}
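++
++/* Illustrative sketch, not part of the original header: double_lock() above
++ * takes the two i_mutex locks in address order, which avoids an ABBA
++ * deadlock when two callers lock the same dentry pair in opposite order.
++ * Note that double_unlock() also drops the two dentry references, so the
++ * caller takes them first.  The helper name and the work done while the
++ * locks are held are made up for the example. */
++static inline void example_with_locked_dirs(struct dentry *src_dir,
++                                            struct dentry *dst_dir)
++{
++        dget(src_dir);
++        dget(dst_dir);
++        double_lock(src_dir, dst_dir);
++        /* ... operate on both directories while both i_mutex are held ... */
++        double_unlock(src_dir, dst_dir);        /* unlocks and dputs both */
++}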
++
++#ifdef CONFIG_RSBAC_DEBUG
++static inline unsigned long rsbac_stack_free_space(void)
++{
++ unsigned long *n = (unsigned long *)(current + 1);
++ while (!*n)
++ n++;
++ return (unsigned long)n - (unsigned long)(current + 1);
++}
++#else
++#define rsbac_stack_free_space() 0
++#endif
++
++#endif /* __KERNEL__ */
++
++#endif
+diff --git a/include/rsbac/acl.h b/include/rsbac/acl.h
+new file mode 100644
+index 0000000..35554c3
+--- /dev/null
++++ b/include/rsbac/acl.h
+@@ -0,0 +1,266 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2009: Amon Ott */
++/* API: Data structures */
++/* and functions for Access */
++/* Control Information / ACL */
++/* Last modified: 15/Oct/2009 */
++/************************************ */
++
++#ifndef __RSBAC_ACL_H
++#define __RSBAC_ACL_H
++
++#include <linux/init.h>
++#include <rsbac/types.h>
++
++/***************************************************/
++/* General Prototypes */
++/***************************************************/
++
++/* All functions return 0 if no error occurred, and a negative error code */
++/* otherwise. The error codes are defined in rsbac_error.h. */
++
++/****************************************************************************/
++/* Initialization, including ACI restoration for all mounted devices from */
++/* disk. After this call, all ACI is kept in memory for performance reasons,*/
++/* but user and file/dir object ACI are written to disk on every change. */
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++extern int rsbac_init_acl(void);
++#else
++extern int rsbac_init_acl(void) __init;
++#endif
++
++/* mounting and umounting */
++int rsbac_mount_acl(kdev_t kdev);
++int rsbac_umount_acl(kdev_t kdev);
++
++/* Some information about the current status is also available */
++extern int rsbac_stats_acl(void);
++
++/* Status checking */
++extern int rsbac_check_acl(int correct);
++
++/************************************************* */
++/* Access functions */
++/************************************************* */
++
++/* All these procedures handle the spinlocks to protect the targets during */
++/* access. */
++
++/* rsbac_acl_set_acl_entry
++ * Set ACL entry for given target and subject to given rights. If entry does
++ * not exist, it is created, thus cutting the inheritance from default/parent.
++ */
++
++int rsbac_acl_set_acl_entry(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id,
++ rsbac_acl_rights_vector_t rights,
++ rsbac_time_t ttl);
++
++/* rsbac_acl_remove_acl_entry
++ * Remove ACL entry for given target and subject. This reactivates the
++ * inheritance from default/parent.
++ */
++
++int rsbac_acl_remove_acl_entry(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id);
++
++/* rsbac_acl_remove_acl
++ * Remove ACL for given target. For cleanup on delete.
++ */
++
++int rsbac_acl_remove_acl(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid);
++
++/* rsbac_acl_add_to_acl_entry
++ * Add given rights to ACL entry for given target and subject. If entry does
++ * not exist, behaviour is exactly like rsbac_acl_set_acl_entry.
++ */
++
++int rsbac_acl_add_to_acl_entry(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id,
++ rsbac_acl_rights_vector_t rights,
++ rsbac_time_t ttl);
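++
++/* Illustrative sketch, not part of the original header: grant one user a
++ * pair of read-type rights on an already resolved target.  The target type
++ * T_FILE, ta_number 0 ("no transaction") and ttl 0 ("no expiry") are
++ * assumptions made for this example; the call pattern and the rights
++ * vector macro follow the declarations in this tree. */
++static inline int example_grant_read(union rsbac_target_id_t tid,
++                                     rsbac_uid_t uid)
++{
++        rsbac_acl_rights_vector_t rights =
++                RSBAC_ACL_RIGHTS_VECTOR(R_READ) |
++                RSBAC_ACL_RIGHTS_VECTOR(R_READ_OPEN);
++
++        return rsbac_acl_add_to_acl_entry(0, T_FILE, tid,
++                                          ACLS_USER, uid, rights, 0);
++}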
++
++/* rsbac_acl_remove_from_acl_entry
++ * Remove given rights from ACL entry for given target and subject. If entry does
++ * not exist, nothing happens.
++ * This function does NOT remove the ACL entry, so removing all rights results in
++ * NO rights for this subject/target combination!
++ */
++
++int rsbac_acl_remove_from_acl_entry(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t
++ subj_type,
++ rsbac_acl_subject_id_t subj_id,
++ rsbac_acl_rights_vector_t rights);
++
++/* rsbac_acl_set_mask
++ * Set inheritance mask for given target to given rights. If item does
++ * not exist, it is created.
++ */
++
++int rsbac_acl_set_mask(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ rsbac_acl_rights_vector_t mask);
++
++/* rsbac_acl_get_mask
++ * Get inheritance mask for given target. If item does
++ * not exist, the default mask is returned.
++ */
++
++int rsbac_acl_get_mask(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ rsbac_acl_rights_vector_t * mask_p);
++
++/* rsbac_acl_get_rights
++ * Get effective rights from ACL entry for given target and subject.
++ * If entry does not exist, inherited rights are used. If there is no parent,
++ * the default rights vector for this target type is returned.
++ * This function does NOT add role or group rights to user rights!
++ */
++
++int rsbac_acl_get_rights(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id,
++ rsbac_acl_rights_vector_t * rights_p,
++ rsbac_boolean_t inherit);
++
++/* rsbac_acl_get_single_right
++ * Show whether a right is set for the given target and subject.
++ * If right is not set, it is checked at all parents, unless it has been
++ * masked out *or* it is SUPERVISOR, CONFIG_RSBAC_ACL_SUPER_FILTER is set
++ * and supervisor is masked out.
++ */
++
++int rsbac_acl_get_single_right(enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id,
++ enum rsbac_adf_request_t right,
++ rsbac_boolean_t * result);
++
++
++/************************************************************************** */
++/* The rsbac_acl_copy_fd_acl() function copies a file/dir ACL to another */
++/* file/dir ACL. The old ACL of fd2 is erased before copying. */
++
++int rsbac_acl_copy_fd_acl(struct rsbac_fs_file_t file1,
++ struct rsbac_fs_file_t file2);
++
++/************************************************************************** */
++/* The rsbac_acl_copy_pp_acl() function copies a process acl to another */
++
++int rsbac_acl_copy_pp_acl(rsbac_pid_t old_pid, rsbac_pid_t new_pid);
++
++/*************************************************
++ * rsbac_acl_get_tlist
++ * Get subjects from ACL entries for given target.
++ */
++
++int rsbac_acl_get_tlist(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ struct rsbac_acl_entry_t **entry_pp,
++ rsbac_time_t ** ttl_pp);
++
++/*************************************************
++ * Group management
++ */
++
++/* add a group with new id and fill this id into *group_id_p */
++int rsbac_acl_add_group(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t owner,
++ enum rsbac_acl_group_type_t type,
++ char *name, rsbac_acl_group_id_t * group_id_p);
++
++int rsbac_acl_change_group(rsbac_list_ta_number_t ta_number,
++ rsbac_acl_group_id_t id,
++ rsbac_uid_t owner,
++ enum rsbac_acl_group_type_t type, char *name);
++
++int rsbac_acl_remove_group(rsbac_list_ta_number_t ta_number,
++ rsbac_acl_group_id_t id);
++
++int rsbac_acl_get_group_entry(rsbac_list_ta_number_t ta_number,
++ rsbac_acl_group_id_t group,
++ struct rsbac_acl_group_entry_t *entry_p);
++
++int rsbac_acl_list_groups(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t owner,
++ rsbac_boolean_t include_global,
++ struct rsbac_acl_group_entry_t **entry_pp);
++
++/* check group existence */
++rsbac_boolean_t rsbac_acl_group_exist(rsbac_acl_group_id_t group);
++
++int rsbac_acl_add_group_member(rsbac_list_ta_number_t ta_number,
++ rsbac_acl_group_id_t group,
++ rsbac_uid_t user, rsbac_time_t ttl);
++
++int rsbac_acl_remove_group_member(rsbac_list_ta_number_t ta_number,
++ rsbac_acl_group_id_t group,
++ rsbac_uid_t user);
++
++/* check membership */
++rsbac_boolean_t rsbac_acl_group_member(rsbac_acl_group_id_t group,
++ rsbac_uid_t user);
++
++/* build rsbac_kmalloc'd array of all group memberships of the given user */
++/* returns number of groups or negative error */
++/* Attention: memory deallocation with rsbac_kfree must be done by caller! */
++int rsbac_acl_get_user_groups(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user,
++ rsbac_acl_group_id_t ** group_pp,
++ rsbac_time_t ** ttl_pp);
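++
++/* Illustrative caller sketch, not part of the original header: the function
++ * above returns the number of groups and hands out rsbac_kmalloc'd arrays,
++ * which the caller releases with rsbac_kfree as demanded by the comment.
++ * ta_number 0 and the assumption that the arrays are only allocated for a
++ * positive count are made up for the example. */
++static inline void example_log_user_groups(rsbac_uid_t user)
++{
++        rsbac_acl_group_id_t *groups = NULL;
++        rsbac_time_t *ttls = NULL;
++        int count;
++        int i;
++
++        count = rsbac_acl_get_user_groups(0, user, &groups, &ttls);
++        for (i = 0; i < count; i++)
++                printk(KERN_DEBUG "acl group %u\n", groups[i]);
++        if (count > 0) {
++                rsbac_kfree(groups);
++                rsbac_kfree(ttls);
++        }
++}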
++
++/* Returns number of members or negative error */
++int rsbac_acl_get_group_members(rsbac_list_ta_number_t ta_number,
++ rsbac_acl_group_id_t group,
++ rsbac_uid_t user_array[],
++ rsbac_time_t ttl_array[], int maxnum);
++
++/* Remove subject from all ACLs */
++int rsbac_acl_remove_subject(rsbac_list_ta_number_t ta_number,
++ struct rsbac_acl_entry_desc_t desc);
++
++/*************************************************/
++/* remove user from all groups and from all ACLs */
++int rsbac_acl_remove_user(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user);
++
++/* Get list of all device entries */
++
++int rsbac_acl_list_all_dev(rsbac_list_ta_number_t ta_number,
++ struct rsbac_dev_desc_t **id_pp);
++
++int rsbac_acl_list_all_major_dev(rsbac_list_ta_number_t ta_number,
++ struct rsbac_dev_desc_t **id_pp);
++
++int rsbac_acl_list_all_user(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t ** id_pp);
++
++int rsbac_acl_list_all_group(rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t ** id_pp);
++
++int rsbac_acl_list_all_ipc(rsbac_list_ta_number_t ta_number,
++ struct rsbac_ipc_t ** id_pp);
++
++#endif
+diff --git a/include/rsbac/acl_data_structures.h b/include/rsbac/acl_data_structures.h
+new file mode 100644
+index 0000000..f30d72b
+--- /dev/null
++++ b/include/rsbac/acl_data_structures.h
+@@ -0,0 +1,469 @@
++/**************************************/
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2007: */
++/* Amon Ott <ao@rsbac.org> */
++/* Data structures / ACL */
++/* Last modified: 25/Sep/2007 */
++/**************************************/
++
++#ifndef __RSBAC_ACL_DATA_STRUC_H
++#define __RSBAC_ACL_DATA_STRUC_H
++
++#include <linux/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/types.h>
++#include <rsbac/lists.h>
++
++#define RSBAC_ACL_LIST_KEY 0x815affe
++
++#define RSBAC_ACL_GENERAL_FD_ENTRY \
++ { ACLS_GROUP, \
++ RSBAC_ACL_GROUP_EVERYONE, \
++ ( RSBAC_FD_REQUEST_VECTOR & RSBAC_READ_WRITE_REQUEST_VECTOR ) | RSBAC_EXECUTE_REQUEST_VECTOR | RSBAC_ACL_GEN_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_ACMAN_FD_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SECOFF_UID, \
++ ( RSBAC_FD_REQUEST_VECTOR & \
++ ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_EXECUTE_REQUEST_VECTOR | RSBAC_SECURITY_REQUEST_VECTOR ) ) \
++ | RSBAC_ACL_ACMAN_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_SYSADM_FD_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SYSADM_UID, \
++ ( RSBAC_FD_REQUEST_VECTOR & \
++ ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_EXECUTE_REQUEST_VECTOR | RSBAC_SYSTEM_REQUEST_VECTOR ) ) \
++ | RSBAC_ACL_SYSADM_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_GENERAL_DEV_ENTRY \
++ { ACLS_GROUP, \
++ RSBAC_ACL_GROUP_EVERYONE, \
++ ( RSBAC_DEV_REQUEST_VECTOR & RSBAC_READ_WRITE_REQUEST_VECTOR ) | RSBAC_ACL_GEN_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_ACMAN_DEV_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SECOFF_UID, \
++ ( RSBAC_DEV_REQUEST_VECTOR & \
++ ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SECURITY_REQUEST_VECTOR ) ) \
++ | RSBAC_ACL_ACMAN_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_SYSADM_DEV_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SYSADM_UID, \
++ ( RSBAC_DEV_REQUEST_VECTOR & \
++ ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SYSTEM_REQUEST_VECTOR ) ) \
++ | RSBAC_ACL_SYSADM_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_GENERAL_IPC_ENTRY \
++ { ACLS_GROUP, \
++ RSBAC_ACL_GROUP_EVERYONE, \
++ ( RSBAC_IPC_REQUEST_VECTOR & RSBAC_READ_WRITE_REQUEST_VECTOR ) | RSBAC_ACL_GEN_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_ACMAN_IPC_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SECOFF_UID, \
++ ( RSBAC_IPC_REQUEST_VECTOR & \
++ ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SECURITY_REQUEST_VECTOR ) ) \
++ | RSBAC_ACL_ACMAN_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_SYSADM_IPC_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SYSADM_UID, \
++ ( RSBAC_IPC_REQUEST_VECTOR & \
++ ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SYSTEM_REQUEST_VECTOR ) ) \
++ | RSBAC_ACL_SYSADM_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_GENERAL_SCD_ENTRY \
++ { ACLS_GROUP, \
++ RSBAC_ACL_GROUP_EVERYONE, \
++ ( RSBAC_SCD_REQUEST_VECTOR & \
++ ( RSBAC_READ_WRITE_REQUEST_VECTOR | ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) ) \
++ ) \
++ | RSBAC_ACL_GEN_RIGHTS_VECTOR \
++ }
++
++#ifdef CONFIG_RSBAC_USER_MOD_IOPERM
++#define RSBAC_ACL_GENERAL_SCD_IOPORTS_ENTRY \
++ { ACLS_GROUP, \
++ RSBAC_ACL_GROUP_EVERYONE, \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) \
++ }
++#endif
++
++#define RSBAC_ACL_GENERAL_SCD_OTHER_ENTRY \
++ { ACLS_GROUP, \
++ RSBAC_ACL_GROUP_EVERYONE, \
++ ((rsbac_request_vector_t) 1 << R_MAP_EXEC) \
++ }
++
++#define RSBAC_ACL_GENERAL_SCD_NETWORK_ENTRY \
++ { ACLS_GROUP, \
++ RSBAC_ACL_GROUP_EVERYONE, \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) \
++ }
++
++#define RSBAC_ACL_ACMAN_SCD_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SECOFF_UID, \
++ ( RSBAC_SCD_REQUEST_VECTOR & \
++ ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SECURITY_REQUEST_VECTOR ) ) \
++ | RSBAC_ACL_ACMAN_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_ACMAN_SCD_OTHER_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SECOFF_UID, \
++ ( RSBAC_NONE_REQUEST_VECTOR & \
++ ( \
++ ((rsbac_request_vector_t) 1 << R_MAP_EXEC) \
++ | ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) \
++ | ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) \
++ | ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) \
++ | ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) \
++ | ((rsbac_request_vector_t) 1 << R_SWITCH_LOG) \
++ | ((rsbac_request_vector_t) 1 << R_SWITCH_MODULE) \
++ ) \
++ ) \
++ | RSBAC_ACL_ACMAN_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_SYSADM_SCD_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SYSADM_UID, \
++ ( RSBAC_SCD_REQUEST_VECTOR & \
++ ( \
++ ((rsbac_request_vector_t) 1 << R_GET_PERMISSIONS_DATA) \
++ | ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) \
++ | ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) \
++ | ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) \
++ | ((rsbac_request_vector_t) 1 << R_WRITE) \
++ ) \
++ ) \
++ | RSBAC_ACL_SYSADM_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_SYSADM_SCD_OTHER_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SYSADM_UID, \
++ ( RSBAC_NONE_REQUEST_VECTOR & \
++ ( \
++ ((rsbac_request_vector_t) 1 << R_ADD_TO_KERNEL) \
++ | ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) \
++ | ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) \
++ | ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) \
++ | ((rsbac_request_vector_t) 1 << R_MAP_EXEC) \
++ | ((rsbac_request_vector_t) 1 << R_MOUNT) \
++ | ((rsbac_request_vector_t) 1 << R_REMOVE_FROM_KERNEL) \
++ | ((rsbac_request_vector_t) 1 << R_UMOUNT) \
++ | ((rsbac_request_vector_t) 1 << R_SHUTDOWN) \
++ ) \
++ ) \
++ | ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) \
++ | RSBAC_ACL_SYSADM_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_AUDITOR_SCD_RSBACLOG_ENTRY \
++ { ACLS_USER, \
++ RSBAC_AUDITOR_UID, \
++ ( RSBAC_SCD_REQUEST_VECTOR & \
++ ( \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) \
++ | ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) \
++ ) \
++ ) \
++ }
++
++#ifdef CONFIG_RSBAC_USER_MOD_IOPERM
++#define RSBAC_ACL_SYSADM_SCD_KMEM_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SYSADM_UID, \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) \
++ }
++#endif
++
++#define RSBAC_ACL_GENERAL_U_ENTRY \
++ { ACLS_GROUP, \
++ RSBAC_ACL_GROUP_EVERYONE, \
++ RSBAC_REQUEST_VECTOR(R_CHANGE_OWNER) | RSBAC_REQUEST_VECTOR(R_SEARCH) \
++ | RSBAC_REQUEST_VECTOR(R_GET_STATUS_DATA) }
++
++#define RSBAC_ACL_ACMAN_U_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SECOFF_UID, \
++ RSBAC_ACL_USER_RIGHTS_VECTOR \
++ | RSBAC_ACL_ACMAN_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_SYSADM_U_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SYSADM_UID, \
++ RSBAC_REQUEST_VECTOR(R_CHANGE_OWNER) | RSBAC_ACL_RIGHTS_VECTOR(R_READ_ATTRIBUTE) \
++ | RSBAC_REQUEST_VECTOR(R_SEARCH) | RSBAC_REQUEST_VECTOR(R_GET_STATUS_DATA) \
++ | RSBAC_REQUEST_VECTOR(R_AUTHENTICATE) \
++ | RSBAC_ACL_SYSADM_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_GENERAL_P_ENTRY \
++ { ACLS_GROUP, \
++ RSBAC_ACL_GROUP_EVERYONE, \
++ ( RSBAC_PROCESS_REQUEST_VECTOR & RSBAC_READ_WRITE_REQUEST_VECTOR ) | RSBAC_ACL_GEN_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_ACMAN_P_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SECOFF_UID, \
++ ( RSBAC_PROCESS_REQUEST_VECTOR & \
++ ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SECURITY_REQUEST_VECTOR ) ) \
++ | RSBAC_ACL_ACMAN_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_SYSADM_P_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SYSADM_UID, \
++ ( RSBAC_PROCESS_REQUEST_VECTOR & \
++ ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SYSTEM_REQUEST_VECTOR ) ) \
++ | RSBAC_ACL_SYSADM_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_GENERAL_G_ENTRY \
++ { ACLS_GROUP, \
++ RSBAC_ACL_GROUP_EVERYONE, \
++ RSBAC_REQUEST_VECTOR(R_SEARCH) | RSBAC_REQUEST_VECTOR(R_READ) }
++
++#define RSBAC_ACL_ACMAN_G_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SECOFF_UID, \
++ ( RSBAC_GROUP_REQUEST_VECTOR & \
++ ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SECURITY_REQUEST_VECTOR ) ) \
++ | RSBAC_ACL_ACMAN_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_SYSADM_G_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SYSADM_UID, \
++ RSBAC_REQUEST_VECTOR(R_SEARCH) | RSBAC_REQUEST_VECTOR(R_READ) }
++
++#define RSBAC_ACL_GENERAL_NETDEV_ENTRY \
++ { ACLS_GROUP, \
++ RSBAC_ACL_GROUP_EVERYONE, \
++ ( RSBAC_NETDEV_REQUEST_VECTOR ) | RSBAC_ACL_GEN_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_ACMAN_NETDEV_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SECOFF_UID, \
++ ( RSBAC_NETDEV_REQUEST_VECTOR & \
++ ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SECURITY_REQUEST_VECTOR ) ) \
++ | RSBAC_ACL_ACMAN_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_SYSADM_NETDEV_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SYSADM_UID, \
++ ( RSBAC_NETDEV_REQUEST_VECTOR & \
++ ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SYSTEM_REQUEST_VECTOR ) ) \
++ | RSBAC_ACL_SYSADM_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_GENERAL_NETTEMP_NT_ENTRY \
++ { ACLS_GROUP, \
++ RSBAC_ACL_GROUP_EVERYONE, \
++ ( RSBAC_NETTEMP_REQUEST_VECTOR & RSBAC_READ_REQUEST_VECTOR ) | RSBAC_ACL_GEN_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_ACMAN_NETTEMP_NT_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SECOFF_UID, \
++ ( RSBAC_NETTEMP_REQUEST_VECTOR & \
++ ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SECURITY_REQUEST_VECTOR ) ) \
++ | RSBAC_ACL_ACMAN_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_SYSADM_NETTEMP_NT_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SYSADM_UID, \
++ ( RSBAC_NETTEMP_REQUEST_VECTOR & \
++ ( RSBAC_READ_REQUEST_VECTOR | RSBAC_SYSTEM_REQUEST_VECTOR ) ) \
++ | RSBAC_ACL_SYSADM_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_GENERAL_NETOBJ_ENTRY \
++ { ACLS_GROUP, \
++ RSBAC_ACL_GROUP_EVERYONE, \
++ ( RSBAC_NETOBJ_REQUEST_VECTOR & RSBAC_READ_WRITE_REQUEST_VECTOR ) \
++ | RSBAC_REQUEST_VECTOR(R_MODIFY_SYSTEM_DATA) \
++ | RSBAC_ACL_GEN_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_ACMAN_NETOBJ_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SECOFF_UID, \
++ ( RSBAC_NETOBJ_REQUEST_VECTOR & \
++ ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SECURITY_REQUEST_VECTOR ) ) \
++ | RSBAC_REQUEST_VECTOR(R_MODIFY_SYSTEM_DATA) \
++ | RSBAC_ACL_ACMAN_RIGHTS_VECTOR }
++
++#define RSBAC_ACL_SYSADM_NETOBJ_ENTRY \
++ { ACLS_USER, \
++ RSBAC_SYSADM_UID, \
++ ( RSBAC_NETOBJ_REQUEST_VECTOR & \
++ ( RSBAC_READ_WRITE_REQUEST_VECTOR | RSBAC_SYSTEM_REQUEST_VECTOR ) ) \
++ | RSBAC_REQUEST_VECTOR(R_MODIFY_SYSTEM_DATA) \
++ | RSBAC_ACL_SYSADM_RIGHTS_VECTOR }
++
++
++/**********************************************/
++/* Lists of ACL / General subitems */
++/**********************************************/
++
++/* Each list represents sets of ACL entries, using a set-id and a sublist each */
++
++#define RSBAC_ACL_VERSION 1
++
++/**********************************************/
++/* ACL and device entries for File/Dir ACL */
++/**********************************************/
++
++#define RSBAC_ACL_FD_FILENAME "aclfd"
++#define RSBAC_ACL_FD_OLD_FILENAME "aclfd."
++#define RSBAC_ACL_DEF_FD_FILENAME "aclfd.df"
++#define RSBAC_ACL_NR_FD_LISTS 4
++#define RSBAC_ACL_FD_LIST_VERSION 3
++#define RSBAC_ACL_DEF_FD_LIST_VERSION 3
++#define RSBAC_ACL_FD_OLD_LIST_VERSION 2
++#define RSBAC_ACL_DEF_FD_OLD_LIST_VERSION 2
++#define RSBAC_ACL_FD_OLD_OLD_LIST_VERSION 1
++#define RSBAC_ACL_DEF_FD_OLD_OLD_LIST_VERSION 1
++
++/* The list of devices is also a doubly linked list, so we define list */
++/* items and a list head. */
++
++struct rsbac_acl_device_list_item_t {
++ kdev_t id;
++ u_int mount_count;
++ rsbac_list_handle_t handle;
++ struct rsbac_acl_device_list_item_t *prev;
++ struct rsbac_acl_device_list_item_t *next;
++};
++
++/* To provide consistency we use spinlocks for all list accesses. The */
++/* 'curr' entry is used to avoid repeated lookups for the same item. */
++
++struct rsbac_acl_device_list_head_t {
++ struct rsbac_acl_device_list_item_t *head;
++ struct rsbac_acl_device_list_item_t *tail;
++ struct rsbac_acl_device_list_item_t *curr;
++ u_int count;
++};
++
++
++/**********************************************/
++/* ACL entries for Device ACL */
++/**********************************************/
++
++#define RSBAC_ACL_DEV_FILENAME "acldev"
++#define RSBAC_ACL_DEV_MAJOR_FILENAME "acldevm"
++#define RSBAC_ACL_DEV_LIST_VERSION 4
++#define RSBAC_ACL_DEV_OLD_LIST_VERSION 3
++#define RSBAC_ACL_DEV_OLD_OLD_LIST_VERSION 2
++#define RSBAC_ACL_DEV_OLD_OLD_OLD_LIST_VERSION 1
++#define RSBAC_ACL_DEF_DEV_FILENAME "acldev.df"
++#define RSBAC_ACL_DEF_DEV_LIST_VERSION 3
++#define RSBAC_ACL_DEF_DEV_OLD_LIST_VERSION 2
++#define RSBAC_ACL_DEF_DEV_OLD_OLD_LIST_VERSION 1
++
++/**********************************************/
++/* ACL entries for IPC ACL */
++/**********************************************/
++
++#define RSBAC_ACL_DEF_IPC_FILENAME "aclipc.df"
++#define RSBAC_ACL_DEF_IPC_LIST_VERSION 3
++#define RSBAC_ACL_DEF_IPC_OLD_LIST_VERSION 2
++#define RSBAC_ACL_DEF_IPC_OLD_OLD_LIST_VERSION 1
++
++/**********************************************/
++/* ACL entries for SCD ACL */
++/**********************************************/
++
++#define RSBAC_ACL_SCD_FILENAME "aclscd"
++#define RSBAC_ACL_DEF_SCD_FILENAME "aclscd.df"
++#define RSBAC_ACL_SCD_LIST_VERSION 3
++#define RSBAC_ACL_SCD_OLD_LIST_VERSION 2
++#define RSBAC_ACL_SCD_OLD_OLD_LIST_VERSION 1
++#define RSBAC_ACL_DEF_SCD_LIST_VERSION 3
++#define RSBAC_ACL_DEF_SCD_OLD_LIST_VERSION 2
++#define RSBAC_ACL_DEF_SCD_OLD_OLD_LIST_VERSION 1
++
++/**********************************************/
++/* ACL entries for user ACL */
++/**********************************************/
++
++#define RSBAC_ACL_U_FILENAME "acluser"
++#define RSBAC_ACL_U_LIST_VERSION 2
++#define RSBAC_ACL_U_OLD_LIST_VERSION 1
++#define RSBAC_ACL_DEF_U_FILENAME "acluser.df"
++#define RSBAC_ACL_DEF_U_LIST_VERSION 3
++#define RSBAC_ACL_DEF_U_OLD_LIST_VERSION 2
++#define RSBAC_ACL_DEF_U_OLD_OLD_LIST_VERSION 1
++
++/**********************************************/
++/* ACL entries for process ACL */
++/**********************************************/
++
++#define RSBAC_ACL_DEF_P_FILENAME "aclproc.df"
++#define RSBAC_ACL_DEF_P_LIST_VERSION 3
++#define RSBAC_ACL_DEF_P_OLD_LIST_VERSION 2
++#define RSBAC_ACL_DEF_P_OLD_OLD_LIST_VERSION 1
++
++/**********************************************/
++/* ACL entries for Linux group ACL */
++/**********************************************/
++
++#define RSBAC_ACL_G_FILENAME "acllgrp"
++#define RSBAC_ACL_G_LIST_VERSION 2
++#define RSBAC_ACL_G_OLD_LIST_VERSION 1
++#define RSBAC_ACL_DEF_G_FILENAME "acllgrp.df"
++#define RSBAC_ACL_DEF_G_LIST_VERSION 3
++#define RSBAC_ACL_DEF_G_OLD_LIST_VERSION 2
++#define RSBAC_ACL_DEF_G_OLD_OLD_LIST_VERSION 1
++
++/**********************************************/
++/* ACL entries for Network Device ACL */
++/**********************************************/
++
++#define RSBAC_ACL_NETDEV_FILENAME "aclndev"
++#define RSBAC_ACL_NETDEV_LIST_VERSION 3
++#define RSBAC_ACL_NETDEV_OLD_LIST_VERSION 2
++#define RSBAC_ACL_NETDEV_OLD_OLD_LIST_VERSION 1
++#define RSBAC_ACL_DEF_NETDEV_FILENAME "aclndev.df"
++#define RSBAC_ACL_DEF_NETDEV_LIST_VERSION 3
++#define RSBAC_ACL_DEF_NETDEV_OLD_LIST_VERSION 2
++#define RSBAC_ACL_DEF_NETDEV_OLD_OLD_LIST_VERSION 1
++
++/**********************************************/
++/* ACL entries for Network Template NT (template protection) ACL */
++/**********************************************/
++
++#define RSBAC_ACL_NETTEMP_NT_FILENAME "aclntnt"
++#define RSBAC_ACL_NETTEMP_NT_LIST_VERSION 3
++#define RSBAC_ACL_NETTEMP_NT_OLD_LIST_VERSION 2
++#define RSBAC_ACL_NETTEMP_NT_OLD_OLD_LIST_VERSION 1
++#define RSBAC_ACL_DEF_NETTEMP_NT_FILENAME "aclntnt.df"
++#define RSBAC_ACL_DEF_NETTEMP_NT_LIST_VERSION 3
++#define RSBAC_ACL_DEF_NETTEMP_NT_OLD_LIST_VERSION 2
++#define RSBAC_ACL_DEF_NETTEMP_NT_OLD_OLD_LIST_VERSION 1
++
++/**********************************************/
++/* ACL entries for Network Object ACL */
++/**********************************************/
++
++#define RSBAC_ACL_NETTEMP_FILENAME "aclnt"
++#define RSBAC_ACL_NETTEMP_LIST_VERSION 3
++#define RSBAC_ACL_NETTEMP_OLD_LIST_VERSION 2
++#define RSBAC_ACL_NETTEMP_OLD_OLD_LIST_VERSION 1
++#define RSBAC_ACL_NETOBJ_FILENAME "aclno"
++#define RSBAC_ACL_NETOBJ_LIST_VERSION 3
++#define RSBAC_ACL_NETOBJ_OLD_LIST_VERSION 2
++#define RSBAC_ACL_NETOBJ_OLD_OLD_LIST_VERSION 1
++#define RSBAC_ACL_DEF_NETOBJ_FILENAME "aclno.df"
++#define RSBAC_ACL_DEF_NETOBJ_LIST_VERSION 3
++#define RSBAC_ACL_DEF_NETOBJ_OLD_LIST_VERSION 2
++#define RSBAC_ACL_DEF_NETOBJ_OLD_OLD_LIST_VERSION 1
++
++
++/**********************************************/
++/* Group Lists */
++/**********************************************/
++
++#define RSBAC_ACL_GROUP_FILENAME "aclgrp"
++#define RSBAC_ACL_GM_FILENAME "aclgm"
++
++/* In acl_types.h: #define RSBAC_ACL_GROUP_VERSION 2 */
++
++#define RSBAC_ACL_GM_VERSION 2
++#define RSBAC_ACL_GM_OLD_VERSION 1
++
++#endif
+diff --git a/include/rsbac/acl_getname.h b/include/rsbac/acl_getname.h
+new file mode 100644
+index 0000000..638b43c
+--- /dev/null
++++ b/include/rsbac/acl_getname.h
+@@ -0,0 +1,42 @@
++/********************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2001: */
++/* Amon Ott <ao@rsbac.org> */
++/* Getname functions for ACL parts */
++/* Last modified: 02/Aug/2001 */
++/********************************* */
++
++#ifndef __RSBAC_ACL_GETNAME_H
++#define __RSBAC_ACL_GETNAME_H
++
++#include <rsbac/types.h>
++
++char * get_acl_subject_type_name(char * name,
++ enum rsbac_acl_subject_type_t value);
++
++#ifndef __KERNEL__
++enum rsbac_acl_subject_type_t get_acl_subject_type_nr(const char * name);
++#endif
++
++char * get_acl_group_syscall_name(char * name,
++ enum rsbac_acl_group_syscall_type_t value);
++
++#ifndef __KERNEL__
++enum rsbac_acl_group_syscall_type_t get_acl_group_syscall_nr(const char * name);
++#endif
++
++char * get_acl_special_right_name(char * name,
++ enum rsbac_acl_special_rights_t value);
++
++#ifndef __KERNEL__
++enum rsbac_acl_special_rights_t get_acl_special_right_nr(const char * name);
++#endif
++
++char * get_acl_scd_type_name(char * name,
++ enum rsbac_acl_scd_type_t value);
++
++#ifndef __KERNEL__
++enum rsbac_acl_scd_type_t get_acl_scd_type_nr(const char * name);
++#endif
++
++#endif
+diff --git a/include/rsbac/acl_types.h b/include/rsbac/acl_types.h
+new file mode 100644
+index 0000000..259ba72
+--- /dev/null
++++ b/include/rsbac/acl_types.h
+@@ -0,0 +1,253 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2012: */
++/* Amon Ott <ao@rsbac.org> */
++/* API: Data types for attributes */
++/* and standard module calls */
++/* Last modified: 07/May/2012 */
++/************************************ */
++
++#ifndef __RSBAC_ACL_TYPES_H
++#define __RSBAC_ACL_TYPES_H
++
++#include <linux/types.h>
++
++#define RSBAC_ACL_TTL_KEEP RSBAC_LIST_TTL_KEEP
++
++#define RSBAC_ACL_MAX_MAXNUM 1000000
++
++enum rsbac_acl_subject_type_t {ACLS_USER, ACLS_ROLE, ACLS_GROUP, ACLS_NONE};
++
++typedef __u8 rsbac_acl_int_subject_type_t;
++typedef __u64 rsbac_acl_subject_id_t;
++typedef __u32 rsbac_acl_old_subject_id_t;
++
++#define RSBAC_ACL_GROUP_EVERYONE 0
++
++#define RSBAC_ACL_ROLE_EVERYROLE 64
++
++#define RSBAC_ACL_OLD_SPECIAL_RIGHT_BASE 48
++#define RSBAC_ACL_SPECIAL_RIGHT_BASE 56
++
++enum rsbac_acl_special_rights_t
++ { ACLR_FORWARD = RSBAC_ACL_SPECIAL_RIGHT_BASE,
++ ACLR_ACCESS_CONTROL,
++ ACLR_SUPERVISOR,
++ ACLR_NONE};
++
++typedef __u64 rsbac_acl_rights_vector_t;
++
++#define RSBAC_ACL_RIGHTS_VECTOR(x) ((rsbac_acl_rights_vector_t) 1 << (x))
++
++#define RSBAC_ACL_SPECIAL_RIGHTS_VECTOR (\
++ ((rsbac_acl_rights_vector_t) 1 << ACLR_FORWARD) | \
++ ((rsbac_acl_rights_vector_t) 1 << ACLR_ACCESS_CONTROL) | \
++ ((rsbac_acl_rights_vector_t) 1 << ACLR_SUPERVISOR) \
++ )
++
++#define RSBAC_ACL_SUPERVISOR_RIGHT_VECTOR (\
++ ((rsbac_acl_rights_vector_t) 1 << ACLR_SUPERVISOR) \
++ )
++#define RSBAC_NWS_REQUEST_VECTOR RSBAC_ACL_SUPERVISOR_RIGHT_VECTOR
++
++#define RSBAC_ACL_ACCESS_CONTROL_RIGHT_VECTOR (\
++ ((rsbac_acl_rights_vector_t) 1 << ACLR_ACCESS_CONTROL) \
++ )
++#define RSBAC_NWA_REQUEST_VECTOR RSBAC_ACL_ACCESS_CONTROL_RIGHT_VECTOR
++
++#define RSBAC_ACL_ALL_RIGHTS_VECTOR (RSBAC_ALL_REQUEST_VECTOR | RSBAC_ACL_SPECIAL_RIGHTS_VECTOR)
++
++#define RSBAC_ACL_DEFAULT_FD_MASK (RSBAC_FD_REQUEST_VECTOR | RSBAC_ACL_SPECIAL_RIGHTS_VECTOR)
++#define RSBAC_ACL_DEFAULT_DEV_MASK (RSBAC_DEV_REQUEST_VECTOR | RSBAC_ACL_SPECIAL_RIGHTS_VECTOR)
++#define RSBAC_ACL_DEFAULT_SCD_MASK (RSBAC_SCD_REQUEST_VECTOR | RSBAC_ACL_SPECIAL_RIGHTS_VECTOR)
++#define RSBAC_ACL_DEFAULT_U_MASK (RSBAC_USER_REQUEST_VECTOR | RSBAC_ACL_SPECIAL_RIGHTS_VECTOR)
++#define RSBAC_ACL_DEFAULT_G_MASK (RSBAC_GROUP_REQUEST_VECTOR | RSBAC_ACL_SPECIAL_RIGHTS_VECTOR)
++#define RSBAC_ACL_DEFAULT_NETDEV_MASK (RSBAC_NETDEV_REQUEST_VECTOR | RSBAC_ACL_SPECIAL_RIGHTS_VECTOR)
++#define RSBAC_ACL_DEFAULT_NETTEMP_MASK (RSBAC_NETTEMP_REQUEST_VECTOR | RSBAC_ACL_SPECIAL_RIGHTS_VECTOR)
++#define RSBAC_ACL_DEFAULT_NETOBJ_MASK (RSBAC_NETOBJ_REQUEST_VECTOR | RSBAC_ACL_SPECIAL_RIGHTS_VECTOR)
++
++#define RSBAC_ACL_USER_RIGHTS_VECTOR (RSBAC_USER_REQUEST_VECTOR \
++ | RSBAC_ACL_RIGHTS_VECTOR(R_DELETE))
++
++#define RSBAC_ACL_GROUP_RIGHTS_VECTOR RSBAC_GROUP_REQUEST_VECTOR
++
++#define RSBAC_ACL_GEN_RIGHTS_VECTOR 0
++
++#define RSBAC_ACL_ACMAN_RIGHTS_VECTOR (\
++ ((rsbac_acl_rights_vector_t) 1 << ACLR_FORWARD) | \
++ ((rsbac_acl_rights_vector_t) 1 << ACLR_ACCESS_CONTROL) | \
++ ((rsbac_acl_rights_vector_t) 1 << ACLR_SUPERVISOR) \
++ )
++
++#define RSBAC_ACL_SYSADM_RIGHTS_VECTOR 0
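++
++/* Illustrative sketch, not part of the original header: rights vectors are
++ * plain 64 bit masks built with RSBAC_ACL_RIGHTS_VECTOR(), so testing an
++ * individual (special) right is simple bit arithmetic.  The helper name is
++ * made up for the example. */
++static inline int example_rights_include_supervisor(rsbac_acl_rights_vector_t rights)
++{
++        return (rights & RSBAC_ACL_RIGHTS_VECTOR(ACLR_SUPERVISOR)) != 0;
++}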
++
++/*
++ * System Control Types, including general SCD types
++ * (start at 32 to allow future SCD types, max is 63)
++ * (should always be same as in RC model)
++ */
++#define AST_min 32
++enum rsbac_acl_scd_type_t{AST_auth_administration = AST_min,
++ AST_none};
++
++/* note: the desc struct must be the same as the beginning of the entry struct! */
++struct rsbac_acl_entry_t
++ {
++ rsbac_acl_int_subject_type_t subj_type; /* enum rsbac_acl_subject_type_t */
++ rsbac_acl_subject_id_t subj_id;
++ rsbac_acl_rights_vector_t rights;
++ };
++
++struct rsbac_acl_entry_desc_t
++ {
++ rsbac_acl_int_subject_type_t subj_type; /* enum rsbac_acl_subject_type_t */
++ rsbac_acl_subject_id_t subj_id;
++ };
++
++struct rsbac_acl_old_entry_desc_t
++ {
++ rsbac_acl_int_subject_type_t subj_type; /* enum rsbac_acl_subject_type_t */
++ rsbac_acl_old_subject_id_t subj_id;
++ };
++
++enum rsbac_acl_group_type_t {ACLG_GLOBAL, ACLG_PRIVATE, ACLG_NONE};
++
++typedef __u32 rsbac_acl_group_id_t;
++
++#define RSBAC_ACL_GROUP_NAMELEN 16
++
++#define RSBAC_ACL_GROUP_VERSION 2
++
++struct rsbac_acl_group_entry_t
++ {
++ rsbac_acl_group_id_t id;
++ rsbac_uid_t owner;
++ enum rsbac_acl_group_type_t type;
++ char name[RSBAC_ACL_GROUP_NAMELEN];
++ };
++
++/**** syscalls ****/
++
++enum rsbac_acl_syscall_type_t
++ {
++ ACLC_set_acl_entry,
++ ACLC_remove_acl_entry,
++ ACLC_remove_acl,
++ ACLC_add_to_acl_entry,
++ ACLC_remove_from_acl_entry,
++ ACLC_set_mask,
++ ACLC_remove_user,
++ ACLC_none
++ };
++
++struct rsbac_acl_syscall_arg_t
++ {
++ enum rsbac_target_t target;
++ union rsbac_target_id_t tid;
++ enum rsbac_acl_subject_type_t subj_type;
++ rsbac_acl_subject_id_t subj_id;
++ rsbac_acl_rights_vector_t rights;
++ rsbac_time_t ttl;
++ };
++
++struct rsbac_acl_syscall_n_arg_t
++ {
++ enum rsbac_target_t target;
++ char __user * name;
++ enum rsbac_acl_subject_type_t subj_type;
++ rsbac_acl_subject_id_t subj_id;
++ rsbac_acl_rights_vector_t rights;
++ rsbac_time_t ttl;
++ };
++
++
++enum rsbac_acl_group_syscall_type_t
++ {
++ ACLGS_add_group,
++ ACLGS_change_group,
++ ACLGS_remove_group,
++ ACLGS_get_group_entry,
++ ACLGS_list_groups,
++ ACLGS_add_member,
++ ACLGS_remove_member,
++ ACLGS_get_user_groups,
++ ACLGS_get_group_members,
++ ACLGS_none
++ };
++
++struct rsbac_acl_add_group_arg_t
++ {
++ enum rsbac_acl_group_type_t type;
++ char __user * name;
++ rsbac_acl_group_id_t * group_id_p;
++ };
++
++struct rsbac_acl_change_group_arg_t
++ {
++ rsbac_acl_group_id_t id;
++ rsbac_uid_t owner;
++ enum rsbac_acl_group_type_t type;
++ char __user * name;
++ };
++
++struct rsbac_acl_remove_group_arg_t
++ {
++ rsbac_acl_group_id_t id;
++ };
++
++struct rsbac_acl_get_group_entry_arg_t
++ {
++ rsbac_acl_group_id_t id;
++ struct rsbac_acl_group_entry_t __user * entry_p;
++ };
++
++struct rsbac_acl_list_groups_arg_t
++ {
++ rsbac_boolean_t include_global;
++ struct rsbac_acl_group_entry_t __user * group_entry_array;
++ u_int maxnum;
++ };
++
++struct rsbac_acl_add_member_arg_t
++ {
++ rsbac_acl_group_id_t group;
++ rsbac_uid_t user;
++ rsbac_time_t ttl;
++ };
++
++struct rsbac_acl_remove_member_arg_t
++ {
++ rsbac_acl_group_id_t group;
++ rsbac_uid_t user;
++ };
++
++struct rsbac_acl_get_user_groups_arg_t
++ {
++ rsbac_uid_t user;
++ rsbac_acl_group_id_t __user * group_array;
++ rsbac_time_t __user * ttl_array;
++ u_int maxnum;
++ };
++
++struct rsbac_acl_get_group_members_arg_t
++ {
++ rsbac_acl_group_id_t group;
++ rsbac_uid_t __user * user_array;
++ rsbac_time_t __user * ttl_array;
++ u_int maxnum;
++ };
++
++union rsbac_acl_group_syscall_arg_t
++ {
++ struct rsbac_acl_add_group_arg_t add_group;
++ struct rsbac_acl_change_group_arg_t change_group;
++ struct rsbac_acl_remove_group_arg_t remove_group;
++ struct rsbac_acl_get_group_entry_arg_t get_group_entry;
++ struct rsbac_acl_list_groups_arg_t list_groups;
++ struct rsbac_acl_add_member_arg_t add_member;
++ struct rsbac_acl_remove_member_arg_t remove_member;
++ struct rsbac_acl_get_user_groups_arg_t get_user_groups;
++ struct rsbac_acl_get_group_members_arg_t get_group_members;
++ };
++
++#endif
+diff --git a/include/rsbac/adf.h b/include/rsbac/adf.h
+new file mode 100644
+index 0000000..0a7aa68
+--- /dev/null
++++ b/include/rsbac/adf.h
+@@ -0,0 +1,138 @@
++/******************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2009: */
++/* Amon Ott <ao@rsbac.org> */
++/* API: for Access Control */
++/* Decision Facility */
++/* Last modified: 16/Jan/2009 */
++/******************************* */
++
++#ifndef __RSBAC_ADF_H
++#define __RSBAC_ADF_H
++
++#include <linux/init.h>
++#include <linux/binfmts.h>
++#include <asm/page.h>
++#include <rsbac/types.h>
++#include <rsbac/debug.h>
++#include <rsbac/fs.h>
++
++/***************************************************/
++/* Prototypes */
++/***************************************************/
++
++/* Init function */
++#ifdef CONFIG_RSBAC_INIT_DELAY
++extern void rsbac_init_adf(void);
++#else
++extern void rsbac_init_adf(void) __init;
++#endif
++
++/* This function is the internal decision function, called by the wrapper */
++/* below. It allows ignoring a certain module (last parameter), e.g. to ask */
++/* all _other_ modules, but not the calling module, avoiding a cycle. */
++
++extern enum rsbac_adf_req_ret_t
++ rsbac_adf_request_int(enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t * tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t * attr_val_p,
++ enum rsbac_switch_target_t ignore_module);
++
++/*********************************************************************/
++/* rsbac_adf_request() */
++/* This function is the main decision function, called from the AEF. */
++/* It is a simple wrapper to the internal function, setting */
++/* ignore_module to SW_NONE. */
++
++static inline enum rsbac_adf_req_ret_t
++ rsbac_adf_request( enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val)
++ {
++ return rsbac_adf_request_int(request,
++ caller_pid,
++ target,
++ &tid,
++ attr,
++ &attr_val,
++ SW_NONE);
++ }
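++
++/* Illustrative sketch, not part of the original header: an AEF hook would
++ * typically call the wrapper above and map anything but a positive decision
++ * to -EPERM.  The return value GRANTED is an assumption here, since
++ * enum rsbac_adf_req_ret_t is defined elsewhere; target, attribute and
++ * their values are passed through untouched because their concrete members
++ * are not part of this header. */
++static inline int example_aef_check(enum rsbac_adf_request_t request,
++                                    rsbac_pid_t caller_pid,
++                                    enum rsbac_target_t target,
++                                    union rsbac_target_id_t tid,
++                                    enum rsbac_attribute_t attr,
++                                    union rsbac_attribute_value_t attr_val)
++{
++        if (rsbac_adf_request(request, caller_pid, target, tid,
++                              attr, attr_val) != GRANTED)
++                return -EPERM;
++        return 0;
++}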
++
++
++/* If the request returned granted and the operation is performed, */
++/* the following function is called by the AEF to get all aci set correctly. */
++/* The second instance of target specification is the new target, if one has */
++/* been created, otherwise its values are ignored. */
++/* It returns 0 on success and an error from error.h otherwise. */
++
++extern int rsbac_adf_set_attr( enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t);
++
++#include <linux/types.h>
++#include <linux/dcache.h>
++
++int rsbac_sec_del(struct dentry * dentry_p, u_int may_sync);
++
++int rsbac_sec_trunc(struct dentry * dentry_p,
++ loff_t new_len, loff_t old_len);
++
++/* This function changes the symlink content by adding a suffix, if
++ * requested. It returns NULL if unchanged, or a pointer to a
++ * kmalloc'd new char * otherwise, which has to be kfree'd after use.
++ */
++char * rsbac_symlink_redirect(
++ struct inode * inode_p,
++ const char * name,
++ u_int maxlen);
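++
++/* Illustrative caller sketch, not part of the original header: the return
++ * convention above (NULL means "unchanged", otherwise a kmalloc'd string
++ * that must be kfree'd) leads to the following pattern.  The helper name,
++ * the fixed caller-provided buffer and the availability of strlcpy()/kfree()
++ * at the call site are assumptions made for the example. */
++static inline void example_consume_redirect(struct inode *inode_p,
++                                            const char *name, u_int maxlen,
++                                            char *buf)
++{
++        char *redirected = rsbac_symlink_redirect(inode_p, name, maxlen);
++
++        if (redirected) {
++                strlcpy(buf, redirected, maxlen);
++                kfree(redirected);
++        } else {
++                strlcpy(buf, name, maxlen);
++        }
++}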
++
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_PART
++extern int rsbac_dac_part_disabled(struct dentry * dentry_p);
++#endif
++
++#ifdef CONFIG_RSBAC_FAKE_ROOT_UID
++extern rsbac_uid_t rsbac_fake_uid(void);
++extern rsbac_uid_t rsbac_fake_euid(void);
++extern int rsbac_uid_faked(void);
++#endif
++
++int rsbac_cap_check_envp(struct linux_binprm *bprm);
++
++extern int rsbac_handle_filldir(const struct file *file, const char *name, const unsigned int namlen, const ino_t ino);
++
++int rsbac_set_audit_uid(rsbac_uid_t uid);
++
++/* Mostly copied from drivers/char/mem.c */
++static inline rsbac_boolean_t rsbac_is_videomem(unsigned long pfn, unsigned long size)
++{
++/* Intel architecture is a security disaster */
++#if defined X86_64 || defined X86
++
++ u64 from = ((u64)pfn) << PAGE_SHIFT;
++ u64 to = from + size;
++ u64 cursor = from;
++
++ while (cursor < to) {
++ if (!devmem_is_allowed(pfn)) {
++ return FALSE;
++ }
++ cursor += PAGE_SIZE;
++ pfn++;
++ }
++ return TRUE;
++#endif
++ return TRUE;
++};
++
++#endif
+diff --git a/include/rsbac/adf_main.h b/include/rsbac/adf_main.h
+new file mode 100644
+index 0000000..ffba0b8
+--- /dev/null
++++ b/include/rsbac/adf_main.h
+@@ -0,0 +1,836 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2010: */
++/* Amon Ott <ao@rsbac.org> */
++/* Data Structs etc. for Access */
++/* Control Decision Facility */
++/* Last modified: 21/May/2010 */
++/************************************ */
++
++#ifndef __RSBAC_ADF_MAIN_H
++#define __RSBAC_ADF_MAIN_H
++
++#include <linux/sched.h>
++#include <rsbac/types.h>
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++#include <rsbac/reg.h>
++#endif
++
++#ifdef CONFIG_RSBAC_SECDEL
++#include <linux/dcache.h>
++#endif
++
++/***************************************************/
++/* Global Variables */
++/***************************************************/
++
++extern __u64 rsbac_adf_request_count[T_NONE+1];
++extern __u64 rsbac_adf_set_attr_count[T_NONE+1];
++#ifdef CONFIG_RSBAC_XSTATS
++extern __u64 rsbac_adf_request_xcount[T_NONE+1][R_NONE];
++extern __u64 rsbac_adf_set_attr_xcount[T_NONE+1][R_NONE];
++#endif
++
++/* Bitmasks to ignore some requests on some modules */
++
++#ifdef CONFIG_RSBAC_MAC
++#define RSBAC_MAC_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_ADD_TO_KERNEL) | \
++ ((rsbac_request_vector_t) 1 << R_ALTER) | \
++ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHDIR) | \
++ ((rsbac_request_vector_t) 1 << R_CLOSE) | \
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_DELETE) | \
++ ((rsbac_request_vector_t) 1 << R_EXECUTE) | \
++ ((rsbac_request_vector_t) 1 << R_GET_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_LINK_HARD) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ACCESS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MOUNT) | \
++ ((rsbac_request_vector_t) 1 << R_READ) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_READ_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_REMOVE_FROM_KERNEL) | \
++ ((rsbac_request_vector_t) 1 << R_RENAME) | \
++ ((rsbac_request_vector_t) 1 << R_SEARCH) | \
++ ((rsbac_request_vector_t) 1 << R_SEND_SIGNAL) | \
++ ((rsbac_request_vector_t) 1 << R_SHUTDOWN) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_LOG) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_MODULE) | \
++ ((rsbac_request_vector_t) 1 << R_TRACE) | \
++ ((rsbac_request_vector_t) 1 << R_TRUNCATE) | \
++ ((rsbac_request_vector_t) 1 << R_UMOUNT) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_MAP_EXEC) | \
++ ((rsbac_request_vector_t) 1 << R_BIND) | \
++ ((rsbac_request_vector_t) 1 << R_LISTEN) | \
++ ((rsbac_request_vector_t) 1 << R_ACCEPT) | \
++ ((rsbac_request_vector_t) 1 << R_CONNECT) | \
++ ((rsbac_request_vector_t) 1 << R_SEND) | \
++ ((rsbac_request_vector_t) 1 << R_RECEIVE) \
++ )
++#define RSBAC_MAC_SET_ATTR_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CLONE) | \
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_DELETE) | \
++ ((rsbac_request_vector_t) 1 << R_EXECUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MOUNT) | \
++ ((rsbac_request_vector_t) 1 << R_READ) | \
++ ((rsbac_request_vector_t) 1 << R_READ_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_READ_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_SEARCH) | \
++ ((rsbac_request_vector_t) 1 << R_TRACE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_BIND) | \
++ ((rsbac_request_vector_t) 1 << R_LISTEN) | \
++ ((rsbac_request_vector_t) 1 << R_ACCEPT) | \
++ ((rsbac_request_vector_t) 1 << R_CONNECT) | \
++ ((rsbac_request_vector_t) 1 << R_SEND) | \
++ ((rsbac_request_vector_t) 1 << R_RECEIVE) \
++ )
++#endif
++
++#ifdef CONFIG_RSBAC_PM
++#define RSBAC_PM_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_ADD_TO_KERNEL) | \
++ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CLONE) | \
++ ((rsbac_request_vector_t) 1 << R_CLOSE) | \
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_DELETE) | \
++ ((rsbac_request_vector_t) 1 << R_EXECUTE) | \
++ ((rsbac_request_vector_t) 1 << R_LINK_HARD) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ACCESS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MOUNT) | \
++ ((rsbac_request_vector_t) 1 << R_READ) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_READ_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_REMOVE_FROM_KERNEL) | \
++ ((rsbac_request_vector_t) 1 << R_RENAME) | \
++ ((rsbac_request_vector_t) 1 << R_SEND_SIGNAL) | \
++ ((rsbac_request_vector_t) 1 << R_SHUTDOWN) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_LOG) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_MODULE) | \
++ ((rsbac_request_vector_t) 1 << R_TERMINATE) | \
++ ((rsbac_request_vector_t) 1 << R_TRACE) | \
++ ((rsbac_request_vector_t) 1 << R_TRUNCATE) | \
++ ((rsbac_request_vector_t) 1 << R_UMOUNT) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_MAP_EXEC) \
++ )
++#define RSBAC_PM_SET_ATTR_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_CLONE) | \
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_EXECUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ) | \
++ ((rsbac_request_vector_t) 1 << R_READ_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_READ_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE_OPEN) \
++ )
++#endif
++
++#ifdef CONFIG_RSBAC_DAZ
++#define RSBAC_DAZ_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_CLOSE) | \
++ ((rsbac_request_vector_t) 1 << R_DELETE) | \
++ ((rsbac_request_vector_t) 1 << R_EXECUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_READ_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_MODULE) \
++ )
++#define RSBAC_DAZ_SET_ATTR_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_CLONE) | \
++ ((rsbac_request_vector_t) 1 << R_CLOSE) | \
++ ((rsbac_request_vector_t) 1 << R_DELETE) | \
++ ((rsbac_request_vector_t) 1 << R_EXECUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_READ_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_MODULE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE_OPEN) )
++#endif
++
++#ifdef CONFIG_RSBAC_FF
++#if defined(CONFIG_RSBAC_FF_UM_PROT)
++#define RSBAC_FF_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHDIR) | \
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_DELETE) | \
++ ((rsbac_request_vector_t) 1 << R_EXECUTE) | \
++ ((rsbac_request_vector_t) 1 << R_GET_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_LINK_HARD) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ACCESS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MOUNT) | \
++ ((rsbac_request_vector_t) 1 << R_READ) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_READ_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_RENAME) | \
++ ((rsbac_request_vector_t) 1 << R_SEARCH) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_LOG) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_MODULE) | \
++ ((rsbac_request_vector_t) 1 << R_TRUNCATE) | \
++ ((rsbac_request_vector_t) 1 << R_UMOUNT) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_MAP_EXEC) \
++ )
++#else
++#define RSBAC_FF_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHDIR) | \
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_DELETE) | \
++ ((rsbac_request_vector_t) 1 << R_EXECUTE) | \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_LINK_HARD) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ACCESS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MOUNT) | \
++ ((rsbac_request_vector_t) 1 << R_READ) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_READ_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_RENAME) | \
++ ((rsbac_request_vector_t) 1 << R_SEARCH) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_LOG) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_MODULE) | \
++ ((rsbac_request_vector_t) 1 << R_TRUNCATE) | \
++ ((rsbac_request_vector_t) 1 << R_UMOUNT) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_MAP_EXEC) \
++ )
++#endif
++#endif
++
++#ifdef CONFIG_RSBAC_AUTH
++#if defined(CONFIG_RSBAC_AUTH_UM_PROT)
++#define RSBAC_AUTH_REQUEST_VECTOR_UM (\
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_DELETE) | \
++ ((rsbac_request_vector_t) 1 << R_GET_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_RENAME) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) )
++#else
++#define RSBAC_AUTH_REQUEST_VECTOR_UM 0
++#endif
++#if defined(CONFIG_RSBAC_AUTH_UM_PROT) || defined(CONFIG_RSBAC_AUTH_GROUP)
++#define RSBAC_AUTH_REQUEST_VECTOR_CG ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP)
++#else
++#define RSBAC_AUTH_REQUEST_VECTOR_CG 0
++#endif
++#if defined(CONFIG_RSBAC_AUTH_GROUP) && defined (CONFIG_RSBAC_AUTH_DAC_GROUP)
++#define RSBAC_AUTH_REQUEST_VECTOR_DG ( \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_GROUP) )
++#else
++#define RSBAC_AUTH_REQUEST_VECTOR_DG 0
++#endif
++#if defined (CONFIG_RSBAC_AUTH_DAC_OWNER)
++#define RSBAC_AUTH_REQUEST_VECTOR_DO ( \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_OWNER) )
++#else
++#define RSBAC_AUTH_REQUEST_VECTOR_DO 0
++#endif
++#if defined (CONFIG_RSBAC_AUTH_AUTH_PROT)
++#define RSBAC_AUTH_REQUEST_VECTOR_AA ( \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_LOG) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_MODULE) )
++#else
++#define RSBAC_AUTH_REQUEST_VECTOR_AA 0
++#endif
++
++#define RSBAC_AUTH_REQUEST_VECTOR (\
++ RSBAC_AUTH_REQUEST_VECTOR_UM | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ RSBAC_AUTH_REQUEST_VECTOR_CG | \
++ RSBAC_AUTH_REQUEST_VECTOR_DG | \
++ RSBAC_AUTH_REQUEST_VECTOR_DO | \
++ RSBAC_AUTH_REQUEST_VECTOR_AA | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) \
++ )
++
++#if defined (CONFIG_RSBAC_AUTH_AUTH_PROT)
++#define RSBAC_AUTH_SET_ATTR_VECTOR_AA ( \
++ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_DELETE) | \
++ ((rsbac_request_vector_t) 1 << R_LINK_HARD) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ACCESS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_READ_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_RENAME) | \
++ ((rsbac_request_vector_t) 1 << R_TRUNCATE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE_OPEN) )
++#else
++#define RSBAC_AUTH_SET_ATTR_VECTOR_AA 0
++#endif
++#define RSBAC_AUTH_SET_ATTR_VECTOR (\
++ RSBAC_AUTH_SET_ATTR_VECTOR_AA | \
++ ((rsbac_request_vector_t) 1 << R_CLONE) | \
++ ((rsbac_request_vector_t) 1 << R_EXECUTE) \
++ )
++#endif
++
++#ifdef CONFIG_RSBAC_CAP
++#ifdef CONFIG_RSBAC_CAP_PROC_HIDE
++#define RSBAC_CAP_REQUEST_VECTOR ( \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_SEND_SIGNAL) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_LOG) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_MODULE) | \
++ ((rsbac_request_vector_t) 1 << R_TRACE) )
++#else
++#define RSBAC_CAP_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_LOG) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_MODULE) )
++#endif
++#if defined (CONFIG_RSBAC_CAP_PROC_HIDE) || defined(CONFIG_RSBAC_CAP_LOG_MISSING)
++#define RSBAC_CAP_SET_ATTR_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CLONE) | \
++ ((rsbac_request_vector_t) 1 << R_EXECUTE) )
++#else
++#define RSBAC_CAP_SET_ATTR_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_EXECUTE) )
++#endif
++#endif
++
++#ifdef CONFIG_RSBAC_JAIL
++#define RSBAC_JAIL_REQUEST_VECTOR ( \
++ ((rsbac_request_vector_t) 1 << R_ADD_TO_KERNEL) | \
++ ((rsbac_request_vector_t) 1 << R_ALTER) | \
++ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_DELETE) | \
++ ((rsbac_request_vector_t) 1 << R_GET_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MOUNT) | \
++ ((rsbac_request_vector_t) 1 << R_READ) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_READ_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_REMOVE_FROM_KERNEL) | \
++ ((rsbac_request_vector_t) 1 << R_SEND_SIGNAL) | \
++ ((rsbac_request_vector_t) 1 << R_SHUTDOWN) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_LOG) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_MODULE) | \
++ ((rsbac_request_vector_t) 1 << R_TRACE) | \
++ ((rsbac_request_vector_t) 1 << R_UMOUNT) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_BIND) | \
++ ((rsbac_request_vector_t) 1 << R_LISTEN) | \
++ ((rsbac_request_vector_t) 1 << R_ACCEPT) | \
++ ((rsbac_request_vector_t) 1 << R_CONNECT) | \
++ ((rsbac_request_vector_t) 1 << R_SEND) | \
++ ((rsbac_request_vector_t) 1 << R_RECEIVE) | \
++ ((rsbac_request_vector_t) 1 << R_NET_SHUTDOWN) )
++#define RSBAC_JAIL_SET_ATTR_VECTOR ( \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CLONE) | \
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_EXECUTE) | \
++ ((rsbac_request_vector_t) 1 << R_CONNECT) | \
++ ((rsbac_request_vector_t) 1 << R_BIND) )
++#endif
++
++#ifdef CONFIG_RSBAC_PAX
++#define RSBAC_PAX_REQUEST_VECTOR ( \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_LOG) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_MODULE) )
++#endif
++
++#ifdef CONFIG_RSBAC_RES
++#define RSBAC_RES_REQUEST_VECTOR ( \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_LOG) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_MODULE) )
++#define RSBAC_RES_SET_ATTR_VECTOR ( \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_EXECUTE) )
++#endif
++
++/***************************************************/
++/* General Prototypes */
++/***************************************************/
++
++/* We call this function in kernel/sched.c */
++extern struct task_struct * find_process_by_pid(pid_t);
++
++#ifdef CONFIG_RSBAC_DEBUG
++extern enum rsbac_adf_req_ret_t
++ rsbac_adf_request_check (enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t * tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t * attr_val_p,
++ rsbac_uid_t owner);
++
++extern int rsbac_adf_set_attr_check( enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++#endif
++
++extern enum rsbac_adf_req_ret_t
++ adf_and_plus(enum rsbac_adf_req_ret_t res1,
++ enum rsbac_adf_req_ret_t res2);
++
++/***************************************************/
++/* Module Prototypes */
++/***************************************************/
++
++#if !defined(CONFIG_RSBAC_MAINT)
++
++/******* MAC ********/
++
++#ifdef CONFIG_RSBAC_MAC
++#ifdef CONFIG_RSBAC_SWITCH_MAC
++extern rsbac_boolean_t rsbac_switch_mac;
++#endif
++
++extern enum rsbac_adf_req_ret_t rsbac_adf_request_mac(
++ enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++extern int rsbac_adf_set_attr_mac( enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++#endif /* MAC */
++
++
++/******* PM ********/
++
++#ifdef CONFIG_RSBAC_PM
++#ifdef CONFIG_RSBAC_SWITCH_PM
++extern rsbac_boolean_t rsbac_switch_pm;
++#endif
++
++extern enum rsbac_adf_req_ret_t rsbac_adf_request_pm(
++ enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++extern int rsbac_adf_set_attr_pm ( enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++#ifdef CONFIG_RSBAC_SECDEL
++extern rsbac_boolean_t rsbac_need_overwrite_pm(struct dentry * dentry_p);
++#endif
++
++#endif /* PM */
++
++/******* DAZ ********/
++
++#ifdef CONFIG_RSBAC_DAZ
++#ifdef CONFIG_RSBAC_SWITCH_DAZ
++extern rsbac_boolean_t rsbac_switch_daz;
++#endif
++
++extern enum rsbac_adf_req_ret_t rsbac_adf_request_daz(
++ enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++extern int rsbac_adf_set_attr_daz (enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++#endif /* DAZ */
++
++/******* FF ********/
++
++#ifdef CONFIG_RSBAC_FF
++#ifdef CONFIG_RSBAC_SWITCH_FF
++extern rsbac_boolean_t rsbac_switch_ff;
++#endif
++
++extern enum rsbac_adf_req_ret_t rsbac_adf_request_ff(
++ enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++extern int rsbac_adf_set_attr_ff ( enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++#ifdef CONFIG_RSBAC_SECDEL
++extern rsbac_boolean_t rsbac_need_overwrite_ff(struct dentry * dentry_p);
++#endif
++
++#endif /* FF */
++
++/******* RC ********/
++
++#ifdef CONFIG_RSBAC_RC
++#ifdef CONFIG_RSBAC_SWITCH_RC
++extern rsbac_boolean_t rsbac_switch_rc;
++#endif
++
++extern enum rsbac_adf_req_ret_t rsbac_adf_request_rc(
++ enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++extern int rsbac_adf_set_attr_rc ( enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++/* Secure delete/truncate for this module */
++#ifdef CONFIG_RSBAC_SECDEL
++extern rsbac_boolean_t rsbac_need_overwrite_rc(struct dentry * dentry_p);
++#endif
++#endif /* RC */
++
++/****** AUTH *******/
++
++#ifdef CONFIG_RSBAC_AUTH
++#ifdef CONFIG_RSBAC_SWITCH_AUTH
++extern rsbac_boolean_t rsbac_switch_auth;
++#endif
++
++extern enum rsbac_adf_req_ret_t rsbac_adf_request_auth(
++ enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++extern int rsbac_adf_set_attr_auth(enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++#endif /* AUTH */
++
++/****** ACL *******/
++
++#ifdef CONFIG_RSBAC_ACL
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++extern rsbac_boolean_t rsbac_switch_acl;
++#endif
++
++extern enum rsbac_adf_req_ret_t rsbac_adf_request_acl(
++ enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++extern int rsbac_adf_set_attr_acl (enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++#endif /* ACL */
++
++/****** CAP *******/
++
++#ifdef CONFIG_RSBAC_CAP
++#ifdef CONFIG_RSBAC_SWITCH_CAP
++extern rsbac_boolean_t rsbac_switch_cap;
++#endif
++
++extern enum rsbac_adf_req_ret_t rsbac_adf_request_cap(
++ enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++extern int rsbac_adf_set_attr_cap (enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++#endif /* CAP */
++
++/****** JAIL *******/
++
++#ifdef CONFIG_RSBAC_JAIL
++#ifdef CONFIG_RSBAC_SWITCH_JAIL
++extern rsbac_boolean_t rsbac_switch_jail;
++#endif
++
++extern enum rsbac_adf_req_ret_t rsbac_adf_request_jail(
++ enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++extern int rsbac_adf_set_attr_jail(enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++#endif /* JAIL */
++
++/******* PAX ********/
++
++#ifdef CONFIG_RSBAC_PAX
++#ifdef CONFIG_RSBAC_SWITCH_PAX
++extern rsbac_boolean_t rsbac_switch_pax;
++#endif
++
++extern enum rsbac_adf_req_ret_t rsbac_adf_request_pax(
++ enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++extern int rsbac_adf_set_attr_pax( enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++#endif /* PAX */
++
++
++/****** RES *******/
++
++#ifdef CONFIG_RSBAC_RES
++#ifdef CONFIG_RSBAC_SWITCH_RES
++extern rsbac_boolean_t rsbac_switch_res;
++#endif
++
++extern enum rsbac_adf_req_ret_t rsbac_adf_request_res(
++ enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++extern int rsbac_adf_set_attr_res (enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++#ifdef CONFIG_RSBAC_SECDEL
++extern inline rsbac_boolean_t rsbac_need_overwrite_res(struct dentry * dentry_p)
++ {
++ return FALSE;
++ }
++#endif
++#endif /* RES */
++
++/****** REG *******/
++
++#if defined(CONFIG_RSBAC_REG)
++extern enum rsbac_adf_req_ret_t rsbac_adf_request_reg(
++ enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++extern int rsbac_adf_set_attr_reg (enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++#ifdef CONFIG_RSBAC_SECDEL
++extern inline rsbac_boolean_t rsbac_need_overwrite_reg(struct dentry * dentry_p)
++ {
++ return FALSE;
++ }
++#endif
++#endif /* REG */
++
++#endif /* !MAINT */
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++/* Init */
++#ifdef CONFIG_RSBAC_INIT_DELAY
++void rsbac_reg_init(void);
++#else
++void rsbac_reg_init(void) __init;
++#endif
++
++/* mounting and umounting */
++extern int rsbac_mount_reg(kdev_t kdev);
++extern int rsbac_umount_reg(kdev_t kdev);
++
++/* RSBAC attribute saving to disk can be triggered from outside
++ * param: call lock_kernel() before writing?
++ */
++#if defined(CONFIG_RSBAC_AUTO_WRITE)
++extern int rsbac_write_reg(void);
++#endif /* CONFIG_RSBAC_AUTO_WRITE */
++
++/* Status checking */
++extern int rsbac_check_reg(int correct, int check_inode);
++
++#endif /* REG */
++
++#endif /* End of adf_main.h */
+diff --git a/include/rsbac/adf_syshelpers.h b/include/rsbac/adf_syshelpers.h
+new file mode 100644
+index 0000000..ccec900
+--- /dev/null
++++ b/include/rsbac/adf_syshelpers.h
+@@ -0,0 +1,285 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2012: */
++/* Amon Ott <ao@rsbac.org> */
++/* */
++/* Helper Prototypes for model */
++/* specific system calls */
++/* Last modified: 07/May/2012 */
++/************************************ */
++
++#ifndef __RSBAC_ADF_SYSHELPERS_H
++#define __RSBAC_ADF_SYSHELPERS_H
++
++/* #include <linux/sched.h> */
++#include <rsbac/types.h>
++
++/***************************************************/
++/* Global Variables */
++/***************************************************/
++
++/***************************************************/
++/* General Prototypes */
++/***************************************************/
++
++/***************************************************/
++/* Module Prototypes */
++/***************************************************/
++
++/******* MAC ********/
++
++#if defined(CONFIG_RSBAC_MAC) || defined(CONFIG_RSBAC_MAC_MAINT)
++int rsbac_mac_set_curr_level(rsbac_security_level_t level,
++ rsbac_mac_category_vector_t categories);
++
++int rsbac_mac_get_curr_level(rsbac_security_level_t * level_p,
++ rsbac_mac_category_vector_t * categories_p);
++
++int rsbac_mac_get_max_level(rsbac_security_level_t * level_p,
++ rsbac_mac_category_vector_t * categories_p);
++
++int rsbac_mac_get_min_level(rsbac_security_level_t * level_p,
++ rsbac_mac_category_vector_t * categories_p);
++
++int rsbac_mac_add_p_tru(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ rsbac_uid_t uid,
++ rsbac_time_t ttl);
++
++int rsbac_mac_remove_p_tru(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ rsbac_uid_t uid);
++
++int rsbac_mac_add_f_tru(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_mac_file_t file,
++ rsbac_uid_t uid,
++ rsbac_time_t ttl);
++
++int rsbac_mac_remove_f_tru(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_mac_file_t file,
++ rsbac_uid_t uid);
++
++#endif /* MAC */
++
++
++/******* PM ********/
++
++#if defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_PM_MAINT)
++/* This function is called via sys_rsbac_pm() system call */
++/* and serves as a dispatcher for all PM dependent system calls. */
++
++int rsbac_pm(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_pm_function_type_t,
++ union rsbac_pm_function_param_t,
++ rsbac_pm_tkt_id_t);
++
++int rsbac_pm_change_current_task(rsbac_pm_task_id_t);
++
++int rsbac_pm_create_file(const char *, /* filename */
++ int, /* creation mode */
++ rsbac_pm_object_class_id_t); /* class for file */
++#endif /* PM */
++
++/******* FF ********/
++
++/******* RC ********/
++
++#if defined(CONFIG_RSBAC_RC) || defined(CONFIG_RSBAC_RC_MAINT)
++/* These functions in adf/rc/syscalls.c are called via sys_* system calls */
++/* and check for validity before passing the call to the rc_data_structures. */
++
++/* All roles are always there, so instead of creation, we supply a copy for */
++/* initialization. There is always the well-defined role general to copy */
++extern int rsbac_rc_sys_copy_role (
++ rsbac_list_ta_number_t ta_number,
++ rsbac_rc_role_id_t from_role,
++ rsbac_rc_role_id_t to_role);
++
++extern int rsbac_rc_sys_copy_type (
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ rsbac_rc_type_id_t from_type,
++ rsbac_rc_type_id_t to_type);
++
++/* Getting item values */
++extern int rsbac_rc_sys_get_item (
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ union rsbac_rc_target_id_t tid,
++ union rsbac_rc_target_id_t subtid,
++ enum rsbac_rc_item_t item,
++ union rsbac_rc_item_value_t * value_p,
++ rsbac_time_t * ttl_p);
++
++/* Setting item values */
++extern int rsbac_rc_sys_set_item (
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ union rsbac_rc_target_id_t tid,
++ union rsbac_rc_target_id_t subtid,
++ enum rsbac_rc_item_t item,
++ union rsbac_rc_item_value_t value,
++ rsbac_time_t ttl);
++
++/* Set own role, if allowed ( = in role_comp vector of current role) */
++extern int rsbac_rc_sys_change_role (rsbac_rc_role_id_t role, char __user * pass);
++
++/* Getting own effective rights */
++int rsbac_rc_sys_get_eff_rights (
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ rsbac_rc_request_vector_t * request_vector,
++ rsbac_time_t * ttl_p);
++
++int rsbac_rc_sys_get_current_role (rsbac_rc_role_id_t * role_p);
++
++#endif /* RC || RC_MAINT */
++
++/****** AUTH *******/
++
++#if defined(CONFIG_RSBAC_AUTH) || defined(CONFIG_RSBAC_AUTH_MAINT)
++/* This function is called via sys_rsbac_auth_add_p_cap() system call */
++int rsbac_auth_add_p_cap(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range,
++ rsbac_time_t ttl);
++
++/* This function is called via sys_rsbac_auth_remove_p_cap() system call */
++int rsbac_auth_remove_p_cap(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range);
++
++/* This function is called via sys_rsbac_auth_add_f_cap() system call */
++int rsbac_auth_add_f_cap(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_auth_file_t file,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range,
++ rsbac_time_t ttl);
++
++/* This function is called via sys_rsbac_auth_remove_f_cap() system call */
++int rsbac_auth_remove_f_cap(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_auth_file_t file,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range);
++
++#endif /* AUTH || AUTH_MAINT */
++
++/****** REG *******/
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++/*
++ * System call dispatcher
++ * Returns 0 on success or -EINVALIDTARGET if the handle is invalid.
++ */
++
++int rsbac_reg_syscall(rsbac_reg_handle_t handle,
++ void __user * arg);
++#endif /* REG || REG_MAINT */
++
++/****** ACL *******/
++
++#if defined(CONFIG_RSBAC_ACL) || defined(CONFIG_RSBAC_ACL_MAINT)
++int rsbac_acl_sys_set_acl_entry(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id,
++ rsbac_acl_rights_vector_t rights,
++ rsbac_time_t ttl);
++
++int rsbac_acl_sys_remove_acl_entry(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id);
++
++int rsbac_acl_sys_remove_acl(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid);
++
++int rsbac_acl_sys_add_to_acl_entry(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id,
++ rsbac_acl_rights_vector_t rights,
++ rsbac_time_t ttl);
++
++int rsbac_acl_sys_remove_from_acl_entry(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id,
++ rsbac_acl_rights_vector_t rights);
++
++int rsbac_acl_sys_set_mask(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ rsbac_acl_rights_vector_t mask);
++
++int rsbac_acl_sys_remove_user(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t uid);
++
++int rsbac_acl_sys_get_mask(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ rsbac_acl_rights_vector_t * mask_p);
++
++
++int rsbac_acl_sys_get_rights(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id,
++ rsbac_acl_rights_vector_t * rights_p,
++ rsbac_boolean_t inherit);
++
++int rsbac_acl_sys_get_tlist(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ struct rsbac_acl_entry_t ** entry_pp,
++ rsbac_time_t ** ttl_pp);
++
++int rsbac_acl_sys_group(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_acl_group_syscall_type_t call,
++ union rsbac_acl_group_syscall_arg_t arg);
++
++#endif /* ACL || ACL_MAINT */
++
++/****** JAIL *******/
++
++#if defined(CONFIG_RSBAC_JAIL)
++/* This function is called via sys_rsbac_jail() system call */
++int rsbac_jail_sys_jail(rsbac_version_t version,
++ char __user * path,
++ rsbac_jail_ip_t ip,
++ rsbac_jail_flags_t flags,
++ rsbac_cap_vector_t max_caps,
++ rsbac_jail_scd_vector_t scd_get,
++ rsbac_jail_scd_vector_t scd_modify);
++#endif
++
++#endif /* End of adf_syshelpers.h */
+diff --git a/include/rsbac/auth.h b/include/rsbac/auth.h
+new file mode 100644
+index 0000000..b213a03
+--- /dev/null
++++ b/include/rsbac/auth.h
+@@ -0,0 +1,154 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2005: */
++/* Amon Ott <ao@rsbac.org> */
++/* API: Data structures */
++/* and functions for Access */
++/* Control Information / AUTH */
++/* Last modified: 09/Feb/2005 */
++/************************************ */
++
++#ifndef __RSBAC_AUTH_H
++#define __RSBAC_AUTH_H
++
++#include <linux/init.h>
++#include <rsbac/types.h>
++
++/***************************************************/
++/* General Prototypes */
++/***************************************************/
++
++/* All functions return 0 if no error occurred, and a negative error code */
++/* otherwise. The error codes are defined in rsbac_error.h. */
++
++/****************************************************************************/
++/* Initialization, including ACI restoration for all mounted devices from */
++/* disk. After this call, all ACI is kept in memory for performance reasons, */
++/* but user and file/dir object ACI are written to disk on every change. */
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++extern int rsbac_init_auth(void);
++#else
++extern int rsbac_init_auth(void) __init;
++#endif
++
++/* mounting and umounting */
++int rsbac_mount_auth(kdev_t kdev);
++int rsbac_umount_auth(kdev_t kdev);
++
++/* Some information about the current status is also available */
++extern int rsbac_stats_auth(void);
++
++/* Status checking */
++extern int rsbac_check_auth(int correct, int check_inode);
++
++/* RSBAC attribute saving to disk can be triggered from outside
++ * param: call lock_kernel() before writing?
++ */
++#if defined(CONFIG_RSBAC_MAINT) || defined(CONFIG_RSBAC_AUTO_WRITE)
++extern int rsbac_write_auth(rsbac_boolean_t);
++#endif /* CONFIG_RSBAC_AUTO_WRITE */
++
++/************************************************* */
++/* Access functions */
++/************************************************* */
++
++/* All these procedures handle the semaphores to protect the targets during */
++/* access. */
++/* Trying to access a set that was never created or has been removed returns an error! */
++
++/* rsbac_auth_add_to_p_capset */
++/* Add a set member to a set sublist. Set behaviour: also returns success */
++/* if the member was already in the set! */
++
++int rsbac_auth_add_to_p_capset(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range,
++ rsbac_time_t ttl);
++
++int rsbac_auth_add_to_f_capset(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_auth_file_t file,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range,
++ rsbac_time_t ttl);
++
++/* rsbac_auth_remove_from_p_capset */
++/* Remove a set member from a sublist. Set behaviour: returns no error if */
++/* the member is not in the list. */
++
++int rsbac_auth_remove_from_p_capset(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range);
++
++int rsbac_auth_remove_from_f_capset(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_auth_file_t file,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range);
++
++/* rsbac_auth_clear_p_capset */
++/* Remove all set members from a sublist. Set behaviour: returns no error */
++/* if the list is empty. */
++
++int rsbac_auth_clear_p_capset(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ enum rsbac_auth_cap_type_t cap_type);
++
++int rsbac_auth_clear_f_capset(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_auth_file_t file,
++ enum rsbac_auth_cap_type_t cap_type);
++
++/* rsbac_auth_p_capset_member */
++/* Return truth value: whether the member is in the set */
++
++rsbac_boolean_t rsbac_auth_p_capset_member(rsbac_pid_t pid,
++ enum rsbac_auth_cap_type_t cap_type,
++ rsbac_uid_t member);
++
++/* rsbac_auth_remove_p_capset */
++/* Remove a full set. After this call, the given id can only be used for */
++/* creating a new set; anything else returns an error. */
++/* To empty an existing set, use rsbac_auth_clear_p_capset. */
++
++int rsbac_auth_remove_p_capsets(rsbac_pid_t pid);
++
++int rsbac_auth_remove_f_capsets(rsbac_auth_file_t file);
++
++/* rsbac_auth_copy_fp_capset */
++/* copy a file capset to a process capset */
++int rsbac_auth_copy_fp_capset(rsbac_auth_file_t file,
++ rsbac_pid_t p_cap_set_id);
++
++/* rsbac_auth_copy_pp_capset */
++/* copy a process capset to another process capset */
++int rsbac_auth_copy_pp_capset(rsbac_pid_t old_p_set_id,
++ rsbac_pid_t new_p_set_id);
++
++/* rsbac_auth_get_f_caplist */
++/* copy a file/dir capset to an array of length 2 * maxnum (first+last), */
++/* returns number of caps copied */
++int rsbac_auth_get_f_caplist(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_auth_file_t file,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t **caplist_p,
++ rsbac_time_t **ttllist_p);
++
++/* rsbac_auth_get_p_caplist */
++/* copy a process capset to an array of length 2 * maxnum (first+last); */
++/* returns the number of caps copied */
++int rsbac_auth_get_p_caplist(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t **caplist_p,
++ rsbac_time_t **ttllist_p);
++
++#endif
+diff --git a/include/rsbac/auth_data_structures.h b/include/rsbac/auth_data_structures.h
+new file mode 100644
+index 0000000..4ffaa20
+--- /dev/null
++++ b/include/rsbac/auth_data_structures.h
+@@ -0,0 +1,97 @@
++/**************************************/
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2007: */
++/* Amon Ott <ao@rsbac.org> */
++/* Data structures / AUTH */
++/* Last modified: 16/Sep/2007 */
++/**************************************/
++
++#ifndef __RSBAC_AUTH_DATA_STRUC_H
++#define __RSBAC_AUTH_DATA_STRUC_H
++
++#include <linux/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/types.h>
++
++/**********************************************/
++/* Capability lists */
++/**********************************************/
++
++#define RSBAC_AUTH_LIST_KEY 626281
++
++#define RSBAC_AUTH_P_LIST_VERSION 1
++#define RSBAC_AUTH_P_LIST_NAME "authproc"
++#define RSBAC_AUTH_P_EFF_LIST_NAME "authproceff"
++#define RSBAC_AUTH_P_FS_LIST_NAME "authprocfs"
++#define RSBAC_AUTH_P_GROUP_LIST_NAME "authprocgr"
++#define RSBAC_AUTH_P_GROUP_EFF_LIST_NAME "authprocgreff"
++#define RSBAC_AUTH_P_GROUP_FS_LIST_NAME "authprocgrfs"
++
++#define RSBAC_AUTH_FD_FILENAME "authfd"
++#define RSBAC_AUTH_FD_EFF_FILENAME "authfde"
++#define RSBAC_AUTH_FD_FS_FILENAME "authfdf"
++#define RSBAC_AUTH_FD_GROUP_FILENAME "authfg"
++#define RSBAC_AUTH_FD_GROUP_EFF_FILENAME "authfge"
++#define RSBAC_AUTH_FD_GROUP_FS_FILENAME "authfgf"
++#define RSBAC_AUTH_FD_OLD_FILENAME "authfd."
++#define RSBAC_AUTH_FD_OLD_EFF_FILENAME "authfde."
++#define RSBAC_AUTH_FD_OLD_FS_FILENAME "authfdf."
++#define RSBAC_AUTH_FD_OLD_GROUP_FILENAME "authfg."
++#define RSBAC_AUTH_FD_OLD_GROUP_EFF_FILENAME "authfge."
++#define RSBAC_AUTH_FD_OLD_GROUP_FS_FILENAME "authfgf."
++#define RSBAC_AUTH_NR_CAP_FD_LISTS 4
++#define RSBAC_AUTH_NR_CAP_EFF_FD_LISTS 2
++#define RSBAC_AUTH_NR_CAP_FS_FD_LISTS 2
++#define RSBAC_AUTH_NR_CAP_GROUP_FD_LISTS 4
++#define RSBAC_AUTH_NR_CAP_GROUP_EFF_FD_LISTS 2
++#define RSBAC_AUTH_NR_CAP_GROUP_FS_FD_LISTS 2
++
++#define RSBAC_AUTH_FD_LIST_VERSION 2
++#define RSBAC_AUTH_FD_EFF_LIST_VERSION 2
++#define RSBAC_AUTH_FD_FS_LIST_VERSION 2
++#define RSBAC_AUTH_FD_GROUP_LIST_VERSION 2
++#define RSBAC_AUTH_FD_GROUP_EFF_LIST_VERSION 2
++#define RSBAC_AUTH_FD_GROUP_FS_LIST_VERSION 2
++#define RSBAC_AUTH_FD_OLD_LIST_VERSION 1
++#define RSBAC_AUTH_FD_EFF_OLD_LIST_VERSION 1
++#define RSBAC_AUTH_FD_FS_OLD_LIST_VERSION 1
++#define RSBAC_AUTH_FD_GROUP_OLD_LIST_VERSION 1
++#define RSBAC_AUTH_FD_GROUP_EFF_OLD_LIST_VERSION 1
++#define RSBAC_AUTH_FD_GROUP_FS_OLD_LIST_VERSION 1
++
++/* The list of devices is also a doubly linked list, so we define list */
++/* items and a list head. */
++
++struct rsbac_auth_device_list_item_t {
++ kdev_t id; /* set to 0 before deletion */
++ u_int mount_count;
++ rsbac_list_handle_t handle;
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ rsbac_list_handle_t eff_handle;
++ rsbac_list_handle_t fs_handle;
++#endif
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ rsbac_list_handle_t
++ group_handle;
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ rsbac_list_handle_t
++ group_eff_handle;
++ rsbac_list_handle_t
++ group_fs_handle;
++#endif
++#endif
++ struct rsbac_auth_device_list_item_t *prev;
++ struct rsbac_auth_device_list_item_t *next;
++};
++
++/* To provide consistency we use spinlocks for all list accesses. The */
++/* 'curr' entry is used to avoid repeated lookups for the same item. */
++
++struct rsbac_auth_device_list_head_t {
++ struct rsbac_auth_device_list_item_t *head;
++ struct rsbac_auth_device_list_item_t *tail;
++ struct rsbac_auth_device_list_item_t *curr;
++ u_int count;
++};
++
++#endif
+diff --git a/include/rsbac/cap_getname.h b/include/rsbac/cap_getname.h
+new file mode 100644
+index 0000000..e5024a4
+--- /dev/null
++++ b/include/rsbac/cap_getname.h
+@@ -0,0 +1,14 @@
++/********************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2005: */
++/* Amon Ott <ao@rsbac.org> */
++/* Getname functions for CAP module */
++/* Last modified: 28/Jan/2005 */
++/********************************** */
++
++#ifndef __RSBAC_CAP_GETNAME_H
++#define __RSBAC_CAP_GETNAME_H
++
++void rsbac_cap_log_missing_cap(int cap);
++
++#endif
+diff --git a/include/rsbac/daz.h b/include/rsbac/daz.h
+new file mode 100644
+index 0000000..9da0112
+--- /dev/null
++++ b/include/rsbac/daz.h
+@@ -0,0 +1,27 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2005: Amon Ott */
++/* API: */
++/* Functions for Access */
++/* Control Information / DAZ */
++/* Last modified: 18/Jan/2005 */
++/************************************ */
++
++#ifndef __RSBAC_DAZ_H
++#define __RSBAC_DAZ_H
++
++#include <rsbac/types.h>
++
++/* Get ttl for new cache items in seconds */
++/* This function returns 0, if no cache is available, and the ttl value
++ otherwise */
++rsbac_time_t rsbac_daz_get_ttl(void);
++
++/* Set ttl for new cache items in seconds */
++/* ttl must be positive, values bigger than 10 years in seconds
++ (RSBAC_LIST_MAX_AGE_LIMIT in lists.h) are reduced to this limit */
++void rsbac_daz_set_ttl(rsbac_time_t ttl);
++
++/* Flush DAZuko cache lists */
++int rsbac_daz_flush_cache(void);
++#endif
+diff --git a/include/rsbac/debug.h b/include/rsbac/debug.h
+new file mode 100644
+index 0000000..2c7ab79
+--- /dev/null
++++ b/include/rsbac/debug.h
+@@ -0,0 +1,288 @@
++/******************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2009: */
++/* Amon Ott <ao@rsbac.org> */
++/* debug definitions */
++/* Last modified: 03/Oct/2009 */
++/******************************* */
++
++#ifndef __RSBAC_DEBUG_H
++#define __RSBAC_DEBUG_H
++
++#include <linux/init.h>
++//#include <rsbac/types.h>
++
++#define set_rsbac_softmode 1
++#define set_rsbac_softmode_once 2
++#define set_rsbac_softmode_never 4
++#define set_rsbac_freeze 8
++#define set_rsbac_um_no_excl 16
++#define set_rsbac_auth_learn 32
++#define set_rsbac_acl_learn_fd 64
++#define set_rsbac_cap_log_missing 128
++#define set_rsbac_jail_log_missing 256
++#define set_rsbac_dac_disable 512
++#define set_rsbac_no_delay_init 1024
++#define set_rsbac_no_defaults 2048
++#define set_rsbac_nosyslog 4096
++#define set_rsbac_cap_process_hiding 8192
++#define set_rsbac_cap_learn 16384
++#define set_rsbac_rc_learn 32768
++
++extern unsigned long int rsbac_flags;
++extern void rsbac_flags_set(unsigned long int);
++
++extern int rsbac_debug_no_write;
++
++#ifdef CONFIG_RSBAC_DEBUG
++extern int rsbac_debug_ds;
++extern int rsbac_debug_write;
++extern int rsbac_debug_stack;
++extern int rsbac_debug_lists;
++extern int rsbac_debug_aef;
++#endif
++
++extern int rsbac_debug_adf_default;
++extern rsbac_log_entry_t rsbac_log_levels[R_NONE+1];
++
++#define RSBAC_LOG_LEVELS_NAME "log_levels"
++#define RSBAC_LOG_LEVEL_LIST_NAME "ll"
++#define RSBAC_LOG_LEVEL_VERSION 4
++#define RSBAC_LOG_LEVEL_OLD_VERSION 3
++#define RSBAC_LOG_LEVEL_OLD_OLD_VERSION 2
++#define RSBAC_LOG_LEVEL_KEY 13123231
++
++
++extern int rsbac_no_defaults;
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++extern void rsbac_init_debug(void);
++#else
++extern void rsbac_init_debug(void) __init;
++#endif
++
++extern rsbac_boolean_t rsbac_parse_koptions(char *);
++
++#define RSBAC_WAKEUP_KEY 'w'
++#define RSBAC_WAKEUP_UKEY 'W'
++
++#ifdef CONFIG_RSBAC_SOFTMODE
++#define RSBAC_SOFTMODE_KEY 'x'
++#define RSBAC_SOFTMODE_UKEY 'X'
++extern int rsbac_softmode;
++extern int rsbac_softmode_prohibit;
++static inline int rsbac_in_softmode(void)
++ {
++ return rsbac_softmode;
++ }
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++extern int rsbac_ind_softmode[SW_NONE];
++#endif
++#endif
++
++#if defined(CONFIG_RSBAC_FREEZE)
++extern int rsbac_freeze;
++#endif
++
++extern int rsbac_list_recover;
++
++#ifdef CONFIG_RSBAC_FD_CACHE
++extern rsbac_time_t rsbac_fd_cache_ttl;
++extern u_int rsbac_fd_cache_disable;
++#endif
++
++#if defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)
++extern rsbac_time_t rsbac_list_check_interval;
++#endif
++
++#if defined(CONFIG_RSBAC_CAP_PROC_HIDE)
++extern int rsbac_cap_process_hiding;
++#endif
++#ifdef CONFIG_RSBAC_CAP_LOG_MISSING
++extern int rsbac_cap_log_missing;
++#endif
++#ifdef CONFIG_RSBAC_JAIL_LOG_MISSING
++extern int rsbac_jail_log_missing;
++#endif
++
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_FULL
++extern int rsbac_dac_disable;
++extern int rsbac_dac_is_disabled(void);
++#endif
++
++#ifdef CONFIG_RSBAC_RMSG_NOSYSLOG
++extern int rsbac_nosyslog;
++#endif
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++extern int rsbac_no_delay_init;
++extern kdev_t rsbac_delayed_root;
++extern char rsbac_delayed_root_str[];
++#endif
++
++/* rsbac_printk(): You must always prepend the loglevel. As sequence numbers
++ * are per rsbac_printk() message, it is strongly recommended to output single
++ * full lines only.
++ * Example:
++ * rsbac_printk(KERN_DEBUG "Test value: %u\n", testval);
++ */
++extern int rsbac_printk(const char *, ...);
++
++#ifdef CONFIG_RSBAC_DEBUG
++#define rsbac_pr_debug(type, fmt, arg...) \
++ do { if (rsbac_debug_##type) \
++ rsbac_printk(KERN_DEBUG "%s(): " fmt, __FUNCTION__, ##arg); \
++ } while (0)
++#else
++#define rsbac_pr_debug(type, fmt, arg...) do { } while (0)
++#endif
++
++#define rsbac_pr_get_error(attr) \
++ do { rsbac_ds_get_error (__FUNCTION__, attr); \
++ } while (0)
++#define rsbac_pr_set_error(attr) \
++ do { rsbac_ds_set_error (__FUNCTION__, attr); \
++ } while (0)
++#define rsbac_pr_get_error_num(attr, num) \
++ do { rsbac_ds_get_error_num (__FUNCTION__, attr, num); \
++ } while (0)
++#define rsbac_pr_set_error_num(attr, num) \
++ do { rsbac_ds_set_error_num (__FUNCTION__, attr, num); \
++ } while (0)
++
++#define rsbac_rc_pr_get_error(item) \
++ do { rsbac_rc_ds_get_error (__FUNCTION__, item); \
++ } while (0)
++#define rsbac_rc_pr_set_error(item) \
++ do { rsbac_rc_ds_set_error (__FUNCTION__, item); \
++ } while (0)
++
++#define RSBAC_LOG_MAXLINE 2040
++
++#if defined(CONFIG_RSBAC_RMSG)
++extern int rsbac_log(int, char *, int);
++
++#define RSBAC_LOG_MAXREADBUF (rsbac_min(8192,RSBAC_MAX_KMALLOC))
++
++struct rsbac_log_list_item_t {
++ struct rsbac_log_list_item_t *next;
++ u16 size;
++ char buffer[0];
++};
++
++struct rsbac_log_list_head_t {
++ struct rsbac_log_list_item_t *head;
++ struct rsbac_log_list_item_t *tail;
++ u_int count;
++ u_long lost;
++};
++#if defined(CONFIG_RSBAC_LOG_REMOTE)
++extern rsbac_pid_t rsbaclogd_pid;
++#endif
++#endif
++
++#ifdef CONFIG_RSBAC_NET
++extern int rsbac_debug_ds_net;
++extern int rsbac_debug_aef_net;
++extern int rsbac_debug_adf_net;
++#endif
++
++extern void wakeup_rsbacd(u_long dummy);
++
++/* switch log level for request */
++void rsbac_adf_log_switch(rsbac_adf_request_int_t request,
++ enum rsbac_target_t target,
++ rsbac_enum_t value);
++
++int rsbac_get_adf_log(rsbac_adf_request_int_t request,
++ enum rsbac_target_t target,
++ u_int * value_p);
++
++#ifdef CONFIG_RSBAC_DEBUG
++#if defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)
++extern int rsbac_debug_auto;
++#endif /* CONFIG_RSBAC_AUTO_WRITE > 0 */
++
++#if defined(CONFIG_RSBAC_MAC)
++extern int rsbac_debug_ds_mac;
++extern int rsbac_debug_aef_mac;
++extern int rsbac_debug_adf_mac;
++#endif
++
++#if defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_PM_MAINT)
++extern int rsbac_debug_ds_pm;
++extern int rsbac_debug_aef_pm;
++extern int rsbac_debug_adf_pm;
++#endif
++
++#if defined(CONFIG_RSBAC_DAZ) || defined(CONFIG_RSBAC_DAZ_MAINT)
++extern int rsbac_debug_adf_daz;
++#endif
++
++#if defined(CONFIG_RSBAC_RC) || defined(CONFIG_RSBAC_RC_MAINT)
++extern int rsbac_debug_ds_rc;
++extern int rsbac_debug_aef_rc;
++extern int rsbac_debug_adf_rc;
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH) || defined(CONFIG_RSBAC_AUTH_MAINT)
++extern int rsbac_debug_ds_auth;
++extern int rsbac_debug_aef_auth;
++extern int rsbac_debug_adf_auth;
++#endif
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++extern int rsbac_debug_reg;
++#endif
++
++#if defined(CONFIG_RSBAC_ACL) || defined(CONFIG_RSBAC_ACL_MAINT)
++extern int rsbac_debug_ds_acl;
++extern int rsbac_debug_aef_acl;
++extern int rsbac_debug_adf_acl;
++#endif
++
++#if defined(CONFIG_RSBAC_JAIL)
++extern int rsbac_debug_aef_jail;
++extern int rsbac_debug_adf_jail;
++#endif
++
++#if defined(CONFIG_RSBAC_PAX)
++extern int rsbac_debug_adf_pax;
++#endif
++
++#if defined(CONFIG_RSBAC_UM)
++extern int rsbac_debug_ds_um;
++extern int rsbac_debug_aef_um;
++extern int rsbac_debug_adf_um;
++#endif
++
++#endif /* DEBUG */
++
++#if defined(CONFIG_RSBAC_UM_EXCL)
++extern int rsbac_um_no_excl;
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH) || defined(CONFIG_RSBAC_AUTH_MAINT)
++extern int rsbac_auth_enable_login;
++#if defined(CONFIG_RSBAC_AUTH_LEARN)
++extern int rsbac_auth_learn;
++#define RSBAC_AUTH_LEARN_TA_NAME "AUTH-learn"
++#endif
++#endif
++
++#if defined(CONFIG_RSBAC_RC_LEARN)
++extern int rsbac_rc_learn;
++#define RSBAC_RC_LEARN_TA_NAME "RC-learn"
++#endif
++
++#if defined(CONFIG_RSBAC_CAP_LEARN)
++extern int rsbac_cap_learn;
++#define RSBAC_CAP_LEARN_TA_NAME "CAP-learn"
++#endif
++
++#if defined(CONFIG_RSBAC_ACL_LEARN)
++extern int rsbac_acl_learn_fd;
++#define RSBAC_ACL_LEARN_TA_NAME "ACL-FD-learn"
++#endif
++
++#endif
+diff --git a/include/rsbac/error.h b/include/rsbac/error.h
+new file mode 100644
+index 0000000..33fa1da
+--- /dev/null
++++ b/include/rsbac/error.h
+@@ -0,0 +1,66 @@
++/************************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2008: Amon Ott */
++/* Helper functions for all parts */
++/* Last modified: 03/Mar/2008 */
++/************************************* */
++
++#ifndef __RSBAC_ERROR_H
++#define __RSBAC_ERROR_H
++
++#ifdef __KERNEL__
++#include <linux/errno.h>
++#else
++#include <errno.h>
++#endif
++
++/* Error values */
++
++#define RSBAC_EPERM 1001
++#define RSBAC_EACCESS 1002
++#define RSBAC_EREADFAILED 1003
++#define RSBAC_EWRITEFAILED 1004
++#define RSBAC_EINVALIDPOINTER 1005
++#define RSBAC_ENOROOTDIR 1006
++#define RSBAC_EPATHTOOLONG 1007
++#define RSBAC_ENOROOTDEV 1008
++#define RSBAC_ENOTFOUND 1009
++#define RSBAC_ENOTINITIALIZED 1010
++#define RSBAC_EREINIT 1011
++#define RSBAC_ECOULDNOTADDDEVICE 1012
++#define RSBAC_ECOULDNOTADDITEM 1013
++#define RSBAC_ECOULDNOTCREATEPATH 1014
++#define RSBAC_EINVALIDATTR 1015
++#define RSBAC_EINVALIDDEV 1016
++#define RSBAC_EINVALIDTARGET 1017
++#define RSBAC_EINVALIDVALUE 1018
++#define RSBAC_EEXISTS 1019
++#define RSBAC_EINTERNONLY 1020
++#define RSBAC_EINVALIDREQUEST 1021
++#define RSBAC_ENOTWRITABLE 1022
++#define RSBAC_EMALWAREDETECTED 1023
++#define RSBAC_ENOMEM 1024
++#define RSBAC_EDECISIONMISMATCH 1025
++#define RSBAC_EINVALIDVERSION 1026
++#define RSBAC_EINVALIDMODULE 1027
++#define RSBAC_EEXPIRED 1028
++#define RSBAC_EMUSTCHANGE 1029
++#define RSBAC_EBUSY 1030
++#define RSBAC_EINVALIDTRANSACTION 1031
++#define RSBAC_EWEAKPASSWORD 1032
++#define RSBAC_EINVALIDLIST 1033
++#define RSBAC_EFROMINTERRUPT 1034
++
++#define RSBAC_EMAX 1034
++
++#define RSBAC_ERROR( res ) ((res <= -RSBAC_EPERM) && (res >= -RSBAC_EMAX))
++
++#ifndef __KERNEL__
++/* exit on error */
++void error_exit(int error);
++
++/* show error */
++void show_error(int error);
++#endif
++
++#endif
+diff --git a/include/rsbac/fs.h b/include/rsbac/fs.h
+new file mode 100644
+index 0000000..86452a4
+--- /dev/null
++++ b/include/rsbac/fs.h
+@@ -0,0 +1,68 @@
++/************************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2012: Amon Ott */
++/* File system */
++/* helper functions for all parts */
++/* Last modified: 19/Apr/2012 */
++/************************************* */
++
++#ifndef __RSBAC_FS_H
++#define __RSBAC_FS_H
++
++#include <linux/fs.h>
++#include <linux/major.h>
++#include <linux/root_dev.h>
++#include <linux/sched.h>
++
++/* original lookup_dentry function without rsbac patch for adf call */
++
++struct dentry * rsbac_lookup_hash(struct qstr *name, struct dentry * base);
++struct dentry * rsbac_lookup_one_len(const char * name, struct dentry * base, int len);
++
++#ifndef SOCKFS_MAGIC
++#define SOCKFS_MAGIC 0x534F434B
++#endif
++
++#ifndef SYSFS_MAGIC
++#define SYSFS_MAGIC 0x62656572
++#endif
++
++#ifndef OCFS2_SUPER_MAGIC
++#define OCFS2_SUPER_MAGIC 0x7461636f
++#endif
++
++struct vfsmount * rsbac_get_vfsmount(kdev_t kdev);
++
++extern void __fput(struct file *);
++
++#ifndef SHM_FS_MAGIC
++#define SHM_FS_MAGIC 0x02011994
++#endif
++
++static inline int init_private_file(struct file *filp, struct dentry *dentry, int mode)
++{
++ memset(filp, 0, sizeof(*filp));
++ filp->f_mode = mode;
++ atomic_long_set(&filp->f_count, 1);
++ filp->f_dentry = dentry;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
++ filp->f_cred = current_cred();
++#else
++ filp->f_uid = current->fsuid;
++ filp->f_gid = current->fsgid;
++#endif
++ filp->f_op = dentry->d_inode->i_fop;
++ filp->f_mapping = dentry->d_inode->i_mapping;
++ file_ra_state_init(&filp->f_ra, filp->f_mapping);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
++/* TODO file->f_path.mnt =
++ * fill me in when switching to full 2.6 (need vfsmount passed over)
++ */
++#endif
++ if (filp->f_op->open)
++ return filp->f_op->open(dentry->d_inode, filp);
++ else
++ return 0;
++}
++
++#endif
+diff --git a/include/rsbac/gen_lists.h b/include/rsbac/gen_lists.h
+new file mode 100644
+index 0000000..0a882ae
+--- /dev/null
++++ b/include/rsbac/gen_lists.h
+@@ -0,0 +1,294 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2010: Amon Ott <ao@rsbac.org> */
++/* Generic lists - internal structures */
++/* Last modified: 01/Jul/2010 */
++/*************************************************** */
++
++#ifndef __RSBAC_GEN_LISTS_H
++#define __RSBAC_GEN_LISTS_H
++
++#include <linux/init.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/lists.h>
++#include <rsbac/repl_lists.h>
++
++/* Sanity limit of list size, regardless of RSBAC_LIST_MAX_NR_ITEMS in lists.h */
++#define RSBAC_LIST_MAX_NR_ITEMS_LIMIT 1000000
++
++#define RSBAC_LIST_DISK_VERSION 10003
++#define RSBAC_LIST_DISK_OLD_VERSION 10002
++#define RSBAC_LIST_NONAME "(no name)"
++#define RSBAC_LIST_PROC_NAME "gen_lists"
++#define RSBAC_LIST_COUNTS_PROC_NAME "gen_lists_counts"
++
++#define RSBAC_LIST_TA_KEY 0xface99
++
++#define RSBAC_LIST_MAX_OLD_HASH 32
++#define RSBAC_LIST_LOL_MAX_OLD_HASH 16
++
++/* If the number of items per hashed list is bigger than this and the flag
++ RSBAC_LIST_AUTO_HASH_RESIZE is set, rehash */
++#define RSBAC_LIST_AUTO_REHASH_TRIGGER 30
++
++/* Rehashing interval in seconds - rehashing is triggered by rsbacd, so it might happen
++ * less frequently if rsbacd wakes up later.
++ */
++#define RSBAC_LIST_REHASH_INTERVAL 60
++
++/* Check lists every n seconds. Also called from rsbacd, so it might take longer. */
++
++//#define RSBAC_LIST_CHECK_INTERVAL 1800
++
++/* Prototypes */
++
++/* Init */
++#ifdef CONFIG_RSBAC_INIT_DELAY
++int rsbac_list_init(void);
++#else
++int __init rsbac_list_init(void);
++#endif
++
++/* Status checking */
++int rsbac_check_lists(int correct);
++
++#if defined(CONFIG_RSBAC_AUTO_WRITE)
++int rsbac_write_lists(void);
++#endif
++
++/* Data Structures */
++
++/* All items will be organized in doubly linked lists.
++ * However, we do not know the descriptor or item sizes, so we will access them
++ * with offsets later and only define the list links here.
++ */
++
++struct rsbac_list_item_t {
++ struct rsbac_list_item_t *prev;
++ struct rsbac_list_item_t *next;
++ rsbac_time_t max_age;
++};
++
++/* lists of lists ds */
++struct rsbac_list_lol_item_t {
++ struct rsbac_list_lol_item_t *prev;
++ struct rsbac_list_lol_item_t *next;
++ struct rsbac_list_item_t *head;
++ struct rsbac_list_item_t *tail;
++ struct rsbac_list_item_t *curr;
++ u_long count;
++ rsbac_time_t max_age;
++};
++
++typedef __u32 rsbac_list_count_t;
++
++struct rsbac_list_hashed_t {
++ struct rsbac_list_item_t *head;
++ struct rsbac_list_item_t *tail;
++ struct rsbac_list_item_t *curr;
++ rsbac_list_count_t count;
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ rsbac_ta_number_t ta_copied;
++ struct rsbac_list_item_t *ta_head;
++ struct rsbac_list_item_t *ta_tail;
++ struct rsbac_list_item_t *ta_curr;
++ rsbac_list_count_t ta_count;
++#endif
++};
++
++struct rsbac_list_lol_hashed_t {
++ struct rsbac_list_lol_item_t *head;
++ struct rsbac_list_lol_item_t *tail;
++ struct rsbac_list_lol_item_t *curr;
++ rsbac_list_count_t count;
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ rsbac_ta_number_t ta_copied;
++ struct rsbac_list_lol_item_t *ta_head;
++ struct rsbac_list_lol_item_t *ta_tail;
++ struct rsbac_list_lol_item_t *ta_curr;
++ rsbac_list_count_t ta_count;
++#endif
++};
++
++/* Since all registrations will be organized in doubly linked lists, we must
++ * have list items and a list head.
++ * The pointer to this item will also be used as the list handle. */
++
++struct rsbac_list_reg_item_t {
++ struct rsbac_list_info_t info;
++ u_int flags;
++ rsbac_list_compare_function_t *compare;
++ rsbac_list_get_conv_t *get_conv;
++ void *def_data;
++ char name[RSBAC_LIST_MAX_FILENAME + 1];
++ kdev_t device;
++ spinlock_t lock;
++ struct rsbac_list_rcu_free_head_t * rcu_free;
++ rsbac_boolean_t dirty;
++ rsbac_boolean_t no_write;
++ struct rsbac_nanotime_t lastchange;
++#ifdef CONFIG_RSBAC_LIST_STATS
++ __u64 read_count;
++ __u64 write_count;
++#endif
++ u_int nr_hashes;
++ u_int max_items_per_hash;
++ rsbac_list_hash_function_t * hash_function;
++ char old_name_base[RSBAC_LIST_MAX_FILENAME + 1];
++ struct kmem_cache * slab;
++ char * slabname;
++#if defined(CONFIG_RSBAC_PROC) && defined(CONFIG_PROC_FS)
++ struct proc_dir_entry *proc_entry_p;
++#endif
++ struct rsbac_list_reg_item_t *prev;
++ struct rsbac_list_reg_item_t *next;
++ struct rsbac_list_reg_item_t *self;
++ /* The hashed list heads are allocated dynamically! */
++ struct rsbac_list_hashed_t * hashed;
++};
++
++struct rsbac_list_lol_reg_item_t {
++ struct rsbac_list_lol_info_t info;
++ u_int flags;
++ rsbac_list_compare_function_t *compare;
++ rsbac_list_compare_function_t *subcompare;
++ rsbac_list_get_conv_t *get_conv;
++ rsbac_list_get_conv_t *get_subconv;
++ void *def_data;
++ void *def_subdata;
++ char name[RSBAC_LIST_MAX_FILENAME + 1];
++ kdev_t device;
++ spinlock_t lock;
++ struct rsbac_list_rcu_free_head_lol_t * rcu_free;
++ rsbac_boolean_t dirty;
++ rsbac_boolean_t no_write;
++ struct rsbac_nanotime_t lastchange;
++#ifdef CONFIG_RSBAC_LIST_STATS
++ __u64 read_count;
++ __u64 write_count;
++#endif
++ u_int nr_hashes;
++ u_int max_items_per_hash;
++ u_int max_subitems;
++ rsbac_list_hash_function_t * hash_function;
++ char old_name_base[RSBAC_LIST_MAX_FILENAME + 1];
++ struct kmem_cache * slab;
++ char * slabname;
++ struct kmem_cache * subslab;
++ char * subslabname;
++#if defined(CONFIG_RSBAC_PROC) && defined(CONFIG_PROC_FS)
++ struct proc_dir_entry *proc_entry_p;
++#endif
++ struct rsbac_list_lol_reg_item_t *prev;
++ struct rsbac_list_lol_reg_item_t *next;
++ struct rsbac_list_lol_reg_item_t *self;
++ /* The hashed list heads are allocated dynamically! */
++ struct rsbac_list_lol_hashed_t * hashed;
++};
++
++/* To provide consistency we use spinlocks for all list accesses. The
++ 'curr' entry is used to avoid repeated lookups for the same item. */
++
++struct rsbac_list_reg_head_t {
++ struct rsbac_list_reg_item_t *head;
++ struct rsbac_list_reg_item_t *tail;
++ struct rsbac_list_reg_item_t *curr;
++ spinlock_t lock;
++ struct lock_class_key lock_class;
++ u_int count;
++};
++
++struct rsbac_list_lol_reg_head_t {
++ struct rsbac_list_lol_reg_item_t *head;
++ struct rsbac_list_lol_reg_item_t *tail;
++ struct rsbac_list_lol_reg_item_t *curr;
++ spinlock_t lock;
++ struct lock_class_key lock_class;
++ u_int count;
++};
++
++/* Internal helper list of filled write buffers */
++
++struct rsbac_list_buffer_t {
++ struct rsbac_list_buffer_t * next;
++ u_int len;
++ char data[0];
++};
++
++#define RSBAC_LIST_BUFFER_SIZE 8192
++#define RSBAC_LIST_BUFFER_DATA_SIZE (RSBAC_LIST_BUFFER_SIZE - sizeof(struct rsbac_list_buffer_t))
++
++struct rsbac_list_write_item_t {
++ struct rsbac_list_write_item_t *prev;
++ struct rsbac_list_write_item_t *next;
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_buffer_t *buffer;
++ char name[RSBAC_LIST_MAX_FILENAME + 1];
++ kdev_t device;
++};
++
++struct rsbac_list_write_head_t {
++ struct rsbac_list_write_item_t *head;
++ struct rsbac_list_write_item_t *tail;
++ u_int count;
++};
++
++struct rsbac_list_lol_write_item_t {
++ struct rsbac_list_lol_write_item_t *prev;
++ struct rsbac_list_lol_write_item_t *next;
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_buffer_t *buffer;
++ char name[RSBAC_LIST_MAX_FILENAME + 1];
++ kdev_t device;
++};
++
++struct rsbac_list_lol_write_head_t {
++ struct rsbac_list_lol_write_item_t *head;
++ struct rsbac_list_lol_write_item_t *tail;
++ u_int count;
++};
++
++
++/* Data structs for file timeout book keeping list filelist */
++struct rsbac_list_filelist_desc_t {
++ char filename[RSBAC_LIST_MAX_FILENAME + 1];
++};
++
++struct rsbac_list_filelist_data_t {
++ rsbac_time_t timestamp;
++ rsbac_time_t max_age;
++};
++
++struct rsbac_list_ta_data_t {
++ rsbac_time_t start;
++ rsbac_time_t timeout;
++ rsbac_uid_t commit_uid;
++ char name[RSBAC_LIST_TA_MAX_NAMELEN];
++ char password[RSBAC_LIST_TA_MAX_PASSLEN];
++};
++
++struct rsbac_list_rcu_free_head_t {
++ /* rcu _must_ stay first */
++ struct rcu_head rcu;
++ struct kmem_cache * slab;
++ struct rsbac_list_rcu_free_item_t * head;
++ struct rsbac_list_item_t * item_chain;
++};
++
++struct rsbac_list_rcu_free_head_lol_t {
++ /* rcu _must_ stay first */
++ struct rcu_head rcu;
++ struct kmem_cache * slab;
++ struct kmem_cache * subslab;
++ struct rsbac_list_rcu_free_item_t * head;
++ struct rsbac_list_rcu_free_item_t * subhead;
++ struct rsbac_list_lol_item_t * lol_item_chain;
++ struct rsbac_list_item_t * lol_item_subchain;
++};
++
++struct rsbac_list_rcu_free_item_t {
++ struct rsbac_list_rcu_free_item_t * next;
++ void * mem;
++};
++
++#endif
+diff --git a/include/rsbac/getname.h b/include/rsbac/getname.h
+new file mode 100644
+index 0000000..1a1d577
+--- /dev/null
++++ b/include/rsbac/getname.h
+@@ -0,0 +1,101 @@
++/******************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2007: */
++/* Amon Ott <ao@rsbac.org> */
++/* Getname functions for all parts*/
++/* Last modified: 17/Sep/2007 */
++/******************************** */
++
++#ifndef __RSBAC_GETNAME_H
++#define __RSBAC_GETNAME_H
++
++#include <rsbac/types.h>
++#ifdef CONFIG_RSBAC_XSTATS
++#include <rsbac/syscalls.h>
++#endif
++
++#if defined(__KERNEL__) && defined(CONFIG_RSBAC_LOG_FULL_PATH)
++#include <linux/fs.h>
++#if (CONFIG_RSBAC_MAX_PATH_LEN > 2000)
++#undef CONFIG_RSBAC_MAX_PATH_LEN
++#define CONFIG_RSBAC_MAX_PATH_LEN 2000
++#endif
++#if (CONFIG_RSBAC_MAX_PATH_LEN < RSBAC_MAXNAMELEN)
++#undef CONFIG_RSBAC_MAX_PATH_LEN
++#define CONFIG_RSBAC_MAX_PATH_LEN RSBAC_MAXNAMELEN
++#endif
++#endif
++
++extern char * get_request_name(char * , enum rsbac_adf_request_t);
++
++extern enum rsbac_adf_request_t get_request_nr(const char *);
++
++extern char * get_result_name(char * , enum rsbac_adf_req_ret_t);
++
++extern enum rsbac_adf_req_ret_t get_result_nr(const char *);
++
++extern enum rsbac_switch_target_t get_attr_module(enum rsbac_attribute_t attr);
++
++extern char * get_attribute_name(char * , enum rsbac_attribute_t);
++
++extern char * get_attribute_value_name( char * attr_val_name,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t * attr_val_p);
++
++extern enum rsbac_attribute_t get_attribute_nr(const char *);
++
++extern char * get_target_name(char * , enum rsbac_target_t,
++ char * , union rsbac_target_id_t);
++
++extern char * get_target_name_only(char * target_type_name,
++ enum rsbac_target_t target);
++
++extern enum rsbac_target_t get_target_nr(const char *);
++
++extern char * get_ipc_target_name(char *,
++ enum rsbac_ipc_type_t);
++
++extern enum rsbac_ipc_type_t get_ipc_target_nr(const char *);
++
++extern char * get_scd_type_name(char *,
++ enum rsbac_scd_type_t);
++
++extern enum rsbac_scd_type_t get_scd_type_nr(const char *);
++
++extern char * get_switch_target_name(char *,
++ enum rsbac_switch_target_t);
++
++extern enum rsbac_switch_target_t get_switch_target_nr(const char *);
++
++extern char * get_error_name(char *,
++ int);
++
++#ifndef __KERNEL__
++extern char * get_attribute_param(char * , enum rsbac_attribute_t);
++#endif
++
++extern char * get_log_level_name(char *,
++ enum rsbac_log_level_t);
++
++extern enum rsbac_log_level_t get_log_level_nr(const char *);
++
++#ifdef __KERNEL__
++int rsbac_lookup_full_path(struct dentry * dentry_p, char path[], int maxlen, int pseudonymize);
++
++static inline int rsbac_get_full_path(struct dentry * dentry_p, char path[], int maxlen)
++{
++ return rsbac_lookup_full_path(dentry_p, path, maxlen, 1);
++}
++#endif
++
++char * get_cap_name(char * name,
++ u_int value);
++
++int get_cap_nr(const char * name);
++
++#ifdef CONFIG_RSBAC_XSTATS
++char *get_syscall_name(char *syscall_name,
++ enum rsbac_syscall_t syscall);
++#endif
++
++#endif
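As an illustration only (not part of the patch), the name lookup functions declared above are typically used to turn enum values into readable log output. The buffer size below is an assumption; RSBAC_MAXNAMELEN from rsbac/types.h is the natural choice, but the required minimum is not stated in this header.

/* Illustrative sketch, not part of the RSBAC patch. */
static void my_log_decision(enum rsbac_adf_request_t request,
			    enum rsbac_adf_req_ret_t result)
{
	char req_name[RSBAC_MAXNAMELEN];	/* assumed to be large enough */
	char res_name[RSBAC_MAXNAMELEN];

	get_request_name(req_name, request);
	get_result_name(res_name, result);
	printk(KERN_INFO "RSBAC: request %s -> %s\n", req_name, res_name);
}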
+diff --git a/include/rsbac/helpers.h b/include/rsbac/helpers.h
+new file mode 100644
+index 0000000..b8b31af
+--- /dev/null
++++ b/include/rsbac/helpers.h
+@@ -0,0 +1,157 @@
++/************************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2012: Amon Ott */
++/* Helper functions for all parts */
++/* Last modified: 07/May/2012 */
++/************************************* */
++
++#ifndef __RSBAC_HELPER_H
++#define __RSBAC_HELPER_H
++
++#include <linux/types.h>
++#include <rsbac/types.h>
++#ifdef __KERNEL__
++#include <rsbac/rkmem.h>
++#endif
++
++char * inttostr(char[], int);
++
++char * ulongtostr(char[], u_long);
++
++/* convert u_long_long to binary string representation for MAC module */
++char * u64tostrmac(char[], __u64);
++
++char * u32tostrcap(char * str, __u32 i);
++__u32 strtou32cap(char * str, __u32 * i_p);
++
++int rsbac_get_vset_num(char * sourcename, rsbac_um_set_t * vset_p);
++
++#ifndef __KERNEL__
++void locale_init(void);
++
++int rsbac_lib_version(void);
++int rsbac_u32_compare(__u32 * a, __u32 * b);
++int rsbac_u32_void_compare(const void *a, const void *b);
++
++int rsbac_user_compare(const void * a, const void * b);
++int rsbac_group_compare(const void * a, const void * b);
++int rsbac_nettemp_id_compare(const void * a, const void * b);
++
++int rsbac_dev_compare(const void * desc1,
++ const void * desc2);
++
++char * get_user_name(rsbac_uid_t user, char * name);
++
++char * get_group_name(rsbac_gid_t group, char * name);
++
++int rsbac_get_uid_name(rsbac_uid_t * uid, char * name, char * sourcename);
++
++int rsbac_get_fullname(char * fullname, rsbac_uid_t uid);
++
++static inline int rsbac_get_uid(rsbac_uid_t * uid, char * sourcename)
++ {
++ return rsbac_get_uid_name(uid, NULL, sourcename);
++ }
++
++int rsbac_get_gid_name(rsbac_gid_t * gid, char * name, char * sourcename);
++
++static inline int rsbac_get_gid(rsbac_gid_t * gid, char * sourcename)
++ {
++ return rsbac_get_gid_name(gid, NULL, sourcename);
++ }
++
++/* convert u_long_long to binary string representation for log array */
++char * u64tostrlog(char[], __u64);
++/* and back */
++__u64 strtou64log(char[], __u64 *);
++
++/* convert u_long_long to binary string representation for MAC module */
++/* and back */
++__u64 strtou64mac(char[], __u64 *);
++
++/* convert u_long_long to binary string representation for RC module */
++char * u64tostrrc(char[], __u64);
++/* and back */
++__u64 strtou64rc(char[], __u64 *);
++
++/* convert u_long_long to binary string representation for RC module / rights */
++char * u64tostrrcr(char[], __u64);
++/* and back */
++__u64 strtou64rcr(char[], __u64 *);
++
++/* ACL back */
++__u64 strtou64acl(char[], __u64 *);
++
++char * devdesctostr(char * str, struct rsbac_dev_desc_t dev);
++
++int strtodevdesc(char * str, struct rsbac_dev_desc_t * dev_p);
++#endif
++
++/* convert u_long_long to binary string representation for ACL module */
++char * u64tostracl(char[], __u64);
++
++char * longtostr(char[], long);
++
++#ifdef __KERNEL__
++#include <asm/uaccess.h>
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++rsbac_um_set_t rsbac_get_vset(void);
++#else
++static inline rsbac_um_set_t rsbac_get_vset(void)
++ {
++ return 0;
++ }
++#endif
++
++int rsbac_get_owner(rsbac_uid_t * user_p);
++
++static inline int rsbac_get_user(void * kern_p, void __user * user_p, int size)
++ {
++ if(kern_p && user_p && (size > 0))
++ {
++ return copy_from_user(kern_p, user_p, size);
++ }
++ return 0;
++ }
++
++
++static inline int rsbac_put_user(void * kern_p, void __user * user_p, int size)
++ {
++ if(kern_p && user_p && (size > 0))
++ {
++ return copy_to_user(user_p,kern_p,size);
++ }
++ return 0;
++ }
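As an aside (not part of the patch), the two copy helpers above are thin wrappers around copy_from_user()/copy_to_user(). A hypothetical syscall handler would use them roughly as in the sketch below; the argument struct and handler name are made up, and the usual kernel includes (errno) are assumed.

/* Illustrative sketch, not part of the RSBAC patch. */
struct my_syscall_arg_t {		/* hypothetical argument layout */
	rsbac_uid_t uid;
	__u32 flags;
};

static int my_syscall_handler(void __user *arg)
{
	struct my_syscall_arg_t karg;

	/* copy_from_user() wrapper: returns non-zero if the copy faults */
	if (rsbac_get_user(&karg, arg, sizeof(karg)))
		return -EFAULT;

	/* ... act on karg ... */

	/* copy_to_user() wrapper for the result */
	if (rsbac_put_user(&karg, arg, sizeof(karg)))
		return -EFAULT;
	return 0;
}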
++
++static inline char * rsbac_getname(const char * name)
++ {
++ return getname(name);
++ }
++
++static inline void rsbac_putname(const char * name)
++ {
++ putname(name);
++ }
++
++static inline int clear_user_buf(char * ubuf, int len)
++ {
++ return clear_user(ubuf,len);
++ }
++
++void rsbac_get_attr_error(char * , enum rsbac_adf_request_t);
++
++void rsbac_ds_get_error(const char * function, enum rsbac_attribute_t attr);
++void rsbac_ds_get_error_num(const char * function, enum rsbac_attribute_t attr, int err);
++void rsbac_ds_set_error(const char * function, enum rsbac_attribute_t attr);
++void rsbac_ds_set_error_num(const char * function, enum rsbac_attribute_t attr, int err);
++
++#ifdef CONFIG_RSBAC_RC
++void rsbac_rc_ds_get_error(const char * function, enum rsbac_rc_item_t item);
++void rsbac_rc_ds_set_error(const char * function, enum rsbac_rc_item_t item);
++#endif
++
++#endif /* KERNEL */
++
++#endif
+diff --git a/include/rsbac/hooks.h b/include/rsbac/hooks.h
+new file mode 100644
+index 0000000..10cd794
+--- /dev/null
++++ b/include/rsbac/hooks.h
+@@ -0,0 +1,24 @@
++/******************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2006: */
++/* Amon Ott <ao@rsbac.org> */
++/* Common include file set */
++/* Last modified: 31/Mar/2006 */
++/******************************* */
++
++#ifndef __RSBAC_HOOKS_H
++#define __RSBAC_HOOKS_H
++
++#ifdef CONFIG_RSBAC
++#include <rsbac/adf.h>
++#include <rsbac/aci.h>
++#include <rsbac/helpers.h>
++#include <rsbac/fs.h>
++#include <rsbac/debug.h>
++//#include <rsbac/aci_data_structures.h>
++//#include <rsbac/adf_main.h>
++#else
++#define rsbac_kthreads_init() do {} while(0)
++#endif
++
++#endif
+diff --git a/include/rsbac/jail.h b/include/rsbac/jail.h
+new file mode 100644
+index 0000000..73d321b
+--- /dev/null
++++ b/include/rsbac/jail.h
+@@ -0,0 +1,16 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2007: */
++/* Amon Ott <ao@rsbac.org> */
++/* Global definitions for JAIL module */
++/* Last modified: 29/Jan/2007 */
++/************************************ */
++
++#ifndef __RSBAC_JAIL_H
++#define __RSBAC_JAIL_H
++
++extern rsbac_jail_id_t rsbac_jail_syslog_jail_id;
++
++rsbac_boolean_t rsbac_jail_exists(rsbac_jail_id_t jail_id);
++
++#endif
+diff --git a/include/rsbac/jail_getname.h b/include/rsbac/jail_getname.h
+new file mode 100644
+index 0000000..01a301f
+--- /dev/null
++++ b/include/rsbac/jail_getname.h
+@@ -0,0 +1,14 @@
++/********************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2005: */
++/* Amon Ott <ao@rsbac.org> */
++/* Getname functions for JAIL module */
++/* Last modified: 27/May/2005 */
++/********************************** */
++
++#ifndef __RSBAC_JAIL_GETNAME_H
++#define __RSBAC_JAIL_GETNAME_H
++
++void rsbac_jail_log_missing_cap(int cap);
++
++#endif
+diff --git a/include/rsbac/lists.h b/include/rsbac/lists.h
+new file mode 100644
+index 0000000..a432481
+--- /dev/null
++++ b/include/rsbac/lists.h
+@@ -0,0 +1,909 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2010: Amon Ott <ao@rsbac.org> */
++/* Generic List Management */
++/* Last modified: 31/May/2010 */
++/*************************************************** */
++
++/* Note: lol = list of lists, a two-level list structure */
++
++#ifndef __RSBAC_LISTS_H
++#define __RSBAC_LISTS_H
++
++#include <linux/init.h>
++#include <linux/vmalloc.h>
++//#include <rsbac/types.h>
++#include <rsbac/rkmem.h>
++
++#define RSBAC_LIST_VERSION 3
++
++typedef void *rsbac_list_handle_t;
++typedef __u32 rsbac_list_key_t;
++
++/* Maximum length for list (file)names */
++#define RSBAC_LIST_MAX_FILENAME 15
++
++/* Limit for max_age_in_seconds: ca. 10 years */
++#define RSBAC_LIST_MAX_AGE_LIMIT (3600 * 24 * 366 * 10)
++
++/* Maximum desc_size + data_size: 8K - some space for metadata */
++#define RSBAC_LIST_MAX_ITEM_SIZE (8192 - 64)
++
++#define RSBAC_LIST_MIN_MAX_HASHES 8
++
++/* standard hash functions */
++u_int rsbac_list_hash_u32(void * desc, __u32 nr_hashes);
++u_int rsbac_list_hash_fd(void * desc, __u32 nr_hashes);
++u_int rsbac_list_hash_pid(void * desc, __u32 nr_hashes);
++u_int rsbac_list_hash_uid(void * desc, __u32 nr_hashes);
++u_int rsbac_list_hash_gid(void * desc, __u32 nr_hashes);
++u_int rsbac_list_hash_ipc(void * desc, __u32 nr_hashes);
++u_int rsbac_list_hash_dev(void * desc, __u32 nr_hashes);
++u_int rsbac_list_hash_nettemp(void * desc, __u32 nr_hashes);
++u_int rsbac_list_hash_netobj(void * desc, __u32 nr_hashes);
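The standard hash functions above all follow the rsbac_list_hash_function_t signature declared further down in this header. As an illustration only (not part of the patch), a module-specific hash for a plain __u32 descriptor could look like this minimal sketch.

/* Illustrative sketch, not part of the RSBAC patch: a custom hash with the
 * rsbac_list_hash_function_t signature. It must map every descriptor to the
 * range 0 .. nr_hashes-1; the descriptor size was fixed at registration
 * time, so the cast to the registered type is safe.
 */
u_int my_hash_u32(void *desc, __u32 nr_hashes)
{
	return *((__u32 *) desc) % nr_hashes;
}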
++
++/****************************/
++/* List Registration Flags: */
++
++/* Make persistent, i.e., save to and restore from disk */
++#define RSBAC_LIST_PERSIST 1
++
++/* Ignore old list contents (still checks key, if list exists on disk) */
++#define RSBAC_LIST_IGNORE_OLD 2
++
++/* Ignore old list contents, if version upconversion is not supported
++ * (no get_conv, or get_conv returned NULL) - without this flag, registration fails, if
++ * list cannot be converted.
++ */
++#define RSBAC_LIST_IGNORE_UNSUPP_VERSION 4
++
++/* Temporarily disallow writing list to disk, e.g. for upgrade tests */
++#define RSBAC_LIST_NO_WRITE 8
++
++/* Provide a binary backup file as /proc/rsbac-info/backup/filename */
++#define RSBAC_LIST_BACKUP 16
++
++/* Use provided default data, return it for nonexistent items and
++   automatically create and clean up items with default data as necessary.
++   (only items with 0 ttl (unlimited) get removed)
++   (lol items with default data are only removed if they have no subitems) */
++#define RSBAC_LIST_DEF_DATA 32
++
++/* Use provided default subitem data, return it for nonexistent subitems and
++   automatically create and clean up subitems with default data as necessary.
++   (only subitems with 0 ttl (unlimited) get removed) */
++#define RSBAC_LIST_DEF_SUBDATA 64
++
++/* Replicate list to replication partners.
++ Must be enabled in config. */
++#define RSBAC_LIST_REPLICATE 128
++
++/* Allow automatic online resizing of the list hashing table.
++ Requires that the provided hash function uses the nr_hashes parameter. */
++#define RSBAC_LIST_AUTO_HASH_RESIZE 256
++
++/* Disable limit of RSBAC_LIST_MAX_NR_ITEMS items per single list. */
++#define RSBAC_LIST_NO_MAX 512
++
++/* Disable warning if max_entries prevents adding of items */
++#define RSBAC_LIST_NO_MAX_WARN 1024
++
++/* Use own slab for this list */
++#define RSBAC_LIST_OWN_SLAB 2048
++
++/* Maximum number of items per single list, the total limit is at
++ * RSBAC_LIST_MAX_NR_ITEMS * nr_hashes.
++ * Limits can be disabled per list with RSBAC_LIST_NO_MAX flag and
++ * changed with rsbac_list_max_items() and rsbac_list_lol_max_items().
++ */
++
++#define RSBAC_LIST_MAX_NR_ITEMS 50000
++#define RSBAC_LIST_MAX_NR_SUBITEMS 50000
++
++/****************************/
++/* Function prototypes */
++
++/* Function to compare two descriptors, returns 0, if equal, a negative value,
++ * if desc1 < desc2 and a positive value, if desc1 > desc2 (like memcmp).
++ * Used for lookup and list optimization.
++ * Note: Non-0 values are only used for list optimization and do not necessarily
++ * imply a real order of values.
++ */
++typedef int rsbac_list_compare_function_t(void *desc1, void *desc2);
++
++int rsbac_list_compare_u32(void * desc1, void * desc2);
++
++/* Function to compare two data items, returns 0 if equal and a non-zero
++ * value otherwise.
++ * Used for lookup by data.
++ * Note: list optimization is based on descriptors, so data lookup is always
++ * linear search from first to last element in list order.
++ */
++typedef int rsbac_list_data_compare_function_t(void *data1, void *data2);
++
++/* Function to compare two descs with a parameter, returns TRUE,
++ * if item is selected, and FALSE, if not.
++ * Used for selected lists of descriptors.
++ */
++typedef int rsbac_list_desc_selector_function_t(void *desc, void * param);
++
++/* conversion function to upconvert old on-disk descs and datas to actual version */
++/* must return 0 on success or error otherwise */
++/* Attention: if old or new data_size is 0, the respective data pointer is NULL! */
++typedef int rsbac_list_conv_function_t(void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data);
++
++/* callback function to return an upconvert function for on-disk-version, if versions differ */
++/* Note: Lists implementation does not assume anything about your version number apart
++ from being of type rsbac_version_t. Use it as you like. */
++typedef rsbac_list_conv_function_t *rsbac_list_get_conv_t(rsbac_version_t
++ old_version);
++
++/* hash function to return a hash for the descriptor in the range 0 to nr_hashes-1 */
++typedef u_int rsbac_list_hash_function_t(void * desc, __u32 nr_hashes);
++
++/* get generic list registration version */
++rsbac_version_t rsbac_list_version(void);
++
++
++/* List info: This struct will be written to disk */
++/*
++ * list_version: a simple __u32 version number for the list. If old on-disk version is
++ different, conversion is tried (depending on flags and get_conv function)
++ * key: secret __u32 key, which must be the same as in on-disk version, if persistent
++ * desc_size: size of the descriptor (error is returned, if list exists and value differs)
++ internally reset to sizeof(__u32) for u32 call variants
++ * data_size: size of data (error is returned, if list exists and value differs)
++ set to 0 for sets without data
++ * subdesc_size: size of the descriptor of the sublist (error is returned, if list exists
++ and value differs), internally reset to sizeof(__u32) for u32 call variants
++ * subdata_size: size of sublist data (error is returned, if list exists and value differs)
++ set to 0 for sets without data
++ * max_age: seconds until unchanged list file (no add or remove) will be purged.
++ Maximum value is RSBAC_LIST_MAX_AGE_LIMIT (s.a.), use 0 for unlimited lifetime.
++   (purging not yet implemented - only reused without key, please clean up by hand)
++ */
++struct rsbac_list_info_t {
++ rsbac_version_t version;
++ rsbac_list_key_t key;
++ __u32 desc_size;
++ __u32 data_size;
++ rsbac_time_t max_age;
++};
++
++struct rsbac_list_lol_info_t {
++ rsbac_version_t version;
++ rsbac_list_key_t key;
++ __u32 desc_size;
++ __u32 data_size;
++ __u32 subdesc_size;
++ __u32 subdata_size;
++ rsbac_time_t max_age;
++};
++
++
++/* register a new list */
++/*
++ * If list with same name exists in memory, error -RSBAC_EEXISTS is returned.
++ * If list with same name and key exists on device, it is restored depending on
++ the flags.
++ * If list with same name, but different key exists on disk, access is denied
++ (error -EPERM).
++ *
++ * ds_version: for binary modules, must be RSBAC_LIST_VERSION. If version
++ differs, return error.
++ * handle_p: for all list accesses, an opaque handle is put into *handle_p.
++ * flags: see flag values
++ * compare: for lookup and list optimization, can be NULL, then
++ memcmp(desc1, desc2, desc_size) is used
++ * subcompare: for item lookup and optimization of sublist, can be NULL, then
++ memcmp(desc1, desc2, desc_size) is used
++ * get_conv: function to deliver conversion function for given version
++ * get_subconv: function to deliver sublist item conversion function for given
++ version
++ * def_data: default data value for flag RSBAC_LIST_DEF_DATA
++ (if NULL, flag is cleared)
++ * def_subdata: default subdata value for flag RSBAC_LIST_DEF_SUBDATA
++ (if NULL, flag is cleared)
++ * name: the on-disk name, should be distinct and max. 7 or 8.2 chars
++ (maxlen of RSBAC_LIST_MAX_FILENAME supported) (only used for statistics, if
++ non-persistent)
++ * device: the device to read list from or to save list to - use 0 for root dev
++ (ignored, if non-persistent)
++ * nr_hashes: Number of hashes for this list, maximum is RSBAC_LIST_MAX_HASHES,
++ which is derived from CONFIG_RSBAC_LIST_MAX_HASHES.
++ If > maximum, it will be reduced to maximum automatically.
++ 8 <= RSBAC_LIST_MAX_HASHES <= 256 in all cases, see above.
++ Thus, it is safe to use nr_hashes <= 8 without checks.
++ Value may vary between registrations.
++ * hash_function: Hash function for desc, must always return a value
++ from 0 to nr_hashes-1.
++ * old_base_name: If not NULL and persistent list with name cannot be read,
++ try to read all old_base_name<n> with n from 0 to 31.
++ */
++
++int rsbac_list_register_hashed(rsbac_version_t ds_version,
++ rsbac_list_handle_t * handle_p,
++ struct rsbac_list_info_t *info_p,
++ u_int flags,
++ rsbac_list_compare_function_t * compare,
++ rsbac_list_get_conv_t * get_conv,
++ void *def_data,
++ char *name, kdev_t device,
++ u_int nr_hashes,
++ rsbac_list_hash_function_t hash_function,
++ char * old_base_name);
++
++int rsbac_list_lol_register_hashed(rsbac_version_t ds_version,
++ rsbac_list_handle_t * handle_p,
++ struct rsbac_list_lol_info_t *info_p,
++ u_int flags,
++ rsbac_list_compare_function_t * compare,
++ rsbac_list_compare_function_t * subcompare,
++ rsbac_list_get_conv_t * get_conv,
++ rsbac_list_get_conv_t * get_subconv,
++ void *def_data,
++ void *def_subdata,
++ char *name, kdev_t device,
++ u_int nr_hashes,
++ rsbac_list_hash_function_t hash_function,
++ char * old_base_name);
++
++/* Old and simpler registration function, sets nr_hashes to 1,
++ * hash_function to NULL and old_base_name to NULL.
++ */
++
++int rsbac_list_register(rsbac_version_t ds_version,
++ rsbac_list_handle_t * handle_p,
++ struct rsbac_list_info_t *info_p,
++ u_int flags,
++ rsbac_list_compare_function_t * compare,
++ rsbac_list_get_conv_t * get_conv,
++ void *def_data, char *name, kdev_t device);
++
++int rsbac_list_lol_register(rsbac_version_t ds_version,
++ rsbac_list_handle_t * handle_p,
++ struct rsbac_list_lol_info_t *info_p,
++ u_int flags,
++ rsbac_list_compare_function_t * compare,
++ rsbac_list_compare_function_t * subcompare,
++ rsbac_list_get_conv_t * get_conv,
++ rsbac_list_get_conv_t * get_subconv,
++ void *def_data,
++ void *def_subdata, char *name, kdev_t device);
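To make the parameter documentation above concrete, here is a minimal sketch (illustrative only, not part of the patch) of registering a persistent list keyed by user id. The data struct, key, version number and list name are invented for the example; the NULL compare function falls back to memcmp over desc_size bytes.

/* Illustrative sketch, not part of the RSBAC patch. */
struct my_flags_t {			/* hypothetical per-user data */
	__u32 flags;
};

static rsbac_list_handle_t my_handle = NULL;

static int my_list_init(void)
{
	/* kept static in case the list code keeps the default data pointer */
	static struct my_flags_t def_data = { .flags = 0 };
	struct rsbac_list_info_t info = {
		.version = 1,			/* bump when my_flags_t changes */
		.key = 0x12345678,		/* secret key checked on restore */
		.desc_size = sizeof(rsbac_uid_t),
		.data_size = sizeof(struct my_flags_t),
		.max_age = 0,			/* unlimited lifetime */
	};

	return rsbac_list_register(RSBAC_LIST_VERSION, &my_handle, &info,
				   RSBAC_LIST_PERSIST | RSBAC_LIST_DEF_DATA,
				   NULL /* compare: memcmp */,
				   NULL /* no conversion */,
				   &def_data, "myflags", 0 /* root dev */);
}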
++
++/* destroy list */
++/* list is destroyed, disk file is deleted */
++/* list must have been opened with register */
++int rsbac_list_destroy(rsbac_list_handle_t * handle_p,
++ rsbac_list_key_t key);
++
++int rsbac_list_lol_destroy(rsbac_list_handle_t * handle_p,
++ rsbac_list_key_t key);
++
++/* detach from list */
++/* list is saved (if persistent) and removed from memory. Call register for new access. */
++/* Must not be called with spinlock held. */
++
++int rsbac_list_detach(rsbac_list_handle_t * handle_p,
++ rsbac_list_key_t key);
++
++int rsbac_list_lol_detach(rsbac_list_handle_t * handle_p,
++ rsbac_list_key_t key);
++
++/* set list's no_write flag */
++/* TRUE: do not write to disk, FALSE: writing allowed */
++int rsbac_list_no_write
++ (rsbac_list_handle_t handle, rsbac_list_key_t key,
++ rsbac_boolean_t no_write);
++
++int rsbac_list_lol_no_write
++ (rsbac_list_handle_t handle, rsbac_list_key_t key,
++ rsbac_boolean_t no_write);
++
++/* Set max_items_per_hash */
++int rsbac_list_max_items(rsbac_list_handle_t handle, rsbac_list_key_t key,
++ u_int max_items);
++
++int rsbac_list_lol_max_items(rsbac_list_handle_t handle, rsbac_list_key_t key,
++ u_int max_items, u_int max_subitems);
++
++/* Single list checking, good for cleanup of items with ttl in the past. */
++/* This functionality is also included in the big rsbac_check(). */
++
++int rsbac_list_check(rsbac_list_handle_t handle, int correct);
++
++int rsbac_list_lol_check(rsbac_list_handle_t handle, int correct);
++
++/* Transaction Support */
++#ifdef CONFIG_RSBAC_LIST_TRANS
++int rsbac_list_ta_begin(rsbac_time_t ttl,
++ rsbac_list_ta_number_t * ta_number_p,
++ rsbac_uid_t commit_uid,
++ char * name, char *password);
++
++int rsbac_list_ta_refresh(rsbac_time_t ttl,
++ rsbac_list_ta_number_t ta_number,
++ char *password);
++
++int rsbac_list_ta_commit(rsbac_list_ta_number_t ta_number, char *password);
++
++int rsbac_list_ta_forget(rsbac_list_ta_number_t ta_number, char *password);
++
++/* Returns TRUE, if transaction ta_number exists, and FALSE, if not. */
++int rsbac_list_ta_exist(rsbac_list_ta_number_t ta_number);
++#endif
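A typical transaction looks like the following sketch (illustrative only, not part of the patch). It reuses the hypothetical my_flags_t list from the registration sketch above; the commit uid of 0 and the 60 second ttl are arbitrary assumptions.

/* Illustrative sketch, not part of the RSBAC patch. */
#ifdef CONFIG_RSBAC_LIST_TRANS
static int my_update(rsbac_list_handle_t handle, rsbac_uid_t uid,
		     struct my_flags_t *new_flags)
{
	rsbac_list_ta_number_t ta = 0;
	int err;

	err = rsbac_list_ta_begin(60 /* ttl in s */, &ta,
				  0 /* commit uid, assumed */,
				  "my-update", NULL /* no password */);
	if (err)
		return err;

	/* changes stay invisible to other readers until the commit */
	err = rsbac_ta_list_add_ttl(ta, handle, 0 /* no item ttl */,
				    &uid, new_flags);
	if (err) {
		rsbac_list_ta_forget(ta, NULL);
		return err;
	}
	return rsbac_list_ta_commit(ta, NULL);
}
#endif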
++
++/* add with time-to-live - after this time in seconds the item gets automatically removed */
++/* set to 0 for unlimited (default), RSBAC_LIST_TTL_KEEP to keep previous setting */
++int rsbac_ta_list_add_ttl(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ rsbac_time_t ttl, void *desc, void *data);
++
++static inline int rsbac_list_add_ttl(rsbac_list_handle_t handle,
++ rsbac_time_t ttl, void *desc, void *data)
++{
++ return rsbac_ta_list_add_ttl(0, handle, ttl, desc, data);
++}
++
++static inline int rsbac_list_add(rsbac_list_handle_t handle, void *desc, void *data)
++{
++ return rsbac_ta_list_add_ttl(0, handle, RSBAC_LIST_TTL_KEEP, desc,
++ data);
++}
++
++/* Add list of lists sublist item, item for desc must exist */
++int rsbac_ta_list_lol_subadd_ttl(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ rsbac_time_t ttl,
++ void *desc, void *subdesc, void *subdata);
++
++static inline int rsbac_list_lol_subadd_ttl(rsbac_list_handle_t handle,
++ rsbac_time_t ttl,
++ void *desc, void *subdesc, void *subdata)
++{
++ return rsbac_ta_list_lol_subadd_ttl(0, handle, ttl, desc, subdesc,
++ subdata);
++}
++
++static inline int rsbac_list_lol_subadd(rsbac_list_handle_t handle,
++ void *desc, void *subdesc, void *subdata)
++{
++ return rsbac_ta_list_lol_subadd_ttl(0, handle, RSBAC_LIST_TTL_KEEP,
++ desc, subdesc, subdata);
++}
++
++/* add with time-to-live - after this time in seconds the item gets automatically removed */
++int rsbac_ta_list_lol_add_ttl(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ rsbac_time_t ttl, void *desc, void *data);
++
++static inline int rsbac_list_lol_add_ttl(rsbac_list_handle_t handle,
++ rsbac_time_t ttl, void *desc, void *data)
++{
++ return rsbac_ta_list_lol_add_ttl(0, handle, ttl, desc, data);
++}
++
++static inline int rsbac_list_lol_add(rsbac_list_handle_t handle, void *desc, void *data)
++{
++ return rsbac_ta_list_lol_add_ttl(0, handle, RSBAC_LIST_TTL_KEEP,
++ desc, data);
++}
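The sublist helpers above require the top-level item to exist before a subitem can be attached. The sketch below (illustrative only, not part of the patch) shows the usual order, assuming a list of lists registered with data_size and subdata_size of 0 so that NULL data pointers are acceptable.

/* Illustrative sketch, not part of the RSBAC patch. */
static int my_lol_add(rsbac_list_handle_t lol_handle,
		      rsbac_uid_t uid, rsbac_gid_t gid)
{
	int err;

	/* create (or keep) the top-level item first ... */
	err = rsbac_list_lol_add(lol_handle, &uid, NULL);
	if (err)
		return err;
	/* ... then hang the subitem below it */
	return rsbac_list_lol_subadd(lol_handle, &uid, &gid, NULL);
}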
++
++/* remove item */
++int rsbac_ta_list_remove(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle, void *desc);
++
++static inline int rsbac_list_remove(rsbac_list_handle_t handle, void *desc)
++{
++ return rsbac_ta_list_remove(0, handle, desc);
++}
++
++/* remove all items */
++int rsbac_ta_list_remove_all(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle);
++
++static inline int rsbac_list_remove_all(rsbac_list_handle_t handle)
++{
++ return rsbac_ta_list_remove_all(0, handle);
++}
++
++/* remove item from sublist - also succeeds, if item for desc or subdesc does not exist */
++int rsbac_ta_list_lol_subremove(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc, void *subdesc);
++
++static inline int rsbac_list_lol_subremove(rsbac_list_handle_t handle,
++ void *desc, void *subdesc)
++{
++ return rsbac_ta_list_lol_subremove(0, handle, desc, subdesc);
++}
++
++int rsbac_ta_list_lol_subremove_count(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc, u_long count);
++
++
++/* remove same subitem from all sublists */
++int rsbac_ta_list_lol_subremove_from_all(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *subdesc);
++
++static inline int rsbac_list_lol_subremove_from_all(rsbac_list_handle_t handle,
++ void *subdesc)
++{
++ return rsbac_ta_list_lol_subremove_from_all(0, handle, subdesc);
++}
++
++/* remove all subitems from list */
++int rsbac_ta_list_lol_subremove_all(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc);
++
++static inline int rsbac_list_lol_subremove_all(rsbac_list_handle_t handle, void *desc)
++{
++ return rsbac_ta_list_lol_subremove_all(0, handle, desc);
++}
++
++int rsbac_ta_list_lol_remove(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle, void *desc);
++
++static inline int rsbac_list_lol_remove(rsbac_list_handle_t handle, void *desc)
++{
++ return rsbac_ta_list_lol_remove(0, handle, desc);
++}
++
++int rsbac_ta_list_lol_remove_all(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle);
++
++static inline int rsbac_list_lol_remove_all(rsbac_list_handle_t handle)
++{
++ return rsbac_ta_list_lol_remove_all(0, handle);
++}
++
++
++/* get item data */
++/* Item data is copied - we cannot give a pointer, because item could be
++ * removed */
++/* also get time-to-live - after this time in seconds the item gets automatically removed */
++/* both ttl_p and data can be NULL, they are then simply not returned */
++int rsbac_ta_list_get_data_ttl(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ rsbac_time_t * ttl_p,
++ void *desc, void *data);
++
++static inline int rsbac_list_get_data_ttl(rsbac_list_handle_t handle,
++ rsbac_time_t * ttl_p, void *desc, void *data)
++{
++ return rsbac_ta_list_get_data_ttl(0, handle, ttl_p, desc, data);
++}
++
++static inline int rsbac_list_get_data(rsbac_list_handle_t handle, void *desc, void *data)
++{
++ return rsbac_ta_list_get_data_ttl(0, handle, NULL, desc, data);
++}
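A lookup copies the stored data into a caller-supplied buffer, so the caller needs no locking or lifetime handling. A sketch (illustrative only, not part of the patch), again using the hypothetical my_flags_t list and assuming the usual RSBAC convention of 0 for success:

/* Illustrative sketch, not part of the RSBAC patch. */
static __u32 my_get_flags(rsbac_list_handle_t handle, rsbac_uid_t uid)
{
	struct my_flags_t flags;

	if (rsbac_list_get_data(handle, &uid, &flags))
		return 0;	/* not found or error: fall back to default */
	return flags.flags;
}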
++
++/* get data from a subitem */
++/* also get time-to-live - after this time in seconds the item gets automatically removed */
++/* both ttl_p and data can be NULL, they are then simply not returned */
++int rsbac_ta_list_lol_get_subdata_ttl(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ rsbac_time_t * ttl_p,
++ void *desc,
++ void *subdesc, void *subdata);
++
++static inline int rsbac_list_lol_get_subdata_ttl(rsbac_list_handle_t handle,
++ rsbac_time_t * ttl_p,
++ void *desc,
++ void *subdesc, void *subdata)
++{
++ return rsbac_ta_list_lol_get_subdata_ttl(0, handle,
++ ttl_p, desc, subdesc,
++ subdata);
++}
++
++static inline int rsbac_list_lol_get_subdata(rsbac_list_handle_t handle,
++ void *desc, void *subdesc, void *subdata)
++{
++ return rsbac_ta_list_lol_get_subdata_ttl(0, handle, NULL, desc,
++ subdesc, subdata);
++}
++
++/* also get time-to-live - after this time in seconds the item gets automatically removed */
++/* both ttl_p and data can be NULL, they are then simply not returned */
++int rsbac_ta_list_lol_get_data_ttl(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ rsbac_time_t * ttl_p,
++ void *desc, void *data);
++
++static inline int rsbac_list_lol_get_data_ttl(rsbac_list_handle_t handle,
++ rsbac_time_t * ttl_p,
++ void *desc, void *data)
++{
++ return rsbac_ta_list_lol_get_data_ttl(0, handle, ttl_p, desc,
++ data);
++}
++
++static inline int rsbac_list_lol_get_data(rsbac_list_handle_t handle,
++ void *desc, void *data)
++{
++ return rsbac_ta_list_lol_get_data_ttl(0, handle, NULL, desc, data);
++}
++
++/* get item desc by data */
++/* Item desc is copied - we cannot give a pointer, because item could be
++ * removed.
++ * If no compare function is provided (NULL value), memcmp is used.
++ * Note: The data value given here is always used as second parameter to the
++ * compare function, so you can use different types for storage and
++ * lookup.
++ */
++int rsbac_ta_list_get_desc(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc,
++ void *data,
++ rsbac_list_data_compare_function_t compare);
++
++static inline int rsbac_list_get_desc(rsbac_list_handle_t handle,
++ void *desc,
++ void *data,
++ rsbac_list_data_compare_function_t compare)
++{
++ return rsbac_ta_list_get_desc(0, handle, desc, data, compare);
++}
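Reverse lookup passes the stored data as the first argument and the caller's value as the second, so storage and lookup types may differ, as the documentation above notes. A sketch (illustrative only, not part of the patch), continuing the hypothetical my_flags_t example:

/* Illustrative sketch, not part of the RSBAC patch: the compare function
 * returns 0 on a match, like memcmp.
 */
static int my_flags_match(void *stored, void *wanted)
{
	return ((struct my_flags_t *) stored)->flags != *((__u32 *) wanted);
}

static int my_find_uid_by_flags(rsbac_list_handle_t handle,
				__u32 wanted, rsbac_uid_t *uid_p)
{
	return rsbac_list_get_desc(handle, uid_p, &wanted, my_flags_match);
}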
++
++int rsbac_ta_list_get_desc_selector(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc,
++ void *data,
++ rsbac_list_data_compare_function_t compare,
++ rsbac_list_desc_selector_function_t selector,
++ void * param);
++
++int rsbac_ta_list_lol_get_desc(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc,
++ void *data,
++ rsbac_list_data_compare_function_t compare);
++
++static inline int rsbac_list_lol_get_desc(rsbac_list_handle_t handle,
++ void *desc,
++ void *data,
++ rsbac_list_data_compare_function_t compare)
++{
++ return rsbac_ta_list_lol_get_desc(0, handle, desc, data, compare);
++}
++
++int rsbac_ta_list_lol_get_desc_selector(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc,
++ void *data,
++ rsbac_list_data_compare_function_t compare,
++ rsbac_list_desc_selector_function_t selector,
++ void * param);
++
++/* get maximum desc (uses compare function) */
++int rsbac_ta_list_get_max_desc(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle, void *desc);
++
++static inline int rsbac_list_get_max_desc(rsbac_list_handle_t handle, void *desc)
++{
++ return rsbac_ta_list_get_max_desc(0, handle, desc);
++}
++
++int rsbac_ta_list_lol_get_max_subdesc(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc, void *subdesc);
++
++/* get next desc (uses compare function) */
++int rsbac_ta_list_get_next_desc(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *old_desc, void *next_desc);
++
++static inline int rsbac_list_get_next_desc(rsbac_list_handle_t handle, void *old_desc,
++ void *next_desc)
++{
++ return rsbac_ta_list_get_next_desc(0, handle, old_desc, next_desc);
++}
++
++int rsbac_ta_list_get_next_desc_selector(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *old_desc,
++ void *next_desc,
++ rsbac_list_desc_selector_function_t selector,
++ void * param);
++
++int rsbac_ta_list_lol_get_next_desc(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *old_desc, void *next_desc);
++
++static inline int rsbac_list_lol_get_next_desc(rsbac_list_handle_t handle,
++ void *old_desc, void *next_desc)
++{
++ return rsbac_ta_list_lol_get_next_desc(0, handle, old_desc,
++ next_desc);
++}
++
++int rsbac_ta_list_lol_get_next_desc_selector(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *old_desc,
++ void *next_desc,
++ rsbac_list_desc_selector_function_t selector,
++ void * param);
++
++/* does item exist? */
++/* returns TRUE, if item exists, FALSE, if not or error */
++int rsbac_ta_list_exist(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle, void *desc);
++
++static inline int rsbac_list_exist(rsbac_list_handle_t handle, void *desc)
++{
++ return rsbac_ta_list_exist(0, handle, desc);
++}
++
++int rsbac_ta_list_lol_subexist(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc, void *subdesc);
++
++static inline int rsbac_list_lol_subexist(rsbac_list_handle_t handle,
++ void *desc, void *subdesc)
++{
++ return rsbac_ta_list_lol_subexist(0, handle, desc, subdesc);
++}
++
++int rsbac_ta_list_lol_exist(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle, void *desc);
++
++static inline int rsbac_list_lol_exist(rsbac_list_handle_t handle, void *desc)
++{
++ return rsbac_ta_list_lol_exist(0, handle, desc);
++}
++
++/*
++ * Note: The subdesc/data value given here is always used as second parameter to the
++ * given subdesc compare function, so you can use different types for storage and
++ * lookup. If compare is NULL, call is forwarded to rsbac_list_lol_subexist.
++ * Warning: This function does not use the list optimization when searching the sublist!
++ */
++int rsbac_ta_list_lol_subexist_compare(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc,
++ void *subdesc,
++ rsbac_list_compare_function_t
++ compare);
++
++static inline int rsbac_list_lol_subexist_compare(rsbac_list_handle_t handle,
++ void *desc,
++ void *subdesc,
++ rsbac_list_compare_function_t compare)
++{
++ return rsbac_ta_list_lol_subexist_compare(0, handle,
++ desc, subdesc, compare);
++}
++
++/* count number of elements */
++/* returns number of elements or negative error code */
++long rsbac_ta_list_count(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle);
++
++static inline long rsbac_list_count(rsbac_list_handle_t handle)
++{
++ return rsbac_ta_list_count(0, handle);
++}
++
++long rsbac_ta_list_lol_subcount(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle, void *desc);
++
++static inline long rsbac_list_lol_subcount(rsbac_list_handle_t handle, void *desc)
++{
++ return rsbac_ta_list_lol_subcount(0, handle, desc);
++}
++
++long rsbac_ta_list_lol_all_subcount(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle);
++
++static inline long rsbac_list_lol_all_subcount(rsbac_list_handle_t handle)
++{
++ return rsbac_ta_list_lol_all_subcount(0, handle);
++}
++
++long rsbac_ta_list_lol_count(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle);
++
++static inline long rsbac_list_lol_count(rsbac_list_handle_t handle)
++{
++ return rsbac_ta_list_lol_count(0, handle);
++}
++
++
++/* Get array of all descriptors */
++/* Returns number of elements or negative error code */
++/* If return value > 0, *array_p contains a pointer to a rsbac_kmalloc'd array
++ of descs, otherwise *array_p is set to NULL. If *array_p has been set,
++ caller must call rsbac_kfree(*array_p) after use! */
++
++long rsbac_ta_list_get_all_desc(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void **array_p);
++
++static inline long rsbac_list_get_all_desc(rsbac_list_handle_t handle, void **array_p)
++{
++ return rsbac_ta_list_get_all_desc(0, handle, array_p);
++}
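Enumeration hands out a rsbac_kmalloc'd snapshot that the caller must free, as stated above. A sketch (illustrative only, not part of the patch), assuming rsbac_uid_t values fit into an unsigned int for printing:

/* Illustrative sketch, not part of the RSBAC patch. */
static void my_dump_uids(rsbac_list_handle_t handle)
{
	rsbac_uid_t *uids = NULL;
	long count;
	long i;

	count = rsbac_list_get_all_desc(handle, (void **) &uids);
	if (count <= 0)
		return;		/* empty list or error */
	for (i = 0; i < count; i++)
		printk(KERN_INFO "RSBAC: uid %u\n", (unsigned int) uids[i]);
	rsbac_kfree(uids);	/* mandatory, see comment above */
}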
++
++long rsbac_ta_list_get_all_desc_selector (
++ rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle, void **array_p,
++ rsbac_list_desc_selector_function_t selector,
++ void * param);
++
++long rsbac_ta_list_lol_get_all_subdesc_ttl(rsbac_list_ta_number_t
++ ta_number,
++ rsbac_list_handle_t handle,
++ void *desc, void **array_p,
++ rsbac_time_t ** ttl_array_p);
++
++static inline long rsbac_list_lol_get_all_subdesc(rsbac_list_handle_t handle,
++ void *desc,
++ void **array_p)
++{
++ return rsbac_ta_list_lol_get_all_subdesc_ttl(0, handle,
++ desc, array_p, NULL);
++}
++
++static inline long rsbac_list_lol_get_all_subdesc_ttl(rsbac_list_handle_t handle,
++ void *desc,
++ void **array_p,
++ rsbac_time_t ** ttl_array_p)
++{
++ return rsbac_ta_list_lol_get_all_subdesc_ttl(0,
++ handle,
++ desc,
++ array_p, ttl_array_p);
++}
++
++long rsbac_ta_list_lol_get_all_desc(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void **array_p);
++
++static inline long rsbac_list_lol_get_all_desc(rsbac_list_handle_t handle,
++ void **array_p)
++{
++ return rsbac_ta_list_lol_get_all_desc(0, handle, array_p);
++}
++
++long rsbac_ta_list_lol_get_all_desc_selector (
++ rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void **array_p,
++ rsbac_list_desc_selector_function_t selector,
++ void * param);
++
++/* Get array of all data items */
++/* Returns number of elements or negative error code */
++/* If return value > 0, *array_p contains a pointer to a rsbac_kmalloc'd array
++   of data items, otherwise *array_p is set to NULL. If *array_p has been set,
++ caller must call rsbac_kfree(*array_p) after use! */
++
++long rsbac_ta_list_get_all_data(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void **array_p);
++
++static inline long rsbac_list_get_all_data(rsbac_list_handle_t handle, void **array_p)
++{
++ return rsbac_ta_list_get_all_data(0, handle, array_p);
++}
++
++long rsbac_ta_list_lol_get_all_subdata(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc, void **array_p);
++
++static inline long rsbac_list_lol_get_all_subdata(rsbac_list_handle_t handle,
++ void *desc, void **array_p)
++{
++ return rsbac_ta_list_lol_get_all_subdata(0, handle, desc, array_p);
++}
++
++long rsbac_ta_list_lol_get_all_data(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void **array_p);
++
++static inline long rsbac_list_lol_get_all_data(rsbac_list_handle_t handle,
++ void **array_p)
++{
++ return rsbac_ta_list_lol_get_all_data(0, handle, array_p);
++}
++
++/* Get item size */
++
++int rsbac_list_get_item_size(rsbac_list_handle_t handle);
++
++int rsbac_list_lol_get_subitem_size(rsbac_list_handle_t handle);
++
++int rsbac_list_lol_get_item_size(rsbac_list_handle_t handle);
++
++/* Get array of all items */
++/* Returns number of items or negative error code */
++/* If return value > 0, *array_p contains a pointer to a rsbac_kmalloc'd array
++ of items, where desc and data are placed directly behind each other.
++ If *array_p has been set, caller must call rsbac_kfree(*array_p) after use! */
++
++long rsbac_ta_list_get_all_items_ttl(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void **array_p,
++ rsbac_time_t ** ttl_array_p);
++
++static inline long rsbac_list_get_all_items_ttl(rsbac_list_handle_t handle,
++ void **array_p,
++ rsbac_time_t ** ttl_array_p)
++{
++ return rsbac_ta_list_get_all_items_ttl(0, handle, array_p,
++ ttl_array_p);
++}
++
++static inline long rsbac_list_get_all_items(rsbac_list_handle_t handle, void **array_p)
++{
++ return rsbac_ta_list_get_all_items_ttl(0, handle, array_p, NULL);
++}
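The packed item arrays place each descriptor directly in front of its data; the stride can be taken from rsbac_list_get_item_size(). A sketch (illustrative only, not part of the patch), assuming the item size is simply desc_size plus data_size with no extra padding and reusing the hypothetical my_flags_t list:

/* Illustrative sketch, not part of the RSBAC patch. */
static void my_walk_items(rsbac_list_handle_t handle)
{
	char *array = NULL;
	int item_size = rsbac_list_get_item_size(handle);
	long count;
	long i;

	count = rsbac_list_get_all_items(handle, (void **) &array);
	if (count <= 0)
		return;
	for (i = 0; i < count; i++) {
		rsbac_uid_t *uid_p = (rsbac_uid_t *) (array + i * item_size);
		struct my_flags_t *data_p = (struct my_flags_t *)
			(array + i * item_size + sizeof(rsbac_uid_t));

		printk(KERN_INFO "RSBAC: uid %u flags 0x%x\n",
		       (unsigned int) *uid_p, data_p->flags);
	}
	rsbac_kfree(array);
}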
++
++long rsbac_ta_list_lol_get_all_subitems_ttl(rsbac_list_ta_number_t
++ ta_number,
++ rsbac_list_handle_t handle,
++ void *desc, void **array_p,
++ rsbac_time_t ** ttl_array_p);
++
++static inline long rsbac_list_lol_get_all_subitems_ttl(rsbac_list_handle_t handle,
++ void *desc,
++ void **array_p,
++ rsbac_time_t ** ttl_array_p)
++{
++ return rsbac_ta_list_lol_get_all_subitems_ttl(0, handle, desc,
++ array_p,
++ ttl_array_p);
++}
++
++static inline long rsbac_list_lol_get_all_subitems(rsbac_list_handle_t handle,
++ void *desc, void **array_p)
++{
++ return rsbac_ta_list_lol_get_all_subitems_ttl(0, handle, desc,
++ array_p, NULL);
++}
++
++long rsbac_ta_list_lol_get_all_items(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void **array_p);
++
++static inline long rsbac_list_lol_get_all_items(rsbac_list_handle_t handle,
++ void **array_p)
++{
++ return rsbac_ta_list_lol_get_all_items(0, handle, array_p);
++}
++
++/* Copy a complete list
++ * Both lists must have been registered with same desc and data sizes,
++ * nr_hashes may differ. Old target list items are removed before copying.
++ * If ta_number is set and transactions are enabled, the complete
++ * target list content is in the same transaction. Forgetting the
++ * transaction will restore the old to_list.
++ */
++
++long rsbac_list_copy(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t from_handle,
++ rsbac_list_handle_t to_handle);
++
++long rsbac_list_lol_copy(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t from_handle,
++ rsbac_list_handle_t to_handle);
++
++/* Get the current number of hashes - may vary when resized */
++long rsbac_list_get_nr_hashes(rsbac_list_handle_t handle);
++
++long rsbac_list_lol_get_nr_hashes(rsbac_list_handle_t handle);
++
++#endif
++/* end of lists.h */
+diff --git a/include/rsbac/log_cap.h b/include/rsbac/log_cap.h
+new file mode 100644
+index 0000000..82195bf
+--- /dev/null
++++ b/include/rsbac/log_cap.h
+@@ -0,0 +1,14 @@
++/********************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 2005: */
++/* Amon Ott <ao@rsbac.org> */
++/* Missing Cap logging */
++/* Last modified: 27/May/2005 */
++/********************************** */
++
++#ifndef __RSBAC_LOG_CAP_H
++#define __RSBAC_LOG_CAP_H
++
++void rsbac_log_missing_cap(int cap);
++
++#endif
+diff --git a/include/rsbac/lsm.h b/include/rsbac/lsm.h
+new file mode 100644
+index 0000000..1136dc0
+--- /dev/null
++++ b/include/rsbac/lsm.h
+@@ -0,0 +1,16 @@
++/************************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 2003: Amon Ott */
++/* file system */
++/* Helper functions for all parts */
++/* Last modified: 28/Jul/2003 */
++/************************************* */
++
++#ifndef __RSBAC_LSM_H
++#define __RSBAC_LSM_H
++
++#include <linux/security.h>
++
++int rsbac_lsm_register(void);
++
++#endif
+diff --git a/include/rsbac/mac.h b/include/rsbac/mac.h
+new file mode 100644
+index 0000000..dbcf8a3
+--- /dev/null
++++ b/include/rsbac/mac.h
+@@ -0,0 +1,134 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2005: */
++/* Amon Ott <ao@rsbac.org> */
++/* API: Data structures */
++/* and functions for Access */
++/* Control Information / MAC */
++/* Last modified: 09/Feb/2005 */
++/************************************ */
++
++#ifndef __RSBAC_MAC_H
++#define __RSBAC_MAC_H
++
++#include <linux/init.h>
++#include <rsbac/types.h>
++
++/***************************************************/
++/* General Prototypes */
++/***************************************************/
++
++/* All functions return 0, if no error occurred, and a negative error code */
++/* otherwise. The error codes are defined in rsbac_error.h. */
++
++/****************************************************************************/
++/* Initialization, including ACI restoration for all mounted devices from */
++/* disk. After this call, all ACI is kept in memory for performance reasons,*/
++/* but user and file/dir object ACI are written to disk on every change. */
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++extern int rsbac_init_mac(void);
++#else
++extern int rsbac_init_mac(void) __init;
++#endif
++
++/* mounting and umounting */
++int rsbac_mount_mac(kdev_t kdev);
++int rsbac_umount_mac(kdev_t kdev);
++
++/* Some information about the current status is also available */
++extern int rsbac_stats_mac(void);
++
++/* Status checking */
++extern int rsbac_check_mac(int correct, int check_inode);
++
++/* RSBAC attribute saving to disk can be triggered from outside
++ * param: call lock_kernel() before writing?
++ */
++#if defined(CONFIG_RSBAC_MAINT) || defined(CONFIG_RSBAC_AUTO_WRITE)
++extern int rsbac_write_mac(rsbac_boolean_t);
++#endif /* CONFIG_RSBAC_AUTO_WRITE */
++
++/************************************************* */
++/* Access functions */
++/************************************************* */
++
++/* All these procedures handle the semaphores to protect the targets during */
++/* access. */
++/* Trying to access a never created or removed set returns an error! */
++
++/* rsbac_mac_add_to_truset */
++/* Add a set member to a set sublist. Set behaviour: also returns success, */
++/* if member was already in set! */
++
++int rsbac_mac_add_to_p_truset(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ rsbac_uid_t member,
++ rsbac_time_t ttl);
++
++int rsbac_mac_add_to_f_truset(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_mac_file_t file,
++ rsbac_uid_t member,
++ rsbac_time_t ttl);
++
++/* rsbac_mac_remove_from_truset */
++/* Remove a set member from a sublist. Set behaviour: Returns no error, if */
++/* member is not in list. */
++
++int rsbac_mac_remove_from_p_truset(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ rsbac_uid_t member);
++
++int rsbac_mac_remove_from_f_truset(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_mac_file_t file,
++ rsbac_uid_t member);
++
++/* rsbac_mac_clear_truset */
++/* Remove all set members from a sublist. Set behaviour: Returns no error, */
++/* if list is empty. */
++
++int rsbac_mac_clear_p_truset(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid);
++
++int rsbac_mac_clear_f_truset(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_mac_file_t file);
++
++/* rsbac_mac_truset_member */
++/* Return truth value, whether member is in set */
++
++rsbac_boolean_t rsbac_mac_p_truset_member(rsbac_pid_t pid,
++ rsbac_uid_t member);
++
++/* rsbac_mac_remove_truset */
++/* Remove a full set. For cleanup, if object is deleted. */
++/* To empty an existing set use rsbac_mac_clear_truset. */
++
++int rsbac_mac_remove_p_trusets(rsbac_pid_t pid);
++
++int rsbac_mac_remove_f_trusets(rsbac_mac_file_t file);
++
++int rsbac_mac_copy_fp_truset(rsbac_mac_file_t file,
++ rsbac_pid_t p_tru_set_id);
++
++int rsbac_mac_copy_pp_truset(rsbac_pid_t old_p_set_id,
++ rsbac_pid_t new_p_set_id);
++
++int rsbac_mac_get_f_trulist(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_mac_file_t file,
++ rsbac_uid_t **trulist_p,
++ rsbac_time_t **ttllist_p);
++
++int rsbac_mac_get_p_trulist(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ rsbac_uid_t **trulist_p,
++ rsbac_time_t **ttllist_p);
++
++#endif
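As an illustration only (not part of the patch), granting a user temporary membership in a process trusted-user set and checking it afterwards might look like the sketch below. The one hour ttl is arbitrary, and ta_number 0 means the change is made outside any transaction.

/* Illustrative sketch, not part of the RSBAC patch. */
static int my_trust_user(rsbac_pid_t pid, rsbac_uid_t uid)
{
	int err;

	err = rsbac_mac_add_to_p_truset(0 /* no transaction */, pid, uid,
					3600 /* trusted for one hour */);
	if (err)
		return err;
	if (!rsbac_mac_p_truset_member(pid, uid))
		return -ENOENT;		/* should not happen */
	return 0;
}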
+diff --git a/include/rsbac/mac_data_structures.h b/include/rsbac/mac_data_structures.h
+new file mode 100644
+index 0000000..a06584b
+--- /dev/null
++++ b/include/rsbac/mac_data_structures.h
+@@ -0,0 +1,54 @@
++/**************************************/
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2006: */
++/* Amon Ott <ao@rsbac.org> */
++/* Data structures / MAC */
++/* Last modified: 12/Jan/2006 */
++/**************************************/
++
++#ifndef __RSBAC_MAC_DATA_STRUC_H
++#define __RSBAC_MAC_DATA_STRUC_H
++
++#include <linux/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/types.h>
++
++/**********************************************/
++/* Capability lists */
++/**********************************************/
++
++#define RSBAC_MAC_LIST_KEY 626281
++
++#define RSBAC_MAC_P_LIST_VERSION 1
++#define RSBAC_MAC_P_LIST_NAME "macptru"
++
++#define RSBAC_MAC_FD_FILENAME "macfdtru"
++#define RSBAC_MAC_FD_OLD_FILENAME "macfdtru."
++#define RSBAC_MAC_NR_TRU_FD_LISTS 4
++#define RSBAC_MAC_FD_LIST_VERSION 2
++#define RSBAC_MAC_FD_OLD_LIST_VERSION 1
++
++/* The list of devices is also a doubly linked list, so we define list */
++/* items and a list head. */
++
++struct rsbac_mac_device_list_item_t {
++ kdev_t id; /* set to 0 before deletion */
++ u_int mount_count;
++ rsbac_list_handle_t handle;
++ struct rsbac_mac_device_list_item_t *prev;
++ struct rsbac_mac_device_list_item_t *next;
++};
++
++/* To provide consistency we use spinlocks for all list accesses. The */
++/* 'curr' entry is used to avoid repeated lookups for the same item. */
++
++struct rsbac_mac_device_list_head_t {
++ struct rsbac_mac_device_list_item_t *head;
++ struct rsbac_mac_device_list_item_t *tail;
++ struct rsbac_mac_device_list_item_t *curr;
++ spinlock_t lock;
++ struct lock_class_key lock_class;
++ u_int count;
++};
++
++#endif
+diff --git a/include/rsbac/net_getname.h b/include/rsbac/net_getname.h
+new file mode 100644
+index 0000000..e0c7a70
+--- /dev/null
++++ b/include/rsbac/net_getname.h
+@@ -0,0 +1,50 @@
++/********************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2003: */
++/* Amon Ott <ao@rsbac.org> */
++/* Getname functions for CAP module */
++/* Last modified: 22/Dec/2003 */
++/********************************** */
++
++#ifndef __RSBAC_NET_GETNAME_H
++#define __RSBAC_NET_GETNAME_H
++
++#include <rsbac/types.h>
++
++#define RSBAC_NET_PROTO_MAX 256
++#define RSBAC_NET_TYPE_MAX 11
++
++#ifdef __KERNEL__
++extern int rsbac_net_str_to_inet(char * str, __u32 * addr);
++#else
++#ifndef AF_MAX
++#define AF_MAX 32
++#endif
++#endif
++
++extern char * rsbac_get_net_temp_syscall_name(char * name,
++ enum rsbac_net_temp_syscall_t value);
++
++extern char * rsbac_get_net_family_name(char * name,
++ u_int value);
++
++extern char * rsbac_get_net_netlink_family_name(char * name,
++ u_int value);
++
++extern char * rsbac_get_net_protocol_name(char * name,
++ u_int value);
++
++extern char * rsbac_get_net_type_name(char * name,
++ u_int value);
++
++#ifndef __KERNEL__
++enum rsbac_net_temp_syscall_t rsbac_get_net_temp_syscall_nr(const char * name);
++
++int rsbac_get_net_family_nr(const char * name);
++
++int rsbac_get_net_protocol_nr(const char * name);
++
++int rsbac_get_net_type_nr(const char * name);
++#endif
++
++#endif
+diff --git a/include/rsbac/network.h b/include/rsbac/network.h
+new file mode 100644
+index 0000000..fbf3a89
+--- /dev/null
++++ b/include/rsbac/network.h
+@@ -0,0 +1,91 @@
++/************************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2004: */
++/* Amon Ott <ao@rsbac.org> */
++/* Network helper functions */
++/* Last modified: 07/Dec/2004 */
++/************************************* */
++
++#ifndef __RSBAC_NETWORK_H
++#define __RSBAC_NETWORK_H
++
++#include <rsbac/types.h>
++#include <rsbac/network_types.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/un.h>
++#include <net/sock.h>
++#include <net/inet_sock.h>
++#include <net/af_unix.h>
++#include <net/route.h>
++
++/* functions */
++
++int rsbac_ta_net_list_all_netdev(rsbac_list_ta_number_t ta_number, rsbac_netdev_id_t ** id_pp);
++
++static inline int rsbac_net_list_all_netdev(rsbac_netdev_id_t ** id_pp)
++ {
++ return rsbac_ta_net_list_all_netdev(0, id_pp);
++ }
++
++//__u32 rsbac_net_make_mask_u32(__u8 valid_bits);
++
++int rsbac_net_compare_data(void * data1, void * data2);
++
++int rsbac_net_get_id(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_net_description_t * desc_p,
++ rsbac_net_temp_id_t * id_p);
++
++// void rsbac_net_obj_cleanup(rsbac_net_obj_id_t netobj);
++
++int rsbac_ta_net_lookup_templates(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_net_obj_desc_t * netobj_p,
++ rsbac_net_temp_id_t * local_temp_p,
++ rsbac_net_temp_id_t * remote_temp_p);
++
++static inline int rsbac_net_lookup_templates(
++ struct rsbac_net_obj_desc_t * netobj_p,
++ rsbac_net_temp_id_t * local_temp_p,
++ rsbac_net_temp_id_t * remote_temp_p)
++ {
++ return rsbac_ta_net_lookup_templates(0, netobj_p, local_temp_p, remote_temp_p);
++ }
++
++/* Does template exist? Returns TRUE if yes, FALSE if no */
++int rsbac_ta_net_template_exists(rsbac_list_ta_number_t ta_number,
++ rsbac_net_temp_id_t id);
++
++int rsbac_ta_net_template(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_net_temp_syscall_t call,
++ rsbac_net_temp_id_t id,
++ union rsbac_net_temp_syscall_data_t * data_p);
++
++static inline int rsbac_net_template(enum rsbac_net_temp_syscall_t call,
++ rsbac_net_temp_id_t id,
++ union rsbac_net_temp_syscall_data_t * data_p)
++ {
++ return rsbac_ta_net_template(0, call, id, data_p);
++ }
++
++int rsbac_ta_net_list_all_template(rsbac_list_ta_number_t ta_number,
++ rsbac_net_temp_id_t ** id_pp);
++
++static inline int rsbac_net_list_all_template(rsbac_net_temp_id_t ** id_pp)
++ {
++ return rsbac_ta_net_list_all_template(0, id_pp);
++ }
++
++int rsbac_ta_net_template_exist(rsbac_list_ta_number_t ta_number, rsbac_net_temp_id_t temp);
++
++static inline int rsbac_net_template_exist(rsbac_net_temp_id_t temp)
++ {
++ return rsbac_ta_net_template_exist(0, temp);
++ }
++
++/* Whether request should be checked for remote endpoint */
++int rsbac_net_remote_request(enum rsbac_adf_request_t request);
++
++#endif
+diff --git a/include/rsbac/network_types.h b/include/rsbac/network_types.h
+new file mode 100644
+index 0000000..c8a0ab8
+--- /dev/null
++++ b/include/rsbac/network_types.h
+@@ -0,0 +1,154 @@
++/************************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2009: */
++/* Amon Ott <ao@rsbac.org> */
++/* Network access control data structs */
++/* Last modified: 03/Feb/2009 */
++/************************************* */
++
++#ifndef __RSBAC_NETWORK_TYPES_H
++#define __RSBAC_NETWORK_TYPES_H
++
++#define RSBAC_NET_ANY 0
++#define RSBAC_NET_NETLINK_PROTO_ANY 255
++
++#define RSBAC_NET_UNKNOWN 0
++
++#define RSBAC_NET_TEMP_VERSION 2
++#define RSBAC_NET_TEMP_OLD_VERSION 1
++#define RSBAC_NET_TEMP_KEY 0x815affe
++#define RSBAC_NET_TEMP_NAME "nettemp"
++
++typedef __u32 rsbac_net_temp_id_t;
++
++#define RSBAC_NET_MAX_ADDRESS_LEN 128
++#define RSBAC_NET_TEMP_NAMELEN 16
++
++#define RSBAC_NET_MAX_PORT 65535
++
++#define RSBAC_NET_NR_INET_ADDR 25
++#define RSBAC_NET_NR_PORTS 10
++
++struct rsbac_net_temp_port_range_t {
++ __u16 min;
++ __u16 max;
++};
++
++struct rsbac_net_temp_inet_addr_t {
++ __u32 addr[RSBAC_NET_NR_INET_ADDR];
++ __u8 valid_bits[RSBAC_NET_NR_INET_ADDR];
++ __u8 nr_addr;
++};
++
++struct rsbac_net_temp_other_addr_t {
++ char addr[RSBAC_NET_MAX_ADDRESS_LEN];
++ __u8 valid_len;
++};
++
++struct rsbac_net_temp_ports_t {
++ struct rsbac_net_temp_port_range_t ports[RSBAC_NET_NR_PORTS];
++ __u8 nr_ports;
++};
++
++union rsbac_net_temp_addr_t {
++ struct rsbac_net_temp_inet_addr_t inet;
++ struct rsbac_net_temp_other_addr_t other;
++};
++
++struct rsbac_net_temp_data_t {
++ /* must be first for alignment */
++ union rsbac_net_temp_addr_t address;
++ __u8 address_family;
++ __u8 type;
++ __u8 protocol;
++ rsbac_netdev_id_t netdev;
++ struct rsbac_net_temp_ports_t ports; /* for those address families that support them */
++ char name[RSBAC_NET_TEMP_NAMELEN];
++};
++
++struct rsbac_net_temp_old_data_t {
++ /* must be first for alignment */
++ char address[RSBAC_NET_MAX_ADDRESS_LEN];
++ __u8 address_family;
++ __u8 valid_len; /* Bytes for AF_UNIX, Bits for all others */
++ __u8 type;
++ __u8 protocol;
++ rsbac_netdev_id_t netdev;
++ __u16 min_port; /* for those address families that support them */
++ __u16 max_port;
++ char name[RSBAC_NET_TEMP_NAMELEN];
++};
++
++#define RSBAC_NET_TEMP_LNET_ID 100101
++#define RSBAC_NET_TEMP_LNET_ADDRESS "127.0.0.0"
++#define RSBAC_NET_TEMP_LAN_ID 100102
++#define RSBAC_NET_TEMP_LAN_ADDRESS "192.168.0.0"
++#define RSBAC_NET_TEMP_AUTO_ID 100105
++#define RSBAC_NET_TEMP_AUTO_ADDRESS "0.0.0.0"
++#define RSBAC_NET_TEMP_INET_ID 100110
++#define RSBAC_NET_TEMP_ALL_ID ((rsbac_net_temp_id_t) -1)
++
++/* default templates moved into aci_data_structures.c */
++
++struct rsbac_net_description_t {
++ __u8 address_family;
++ void *address;
++ __u8 address_len;
++ __u8 type;
++ __u8 protocol;
++ rsbac_netdev_id_t netdev;
++ __u16 port;
++};
++
++enum rsbac_net_temp_syscall_t {
++ NTS_new_template,
++ NTS_copy_template,
++ NTS_delete_template,
++ NTS_check_id,
++ NTS_get_address,
++ NTS_get_address_family,
++ NTS_get_type,
++ NTS_get_protocol,
++ NTS_get_netdev,
++ NTS_get_ports,
++ NTS_get_name,
++ NTS_set_address,
++ NTS_set_address_family,
++ NTS_set_type,
++ NTS_set_protocol,
++ NTS_set_netdev,
++ NTS_set_ports,
++ NTS_set_name,
++ NTS_none
++};
++
++union rsbac_net_temp_syscall_data_t {
++ rsbac_net_temp_id_t id;
++ union rsbac_net_temp_addr_t address;
++ __u8 address_family;
++ __u8 type;
++ __u8 protocol;
++ rsbac_netdev_id_t netdev;
++ struct rsbac_net_temp_ports_t ports; /* for those address families that support them */
++ char name[RSBAC_NET_TEMP_NAMELEN];
++};
++
++/*
++ * Display an IP address in readable format.
++ */
++
++#ifndef NIPQUAD
++#define NIPQUAD(addr) \
++ ((unsigned char *)&addr)[0], \
++ ((unsigned char *)&addr)[1], \
++ ((unsigned char *)&addr)[2], \
++ ((unsigned char *)&addr)[3]
++
++#define HIPQUAD(addr) \
++ ((unsigned char *)&addr)[3], \
++ ((unsigned char *)&addr)[2], \
++ ((unsigned char *)&addr)[1], \
++ ((unsigned char *)&addr)[0]
++#endif
++
++#endif
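[Editorial sketch, not part of the patch: the NIPQUAD/HIPQUAD helpers above expand to four byte-sized values, so they pair with a "%u.%u.%u.%u" format string. printk() stands in for whatever logging a real caller would use.]

#include <linux/kernel.h>
#include <rsbac/network_types.h>

/* Print an IPv4 address stored in network byte order. */
static void example_print_addr(__u32 addr)
{
	printk(KERN_INFO "peer address is %u.%u.%u.%u\n", NIPQUAD(addr));
}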
+diff --git a/include/rsbac/pax.h b/include/rsbac/pax.h
+new file mode 100644
+index 0000000..a4bacc7
+--- /dev/null
++++ b/include/rsbac/pax.h
+@@ -0,0 +1,21 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2004: Amon Ott */
++/* API: */
++/* Functions for Access */
++/* Control Information / PAX */
++/* Last modified: 12/Jan/2004 */
++/************************************ */
++
++#ifndef __RSBAC_PAX_H
++#define __RSBAC_PAX_H
++
++#include <rsbac/types.h>
++
++/***************************************************/
++/* General Prototypes */
++/***************************************************/
++
++void rsbac_pax_set_flags_func(struct linux_binprm * bprm);
++
++#endif
+diff --git a/include/rsbac/pax_getname.h b/include/rsbac/pax_getname.h
+new file mode 100644
+index 0000000..e6611d7
+--- /dev/null
++++ b/include/rsbac/pax_getname.h
+@@ -0,0 +1,20 @@
++/********************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2004: */
++/* Amon Ott <ao@rsbac.org> */
++/* Getname functions for PAX module */
++/* Last modified: 06/Jan/2004 */
++/********************************** */
++
++#ifndef __RSBAC_PAX_GETNAME_H
++#define __RSBAC_PAX_GETNAME_H
++
++#include <rsbac/types.h>
++
++char * pax_print_flags(char * string, rsbac_pax_flags_t flags);
++
++#ifndef __KERNEL__
++rsbac_pax_flags_t pax_strtoflags(char * string, rsbac_pax_flags_t init_flags);
++#endif
++
++#endif
+diff --git a/include/rsbac/pm.h b/include/rsbac/pm.h
+new file mode 100644
+index 0000000..b251290
+--- /dev/null
++++ b/include/rsbac/pm.h
+@@ -0,0 +1,232 @@
++/******************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2005: */
++/* Amon Ott <ao@rsbac.org> */
++/* API: Data structures */
++/* and functions for Access */
++/* Control Information / PM */
++/* Last modified: 09/Feb/2005 */
++/******************************* */
++
++#ifndef __RSBAC_PM_H
++#define __RSBAC_PM_H
++
++#include <linux/init.h>
++#include <rsbac/pm_types.h>
++
++/***************************************************/
++/* General Prototypes */
++/***************************************************/
++
++/* All functions return 0, if no error occurred, and a negative error code */
++/* otherwise. The error codes are defined in rsbac_error.h. */
++
++/****************************************************************************/
++/* Initialization, including ACI restoration for all mounted devices from */
++/* disk. After this call, all ACI is kept in memory for performance reasons,*/
++/* but user and file/dir object ACI are written to disk on every change. */
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++extern int rsbac_init_pm(void);
++#else
++extern int rsbac_init_pm(void) __init;
++#endif
++
++/* Some information about the current status is also available */
++
++extern int rsbac_stats_pm(void);
++
++/* RSBAC attribute saving to disk can be triggered from outside
++ * param: call lock_kernel() before writing?
++ */
++
++#ifdef CONFIG_RSBAC_AUTO_WRITE
++extern int rsbac_write_pm(rsbac_boolean_t);
++#endif /* CONFIG_RSBAC_AUTO_WRITE */
++
++/************************************************* */
++/* Access functions */
++/************************************************* */
++
++/***********************/
++/* Helper lists / sets */
++/***********************/
++
++/* All these procedures handle the semaphores to protect the targets during */
++/* access. */
++/* Trying to access a set that was never created, or was removed, returns an error! */
++
++/* rsbac_pm_add_to_set */
++/* Add a set member to a set sublist. Set behaviour: also returns success, */
++/* if member was already in set! */
++
++int rsbac_pm_add_to_set(
++ rsbac_list_ta_number_t,
++ enum rsbac_pm_set_t, /* set type */
++ union rsbac_pm_set_id_t, /* set id */
++ union rsbac_pm_set_member_t); /* set member to add */
++
++
++/* rsbac_pm_remove_from_set */
++/* Remove a set member from a sublist. Set behaviour: no error is returned */
++/* if the member is not in the list. */
++
++int rsbac_pm_remove_from_set(
++ rsbac_list_ta_number_t,
++ enum rsbac_pm_set_t, /* see above */
++ union rsbac_pm_set_id_t,
++ union rsbac_pm_set_member_t);
++
++
++/* rsbac_pm_clear_set */
++/* Remove all members from a set. Set behaviour: Returns no error, */
++/* if list is empty. */
++
++int rsbac_pm_clear_set(
++ rsbac_list_ta_number_t,
++ enum rsbac_pm_set_t, /* set type */
++ union rsbac_pm_set_id_t); /* set id */
++
++
++/* rsbac_pm_set_member */
++/* Return truth value, whether member is in set */
++
++rsbac_boolean_t rsbac_pm_set_member(
++ rsbac_list_ta_number_t,
++ enum rsbac_pm_set_t, /* set type */
++ union rsbac_pm_set_id_t, /* set id */
++ union rsbac_pm_set_member_t); /* member */
++
++
++/* rsbac_pm_pp_subset */
++/* Return truth value, whether pp_set is subset of in_pp_set */
++
++rsbac_boolean_t rsbac_pm_pp_subset(
++ rsbac_pm_pp_set_id_t,
++ rsbac_pm_in_pp_set_id_t);
++
++
++/* rsbac_pm_pp_superset */
++/* Return truth value, whether pp_set is superset of out_pp_set */
++
++rsbac_boolean_t rsbac_pm_pp_superset(
++ rsbac_pm_pp_set_id_t,
++ rsbac_pm_out_pp_set_id_t);
++
++
++/* rsbac_pm_pp_only */
++/* Return truth value: TRUE if out_pp_set contains no item other than purpose */
++
++rsbac_boolean_t rsbac_pm_pp_only(
++ rsbac_pm_purpose_id_t,
++ rsbac_pm_out_pp_set_id_t);
++
++
++/* rsbac_pm_pp_intersec */
++/* Create intersection of pp_set and in_pp_set in in_pp_set */
++/* If in_pp_set does not exist, it is created with all members of pp_set */
++/* If pp_set does not exist or one of them is invalid, an error is returned */
++
++int rsbac_pm_pp_intersec (rsbac_pm_pp_set_id_t,
++ rsbac_pm_in_pp_set_id_t);
++
++
++/* rsbac_pm_pp_union */
++/* Create union of pp_set and out_pp_set in out_pp_set */
++/* If out_pp_set does not exist, it is created with all members of pp_set */
++/* If pp_set does not exist or one of them is invalid, an error is returned */
++
++int rsbac_pm_pp_union (rsbac_pm_pp_set_id_t,
++ rsbac_pm_out_pp_set_id_t);
++
++
++/* rsbac_pm_create_set */
++/* Create a new set of given type, using id id. Using any other set */
++/* function for a set id without creating this set returns an error. */
++/* To empty an existing set use rsbac_pm_clear_set. */
++
++int rsbac_pm_create_set(
++ rsbac_list_ta_number_t,
++ enum rsbac_pm_set_t, /* set type */
++ union rsbac_pm_set_id_t); /* set id */
++
++
++/* rsbac_pm_set_exist */
++/* Return truth value whether set exists, returns FALSE for invalid */
++/* values. */
++
++rsbac_boolean_t rsbac_pm_set_exist(
++ rsbac_list_ta_number_t,
++ enum rsbac_pm_set_t, /* set type */
++ union rsbac_pm_set_id_t); /* set id */
++
++
++/* rsbac_pm_remove_set */
++/* Remove a full set. After this call the given id can only be used for */
++/* creating a new set, anything else returns an error. */
++/* To empty an existing set use rsbac_pm_clear_set. */
++
++int rsbac_pm_remove_set(
++ rsbac_list_ta_number_t,
++ enum rsbac_pm_set_t, /* set type */
++ union rsbac_pm_set_id_t); /* set id */
++
++
++/**************/
++/* Main lists */
++/**************/
++
++/* rsbac_pm_get_data() and rsbac_pm_set_data() change single data values. */
++/* rsbac_pm_add_target() adds a new list item and sets all data values as */
++/* given. rsbac_pm_remove_target() removes an item. */
++
++/* A rsbac_pm_[sg]et_data() call for a non-existing target will return an */
++/* error. */
++/* Invalid parameter combinations return an error. */
++
++/* All these procedures handle the semaphores to protect the targets during */
++/* access. */
++
++int rsbac_pm_get_data(
++ rsbac_list_ta_number_t,
++ enum rsbac_pm_target_t, /* list type */
++ union rsbac_pm_target_id_t, /* item id in list */
++ enum rsbac_pm_data_t, /* data item */
++ union rsbac_pm_data_value_t *); /* for return value */
++
++
++int rsbac_pm_get_all_data(
++ rsbac_list_ta_number_t,
++ enum rsbac_pm_target_t, /* list type */
++ union rsbac_pm_target_id_t, /* item id in list */
++ union rsbac_pm_all_data_value_t *); /* for return value */
++
++
++rsbac_boolean_t rsbac_pm_exists(
++ rsbac_list_ta_number_t,
++ enum rsbac_pm_target_t, /* list type */
++ union rsbac_pm_target_id_t); /* item id in list */
++
++
++int rsbac_pm_set_data(
++ rsbac_list_ta_number_t,
++ enum rsbac_pm_target_t, /* list type */
++ union rsbac_pm_target_id_t, /* item id in list */
++ enum rsbac_pm_data_t, /* data item */
++ union rsbac_pm_data_value_t); /* data value */
++
++
++int rsbac_pm_add_target(
++ rsbac_list_ta_number_t,
++ enum rsbac_pm_target_t, /* list type */
++ union rsbac_pm_all_data_value_t); /* values for all */
++ /* data items, */
++ /* incl. item id */
++
++
++int rsbac_pm_remove_target(
++ rsbac_list_ta_number_t,
++ enum rsbac_pm_target_t, /* list type */
++ union rsbac_pm_target_id_t); /* item id in list */
++
++#endif
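[Editorial sketch, not part of the patch: how the PM set helpers declared above are intended to compose on the kernel side. It assumes a ta_number of 0 means "no transaction", as in the inline wrappers in network.h, and uses PS_TASK plus the id/member unions from pm_types.h.]

#include <rsbac/pm.h>

static int example_pm_task_set(rsbac_pm_task_set_id_t set_id,
			       rsbac_pm_task_id_t task)
{
	union rsbac_pm_set_id_t sid;
	union rsbac_pm_set_member_t member;
	int err;

	sid.task_set = set_id;
	member.task = task;

	/* A set must be created before any other call may use its id. */
	err = rsbac_pm_create_set(0, PS_TASK, sid);
	if (err)
		return err;
	/* Adding an already present member still succeeds (set semantics). */
	err = rsbac_pm_add_to_set(0, PS_TASK, sid, member);
	if (err)
		return err;
	if (!rsbac_pm_set_member(0, PS_TASK, sid, member))
		return -1;
	/* Removing the set frees its id for a later rsbac_pm_create_set(). */
	return rsbac_pm_remove_set(0, PS_TASK, sid);
}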
+diff --git a/include/rsbac/pm_data_structures.h b/include/rsbac/pm_data_structures.h
+new file mode 100644
+index 0000000..8179994
+--- /dev/null
++++ b/include/rsbac/pm_data_structures.h
+@@ -0,0 +1,77 @@
++/**************************************/
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2009: Amon Ott */
++/* Data structures / PM */
++/* Last modified: 26/Mar/2009 */
++/**************************************/
++
++#ifndef __RSBAC_PM_DATA_STRUC_H
++#define __RSBAC_PM_DATA_STRUC_H
++
++#include <linux/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/types.h>
++#include <rsbac/pm_types.h>
++
++#define RSBAC_PM_TASK_SET_LIST_NAME "pm_ta_s"
++#define RSBAC_PM_TASK_SET_LIST_PROC_NAME "task_set"
++
++#define RSBAC_PM_TP_SET_LIST_NAME "pm_tp_s"
++#define RSBAC_PM_TP_SET_LIST_PROC_NAME "tp_set"
++
++#define RSBAC_PM_RU_SET_LIST_NAME "pm_ru_s"
++#define RSBAC_PM_RU_SET_LIST_PROC_NAME "responsible_user_set"
++
++#define RSBAC_PM_PP_SET_LIST_NAME "pm_pp_s"
++#define RSBAC_PM_PP_SET_LIST_PROC_NAME "purpose_set"
++
++#define RSBAC_PM_IN_PP_SET_LIST_NAME "input_pp_set"
++#define RSBAC_PM_IN_PP_SET_LIST_PROC_NAME "input_purpose_set"
++
++#define RSBAC_PM_OUT_PP_SET_LIST_NAME "output_pp_set"
++#define RSBAC_PM_OUT_PP_SET_LIST_PROC_NAME "output_purpose_set"
++
++
++#define RSBAC_PM_TASK_LIST_NAME "pm_task"
++#define RSBAC_PM_TASK_LIST_PROC_NAME "task"
++
++#define RSBAC_PM_CLASS_LIST_NAME "pm_clas"
++#define RSBAC_PM_CLASS_LIST_PROC_NAME "object_class"
++
++#define RSBAC_PM_NA_LIST_NAME "pm_na"
++#define RSBAC_PM_NA_LIST_PROC_NAME "necessary_accesses"
++
++#define RSBAC_PM_CS_LIST_NAME "pm_cs"
++#define RSBAC_PM_CS_LIST_PROC_NAME "consent"
++
++#define RSBAC_PM_TP_LIST_NAME "pm_tp"
++#define RSBAC_PM_TP_LIST_PROC_NAME "tp"
++
++#define RSBAC_PM_PP_LIST_NAME "pm_pp"
++#define RSBAC_PM_PP_LIST_PROC_NAME "purpose"
++
++#define RSBAC_PM_TKT_LIST_NAME "pm_tkt"
++#define RSBAC_PM_TKT_LIST_PROC_NAME "ticket"
++
++
++#define RSBAC_PM_NO_VERSION 1
++
++#define RSBAC_PM_TASK_SET_LIST_VERSION 1
++#define RSBAC_PM_TP_SET_LIST_VERSION 1
++#define RSBAC_PM_RU_SET_LIST_VERSION 2
++#define RSBAC_PM_PP_SET_LIST_VERSION 1
++
++#define RSBAC_PM_TASK_LIST_VERSION 1
++#define RSBAC_PM_CLASS_LIST_VERSION 1
++#define RSBAC_PM_NA_LIST_VERSION 1
++#define RSBAC_PM_CS_LIST_VERSION 1
++#define RSBAC_PM_TP_LIST_VERSION 1
++#define RSBAC_PM_PP_LIST_VERSION 1
++#define RSBAC_PM_TKT_LIST_VERSION 2
++
++#define RSBAC_PM_LIST_KEY 19990820
++
++#define RSBAC_PM_PROC_STATS_NAME "stats_pm"
++#define RSBAC_PM_PROC_DIR_NAME "pm"
++
++#endif
+diff --git a/include/rsbac/pm_getname.h b/include/rsbac/pm_getname.h
+new file mode 100644
+index 0000000..af667f8
+--- /dev/null
++++ b/include/rsbac/pm_getname.h
+@@ -0,0 +1,81 @@
++/******************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999: Amon Ott */
++/* Getname functions for PM parts */
++/* Last modified: 08/Feb/99 */
++/******************************** */
++
++#ifndef __RSBAC_PM_GETNAME_H
++#define __RSBAC_PM_GETNAME_H
++
++#include <rsbac/types.h>
++
++#ifndef NULL
++#define NULL ((void *) 0)
++#endif
++
++#include <rsbac/helpers.h>
++#include <rsbac/error.h>
++
++char * get_pm_list_name(char *,
++ enum rsbac_pm_list_t);
++
++enum rsbac_pm_list_t get_pm_list_nr(const char *);
++
++char * get_pm_all_list_name(char *,
++ enum rsbac_pm_all_list_t);
++
++enum rsbac_pm_all_list_t get_pm_all_list_nr(const char *);
++
++char * get_pm_role_name(char *,
++ enum rsbac_pm_role_t);
++
++enum rsbac_pm_role_t get_pm_role_nr(const char *);
++
++char * get_pm_process_type_name(char *,
++ enum rsbac_pm_process_type_t);
++
++enum rsbac_pm_process_type_t get_pm_process_type_nr(const char *);
++
++char * get_pm_object_type_name(char *,
++ enum rsbac_pm_object_type_t);
++
++enum rsbac_pm_object_type_t get_pm_object_type_nr(const char *);
++
++#ifdef __KERNEL__
++char * get_pm_set_name(char *,
++ enum rsbac_pm_set_t);
++
++enum rsbac_pm_set_t get_pm_set_nr(const char *);
++
++char * get_pm_target_name(char *,
++ enum rsbac_pm_target_t);
++
++enum rsbac_pm_target_t get_pm_target_nr(const char *);
++
++char * get_pm_data_name(char *,
++ enum rsbac_pm_data_t);
++
++enum rsbac_pm_data_t get_pm_data_nr(const char *);
++#endif
++
++char * get_pm_function_type_name(char *,
++ enum rsbac_pm_function_type_t);
++
++enum rsbac_pm_function_type_t get_pm_function_type_nr(const char *);
++
++#ifndef __KERNEL__
++char * get_pm_function_param(char *,
++ enum rsbac_pm_function_type_t);
++
++char * get_pm_tkt_function_param(char *,
++ enum rsbac_pm_tkt_function_type_t);
++#endif
++
++char * get_pm_tkt_function_type_name(char *,
++ enum rsbac_pm_tkt_function_type_t);
++
++enum rsbac_pm_tkt_function_type_t
++ get_pm_tkt_function_type_nr(const char *);
++
++#endif
+diff --git a/include/rsbac/pm_ticket.h b/include/rsbac/pm_ticket.h
+new file mode 100644
+index 0000000..8989398
+--- /dev/null
++++ b/include/rsbac/pm_ticket.h
+@@ -0,0 +1,409 @@
++/******************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2012: */
++/* Amon Ott <ao@rsbac.org> */
++/* API: Data types for privacy */
++/* model calls / tickets */
++/* Last modified: 07/May/2012 */
++/******************************* */
++
++#ifndef __RSBAC_PM_TICKET_H
++#define __RSBAC_PM_TICKET_H
++
++#include <linux/types.h>
++
++enum rsbac_pm_tkt_function_type_t {/* issued by data_prot_officer */
++ PTF_add_na, PTF_delete_na, PTF_add_task,
++ PTF_delete_task, PTF_add_object_class,
++ PTF_delete_object_class,
++ PTF_add_authorized_tp,
++ PTF_delete_authorized_tp,
++ PTF_add_consent, PTF_delete_consent,
++ PTF_add_purpose, PTF_delete_purpose,
++ PTF_add_responsible_user,
++ PTF_delete_responsible_user,
++ PTF_delete_user_aci,
++ PTF_set_role,
++ PTF_set_object_class,
++ PTF_switch_pm,
++ PTF_switch_auth,
++ PTF_set_device_object_type,
++ PTF_set_auth_may_setuid,
++ PTF_set_auth_may_set_cap,
++ /* issued by user also */
++ PTF_add_authorized_task,
++ PTF_delete_authorized_task,
++ /* never issued, internal */
++ PTF_none};
++
++struct rsbac_pm_add_na_t
++ {
++ rsbac_pm_task_id_t task;
++ rsbac_pm_object_class_id_t object_class;
++ rsbac_pm_tp_id_t tp;
++ rsbac_pm_accesses_t accesses;
++ };
++
++struct rsbac_pm_delete_na_t
++ {
++ rsbac_pm_task_id_t task;
++ rsbac_pm_object_class_id_t object_class;
++ rsbac_pm_tp_id_t tp;
++ rsbac_pm_accesses_t accesses;
++ };
++
++struct rsbac_pm_add_task_t
++ {
++ rsbac_pm_task_id_t id;
++ rsbac_pm_purpose_id_t purpose;
++ };
++
++struct rsbac_pm_delete_task_t
++ {
++ rsbac_pm_task_id_t id;
++ };
++
++#ifdef __KERNEL__
++struct rsbac_pm_tkt_add_object_class_t
++ {
++ rsbac_pm_object_class_id_t id;
++ rsbac_pm_pp_set_id_t pp_set;
++ };
++#endif
++
++struct rsbac_pm_add_object_class_t
++ {
++ rsbac_pm_object_class_id_t id;
++ struct rsbac_pm_purpose_list_item_t __user * pp_list_p;
++ };
++
++struct rsbac_pm_delete_object_class_t
++ {
++ rsbac_pm_object_class_id_t id;
++ };
++
++struct rsbac_pm_add_authorized_tp_t
++ {
++ rsbac_pm_task_id_t task;
++ rsbac_pm_tp_id_t tp;
++ };
++
++struct rsbac_pm_delete_authorized_tp_t
++ {
++ rsbac_pm_task_id_t task;
++ rsbac_pm_tp_id_t tp;
++ };
++
++#ifdef __KERNEL__
++struct rsbac_pm_tkt_add_consent_t
++ {
++ struct rsbac_fs_file_t file;
++ rsbac_pm_purpose_id_t purpose;
++ };
++#endif
++
++struct rsbac_pm_add_consent_t
++ {
++ char __user * filename;
++ rsbac_pm_purpose_id_t purpose;
++ };
++
++#ifdef __KERNEL__
++struct rsbac_pm_tkt_delete_consent_t
++ {
++ struct rsbac_fs_file_t file;
++ rsbac_pm_purpose_id_t purpose;
++ };
++#endif
++
++struct rsbac_pm_delete_consent_t
++ {
++ char __user * filename;
++ rsbac_pm_purpose_id_t purpose;
++ };
++
++struct rsbac_pm_add_purpose_t
++ {
++ rsbac_pm_purpose_id_t id;
++ rsbac_pm_object_class_id_t def_class;
++ };
++
++struct rsbac_pm_delete_purpose_t
++ {
++ rsbac_pm_purpose_id_t id;
++ };
++
++struct rsbac_pm_add_responsible_user_t
++ {
++ rsbac_uid_t user;
++ rsbac_pm_task_id_t task;
++ };
++
++struct rsbac_pm_delete_responsible_user_t
++ {
++ rsbac_uid_t user;
++ rsbac_pm_task_id_t task;
++ };
++
++struct rsbac_pm_delete_user_aci_t
++ {
++ rsbac_uid_t id;
++ };
++
++struct rsbac_pm_set_role_t
++ {
++ rsbac_uid_t user;
++ enum rsbac_pm_role_t role;
++ };
++
++#ifdef __KERNEL__
++struct rsbac_pm_tkt_set_object_class_t
++ {
++ struct rsbac_fs_file_t file;
++ rsbac_pm_object_class_id_t object_class;
++ };
++#endif
++
++struct rsbac_pm_set_object_class_t
++ {
++ char __user * filename;
++ rsbac_pm_object_class_id_t object_class;
++ };
++
++struct rsbac_pm_switch_pm_t
++ {
++ rsbac_boolean_t value;
++ };
++
++struct rsbac_pm_switch_auth_t
++ {
++ rsbac_boolean_t value;
++ };
++
++#ifdef __KERNEL__
++struct rsbac_pm_tkt_set_device_object_type_t
++ {
++ struct rsbac_dev_desc_t dev;
++ enum rsbac_pm_object_type_t object_type;
++ rsbac_pm_object_class_id_t object_class;
++ };
++#endif
++
++struct rsbac_pm_set_device_object_type_t
++ {
++ char __user * filename;
++ enum rsbac_pm_object_type_t object_type;
++ rsbac_pm_object_class_id_t object_class;
++ };
++
++#ifdef __KERNEL__
++struct rsbac_pm_tkt_set_auth_may_setuid_t
++ {
++ struct rsbac_fs_file_t file;
++ rsbac_boolean_t value;
++ };
++#endif
++
++struct rsbac_pm_set_auth_may_setuid_t
++ {
++ char __user * filename;
++ rsbac_boolean_t value;
++ };
++
++#ifdef __KERNEL__
++struct rsbac_pm_tkt_set_auth_may_set_cap_t
++ {
++ struct rsbac_fs_file_t file;
++ rsbac_boolean_t value;
++ };
++#endif
++
++struct rsbac_pm_set_auth_may_set_cap_t
++ {
++ char __user * filename;
++ rsbac_boolean_t value;
++ };
++
++/***************/
++
++struct rsbac_pm_add_authorized_task_t
++ {
++ rsbac_uid_t user;
++ rsbac_pm_task_id_t task;
++ };
++
++struct rsbac_pm_delete_authorized_task_t
++ {
++ rsbac_uid_t user;
++ rsbac_pm_task_id_t task;
++ };
++
++/***************/
++
++struct rsbac_pm_create_tp_t
++ {
++ rsbac_pm_tp_id_t id;
++ };
++
++struct rsbac_pm_delete_tp_t
++ {
++ rsbac_pm_tp_id_t id;
++ };
++
++struct rsbac_pm_set_tp_t
++ {
++ char __user * filename;
++ rsbac_pm_tp_id_t tp;
++ };
++
++/***************/
++
++#ifdef __KERNEL__
++union rsbac_pm_tkt_internal_function_param_t
++ {
++ struct rsbac_pm_add_na_t add_na;
++ struct rsbac_pm_delete_na_t delete_na;
++ struct rsbac_pm_add_task_t add_task;
++ struct rsbac_pm_delete_task_t delete_task;
++ struct rsbac_pm_tkt_add_object_class_t tkt_add_object_class;
++ struct rsbac_pm_delete_object_class_t delete_object_class;
++ struct rsbac_pm_add_authorized_tp_t add_authorized_tp;
++ struct rsbac_pm_delete_authorized_tp_t delete_authorized_tp;
++ struct rsbac_pm_tkt_add_consent_t tkt_add_consent;
++ struct rsbac_pm_tkt_delete_consent_t tkt_delete_consent;
++ struct rsbac_pm_add_purpose_t add_purpose;
++ struct rsbac_pm_delete_purpose_t delete_purpose;
++ struct rsbac_pm_add_responsible_user_t add_responsible_user;
++ struct rsbac_pm_delete_responsible_user_t delete_responsible_user;
++ struct rsbac_pm_delete_user_aci_t delete_user_aci;
++ struct rsbac_pm_set_role_t set_role;
++ struct rsbac_pm_tkt_set_object_class_t tkt_set_object_class;
++ struct rsbac_pm_switch_pm_t switch_pm;
++ struct rsbac_pm_switch_pm_t switch_auth;
++ struct rsbac_pm_tkt_set_device_object_type_t tkt_set_device_object_type;
++ struct rsbac_pm_tkt_set_auth_may_setuid_t tkt_set_auth_may_setuid;
++ struct rsbac_pm_tkt_set_auth_may_set_cap_t tkt_set_auth_may_set_cap;
++ struct rsbac_pm_add_authorized_task_t add_authorized_task;
++ struct rsbac_pm_delete_authorized_task_t delete_authorized_task;
++ int dummy;
++ };
++#endif
++
++union rsbac_pm_tkt_function_param_t
++ {
++ struct rsbac_pm_add_na_t add_na;
++ struct rsbac_pm_delete_na_t delete_na;
++ struct rsbac_pm_add_task_t add_task;
++ struct rsbac_pm_delete_task_t delete_task;
++ struct rsbac_pm_add_object_class_t add_object_class;
++ struct rsbac_pm_delete_object_class_t delete_object_class;
++ struct rsbac_pm_add_authorized_tp_t add_authorized_tp;
++ struct rsbac_pm_delete_authorized_tp_t delete_authorized_tp;
++ struct rsbac_pm_add_consent_t add_consent;
++ struct rsbac_pm_delete_consent_t delete_consent;
++ struct rsbac_pm_add_purpose_t add_purpose;
++ struct rsbac_pm_delete_purpose_t delete_purpose;
++ struct rsbac_pm_add_responsible_user_t add_responsible_user;
++ struct rsbac_pm_delete_responsible_user_t delete_responsible_user;
++ struct rsbac_pm_delete_user_aci_t delete_user_aci;
++ struct rsbac_pm_set_role_t set_role;
++ struct rsbac_pm_set_object_class_t set_object_class;
++ struct rsbac_pm_switch_pm_t switch_pm;
++ struct rsbac_pm_switch_pm_t switch_auth;
++ struct rsbac_pm_set_device_object_type_t set_device_object_type;
++ struct rsbac_pm_set_auth_may_setuid_t set_auth_may_setuid;
++ struct rsbac_pm_set_auth_may_set_cap_t set_auth_may_set_cap;
++ struct rsbac_pm_add_authorized_task_t add_authorized_task;
++ struct rsbac_pm_delete_authorized_task_t delete_authorized_task;
++ int dummy;
++ };
++
++/***********************/
++
++enum rsbac_pm_function_type_t {/* tkt issued by data_prot_officer, */
++ /* called by security_officer */
++ PF_add_na, PF_delete_na, PF_add_task,
++ PF_delete_task, PF_add_object_class,
++ PF_delete_object_class,
++ PF_add_authorized_tp,
++ PF_delete_authorized_tp,
++ PF_add_consent, PF_delete_consent,
++ PF_add_purpose, PF_delete_purpose,
++ PF_add_responsible_user,
++ PF_delete_responsible_user,
++ PF_delete_user_aci,
++ PF_set_role,
++ PF_set_object_class,
++ PF_switch_pm,
++ PF_switch_auth,
++ PF_set_device_object_type,
++ PF_set_auth_may_setuid,
++ PF_set_auth_may_set_cap,
++ /* tkt issued by data_prot_officer and */
++ /* resp. user, called by security_officer */
++ PF_add_authorized_task,
++ PF_delete_authorized_task,
++ /* called by tp_manager, no ticket */
++ PF_create_tp, PF_delete_tp, PF_set_tp,
++ /* called by data_prot_officer and */
++ /* responsible user */
++ PF_create_ticket,
++ /* never to be called, internal */
++ PF_none};
++
++struct rsbac_pm_create_ticket_t
++ {
++ rsbac_pm_tkt_id_t id;
++ rsbac_pm_time_stamp_t valid_for; /* validity in secs */
++ enum rsbac_pm_tkt_function_type_t function_type;
++ union rsbac_pm_tkt_function_param_t function_param;
++ };
++
++union rsbac_pm_function_param_t
++ {
++ struct rsbac_pm_add_na_t add_na;
++ struct rsbac_pm_delete_na_t delete_na;
++ struct rsbac_pm_add_task_t add_task;
++ struct rsbac_pm_delete_task_t delete_task;
++ struct rsbac_pm_add_object_class_t add_object_class;
++ struct rsbac_pm_delete_object_class_t delete_object_class;
++ struct rsbac_pm_add_authorized_tp_t add_authorized_tp;
++ struct rsbac_pm_delete_authorized_tp_t delete_authorized_tp;
++ struct rsbac_pm_add_consent_t add_consent;
++ struct rsbac_pm_delete_consent_t delete_consent;
++ struct rsbac_pm_add_purpose_t add_purpose;
++ struct rsbac_pm_delete_purpose_t delete_purpose;
++ struct rsbac_pm_add_responsible_user_t add_responsible_user;
++ struct rsbac_pm_delete_responsible_user_t delete_responsible_user;
++ struct rsbac_pm_delete_user_aci_t delete_user_aci;
++ struct rsbac_pm_set_role_t set_role;
++ struct rsbac_pm_set_object_class_t set_object_class;
++ struct rsbac_pm_switch_pm_t switch_pm;
++ struct rsbac_pm_switch_pm_t switch_auth;
++ struct rsbac_pm_set_device_object_type_t set_device_object_type;
++ struct rsbac_pm_set_auth_may_setuid_t set_auth_may_setuid;
++ struct rsbac_pm_set_auth_may_set_cap_t set_auth_may_set_cap;
++ struct rsbac_pm_add_authorized_task_t add_authorized_task;
++ struct rsbac_pm_delete_authorized_task_t delete_authorized_task;
++ struct rsbac_pm_create_tp_t create_tp;
++ struct rsbac_pm_delete_tp_t delete_tp;
++ struct rsbac_pm_set_tp_t set_tp;
++ struct rsbac_pm_create_ticket_t create_ticket;
++ int dummy;
++ };
++
++
++/*******************/
++
++#ifdef __KERNEL__
++struct rsbac_pm_tkt_data_t
++ {
++ rsbac_pm_tkt_id_t id;
++ rsbac_uid_t issuer;
++ enum rsbac_pm_tkt_function_type_t function_type;
++ union rsbac_pm_tkt_internal_function_param_t function_param;
++ rsbac_pm_time_stamp_t valid_until;
++ };
++#endif
++
++#endif
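[Editorial sketch, not part of the patch: how the ticket parameter union above nests, filled for an "add purpose" ticket. The concrete id values are arbitrary placeholders.]

#include <rsbac/pm_types.h>

static void example_fill_add_purpose_ticket(union rsbac_pm_function_param_t *param)
{
	param->create_ticket.id = 1;			/* ticket id */
	param->create_ticket.valid_for = 3600;		/* validity in seconds */
	param->create_ticket.function_type = PTF_add_purpose;
	param->create_ticket.function_param.add_purpose.id = 42;
	param->create_ticket.function_param.add_purpose.def_class = 0;
}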
+diff --git a/include/rsbac/pm_types.h b/include/rsbac/pm_types.h
+new file mode 100644
+index 0000000..ee70a67
+--- /dev/null
++++ b/include/rsbac/pm_types.h
+@@ -0,0 +1,240 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2001: */
++/* Amon Ott <ao@rsbac.org> */
++/* API: Data types for privacy */
++/* model calls */
++/* Last modified: 06/Sep/2001 */
++/************************************ */
++
++#ifndef __RSBAC_PM_TYPES_H
++#define __RSBAC_PM_TYPES_H
++
++#include <linux/types.h>
++
++/* Basic types */
++
++typedef __u32 rsbac_pm_task_id_t;
++typedef __u32 rsbac_pm_task_set_id_t;
++typedef __u32 rsbac_pm_tp_id_t; /* transformation procedure id */
++typedef __u32 rsbac_pm_tp_set_id_t; /* transformation procedure set id */
++typedef __u32 rsbac_pm_ru_set_id_t; /* responsible user set id */
++typedef __u32 rsbac_pm_purpose_id_t;
++typedef __s32 rsbac_pm_pp_set_id_t; /* purpose set id */
++typedef rsbac_pid_t rsbac_pm_in_pp_set_id_t; /* input purpose set id */
++typedef rsbac_pm_in_pp_set_id_t rsbac_pm_out_pp_set_id_t;
++ /* output purpose set id */
++typedef __u32 rsbac_pm_object_class_id_t;
++typedef __u32 rsbac_pm_tkt_id_t; /* ticket id */
++typedef rsbac_time_t rsbac_pm_time_stamp_t; /* for ticket time stamps, same as */
++ /* parameter for sys_time */
++typedef __u8 rsbac_pm_accesses_t; /* for necessary accesses */
++#define RSBAC_PM_A_READ 1
++#define RSBAC_PM_A_WRITE 2
++#define RSBAC_PM_A_DELETE 4
++#define RSBAC_PM_A_CREATE 8
++#define RSBAC_PM_A_APPEND 16
++#define RSBAC_PM_A_ALL 31
++#define RSBAC_PM_A_WRITING (RSBAC_PM_A_WRITE | RSBAC_PM_A_DELETE \
++ | RSBAC_PM_A_CREATE | RSBAC_PM_A_APPEND)
++#define RSBAC_PM_A_WRITE_TO_FILE (RSBAC_PM_A_WRITE | RSBAC_PM_A_APPEND)
++
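[Editorial sketch, not part of the patch: the RSBAC_PM_A_* values above are single bits, so sets of necessary accesses are built and tested with bitwise operators.]

/* True if the requested accesses include any writing kind of access. */
static int example_needs_writing(rsbac_pm_accesses_t acc)
{
	return (acc & RSBAC_PM_A_WRITING) != 0;
}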
++#define RSBAC_PM_ROOT_TASK_SET_ID (rsbac_pm_task_set_id_t) -1
++#define RSBAC_PM_IPC_OBJECT_CLASS_ID (rsbac_pm_object_class_id_t) 60000
++#define RSBAC_PM_DEV_OBJECT_CLASS_ID (rsbac_pm_object_class_id_t) 60001
++
++/* enum attributes */
++
++enum rsbac_pm_list_t {PL_task,PL_class,PL_na,PL_cs,PL_tp,PL_pp,PL_tkt,PL_none};
++
++enum rsbac_pm_all_list_t {PA_task,PA_class,PA_na,PA_cs,PA_tp,PA_pp,PA_tkt,
++ PA_task_set,PA_tp_set,PA_ru_set,PA_pp_set,
++ PA_in_pp_set,PA_out_pp_set,PA_none};
++
++enum rsbac_pm_role_t {PR_user, PR_security_officer,
++ PR_data_protection_officer,
++ PR_tp_manager, PR_system_admin,
++ PR_none};
++typedef rsbac_enum_t rsbac_pm_role_int_t;
++
++enum rsbac_pm_process_type_t {PP_none, PP_TP};
++typedef rsbac_enum_t rsbac_pm_process_type_int_t;
++
++enum rsbac_pm_object_type_t {PO_none, PO_TP, PO_personal_data,
++ PO_non_personal_data, PO_ipc, PO_dir};
++typedef rsbac_enum_t rsbac_pm_object_type_int_t;
++
++typedef rsbac_pm_process_type_int_t rsbac_pm_program_type_int_t;
++
++#ifdef __KERNEL__
++enum rsbac_pm_set_t {PS_TASK,PS_TP,PS_RU,PS_PP,PS_IN_PP,PS_OUT_PP,PS_NONE};
++
++/* unions */
++
++union rsbac_pm_set_id_t
++ {
++ rsbac_pm_task_set_id_t task_set;
++ rsbac_pm_tp_set_id_t tp_set;
++ rsbac_pm_ru_set_id_t ru_set;
++ rsbac_pm_pp_set_id_t pp_set;
++ rsbac_pm_in_pp_set_id_t in_pp_set;
++ rsbac_pm_out_pp_set_id_t out_pp_set;
++ };
++
++union rsbac_pm_set_member_t
++ {
++ rsbac_pm_task_id_t task;
++ rsbac_pm_tp_id_t tp;
++ rsbac_uid_t ru;
++ rsbac_pm_purpose_id_t pp;
++ };
++
++struct rsbac_pm_na_id_t
++ {
++ rsbac_pm_task_id_t task;
++ rsbac_pm_object_class_id_t object_class;
++ rsbac_pm_tp_id_t tp;
++ };
++
++struct rsbac_pm_cs_id_t
++ {
++ rsbac_pm_purpose_id_t purpose;
++ struct rsbac_fs_file_t file;
++ };
++
++/*****************/
++/* api types */
++/*****************/
++
++struct rsbac_pm_task_data_t
++ {
++ rsbac_pm_task_id_t id;
++ rsbac_pm_purpose_id_t purpose;
++ rsbac_pm_tp_set_id_t tp_set;
++ rsbac_pm_ru_set_id_t ru_set;
++ };
++
++struct rsbac_pm_class_data_t
++ {
++ rsbac_pm_object_class_id_t id;
++ rsbac_pm_pp_set_id_t pp_set;
++ };
++
++struct rsbac_pm_na_data_t
++ {
++ rsbac_pm_task_id_t task;
++ rsbac_pm_object_class_id_t object_class;
++ rsbac_pm_tp_id_t tp;
++ rsbac_pm_accesses_t accesses;
++ };
++
++struct rsbac_pm_cs_data_t
++ {
++ rsbac_pm_purpose_id_t purpose;
++ struct rsbac_fs_file_t file;
++ };
++
++struct rsbac_pm_tp_data_t
++ {
++ rsbac_pm_tp_id_t id;
++ };
++
++struct rsbac_pm_pp_data_t
++ {
++ rsbac_pm_purpose_id_t id;
++ rsbac_pm_object_class_id_t def_class;
++ };
++#endif /* __KERNEL__ */
++
++struct rsbac_pm_purpose_list_item_t
++ {
++ rsbac_pm_purpose_id_t id;
++ struct rsbac_pm_purpose_list_item_t * next;
++ };
++
++/******* ticket ********/
++
++#include <rsbac/pm_ticket.h>
++
++#ifdef __KERNEL__
++/****************************************************************************/
++/* For all pm lists all manipulation is encapsulated by the function calls */
++/* rsbac_pm_set_data, rsbac_pm_get_data and rsbac_pm_remove_target. */
++
++/* For those, we declare some extra types to specify target and attribute. */
++
++enum rsbac_pm_target_t {PMT_TASK,
++ PMT_CLASS,
++ PMT_NA,
++ PMT_CS,
++ PMT_TP,
++ PMT_PP,
++ PMT_TKT,
++ PMT_NONE};
++typedef rsbac_enum_t rsbac_pm_target_int_t;
++
++union rsbac_pm_target_id_t
++ {
++ rsbac_pm_task_id_t task;
++ rsbac_pm_object_class_id_t object_class;
++ struct rsbac_pm_na_id_t na;
++ struct rsbac_pm_cs_id_t cs;
++ rsbac_pm_tp_id_t tp;
++ rsbac_pm_purpose_id_t pp;
++ rsbac_pm_tkt_id_t tkt;
++ int dummy;
++ };
++
++enum rsbac_pm_data_t
++ { PD_purpose,
++ PD_tp_set,
++ PD_ru_set,
++ PD_pp_set,
++ PD_task,
++ PD_class,
++ PD_tp,
++ PD_accesses,
++ PD_file,
++ PD_issuer,
++ PD_function_type,
++ PD_function_param,
++ PD_valid_until,
++ PD_def_class,
++ PD_none
++ };
++typedef rsbac_enum_t rsbac_pm_data_int_t;
++
++union rsbac_pm_data_value_t
++ {
++ rsbac_pm_purpose_id_t purpose;
++ rsbac_pm_tp_set_id_t tp_set;
++ rsbac_pm_ru_set_id_t ru_set;
++ rsbac_pm_pp_set_id_t pp_set;
++ rsbac_pm_task_id_t task;
++ rsbac_pm_object_class_id_t object_class;
++ rsbac_pm_tp_id_t tp;
++ rsbac_pm_accesses_t accesses;
++ struct rsbac_fs_file_t file;
++ rsbac_uid_t issuer;
++ enum rsbac_pm_tkt_function_type_t function_type;
++ union rsbac_pm_tkt_internal_function_param_t function_param;
++ rsbac_pm_time_stamp_t valid_until;
++ rsbac_pm_object_class_id_t def_class;
++ int dummy;
++ };
++
++
++union rsbac_pm_all_data_value_t
++ {
++ struct rsbac_pm_task_data_t task;
++ struct rsbac_pm_class_data_t object_class;
++ struct rsbac_pm_na_data_t na;
++ struct rsbac_pm_cs_data_t cs;
++ struct rsbac_pm_tp_data_t tp;
++ struct rsbac_pm_pp_data_t pp;
++ struct rsbac_pm_tkt_data_t tkt;
++ int dummy;
++ };
++#endif
++
++#endif
+diff --git a/include/rsbac/proc_fs.h b/include/rsbac/proc_fs.h
+new file mode 100644
+index 0000000..8b1e39b
+--- /dev/null
++++ b/include/rsbac/proc_fs.h
+@@ -0,0 +1,20 @@
++/************************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2001: Amon Ott */
++/* proc fs functions */
++/* Last modified: 17/Jul/2001 */
++/************************************* */
++
++#ifndef __RSBAC_PROC_FS_H
++#define __RSBAC_PROC_FS_H
++
++#include <linux/proc_fs.h>
++
++#ifndef PROC_BLOCK_SIZE
++#define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */
++#endif
++
++extern struct proc_dir_entry * proc_rsbac_root_p;
++extern struct proc_dir_entry * proc_rsbac_backup_p;
++
++#endif
+diff --git a/include/rsbac/rc.h b/include/rsbac/rc.h
+new file mode 100644
+index 0000000..8efdbbb
+--- /dev/null
++++ b/include/rsbac/rc.h
+@@ -0,0 +1,104 @@
++/******************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2009: */
++/* Amon Ott <ao@rsbac.org> */
++/* API: Data structures */
++/* and functions for Access */
++/* Control Information / RC */
++/* Last modified: 15/Oct/2009 */
++/******************************* */
++
++#ifndef __RSBAC_RC_H
++#define __RSBAC_RC_H
++
++#include <linux/init.h>
++#include <rsbac/rc_types.h>
++
++/***************************************************/
++/* General Prototypes */
++/***************************************************/
++
++/* All functions return 0, if no error occurred, and a negative error code */
++/* otherwise. The error codes are defined in rsbac_error.h. */
++
++/****************************************************************************/
++/* Initialization, including ACI restoration for all mounted devices from */
++/* disk. After this call, all ACI is kept in memory for performance reasons.*/
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++int rsbac_init_rc(void);
++#else
++int rsbac_init_rc(void) __init;
++#endif
++
++/* Find the boot role */
++#ifdef CONFIG_RSBAC_INIT_DELAY
++int rsbac_rc_get_boot_role(rsbac_rc_role_id_t * role_p);
++#else
++int rsbac_rc_get_boot_role(rsbac_rc_role_id_t * role_p) __init;
++#endif
++
++/* Some information about the current status is also available */
++
++int rsbac_stats_rc(void);
++
++/************************************************* */
++/* Access functions */
++/************************************************* */
++
++/* All these procedures handle the spinlocks to protect the targets during */
++/* access. */
++
++/* All roles are always there, so instead of creation, we supply a copy for */
++/* initialization. The well-defined 'General User' role is always available to copy. */
++int rsbac_rc_copy_role(rsbac_list_ta_number_t ta_number,
++ rsbac_rc_role_id_t from_role,
++ rsbac_rc_role_id_t to_role);
++
++int rsbac_rc_copy_type(rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ rsbac_rc_type_id_t from_type,
++ rsbac_rc_type_id_t to_type);
++
++/* Getting item values */
++int rsbac_rc_get_item(rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ union rsbac_rc_target_id_t tid,
++ union rsbac_rc_target_id_t subtid,
++ enum rsbac_rc_item_t item,
++ union rsbac_rc_item_value_t *value_p,
++ rsbac_time_t * ttl_p);
++
++/* Setting item values */
++int rsbac_rc_set_item(rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ union rsbac_rc_target_id_t tid,
++ union rsbac_rc_target_id_t subtid,
++ enum rsbac_rc_item_t item,
++ union rsbac_rc_item_value_t value, rsbac_time_t ttl);
++
++/* Checking role's compatibility */
++rsbac_boolean_t rsbac_rc_check_comp(rsbac_rc_role_id_t role,
++ union rsbac_rc_target_id_t subtid,
++ enum rsbac_rc_item_t item,
++ enum rsbac_rc_special_rights_t right);
++
++/* Checking whether role exists */
++rsbac_boolean_t rsbac_rc_role_exists(rsbac_list_ta_number_t ta_number,
++ rsbac_rc_role_id_t role);
++
++rsbac_boolean_t rsbac_rc_type_exists(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ rsbac_rc_type_id_t type);
++
++/* Get list of defined items. Returns number or negative error.
++ * Allocates array via rsbac_kmalloc, if number > 0 - rsbac_kfree after use! */
++int rsbac_rc_get_list(rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ union rsbac_rc_target_id_t tid,
++ enum rsbac_rc_item_t item,
++ __u32 ** array_pp, rsbac_time_t ** ttl_array_pp);
++
++int rsbac_rc_select_fd_create_type(rsbac_rc_type_id_t type);
++
++#endif
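[Editorial sketch, not part of the patch: the allocate/iterate/free pattern that the rsbac_rc_get_list() comment above asks for. RT_ROLE, the tid.role field name and a NULL ttl array being tolerated are assumptions about rc_types.h and the list code, which are not fully shown here; rsbac_kfree() is the free routine named in that comment.]

#include <linux/kernel.h>
#include <rsbac/rc.h>

static void example_dump_role_comp(rsbac_rc_role_id_t role)
{
	union rsbac_rc_target_id_t tid;
	__u32 *array = NULL;
	int count;
	int i;

	tid.role = role;	/* assumed field name, see note above */
	count = rsbac_rc_get_list(0, RT_ROLE, tid, RI_role_comp,
				  &array, NULL);
	if (count <= 0)
		return;
	for (i = 0; i < count; i++)
		printk(KERN_DEBUG "role %u is compatible with role %u\n",
		       role, array[i]);
	rsbac_kfree(array);	/* required when count > 0 */
}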
+diff --git a/include/rsbac/rc_data_structures.h b/include/rsbac/rc_data_structures.h
+new file mode 100644
+index 0000000..a297b16
+--- /dev/null
++++ b/include/rsbac/rc_data_structures.h
+@@ -0,0 +1,352 @@
++/*********************************/
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2005: */
++/* Amon Ott <ao@rsbac.org> */
++/* Data structures for Role */
++/* Compatibility module */
++/* Last modified: 21/Dec/2005 */
++/*********************************/
++
++
++#ifndef __RSBAC_RC_DATA_STRUC_H
++#define __RSBAC_RC_DATA_STRUC_H
++
++#ifdef __KERNEL__ /* only include in kernel code */
++#include <linux/types.h>
++#include <rsbac/types.h>
++#endif /* __KERNEL__ */
++
++/* First of all we define dirname and filenames for saving the roles to disk. */
++/* The path must be a valid single dir name! Each mounted device gets its */
++/* own file set, residing in 'DEVICE_ROOT/RSBAC_ACI_PATH/'. */
++/* All user access to these files will be denied. */
++/* Backups are kept in FILENAMEb. */
++
++#ifdef __KERNEL__
++#define RSBAC_RC_LIST_KEY 77788855
++
++#define RSBAC_RC_NR_ROLE_LISTS 4
++#define RSBAC_RC_NR_TYPE_LISTS 4
++
++/* roles */
++#define RSBAC_RC_ROLE_FILENAME "rc_r"
++
++/* roles we are compatible with ( = we can change to) */
++#define RSBAC_RC_ROLE_RC_FILENAME "rc_rc"
++
++/* roles we may administrate (replaces admin_type) */
++#define RSBAC_RC_ROLE_ADR_FILENAME "rc_adr"
++
++/* roles we may read and assign to users, if they were in one of these before. */
++#define RSBAC_RC_ROLE_ASR_FILENAME "rc_asr"
++
++/* file/dir/fifo/symlink types for new items, by parent efftype */
++/* If not found, use old global value def_fd_create_type */
++#define RSBAC_RC_ROLE_DFDC_FILENAME "rc_dfdc"
++
++/* file/dir/fifo/symlink types and requests we are compatible with */
++#define RSBAC_RC_ROLE_TCFD_FILENAME "rc_tcfd"
++
++/* dev types and requests we are compatible with */
++#define RSBAC_RC_ROLE_TCDV_FILENAME "rc_tcdv"
++
++/* user types and requests we are compatible with */
++#define RSBAC_RC_ROLE_TCUS_FILENAME "rc_tcus"
++
++/* process types and requests we are compatible with */
++#define RSBAC_RC_ROLE_TCPR_FILENAME "rc_tcpr"
++
++/* IPC types and requests we are compatible with */
++#define RSBAC_RC_ROLE_TCIP_FILENAME "rc_tcip"
++
++/* SCD types and requests we are compatible with */
++#define RSBAC_RC_ROLE_TCSC_FILENAME "rc_tcsc"
++
++/* group types and requests we are compatible with */
++#define RSBAC_RC_ROLE_TCGR_FILENAME "rc_tcgr"
++
++/* NETDEV types and requests we are compatible with */
++#define RSBAC_RC_ROLE_TCND_FILENAME "rc_tcnd"
++
++/* NETTEMP types and requests we are compatible with */
++#define RSBAC_RC_ROLE_TCNT_FILENAME "rc_tcnt"
++
++/* NETOBJ types and requests we are compatible with */
++#define RSBAC_RC_ROLE_TCNO_FILENAME "rc_tcno"
++
++#define RSBAC_RC_ROLE_LIST_VERSION 5
++#define RSBAC_RC_ROLE_OLD_LIST_VERSION 4
++#define RSBAC_RC_ROLE_OLD_OLD_LIST_VERSION 3
++#define RSBAC_RC_ROLE_OLD_OLD_OLD_LIST_VERSION 2
++#define RSBAC_RC_ROLE_OLD_OLD_OLD_OLD_LIST_VERSION 1
++#define RSBAC_RC_ROLE_RC_LIST_VERSION 1
++#define RSBAC_RC_ROLE_ADR_LIST_VERSION 1
++#define RSBAC_RC_ROLE_ASR_LIST_VERSION 1
++#define RSBAC_RC_ROLE_DFDC_LIST_VERSION 1
++#define RSBAC_RC_ROLE_TCFD_LIST_VERSION 2
++#define RSBAC_RC_ROLE_TCDV_LIST_VERSION 2
++#define RSBAC_RC_ROLE_TCUS_LIST_VERSION 2
++#define RSBAC_RC_ROLE_TCPR_LIST_VERSION 2
++#define RSBAC_RC_ROLE_TCIP_LIST_VERSION 2
++#define RSBAC_RC_ROLE_TCSC_LIST_VERSION 2
++#define RSBAC_RC_ROLE_TCGR_LIST_VERSION 2
++#define RSBAC_RC_ROLE_TCND_LIST_VERSION 2
++#define RSBAC_RC_ROLE_TCNT_LIST_VERSION 2
++#define RSBAC_RC_ROLE_TCNO_LIST_VERSION 2
++#define RSBAC_RC_ROLE_TCFD_OLD_LIST_VERSION 1
++#define RSBAC_RC_ROLE_TCDV_OLD_LIST_VERSION 1
++#define RSBAC_RC_ROLE_TCUS_OLD_LIST_VERSION 1
++#define RSBAC_RC_ROLE_TCPR_OLD_LIST_VERSION 1
++#define RSBAC_RC_ROLE_TCIP_OLD_LIST_VERSION 1
++#define RSBAC_RC_ROLE_TCSC_OLD_LIST_VERSION 1
++#define RSBAC_RC_ROLE_TCGR_OLD_LIST_VERSION 1
++#define RSBAC_RC_ROLE_TCND_OLD_LIST_VERSION 1
++#define RSBAC_RC_ROLE_TCNT_OLD_LIST_VERSION 1
++#define RSBAC_RC_ROLE_TCNO_OLD_LIST_VERSION 1
++
++#define RSBAC_RC_TYPE_FD_FILENAME "rc_tfd"
++#define RSBAC_RC_TYPE_DEV_FILENAME "rc_tdv"
++#define RSBAC_RC_TYPE_IPC_FILENAME "rc_tip"
++#define RSBAC_RC_TYPE_USER_FILENAME "rc_tus"
++#define RSBAC_RC_TYPE_PROCESS_FILENAME "rc_tpr"
++#define RSBAC_RC_TYPE_GROUP_FILENAME "rc_tgr"
++#define RSBAC_RC_TYPE_NETDEV_FILENAME "rc_tnd"
++#define RSBAC_RC_TYPE_NETTEMP_FILENAME "rc_tnt"
++#define RSBAC_RC_TYPE_NETOBJ_FILENAME "rc_tno"
++
++#define RSBAC_RC_TYPE_FD_LIST_VERSION 1
++#define RSBAC_RC_TYPE_DEV_LIST_VERSION 1
++#define RSBAC_RC_TYPE_IPC_LIST_VERSION 1
++#define RSBAC_RC_TYPE_USER_LIST_VERSION 1
++#define RSBAC_RC_TYPE_PROCESS_LIST_VERSION 1
++#define RSBAC_RC_TYPE_GROUP_LIST_VERSION 1
++#define RSBAC_RC_TYPE_NETDEV_LIST_VERSION 1
++#define RSBAC_RC_TYPE_NETTEMP_LIST_VERSION 1
++#define RSBAC_RC_TYPE_NETOBJ_LIST_VERSION 1
++#endif /* __KERNEL__ */
++
++/*
++ * The following structures provide the role model data structures.
++ * All RSBAC_RC_NR_ROLES roles and RSBAC_RC_NR_TYPES x target-no. types
++ * and SCD-type definitions are kept in arrays and saved to disk as such.
++ */
++
++/***************************************
++ * Roles *
++ ***************************************/
++
++/* Caution: whenever role struct changes, version and old_version must be increased! */
++
++struct rsbac_rc_role_entry_t {
++ rsbac_enum_t admin_type; /* role admin: none, system or role admin? */
++ char name[RSBAC_RC_NAME_LEN];
++ rsbac_rc_type_id_t def_fd_create_type;
++ rsbac_rc_type_id_t def_user_create_type;
++ rsbac_rc_type_id_t def_process_create_type;
++ rsbac_rc_type_id_t def_process_chown_type;
++ rsbac_rc_type_id_t def_process_execute_type;
++ rsbac_rc_type_id_t def_ipc_create_type;
++ rsbac_rc_type_id_t def_group_create_type;
++ rsbac_rc_type_id_t def_unixsock_create_type;
++ rsbac_enum_t boot_role;
++ rsbac_enum_t req_reauth;
++};
++
++struct rsbac_rc_old_role_entry_t {
++ rsbac_enum_t admin_type; /* role admin: none, system or role admin? */
++ char name[RSBAC_RC_NAME_LEN];
++ rsbac_rc_type_id_t def_fd_create_type;
++ rsbac_rc_type_id_t def_user_create_type;
++ rsbac_rc_type_id_t def_process_create_type;
++ rsbac_rc_type_id_t def_process_chown_type;
++ rsbac_rc_type_id_t def_process_execute_type;
++ rsbac_rc_type_id_t def_ipc_create_type;
++ rsbac_rc_type_id_t def_group_create_type;
++ rsbac_enum_t boot_role;
++ rsbac_enum_t req_reauth;
++};
++
++struct rsbac_rc_old_old_role_entry_t {
++ rsbac_enum_t admin_type; /* role admin: none, system or role admin? */
++ char name[RSBAC_RC_NAME_LEN];
++ rsbac_rc_type_id_t def_fd_create_type;
++ rsbac_rc_type_id_t def_user_create_type;
++ rsbac_rc_type_id_t def_process_create_type;
++ rsbac_rc_type_id_t def_process_chown_type;
++ rsbac_rc_type_id_t def_process_execute_type;
++ rsbac_rc_type_id_t def_ipc_create_type;
++ rsbac_rc_type_id_t def_group_create_type;
++ rsbac_enum_t boot_role;
++};
++
++struct rsbac_rc_old_old_old_role_entry_t {
++ rsbac_enum_t admin_type; /* role admin: none, system or role admin? */
++ char name[RSBAC_RC_NAME_LEN];
++ rsbac_rc_type_id_t def_fd_create_type;
++ rsbac_rc_type_id_t def_user_create_type;
++ rsbac_rc_type_id_t def_process_create_type;
++ rsbac_rc_type_id_t def_process_chown_type;
++ rsbac_rc_type_id_t def_process_execute_type;
++ rsbac_rc_type_id_t def_ipc_create_type;
++ rsbac_enum_t boot_role;
++};
++
++struct rsbac_rc_old_old_old_old_role_entry_t {
++ rsbac_enum_t admin_type; /* role admin: none, system or role admin? */
++ char name[RSBAC_RC_NAME_LEN];
++ rsbac_rc_type_id_t def_fd_create_type;
++ rsbac_rc_type_id_t def_process_create_type;
++ rsbac_rc_type_id_t def_process_chown_type;
++ rsbac_rc_type_id_t def_process_execute_type;
++ rsbac_rc_type_id_t def_ipc_create_type;
++};
++
++#define RSBAC_RC_NR_ROLE_ENTRY_ITEMS 25
++#define RSBAC_RC_ROLE_ENTRY_ITEM_LIST { \
++ RI_role_comp, \
++ RI_admin_roles, \
++ RI_assign_roles, \
++ RI_type_comp_fd, \
++ RI_type_comp_dev, \
++ RI_type_comp_user, \
++ RI_type_comp_process, \
++ RI_type_comp_ipc, \
++ RI_type_comp_scd, \
++ RI_type_comp_group, \
++ RI_type_comp_netdev, \
++ RI_type_comp_nettemp, \
++ RI_type_comp_netobj, \
++ RI_admin_type, \
++ RI_name, \
++ RI_def_fd_create_type, \
++ RI_def_fd_ind_create_type, \
++ RI_def_user_create_type, \
++ RI_def_process_create_type, \
++ RI_def_process_chown_type, \
++ RI_def_process_execute_type, \
++ RI_def_ipc_create_type, \
++ RI_def_group_create_type, \
++ RI_boot_role, \
++ RI_req_reauth \
++ }
++
++/***************************************
++ * Type names *
++ ***************************************/
++
++/* Caution: whenever role struct changes, version and old_version must be increased! */
++
++/* #define RSBAC_RC_OLD_TYPE_VERSION 1 */
++#define RSBAC_RC_TYPE_VERSION 1
++
++struct rsbac_rc_type_fd_entry_t {
++ char name[RSBAC_RC_NAME_LEN];
++ __u8 need_secdel; /* rsbac_boolean_t */
++};
++
++#define RSBAC_RC_NR_TYPE_ENTRY_ITEMS 10
++#define RSBAC_RC_TYPE_ENTRY_ITEM_LIST { \
++ RI_type_fd_name, \
++ RI_type_dev_name, \
++ RI_type_ipc_name, \
++ RI_type_scd_name, \
++ RI_type_process_name, \
++ RI_type_group_name, \
++ RI_type_netdev_name, \
++ RI_type_nettemp_name, \
++ RI_type_netobj_name, \
++ RI_type_fd_need_secdel \
++ }
++
++/**********************************************/
++/* Default values */
++/**********************************************/
++
++#define RSBAC_RC_GENERAL_ROLE_ENTRY \
++ { \
++ .admin_type = RC_no_admin, \
++ .name = "General User", \
++ .def_fd_create_type = RC_type_inherit_parent, \
++ .def_user_create_type = RSBAC_RC_GENERAL_TYPE, \
++ .def_process_create_type = RC_type_inherit_parent, \
++ .def_process_chown_type = RC_type_use_new_role_def_create, \
++ .def_process_execute_type = RC_type_inherit_parent, \
++ .def_ipc_create_type = RSBAC_RC_GENERAL_TYPE, \
++ .def_group_create_type = RSBAC_RC_GENERAL_TYPE, \
++ .def_unixsock_create_type = RC_type_use_fd, \
++ .boot_role = FALSE, \
++ .req_reauth = FALSE, \
++ }
++
++#define RSBAC_RC_ROLE_ADMIN_ROLE_ENTRY \
++ { \
++ .admin_type = RC_role_admin, \
++ .name = "Role Admin", \
++ .def_fd_create_type = RC_type_inherit_parent, \
++ .def_user_create_type = RSBAC_RC_GENERAL_TYPE, \
++ .def_process_create_type = RC_type_inherit_parent, \
++ .def_process_chown_type = RC_type_use_new_role_def_create, \
++ .def_process_execute_type = RC_type_inherit_parent, \
++ .def_ipc_create_type = RSBAC_RC_GENERAL_TYPE, \
++ .def_group_create_type = RSBAC_RC_GENERAL_TYPE, \
++ .def_unixsock_create_type = RC_type_use_fd, \
++ .boot_role = FALSE, \
++ .req_reauth = FALSE, \
++ }
++
++#define RSBAC_RC_SYSTEM_ADMIN_ROLE_ENTRY \
++ { \
++ .admin_type = RC_system_admin, \
++ .name = "System Admin", \
++ .def_fd_create_type = RC_type_inherit_parent, \
++ .def_user_create_type = RSBAC_RC_GENERAL_TYPE, \
++ .def_process_create_type = RC_type_inherit_parent, \
++ .def_process_chown_type = RC_type_use_new_role_def_create, \
++ .def_process_execute_type = RC_type_inherit_parent, \
++ .def_ipc_create_type = RSBAC_RC_GENERAL_TYPE, \
++ .def_group_create_type = RSBAC_RC_GENERAL_TYPE, \
++ .def_unixsock_create_type = RC_type_use_fd, \
++ .boot_role = FALSE, \
++ .req_reauth = FALSE, \
++ }
++
++#define RSBAC_RC_BOOT_ROLE_ENTRY \
++ { \
++ .admin_type = RC_no_admin, \
++ .name = "System Boot", \
++ .def_fd_create_type = RC_type_inherit_parent, \
++ .def_user_create_type = RSBAC_RC_GENERAL_TYPE, \
++ .def_process_create_type = RC_type_inherit_parent, \
++ .def_process_chown_type = RC_type_use_new_role_def_create, \
++ .def_process_execute_type = RC_type_inherit_parent, \
++ .def_ipc_create_type = RSBAC_RC_GENERAL_TYPE, \
++ .def_group_create_type = RSBAC_RC_GENERAL_TYPE, \
++ .def_unixsock_create_type = RC_type_use_fd, \
++ .boot_role = TRUE, \
++ .req_reauth = FALSE, \
++ }
++
++#define RSBAC_RC_AUDITOR_ROLE_ENTRY \
++ { \
++ .admin_type = RC_no_admin, \
++ .name = "Auditor", \
++ .def_fd_create_type = RC_type_inherit_parent, \
++ .def_user_create_type = RSBAC_RC_GENERAL_TYPE, \
++ .def_process_create_type = RC_type_inherit_parent, \
++ .def_process_chown_type = RC_type_use_new_role_def_create, \
++ .def_process_execute_type = RC_type_inherit_parent, \
++ .def_ipc_create_type = RSBAC_RC_GENERAL_TYPE, \
++ .def_group_create_type = RSBAC_RC_GENERAL_TYPE, \
++ .def_unixsock_create_type = RC_type_use_fd, \
++ .boot_role = FALSE, \
++ .req_reauth = FALSE, \
++ }
++
++/**********************************************/
++/* Declarations */
++/**********************************************/
++
++#ifdef __KERNEL__
++#endif /* __KERNEL__ */
++
++#endif /* __RSBAC_RC_DATA_STRUC_H */
+diff --git a/include/rsbac/rc_getname.h b/include/rsbac/rc_getname.h
+new file mode 100644
+index 0000000..562fa20
+--- /dev/null
++++ b/include/rsbac/rc_getname.h
+@@ -0,0 +1,44 @@
++/******************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999: Amon Ott */
++/* Getname functions for RC parts */
++/* Last modified: 18/Jan/99 */
++/******************************** */
++
++#ifndef __RSBAC_RC_GETNAME_H
++#define __RSBAC_RC_GETNAME_H
++
++#include <rsbac/rc_types.h>
++
++#ifndef NULL
++#define NULL ((void *) 0)
++#endif
++
++char *get_rc_target_name(char *name, enum rsbac_rc_target_t value);
++
++enum rsbac_rc_target_t get_rc_target_nr(const char *name);
++
++char *get_rc_admin_name(char *name, enum rsbac_rc_admin_type_t value);
++
++enum rsbac_rc_admin_type_t get_rc_admin_nr(const char *name);
++
++char *get_rc_scd_type_name(char *name, enum rsbac_rc_scd_type_t value);
++
++enum rsbac_rc_scd_type_t get_rc_scd_type_nr(const char *name);
++
++char *get_rc_item_name(char *name, enum rsbac_rc_item_t value);
++
++enum rsbac_rc_item_t get_rc_item_nr(const char *name);
++
++#ifndef __KERNEL__
++char *get_rc_item_param(char *name, enum rsbac_rc_item_t value);
++#endif
++
++char *get_rc_special_right_name(char *name,
++ enum rsbac_rc_special_rights_t value);
++
++#ifndef __KERNEL__
++enum rsbac_rc_special_rights_t get_rc_special_right_nr(const char *name);
++#endif
++
++#endif
+diff --git a/include/rsbac/rc_types.h b/include/rsbac/rc_types.h
+new file mode 100644
+index 0000000..c8207fe
+--- /dev/null
++++ b/include/rsbac/rc_types.h
+@@ -0,0 +1,376 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2005: Amon Ott */
++/* API: Data types for */
++/* Role Compatibility Module */
++/* Last modified: 21/Dec/2005 */
++/************************************ */
++
++#ifndef __RSBAC_RC_TYPES_H
++#define __RSBAC_RC_TYPES_H
++
++#include <linux/types.h>
++
++/***** RC *****/
++
++#define RSBAC_RC_GENERAL_ROLE 0
++#define RSBAC_RC_ROLE_ADMIN_ROLE 1
++#define RSBAC_RC_SYSTEM_ADMIN_ROLE 2
++#define RSBAC_RC_AUDITOR_ROLE 3
++#define RSBAC_RC_BOOT_ROLE 999999
++#define RSBAC_RC_GENERAL_TYPE 0
++#define RSBAC_RC_SEC_TYPE 1
++#define RSBAC_RC_SYS_TYPE 2
++#define RSBAC_RC_KERNEL_P_TYPE 999999
++
++#define RSBAC_RC_NAME_LEN 16
++#define RSBAC_RC_ALL_REQUESTS ((rsbac_rc_request_vector_t) -1)
++
++#define RSBAC_RC_OLD_SPECIAL_RIGHT_BASE 48
++#define RSBAC_RC_SPECIAL_RIGHT_BASE 56
++
++enum rsbac_rc_special_rights_t { RCR_ADMIN = RSBAC_RC_SPECIAL_RIGHT_BASE,
++ RCR_ASSIGN,
++ RCR_ACCESS_CONTROL,
++ RCR_SUPERVISOR,
++ RCR_MODIFY_AUTH,
++ RCR_CHANGE_AUTHED_OWNER,
++ RCR_SELECT,
++ RCR_NONE
++};
++
++typedef __u64 rsbac_rc_rights_vector_t;
++
++/* backwards compatibility only! */
++typedef __u64 rsbac_rc_role_vector_t;
++
++#define RSBAC_RC_RIGHTS_VECTOR(x) ((rsbac_rc_rights_vector_t) 1 << (x))
++#define RSBAC_RC_ROLE_VECTOR(x) ((rsbac_rc_role_vector_t) 1 << (x))
++#define RSBAC_RC_TYPE_VECTOR(x) ((rsbac_rc_type_vector_t) 1 << (x))
++
++#define RSBAC_RC_SPECIAL_RIGHTS_VECTOR (\
++ RSBAC_RC_RIGHTS_VECTOR(RCR_ADMIN) | \
++ RSBAC_RC_RIGHTS_VECTOR(RCR_ASSIGN) | \
++ RSBAC_RC_RIGHTS_VECTOR(RCR_ACCESS_CONTROL) | \
++ RSBAC_RC_RIGHTS_VECTOR(RCR_SUPERVISOR) | \
++ RSBAC_RC_RIGHTS_VECTOR(RCR_MODIFY_AUTH) | \
++ RSBAC_RC_RIGHTS_VECTOR(RCR_CHANGE_AUTHED_OWNER) | \
++ RSBAC_RC_RIGHTS_VECTOR(RCR_SELECT) \
++ )
++
++#define RSBAC_RC_SUPERVISOR_RIGHT_VECTOR (\
++ RSBAC_RC_RIGHTS_VECTOR(RCR_SUPERVISOR) \
++ )
++
++#define RSBAC_RC_ALL_RIGHTS_VECTOR (RSBAC_ALL_REQUEST_VECTOR | RSBAC_RC_SPECIAL_RIGHTS_VECTOR)
++
++#define RSBAC_RC_PROCESS_RIGHTS_VECTOR (RSBAC_PROCESS_REQUEST_VECTOR | \
++ RSBAC_RC_RIGHTS_VECTOR(R_CONNECT) | \
++ RSBAC_RC_RIGHTS_VECTOR(R_ACCEPT) | \
++ RSBAC_RC_RIGHTS_VECTOR(R_SEND) | \
++ RSBAC_RC_RIGHTS_VECTOR(R_RECEIVE) \
++)
++
++#define RSBAC_RC_DEFAULT_RIGHTS_VECTOR 0
++
++#define RSBAC_RC_GEN_RIGHTS_VECTOR RSBAC_RC_DEFAULT_RIGHTS_VECTOR
++
++typedef __u32 rsbac_rc_role_id_t;
++typedef __u32 rsbac_rc_type_id_t;
++typedef rsbac_request_vector_t rsbac_rc_request_vector_t;
++
++enum rsbac_rc_admin_type_t { RC_no_admin, RC_role_admin, RC_system_admin,
++ RC_none };
++
++/*
++ * System Control Types, including general SCD types
++ * (start at 32 to allow future SCD types, max is 63)
++ */
++#define RST_min 32
++enum rsbac_rc_scd_type_t { RST_auth_administration = RST_min,
++ RST_none
++};
++
++/* what should always be there to keep system functional */
++#ifdef CONFIG_RSBAC_USER_MOD_IOPERM
++#define RSBAC_RC_GENERAL_COMP_SCD { \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ /* ST_ioports */ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA), \
++ /* ST_rlimit */ RSBAC_REQUEST_VECTOR(GET_STATUS_DATA) | RSBAC_REQUEST_VECTOR(MODIFY_SYSTEM_DATA), \
++ /* ST_swap */ 0, \
++ /* ST_syslog */ 0, \
++ /* ST_rsbac */ 0, \
++ /* ST_rsbac_log */ 0, \
++ /* ST_other */ ( \
++ ((rsbac_request_vector_t) 1 << R_MAP_EXEC) \
++ ), \
++ /* ST_kmem */ 0, \
++ /* ST_network */ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA), \
++ /* 13 = ST_none */ 0 \
++ }
++#else
++#define RSBAC_RC_GENERAL_COMP_SCD { \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ /* ST_rlimit */ RSBAC_REQUEST_VECTOR(GET_STATUS_DATA) | RSBAC_REQUEST_VECTOR(MODIFY_SYSTEM_DATA), \
++ /* ST_swap */ 0, \
++ /* ST_syslog */ 0, \
++ /* ST_rsbac */ 0, \
++ /* ST_rsbac_log */ 0, \
++ /* ST_other */ ( \
++ ((rsbac_request_vector_t) 1 << R_MAP_EXEC) \
++ ), \
++ /* ST_kmem */ 0, \
++ /* ST_network */ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA), \
++ /* ST_firewall */ 0, \
++ /* ST_priority */ 0, \
++ /* 15 = ST_none */ 0 \
++ }
++#endif
++
++#define RSBAC_RC_ROLEADM_COMP_SCD { \
++ /* 0 = ST_time_structs */ 0, \
++ /* ST_clock */ 0, \
++ /* ST_host_id */ 0, \
++ /* ST_net_id */ 0, \
++ /* ST_ioports */ 0, \
++ /* ST_rlimit */ RSBAC_SCD_REQUEST_VECTOR | RSBAC_RC_SPECIAL_RIGHTS_VECTOR, \
++ /* ST_swap */ 0, \
++ /* ST_syslog */ 0, \
++ /* ST_rsbac */ RSBAC_SCD_REQUEST_VECTOR | RSBAC_RC_SPECIAL_RIGHTS_VECTOR, \
++ /* ST_rsbac_log */ RSBAC_SCD_REQUEST_VECTOR | RSBAC_RC_SPECIAL_RIGHTS_VECTOR, \
++ /* ST_other */ ( \
++ ((rsbac_request_vector_t) 1 << R_MAP_EXEC) \
++ | ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) \
++ | ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) \
++ | ((rsbac_request_vector_t) 1 << R_SWITCH_LOG) \
++ | ((rsbac_request_vector_t) 1 << R_SWITCH_MODULE) \
++ ) | RSBAC_RC_SPECIAL_RIGHTS_VECTOR, \
++ /* ST_kmem */ 0, \
++ /* ST_network */ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | RSBAC_RC_SPECIAL_RIGHTS_VECTOR, \
++ /* ST_firewall */ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | RSBAC_RC_SPECIAL_RIGHTS_VECTOR, \
++ /* ST_nice */ 0, \
++ /* 15 = ST_none */ 0, \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ /* 20 */ 0, \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ /* 30 */ 0, \
++ 0, \
++ /* 32 = RST_auth_admin */ RSBAC_SCD_REQUEST_VECTOR | RSBAC_RC_SPECIAL_RIGHTS_VECTOR, \
++ /* 33 = RST_none */ 0 \
++ }
++
++#define RSBAC_RC_SYSADM_COMP_SCD { \
++ /* 0 = ST_time_structs */ RSBAC_SCD_REQUEST_VECTOR & RSBAC_SYSTEM_REQUEST_VECTOR, \
++ /* ST_clock */ RSBAC_SCD_REQUEST_VECTOR & RSBAC_SYSTEM_REQUEST_VECTOR, \
++ /* ST_host_id */ RSBAC_SCD_REQUEST_VECTOR & RSBAC_SYSTEM_REQUEST_VECTOR, \
++ /* ST_net_id */ RSBAC_SCD_REQUEST_VECTOR & RSBAC_SYSTEM_REQUEST_VECTOR, \
++ /* ST_ioports */ RSBAC_SCD_REQUEST_VECTOR & RSBAC_SYSTEM_REQUEST_VECTOR, \
++ /* ST_rlimit */ RSBAC_SCD_REQUEST_VECTOR & RSBAC_SYSTEM_REQUEST_VECTOR, \
++ /* ST_swap */ RSBAC_SCD_REQUEST_VECTOR & RSBAC_SYSTEM_REQUEST_VECTOR, \
++ /* ST_syslog */ RSBAC_SCD_REQUEST_VECTOR & RSBAC_SYSTEM_REQUEST_VECTOR, \
++ /* ST_rsbac */ RSBAC_SCD_REQUEST_VECTOR & RSBAC_SYSTEM_REQUEST_VECTOR, \
++ /* ST_rsbac_log */ 0, \
++ /* ST_other */ ( \
++ ((rsbac_request_vector_t) 1 << R_ADD_TO_KERNEL) \
++ | ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) \
++ | ((rsbac_request_vector_t) 1 << R_MAP_EXEC) \
++ | ((rsbac_request_vector_t) 1 << R_MOUNT) \
++ | ((rsbac_request_vector_t) 1 << R_REMOVE_FROM_KERNEL) \
++ | ((rsbac_request_vector_t) 1 << R_UMOUNT) \
++ | ((rsbac_request_vector_t) 1 << R_SHUTDOWN) \
++ ), \
++ /* ST_kmem */ RSBAC_SCD_REQUEST_VECTOR & RSBAC_SYSTEM_REQUEST_VECTOR, \
++ /* ST_network */ RSBAC_SCD_REQUEST_VECTOR & RSBAC_SYSTEM_REQUEST_VECTOR, \
++ /* ST_firewall */ RSBAC_SCD_REQUEST_VECTOR & RSBAC_SYSTEM_REQUEST_VECTOR, \
++ /* ST_priority */ RSBAC_SCD_REQUEST_VECTOR & RSBAC_SYSTEM_REQUEST_VECTOR, \
++ /* 15 = ST_none */ 0, \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ /* 20 */ 0, \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ /* 30 */ 0, \
++ 0, \
++ /* 32 = RST_auth_admin */ 0, \
++ /* 33 = RST_none */ 0 \
++ }
++#ifdef CONFIG_RSBAC_USER_MOD_IOPERM
++#define RSBAC_RC_AUDITOR_COMP_SCD { \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ /* ST_ioports */ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA), \
++ /* ST_rlimit */ RSBAC_REQUEST_VECTOR(GET_STATUS_DATA) | RSBAC_REQUEST_VECTOR(MODIFY_SYSTEM_DATA), \
++ /* ST_swap */ 0, \
++ /* ST_syslog */ 0, \
++ /* ST_rsbac */ 0, \
++ /* ST_rsbac_log */ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA), \
++ /* ST_other */ ( \
++ ((rsbac_request_vector_t) 1 << R_MAP_EXEC) \
++ ), \
++ /* ST_kmem */ 0, \
++ /* ST_network */ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA), \
++ /* ST_firewall */ 0, \
++ /* ST_priority */ 0, \
++ /* 15 = ST_none */ 0 \
++ }
++#else
++#define RSBAC_RC_AUDITOR_COMP_SCD { \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ 0, \
++ /* ST_rlimit */ RSBAC_REQUEST_VECTOR(GET_STATUS_DATA) | RSBAC_REQUEST_VECTOR(MODIFY_SYSTEM_DATA), \
++ /* ST_swap */ 0, \
++ /* ST_syslog */ 0, \
++ /* ST_rsbac */ 0, \
++ /* ST_rsbac_log */ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA), \
++ /* ST_other */ ( \
++ ((rsbac_request_vector_t) 1 << R_MAP_EXEC) \
++ ), \
++ /* ST_kmem */ 0, \
++ /* ST_network */ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA), \
++ /* ST_firewall */ 0, \
++ /* ST_priority */ 0, \
++ /* 15 = ST_none */ 0 \
++ }
++#endif
++
++
++#define RC_type_inherit_process ((rsbac_rc_type_id_t) -1)
++#define RC_type_inherit_parent ((rsbac_rc_type_id_t) -2)
++#define RC_type_no_create ((rsbac_rc_type_id_t) -3)
++#define RC_type_no_execute ((rsbac_rc_type_id_t) -4)
++#define RC_type_use_new_role_def_create ((rsbac_rc_type_id_t) -5) /* for process chown (setuid) */
++#define RC_type_no_chown ((rsbac_rc_type_id_t) -6)
++#define RC_type_use_fd ((rsbac_rc_type_id_t) -7)
++#define RC_type_min_special ((rsbac_rc_type_id_t) -7)
++#define RC_type_max_value ((rsbac_rc_type_id_t) -32)
++
++#define RC_role_inherit_user ((rsbac_rc_role_id_t) -1)
++#define RC_role_inherit_process ((rsbac_rc_role_id_t) -2)
++#define RC_role_inherit_parent ((rsbac_rc_role_id_t) -3)
++#define RC_role_inherit_up_mixed ((rsbac_rc_role_id_t) -4)
++#define RC_role_use_force_role ((rsbac_rc_role_id_t) -5)
++#define RC_role_min_special ((rsbac_rc_role_id_t) -5)
++#define RC_role_max_value ((rsbac_rc_role_id_t) -32)
++
++#define RC_default_force_role RC_role_inherit_parent
++#define RC_default_root_dir_force_role RC_role_inherit_up_mixed
++#define RC_default_init_force_role RC_role_inherit_user
++#define RC_default_initial_role RC_role_inherit_parent
++#define RC_default_root_dir_initial_role RC_role_use_force_role
++
++/****************************************************************************/
++/* RC ACI types */
++/****************************************************************************/
++
++enum rsbac_rc_target_t { RT_ROLE, RT_TYPE, RT_NONE };
++
++union rsbac_rc_target_id_t {
++ rsbac_rc_role_id_t role;
++ rsbac_rc_type_id_t type;
++};
++
++enum rsbac_rc_item_t { RI_role_comp,
++ RI_admin_roles,
++ RI_assign_roles,
++ RI_type_comp_fd,
++ RI_type_comp_dev,
++ RI_type_comp_user,
++ RI_type_comp_process,
++ RI_type_comp_ipc,
++ RI_type_comp_scd,
++ RI_type_comp_group,
++ RI_type_comp_netdev,
++ RI_type_comp_nettemp,
++ RI_type_comp_netobj,
++ RI_admin_type,
++ RI_name,
++ RI_def_fd_create_type,
++ RI_def_fd_ind_create_type,
++ RI_def_user_create_type,
++ RI_def_process_create_type,
++ RI_def_process_chown_type,
++ RI_def_process_execute_type,
++ RI_def_ipc_create_type,
++ RI_def_group_create_type,
++ RI_def_unixsock_create_type,
++ RI_boot_role,
++ RI_req_reauth,
++ RI_type_fd_name,
++ RI_type_dev_name,
++ RI_type_ipc_name,
++ RI_type_user_name,
++ RI_type_process_name,
++ RI_type_group_name,
++ RI_type_netdev_name,
++ RI_type_nettemp_name,
++ RI_type_netobj_name,
++ RI_type_fd_need_secdel,
++ RI_type_scd_name, /* Pseudo, using get_rc_scd_name() */
++ RI_remove_role,
++ RI_def_fd_ind_create_type_remove,
++ RI_type_fd_remove,
++ RI_type_dev_remove,
++ RI_type_ipc_remove,
++ RI_type_user_remove,
++ RI_type_process_remove,
++ RI_type_group_remove,
++ RI_type_netdev_remove,
++ RI_type_nettemp_remove,
++ RI_type_netobj_remove,
++#ifdef __KERNEL__
++#endif
++ RI_none
++};
++
++union rsbac_rc_item_value_t {
++ rsbac_rc_rights_vector_t rights;
++ enum rsbac_rc_admin_type_t admin_type;
++ char name[RSBAC_RC_NAME_LEN];
++ rsbac_rc_role_id_t role_id;
++ rsbac_rc_type_id_t type_id;
++ rsbac_boolean_t need_secdel;
++ rsbac_boolean_t comp;
++ rsbac_boolean_t boot_role;
++ rsbac_boolean_t req_reauth;
++#ifdef __KERNEL__
++#endif
++ u_char u_char_dummy;
++ int dummy;
++ u_int u_dummy;
++ long long_dummy;
++ long long long_long_dummy;
++};
++
++#endif
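
The rights-vector macros above are plain 64-bit shifts, so module code can build and test RC rights with ordinary bit operations. A minimal sketch follows (not part of the patch itself); it assumes the generic R_* request enum, rsbac_request_vector_t and u_int come from rsbac/types.h and the standard kernel headers:

#include <linux/types.h>
#include <rsbac/types.h>      /* assumed to provide the generic R_* request enum */
#include <rsbac/rc_types.h>

/* Build a rights vector holding one ordinary request plus one RC
 * special right (bit 56 and up, see RSBAC_RC_SPECIAL_RIGHT_BASE). */
static rsbac_rc_rights_vector_t example_rights(void)
{
        rsbac_rc_rights_vector_t vec = 0;

        vec |= RSBAC_RC_RIGHTS_VECTOR(R_GET_STATUS_DATA);
        vec |= RSBAC_RC_RIGHTS_VECTOR(RCR_ADMIN);
        return vec;
}

/* Nonzero if a single right is contained in a rights vector. */
static int example_has_right(rsbac_rc_rights_vector_t vec, u_int right)
{
        return (vec & RSBAC_RC_RIGHTS_VECTOR(right)) != 0;
}
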
+diff --git a/include/rsbac/reg.h b/include/rsbac/reg.h
+new file mode 100644
+index 0000000..cd276a5
+--- /dev/null
++++ b/include/rsbac/reg.h
+@@ -0,0 +1,152 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2012: Amon Ott */
++/* API: for REG */
++/* Module Registration */
++/* Last modified: 07/May/2012 */
++/************************************ */
++
++#ifndef __RSBAC_REG_H
++#define __RSBAC_REG_H
++
++#include <rsbac/types.h>
++#include <rsbac/debug.h>
++
++#define RSBAC_REG_VERSION 1
++
++/***************************************************/
++/* Types */
++/***************************************************/
++
++#define RSBAC_REG_NAME_LEN 30
++
++/* Decision function */
++typedef \
++ int rsbac_reg_request_func_t ( enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++/* Attribute setting / notification function */
++typedef \
++ int rsbac_reg_set_attr_func_t ( enum rsbac_adf_request_t,
++ rsbac_pid_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_target_t,
++ union rsbac_target_id_t,
++ enum rsbac_attribute_t,
++ union rsbac_attribute_value_t,
++ rsbac_uid_t); /* process owner */
++
++/* Whether module wants this file to be overwritten on delete / truncate */
++typedef rsbac_boolean_t rsbac_reg_need_overwrite_func_t(struct dentry * dentry_p);
++
++/*
++ * rsbac_reg_write_func_t
++ *
++ * Called by rsbac_write function to save all dirty lists, must return number
++ * of files written or negative error. If auto_write is active, this function
++ * will be called regularly and allows for asynchronous data writing to disk.
++ *
++ * If need_lock is TRUE, a lock_kernel() / unlock_kernel() pair must be used
++ * around the write function.
++ */
++typedef int rsbac_reg_write_func_t(rsbac_boolean_t need_lock);
++
++/* Called on every mount, allows updating of fs based data */
++typedef int rsbac_reg_mount_func_t(kdev_t kdev);
++
++/* Called on every umount, allows updating of fs based data */
++typedef int rsbac_reg_umount_func_t(kdev_t kdev);
++
++/* Called on rsbac_reg syscalls addressing this registered syscall handle */
++/* Generic Syscall interface - note: data is a user space pointer! */
++typedef int rsbac_reg_syscall_func_t(void __user * data);
++
++/* Status and data structures integrity checking, called from sys_rsbac_check */
++/* correct: if TRUE, errors are corrected, else just report */
++/* check_inode: for inode number based data, check, if inode still exists */
++typedef int rsbac_reg_check_func_t(int correct, int check_inode);
++
++/*********/
++
++struct rsbac_reg_entry_t
++ {
++ rsbac_reg_handle_t handle;
++ char name[RSBAC_REG_NAME_LEN+1];
++ rsbac_reg_request_func_t * request_func;
++ rsbac_reg_set_attr_func_t * set_attr_func;
++ rsbac_reg_need_overwrite_func_t * need_overwrite_func;
++ rsbac_reg_write_func_t * write_func;
++ rsbac_reg_mount_func_t * mount_func;
++ rsbac_reg_umount_func_t * umount_func;
++ rsbac_reg_check_func_t * check_func;
++ rsbac_boolean_t switch_on; /* turned on initially? */
++ };
++
++struct rsbac_reg_syscall_entry_t
++ {
++ rsbac_reg_handle_t registration_handle;
++ rsbac_reg_handle_t dispatcher_handle;
++ char name[RSBAC_REG_NAME_LEN+1];
++ rsbac_reg_syscall_func_t * syscall_func;
++ };
++
++/***************************************************/
++/* Prototypes */
++/***************************************************/
++
++/* See rsbac/types.h for types */
++
++/*
++ * Register an ADF decision module
++ * Returns given positive handle or negative error code from rsbac/error.h
++ * Errors: -RSBAC_EINVALIDVALUE (all functions are empty or handle is not positive)
++ * -RSBAC_EEXISTS (handle exists - choose another one)
++ * -RSBAC_ECOULDNOTADDITEM (no entry available)
++ * -RSBAC_EINVALIDVERSION (wrong REG version)
++ */
++
++rsbac_reg_handle_t rsbac_reg_register( rsbac_version_t version,
++ struct rsbac_reg_entry_t entry);
++
++/*
++ * Switch module on or off - for 'normal' modules this is done by general
++ * function. This is a dummy, if module switching is disabled.
++ * Returns 0 on success or -EINVALIDTARGET, if handle is invalid.
++ */
++
++int rsbac_reg_switch (rsbac_reg_handle_t handle, rsbac_boolean_t value);
++
++/*
++ * Unregister an ADF decision module
++ * Returns 0 on success or -EINVALIDTARGET, if handle is invalid.
++ */
++
++int rsbac_reg_unregister(rsbac_reg_handle_t handle);
++
++
++/*
++ * Register a system call
++ * Returns given positive handle or negative error code from rsbac/error.h
++ * Errors: -RSBAC_EINVALIDVALUE (function is empty or handle is not positive)
++ * -RSBAC_EEXISTS (handle exists - choose another one)
++ * -RSBAC_ECOULDNOTADDITEM (no entry available)
++ * -RSBAC_EINVALIDVERSION (wrong REG version)
++ */
++
++rsbac_reg_handle_t rsbac_reg_register_syscall( rsbac_version_t version,
++ struct rsbac_reg_syscall_entry_t entry);
++
++/*
++ * Unregister a system call
++ * Returns 0 on success or -EINVALIDTARGET, if handle is invalid.
++ */
++
++int rsbac_reg_unregister_syscall(rsbac_reg_handle_t handle);
++
++#endif
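
To make the registration interface above concrete, here is a minimal sketch of a REG decision module that grants every request. It is illustrative only and not part of the patch: GRANTED, TRUE and rsbac_reg_handle_t are assumed to come from rsbac/types.h, and the handle value and module name are arbitrary example choices.

#include <linux/string.h>
#include <linux/errno.h>
#include <rsbac/types.h>
#include <rsbac/reg.h>

/* Decision function: grant everything (a real module would inspect the
 * request, target and attribute arguments before deciding). */
static int example_request(enum rsbac_adf_request_t request,
                           rsbac_pid_t caller_pid,
                           enum rsbac_target_t target,
                           union rsbac_target_id_t tid,
                           enum rsbac_attribute_t attr,
                           union rsbac_attribute_value_t attr_val,
                           rsbac_uid_t owner)
{
        return GRANTED;
}

static rsbac_reg_handle_t example_handle;

static int example_register(void)
{
        struct rsbac_reg_entry_t entry;

        memset(&entry, 0, sizeof(entry));
        strcpy(entry.name, "reg-example");
        entry.handle = 1234;              /* caller-chosen positive handle */
        entry.request_func = example_request;
        entry.switch_on = TRUE;           /* module active right away */

        example_handle = rsbac_reg_register(RSBAC_REG_VERSION, entry);
        return (example_handle > 0) ? 0 : -ENODEV;
}
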
+diff --git a/include/rsbac/reg_main.h b/include/rsbac/reg_main.h
+new file mode 100644
+index 0000000..c6fe1c4
+--- /dev/null
++++ b/include/rsbac/reg_main.h
+@@ -0,0 +1,70 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2005: Amon Ott */
++/* REG - Module Registration */
++/* Internal declarations and types */
++/* Last modified: 22/Jul/2005 */
++/************************************ */
++
++#ifndef __RSBAC_REG_MAIN_H
++#define __RSBAC_REG_MAIN_H
++
++#include <rsbac/types.h>
++#include <rsbac/debug.h>
++#include <rsbac/reg.h>
++
++#define RSBAC_REG_PROC_NAME "reg_entries"
++
++/***************************************************/
++/* Types */
++/***************************************************/
++
++#ifdef __KERNEL__
++
++/* Since all registrations are organized in doubly linked lists, we need */
++/* list items and a list head. */
++
++struct rsbac_reg_list_item_t
++ {
++ struct rsbac_reg_entry_t entry;
++ struct rsbac_reg_list_item_t * prev;
++ struct rsbac_reg_list_item_t * next;
++ };
++
++struct rsbac_reg_sc_list_item_t
++ {
++ struct rsbac_reg_syscall_entry_t entry;
++ struct rsbac_reg_sc_list_item_t * prev;
++ struct rsbac_reg_sc_list_item_t * next;
++ };
++
++/* To provide consistency we use spinlocks for all list accesses. The */
++/* 'curr' entry is used to avoid repeated lookups for the same item. */
++
++struct rsbac_reg_list_head_t
++ {
++ struct rsbac_reg_list_item_t * head;
++ struct rsbac_reg_list_item_t * tail;
++ struct rsbac_reg_list_item_t * curr;
++ spinlock_t lock;
++ int readers;
++ u_int count;
++ };
++
++struct rsbac_reg_sc_list_head_t
++ {
++ struct rsbac_reg_sc_list_item_t * head;
++ struct rsbac_reg_sc_list_item_t * tail;
++ struct rsbac_reg_sc_list_item_t * curr;
++ spinlock_t lock;
++ int readers;
++ u_int count;
++ };
++
++#endif /* __KERNEL__ */
++
++/***************************************************/
++/* Prototypes */
++/***************************************************/
++
++#endif
+diff --git a/include/rsbac/repl_lists.h b/include/rsbac/repl_lists.h
+new file mode 100644
+index 0000000..a377685
+--- /dev/null
++++ b/include/rsbac/repl_lists.h
+@@ -0,0 +1,18 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2005: Amon Ott <ao@rsbac.org> */
++/* Generic lists - internal structures */
++/* Last modified: 04/Apr/2005 */
++/*************************************************** */
++
++#ifndef __RSBAC_REPL_LISTS_H
++#define __RSBAC_REPL_LISTS_H
++
++#include <rsbac/repl_types.h>
++
++#define RSBAC_LIST_REPL_PROC_NAME "repl_lists"
++#define RSBAC_LIST_REPL_PARTNER_VERSION 1
++#define RSBAC_LIST_REPL_PARTNER_KEY 0x3632f7ae
++#define RSBAC_LIST_REPL_PARTNER_FILENAME "replpar"
++
++#endif
+diff --git a/include/rsbac/repl_types.h b/include/rsbac/repl_types.h
+new file mode 100644
+index 0000000..860d258
+--- /dev/null
++++ b/include/rsbac/repl_types.h
+@@ -0,0 +1,28 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2005: Amon Ott <ao@rsbac.org> */
++/* Generic lists - internal structures */
++/* Last modified: 04/Apr/2005 */
++/*************************************************** */
++
++#ifndef __RSBAC_REPL_TYPES_H
++#define __RSBAC_REPL_TYPES_H
++
++#include <rsbac/types.h>
++
++#define RSBAC_LIST_REPL_NAME_LEN 16
++#define RSBAC_LIST_REPL_CRYPTKEY_LEN 256
++#define RSBAC_LIST_REPL_CRYPTALGO_LEN 64
++
++typedef __u32 rsbac_list_repl_partner_number_t;
++
++struct rsbac_list_repl_partner_entry_t
++ {
++ char name[RSBAC_LIST_REPL_NAME_LEN];
++ __u32 ip_addr;
++ char crypt_algo[RSBAC_LIST_REPL_CRYPTALGO_LEN];
++ char crypt_key[RSBAC_LIST_REPL_CRYPTKEY_LEN];
++ __u32 crypt_key_len;
++ };
++
++#endif
+diff --git a/include/rsbac/request_groups.h b/include/rsbac/request_groups.h
+new file mode 100644
+index 0000000..a45c83b
+--- /dev/null
++++ b/include/rsbac/request_groups.h
+@@ -0,0 +1,420 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2008: Amon Ott */
++/* Groups of ADF request for */
++/* administration */
++/* Last modified: 21/Jan/2008 */
++/************************************ */
++
++#ifndef __RSBAC_REQUEST_GROUPS_H
++#define __RSBAC_REQUEST_GROUPS_H
++
++#define RSBAC_READ_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_CHDIR) | \
++ ((rsbac_request_vector_t) 1 << R_CLOSE) | \
++ ((rsbac_request_vector_t) 1 << R_GET_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_READ) | \
++ ((rsbac_request_vector_t) 1 << R_READ_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_SEARCH) | \
++ ((rsbac_request_vector_t) 1 << R_TERMINATE) | \
++ ((rsbac_request_vector_t) 1 << R_AUTHENTICATE) \
++ )
++
++#define RSBAC_WRITE_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_ALTER) | \
++ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CLONE) | \
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_DELETE) | \
++ ((rsbac_request_vector_t) 1 << R_LINK_HARD) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ACCESS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_RENAME) | \
++ ((rsbac_request_vector_t) 1 << R_SEND_SIGNAL) | \
++ ((rsbac_request_vector_t) 1 << R_TRACE) | \
++ ((rsbac_request_vector_t) 1 << R_TRUNCATE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_IOCTL) | \
++ ((rsbac_request_vector_t) 1 << R_LOCK) \
++ )
++
++#define RSBAC_READ_WRITE_REQUEST_VECTOR (\
++ RSBAC_READ_REQUEST_VECTOR | \
++ ((rsbac_request_vector_t) 1 << R_ALTER) | \
++ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CLONE) | \
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_DELETE) | \
++ ((rsbac_request_vector_t) 1 << R_LINK_HARD) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ACCESS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_READ_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_RENAME) | \
++ ((rsbac_request_vector_t) 1 << R_SEND_SIGNAL) | \
++ ((rsbac_request_vector_t) 1 << R_TRACE) | \
++ ((rsbac_request_vector_t) 1 << R_TRUNCATE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_BIND) | \
++ ((rsbac_request_vector_t) 1 << R_LISTEN) | \
++ ((rsbac_request_vector_t) 1 << R_ACCEPT) | \
++ ((rsbac_request_vector_t) 1 << R_CONNECT) | \
++ ((rsbac_request_vector_t) 1 << R_SEND) | \
++ ((rsbac_request_vector_t) 1 << R_RECEIVE) | \
++ ((rsbac_request_vector_t) 1 << R_NET_SHUTDOWN) | \
++ ((rsbac_request_vector_t) 1 << R_IOCTL) | \
++ ((rsbac_request_vector_t) 1 << R_LOCK) \
++ )
++
++#define RSBAC_READ_WRITE_OPEN_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_READ_WRITE_OPEN) \
++ )
++
++#define RSBAC_EXECUTE_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_EXECUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MAP_EXEC) \
++ )
++
++
++#define RSBAC_SYSTEM_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_ADD_TO_KERNEL) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MOUNT) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_REMOVE_FROM_KERNEL) | \
++ ((rsbac_request_vector_t) 1 << R_SHUTDOWN) | \
++ ((rsbac_request_vector_t) 1 << R_UMOUNT) \
++ )
++
++#define RSBAC_SECURITY_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_LOG) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_MODULE) \
++ )
++
++#define RSBAC_FD_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_ADD_TO_KERNEL) | \
++ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHDIR) | \
++ ((rsbac_request_vector_t) 1 << R_CLOSE) | \
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_DELETE) | \
++ ((rsbac_request_vector_t) 1 << R_EXECUTE) | \
++ ((rsbac_request_vector_t) 1 << R_GET_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_LINK_HARD) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ACCESS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MOUNT) | \
++ ((rsbac_request_vector_t) 1 << R_READ) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_READ_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_REMOVE_FROM_KERNEL) | \
++ ((rsbac_request_vector_t) 1 << R_RENAME) | \
++ ((rsbac_request_vector_t) 1 << R_SEARCH) | \
++ ((rsbac_request_vector_t) 1 << R_TRUNCATE) | \
++ ((rsbac_request_vector_t) 1 << R_UMOUNT) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_MAP_EXEC) | \
++ ((rsbac_request_vector_t) 1 << R_LISTEN) | \
++ ((rsbac_request_vector_t) 1 << R_ACCEPT) | \
++ ((rsbac_request_vector_t) 1 << R_CONNECT) | \
++ ((rsbac_request_vector_t) 1 << R_SEND) | \
++ ((rsbac_request_vector_t) 1 << R_RECEIVE) | \
++ ((rsbac_request_vector_t) 1 << R_NET_SHUTDOWN) | \
++ ((rsbac_request_vector_t) 1 << R_IOCTL) | \
++ ((rsbac_request_vector_t) 1 << R_LOCK) \
++ )
++
++#define RSBAC_DEV_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_ADD_TO_KERNEL) | \
++ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_CLOSE) | \
++ ((rsbac_request_vector_t) 1 << R_GET_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MOUNT) | \
++ ((rsbac_request_vector_t) 1 << R_READ) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_READ_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_REMOVE_FROM_KERNEL) | \
++ ((rsbac_request_vector_t) 1 << R_UMOUNT) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_SEND) | \
++ ((rsbac_request_vector_t) 1 << R_IOCTL) \
++ )
++
++#define RSBAC_IPC_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_ALTER) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CLOSE) | \
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_DELETE) | \
++ ((rsbac_request_vector_t) 1 << R_GET_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_READ) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_READ_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) | \
++ ((rsbac_request_vector_t) 1 << R_NET_SHUTDOWN) | \
++ ((rsbac_request_vector_t) 1 << R_BIND) | \
++ ((rsbac_request_vector_t) 1 << R_LISTEN) | \
++ ((rsbac_request_vector_t) 1 << R_ACCEPT) | \
++ ((rsbac_request_vector_t) 1 << R_CONNECT) | \
++ ((rsbac_request_vector_t) 1 << R_SEND) | \
++ ((rsbac_request_vector_t) 1 << R_RECEIVE) | \
++ ((rsbac_request_vector_t) 1 << R_IOCTL) | \
++ ((rsbac_request_vector_t) 1 << R_LOCK) \
++ )
++
++#define RSBAC_SCD_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_GET_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) \
++ )
++
++#define RSBAC_USER_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_DELETE) | \
++ ((rsbac_request_vector_t) 1 << R_GET_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_READ) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_RENAME) | \
++ ((rsbac_request_vector_t) 1 << R_SEARCH) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) | \
++ ((rsbac_request_vector_t) 1 << R_AUTHENTICATE) \
++ )
++
++#define RSBAC_GROUP_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_DELETE) | \
++ ((rsbac_request_vector_t) 1 << R_GET_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_READ) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_RENAME) | \
++ ((rsbac_request_vector_t) 1 << R_SEARCH) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) \
++ )
++
++#define RSBAC_PROCESS_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CLONE) | \
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_SEND_SIGNAL) | \
++ ((rsbac_request_vector_t) 1 << R_TERMINATE) | \
++ ((rsbac_request_vector_t) 1 << R_TRACE) \
++ )
++
++#define RSBAC_NETDEV_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_BIND) \
++ )
++
++#define RSBAC_NETTEMP_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_DELETE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) \
++ )
++
++#define RSBAC_NETOBJ_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_CLOSE) | \
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_GET_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_READ) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_NET_SHUTDOWN) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) | \
++ ((rsbac_request_vector_t) 1 << R_BIND) | \
++ ((rsbac_request_vector_t) 1 << R_LISTEN) | \
++ ((rsbac_request_vector_t) 1 << R_ACCEPT) | \
++ ((rsbac_request_vector_t) 1 << R_CONNECT) | \
++ ((rsbac_request_vector_t) 1 << R_SEND) | \
++ ((rsbac_request_vector_t) 1 << R_RECEIVE) | \
++ ((rsbac_request_vector_t) 1 << R_IOCTL) \
++ )
++
++#define RSBAC_NONE_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_ADD_TO_KERNEL) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_REMOVE_FROM_KERNEL) | \
++ ((rsbac_request_vector_t) 1 << R_SHUTDOWN) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_LOG) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_MODULE) | \
++ ((rsbac_request_vector_t) 1 << R_MAP_EXEC) \
++ )
++
++#define RSBAC_ALL_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_ADD_TO_KERNEL) | \
++ ((rsbac_request_vector_t) 1 << R_ALTER) | \
++ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHDIR) | \
++ ((rsbac_request_vector_t) 1 << R_CLONE) | \
++ ((rsbac_request_vector_t) 1 << R_CLOSE) | \
++ ((rsbac_request_vector_t) 1 << R_CREATE) | \
++ ((rsbac_request_vector_t) 1 << R_DELETE) | \
++ ((rsbac_request_vector_t) 1 << R_EXECUTE) | \
++ ((rsbac_request_vector_t) 1 << R_GET_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_LINK_HARD) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ACCESS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_SYSTEM_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MOUNT) | \
++ ((rsbac_request_vector_t) 1 << R_READ) | \
++ ((rsbac_request_vector_t) 1 << R_READ_ATTRIBUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_READ_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_REMOVE_FROM_KERNEL) | \
++ ((rsbac_request_vector_t) 1 << R_RENAME) | \
++ ((rsbac_request_vector_t) 1 << R_SEARCH) | \
++ ((rsbac_request_vector_t) 1 << R_SEND_SIGNAL) | \
++ ((rsbac_request_vector_t) 1 << R_SHUTDOWN) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_LOG) | \
++ ((rsbac_request_vector_t) 1 << R_SWITCH_MODULE) | \
++ ((rsbac_request_vector_t) 1 << R_TERMINATE) | \
++ ((rsbac_request_vector_t) 1 << R_TRACE) | \
++ ((rsbac_request_vector_t) 1 << R_TRUNCATE) | \
++ ((rsbac_request_vector_t) 1 << R_UMOUNT) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_MAP_EXEC) | \
++ ((rsbac_request_vector_t) 1 << R_BIND) | \
++ ((rsbac_request_vector_t) 1 << R_LISTEN) | \
++ ((rsbac_request_vector_t) 1 << R_ACCEPT) | \
++ ((rsbac_request_vector_t) 1 << R_CONNECT) | \
++ ((rsbac_request_vector_t) 1 << R_SEND) | \
++ ((rsbac_request_vector_t) 1 << R_RECEIVE) | \
++ ((rsbac_request_vector_t) 1 << R_NET_SHUTDOWN) | \
++ ((rsbac_request_vector_t) 1 << R_IOCTL) | \
++ ((rsbac_request_vector_t) 1 << R_LOCK) \
++ )
++
++/* NW specials */
++
++/* NWS == RSBAC_ACL_SUPERVISOR_RIGHT_VECTOR in ACL types */
++
++#define RSBAC_NWR_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_CLOSE) | \
++ ((rsbac_request_vector_t) 1 << R_EXECUTE) | \
++ ((rsbac_request_vector_t) 1 << R_READ_OPEN) \
++ )
++
++#define RSBAC_NWW_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_ALTER) | \
++ ((rsbac_request_vector_t) 1 << R_APPEND_OPEN) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CLOSE) | \
++ ((rsbac_request_vector_t) 1 << R_TRUNCATE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE) | \
++ ((rsbac_request_vector_t) 1 << R_WRITE_OPEN) \
++ )
++
++#define RSBAC_NWC_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_CLOSE) | \
++ ((rsbac_request_vector_t) 1 << R_CREATE) \
++ )
++
++#define RSBAC_NWE_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_DELETE) \
++ )
++
++/* NWA == RSBAC_ACL_ACCESS_CONTROL_RIGHT_VECTOR in ACL types */
++
++#define RSBAC_NWF_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_CHDIR) | \
++ ((rsbac_request_vector_t) 1 << R_CLOSE) | \
++ ((rsbac_request_vector_t) 1 << R_GET_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_GET_STATUS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_READ) | \
++ ((rsbac_request_vector_t) 1 << R_SEARCH) \
++ )
++
++#define RSBAC_NWM_REQUEST_VECTOR (\
++ ((rsbac_request_vector_t) 1 << R_CHANGE_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_GROUP) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_EFF_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_CHANGE_DAC_FS_OWNER) | \
++ ((rsbac_request_vector_t) 1 << R_LINK_HARD) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_ACCESS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_MODIFY_PERMISSIONS_DATA) | \
++ ((rsbac_request_vector_t) 1 << R_RENAME) \
++ )
++
++#endif
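
Each request group above is a plain bit mask over the R_* request numbers, so membership tests reduce to a single AND. A small sketch (not part of the patch, assuming the R_* enum and rsbac_request_vector_t from rsbac/types.h):

#include <rsbac/types.h>
#include <rsbac/request_groups.h>

/* Nonzero if the given request belongs to the read-like group. */
static inline int request_is_read_like(enum rsbac_adf_request_t request)
{
        return (RSBAC_READ_REQUEST_VECTOR &
                ((rsbac_request_vector_t) 1 << request)) != 0;
}
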
+diff --git a/include/rsbac/res_getname.h b/include/rsbac/res_getname.h
+new file mode 100644
+index 0000000..37512ad
+--- /dev/null
++++ b/include/rsbac/res_getname.h
+@@ -0,0 +1,20 @@
++/********************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 2002: */
++/* Amon Ott <ao@rsbac.org> */
++/* Getname functions for RES module */
++/* Last modified: 22/Nov/2002 */
++/********************************** */
++
++#ifndef __RSBAC_RES_GETNAME_H
++#define __RSBAC_RES_GETNAME_H
++
++#include <rsbac/types.h>
++
++#ifndef __KERNEL__
++char * get_res_name(char * name,
++ u_int value);
++int get_res_nr(const char * name);
++#endif
++
++#endif
+diff --git a/include/rsbac/rkmem.h b/include/rsbac/rkmem.h
+new file mode 100644
+index 0000000..a765a0c
+--- /dev/null
++++ b/include/rsbac/rkmem.h
+@@ -0,0 +1,72 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2012: Amon Ott <ao@rsbac.org> */
++/* Memory allocation */
++/* Last modified: 19/Apr/2012 */
++/*************************************************** */
++
++#ifndef __RSBAC_RKMEM_H
++#define __RSBAC_RKMEM_H
++
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/timer.h>
++
++#define RSBAC_MAX_KMALLOC KMALLOC_MAX_SIZE
++#define RSBAC_MAX_SLABNAME 32
++
++/* allocate memory in a spinlock-safe way (GFP_ATOMIC) */
++void * rsbac_kmalloc (size_t size);
++void * rsbac_kmalloc_clear (size_t size);
++
++/* alloc outside locks with GFP_KERNEL */
++void * rsbac_kmalloc_unlocked (size_t size);
++void * rsbac_kmalloc_clear_unlocked (size_t size);
++
++void rsbac_kfree (const void * objp);
++
++/* Separate slabs for RSBAC */
++
++/* name must stay available until after destroy, keep locally */
++static inline struct kmem_cache * rsbac_slab_create(
++ const char * name,
++ size_t size) {
++ return kmem_cache_create(name, size, 0, 0, NULL);
++}
++
++/* remember to free up name after calling, if it has been allocated */
++static inline void rsbac_slab_destroy(struct kmem_cache * cache)
++{
++ kmem_cache_destroy(cache);
++}
++
++static inline void * rsbac_smalloc(struct kmem_cache * cache)
++{
++ return kmem_cache_alloc(cache, GFP_ATOMIC);
++}
++
++static inline void * rsbac_smalloc_clear(struct kmem_cache * cache)
++{
++ return kmem_cache_alloc(cache, GFP_ATOMIC | __GFP_ZERO);
++}
++
++static inline void * rsbac_smalloc_unlocked(struct kmem_cache * cache)
++{
++ return kmem_cache_alloc(cache, GFP_KERNEL);
++}
++
++static inline void * rsbac_smalloc_clear_unlocked(struct kmem_cache * cache)
++{
++ return kmem_cache_alloc(cache, GFP_KERNEL | __GFP_ZERO);
++}
++
++static inline void rsbac_sfree(struct kmem_cache * cache, void * mem)
++{
++ if (cache)
++ kmem_cache_free(cache, mem);
++ else
++ kfree(mem);
++}
++
++#endif
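
Typical use of the slab wrappers above might look as follows. This is an illustrative sketch only, not part of the patch: the cache name and item structure are made up, and a string literal is used as the cache name because it trivially outlives the cache.

#include <linux/errno.h>
#include <rsbac/rkmem.h>

struct example_item {
        __u32 id;
        char  tag[32];
};

static struct kmem_cache *example_cache;

static int example_cache_init(void)
{
        /* the name string must stay valid until rsbac_slab_destroy() */
        example_cache = rsbac_slab_create("rsbac-example",
                                          sizeof(struct example_item));
        if (!example_cache)
                return -ENOMEM;
        return 0;
}

static struct example_item *example_item_new(void)
{
        /* zeroed allocation outside of spinlocks (GFP_KERNEL) */
        return rsbac_smalloc_clear_unlocked(example_cache);
}

static void example_item_free(struct example_item *item)
{
        rsbac_sfree(example_cache, item);
}

static void example_cache_exit(void)
{
        rsbac_slab_destroy(example_cache);
}
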
+diff --git a/include/rsbac/syscall_rsbac.h b/include/rsbac/syscall_rsbac.h
+new file mode 100644
+index 0000000..8ceb81d
+--- /dev/null
++++ b/include/rsbac/syscall_rsbac.h
+@@ -0,0 +1,37 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* */
++/* Author and (c) 1999-2004: */
++/* Amon Ott <ao@rsbac.org> */
++/* */
++/* System Calls */
++/* */
++/* Last modified: 13/Apr/2004 */
++/************************************ */
++
++#ifndef __RSBAC_SYSCALL_RSBAC_H
++#define __RSBAC_SYSCALL_RSBAC_H
++
++/* to keep include/asm-alpha/unistd.h happy */
++//#define __LIBRARY__
++
++#include <linux/unistd.h>
++#include <rsbac/types.h>
++#include <rsbac/syscalls.h>
++
++#ifdef __PIC__
++#undef _syscall3
++#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
++ type name(type1 arg1,type2 arg2,type3 arg3) \
++{\
++ return syscall(__NR_##name, arg1, arg2, arg3);\
++}
++#endif
++
++static inline _syscall3(int, rsbac,
++ rsbac_version_t, version,
++ enum rsbac_syscall_t, call,
++ union rsbac_syscall_arg_t *, arg_p);
++
++#define sys_rsbac(a,b,c) rsbac(a,b,c)
++#endif
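
From user space, the wrapper above is used by filling the argument union for the selected call and invoking sys_rsbac(). A hedged sketch follows (not part of the patch) using RSYS_check: the flag semantics follow the rsbac_reg_check_func_t comment in rsbac/reg.h, and the RSBAC version number is passed in by the caller here to avoid guessing the exact name of the version macro in the headers.

#include <rsbac/syscall_rsbac.h>

/* Ask the kernel to check RSBAC data structures without correcting them. */
int rsbac_consistency_check(rsbac_version_t version)
{
        union rsbac_syscall_arg_t arg;

        arg.check.correct = 0;      /* report problems only, do not fix them */
        arg.check.check_inode = 1;  /* verify recorded inodes still exist */
        return sys_rsbac(version, RSYS_check, &arg);
}
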
+diff --git a/include/rsbac/syscalls.h b/include/rsbac/syscalls.h
+new file mode 100644
+index 0000000..71c2f10
+--- /dev/null
++++ b/include/rsbac/syscalls.h
+@@ -0,0 +1,1583 @@
++/************************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2012: */
++/* Amon Ott <ao@rsbac.org> */
++/* Syscall wrapper functions for all */
++/* parts */
++/* Last modified: 07/May/2012 */
++/************************************* */
++
++#ifndef __RSBAC_SYSCALLS_H
++#define __RSBAC_SYSCALLS_H
++
++#include <linux/unistd.h>
++#include <rsbac/types.h>
++#include <rsbac/helpers.h>
++#include <rsbac/error.h>
++
++enum rsbac_syscall_t
++ {
++ RSYS_version,
++ RSYS_stats,
++ RSYS_check,
++ RSYS_get_attr,
++ RSYS_get_attr_n,
++ RSYS_set_attr,
++ RSYS_set_attr_n,
++ RSYS_remove_target,
++ RSYS_remove_target_n,
++ RSYS_net_list_all_netdev,
++ RSYS_net_template,
++ RSYS_net_list_all_template,
++ RSYS_switch,
++ RSYS_get_switch,
++ RSYS_adf_log_switch,
++ RSYS_get_adf_log,
++ RSYS_write,
++ RSYS_log,
++ RSYS_mac_set_curr_level,
++ RSYS_mac_get_curr_level,
++ RSYS_mac_get_max_level,
++ RSYS_mac_get_min_level,
++ RSYS_mac_add_p_tru,
++ RSYS_mac_remove_p_tru,
++ RSYS_mac_add_f_tru,
++ RSYS_mac_remove_f_tru,
++ RSYS_mac_get_f_trulist,
++ RSYS_mac_get_p_trulist,
++ RSYS_stats_pm,
++ RSYS_pm,
++ RSYS_pm_change_current_task,
++ RSYS_pm_create_file,
++ RSYS_daz_flush_cache,
++ RSYS_rc_copy_role,
++ RSYS_rc_copy_type,
++ RSYS_rc_get_item,
++ RSYS_rc_set_item,
++ RSYS_rc_change_role,
++ RSYS_rc_get_eff_rights_n,
++ RSYS_rc_get_list,
++ RSYS_auth_add_p_cap,
++ RSYS_auth_remove_p_cap,
++ RSYS_auth_add_f_cap,
++ RSYS_auth_remove_f_cap,
++ RSYS_auth_get_f_caplist,
++ RSYS_auth_get_p_caplist,
++ RSYS_acl,
++ RSYS_acl_n,
++ RSYS_acl_get_rights,
++ RSYS_acl_get_rights_n,
++ RSYS_acl_get_tlist,
++ RSYS_acl_get_tlist_n,
++ RSYS_acl_get_mask,
++ RSYS_acl_get_mask_n,
++ RSYS_acl_group,
++ RSYS_reg,
++ RSYS_jail,
++ RSYS_init,
++ RSYS_rc_get_current_role,
++ RSYS_um_auth_name,
++ RSYS_um_auth_uid,
++ RSYS_um_add_user,
++ RSYS_um_add_group,
++ RSYS_um_add_gm,
++ RSYS_um_mod_user,
++ RSYS_um_mod_group,
++ RSYS_um_get_user_item,
++ RSYS_um_get_group_item,
++ RSYS_um_remove_user,
++ RSYS_um_remove_group,
++ RSYS_um_remove_gm,
++ RSYS_um_user_exists,
++ RSYS_um_group_exists,
++ RSYS_um_get_next_user,
++ RSYS_um_get_user_list,
++ RSYS_um_get_gm_list,
++ RSYS_um_get_gm_user_list,
++ RSYS_um_get_group_list,
++ RSYS_um_get_uid,
++ RSYS_um_get_gid,
++ RSYS_um_set_pass,
++ RSYS_um_set_pass_name,
++ RSYS_um_set_group_pass,
++ RSYS_um_check_account,
++ RSYS_um_check_account_name,
++ RSYS_list_ta_begin,
++ RSYS_list_ta_refresh,
++ RSYS_list_ta_commit,
++ RSYS_list_ta_forget,
++ RSYS_list_all_dev,
++ RSYS_acl_list_all_dev,
++ RSYS_list_all_user,
++ RSYS_acl_list_all_user,
++ RSYS_list_all_group,
++ RSYS_acl_list_all_group,
++ RSYS_list_all_ipc,
++ RSYS_rc_select_fd_create_type,
++ RSYS_um_select_vset,
++ RSYS_um_add_onetime,
++ RSYS_um_add_onetime_name,
++ RSYS_um_remove_all_onetime,
++ RSYS_um_remove_all_onetime_name,
++ RSYS_um_count_onetime,
++ RSYS_um_count_onetime_name,
++ RSYS_list_ta_begin_name,
++ RSYS_um_get_max_history,
++ RSYS_um_get_max_history_name,
++ RSYS_um_set_max_history,
++ RSYS_um_set_max_history_name,
++ RSYS_none
++ };
++
++
++struct rsys_check_t
++ {
++ int correct;
++ int check_inode;
++ };
++
++struct rsys_get_attr_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t module;
++ rsbac_enum_t target;
++ union rsbac_target_id_t __user * tid;
++ rsbac_enum_t attr;
++ union rsbac_attribute_value_t __user * value;
++ int inherit;
++ };
++
++struct rsys_get_attr_n_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t module;
++ rsbac_enum_t target;
++ char __user * t_name;
++ rsbac_enum_t attr;
++ union rsbac_attribute_value_t __user * value;
++ int inherit;
++ };
++
++struct rsys_set_attr_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t module;
++ rsbac_enum_t target;
++ union rsbac_target_id_t __user * tid;
++ rsbac_enum_t attr;
++ union rsbac_attribute_value_t __user * value;
++ };
++
++struct rsys_set_attr_n_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t module;
++ rsbac_enum_t target;
++ char __user * t_name;
++ rsbac_enum_t attr;
++ union rsbac_attribute_value_t __user * value;
++ };
++
++struct rsys_remove_target_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t target;
++ union rsbac_target_id_t __user * tid;
++ };
++
++struct rsys_remove_target_n_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t target;
++ char __user * t_name;
++ };
++
++struct rsys_net_list_all_netdev_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_netdev_id_t __user * id_p;
++ u_long maxnum;
++ };
++
++struct rsys_net_template_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t call;
++ rsbac_net_temp_id_t id;
++ union rsbac_net_temp_syscall_data_t __user * data_p;
++ };
++
++struct rsys_net_list_all_template_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_net_temp_id_t __user * id_p;
++ u_long maxnum;
++ };
++
++struct rsys_switch_t
++ {
++ rsbac_enum_t module;
++ int value;
++ };
++
++struct rsys_get_switch_t
++ {
++ rsbac_enum_t module;
++ int __user * value_p;
++ int __user * switchable_p;
++ };
++
++struct rsys_adf_log_switch_t
++ {
++ rsbac_enum_t request;
++ rsbac_enum_t target;
++ u_int value;
++ };
++
++struct rsys_get_adf_log_t
++ {
++ rsbac_enum_t request;
++ rsbac_enum_t target;
++ u_int __user * value_p;
++ };
++
++struct rsys_log_t
++ {
++ int type;
++ char __user * buf;
++ int len;
++ };
++
++struct rsys_mac_set_curr_level_t
++ {
++ rsbac_security_level_t level;
++ rsbac_mac_category_vector_t __user * categories_p;
++ };
++
++struct rsys_mac_get_curr_level_t
++ {
++ rsbac_security_level_t __user * level_p;
++ rsbac_mac_category_vector_t __user * categories_p;
++ };
++
++struct rsys_mac_get_max_level_t
++ {
++ rsbac_security_level_t __user * level_p;
++ rsbac_mac_category_vector_t __user * categories_p;
++ };
++
++struct rsys_mac_get_min_level_t
++ {
++ rsbac_security_level_t __user * level_p;
++ rsbac_mac_category_vector_t __user * categories_p;
++ };
++
++struct rsys_mac_add_p_tru_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_upid_t pid;
++ rsbac_uid_t uid;
++ rsbac_time_t ttl;
++ };
++
++struct rsys_mac_remove_p_tru_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_upid_t pid;
++ rsbac_uid_t uid;
++ };
++
++struct rsys_mac_add_f_tru_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ char __user * filename;
++ rsbac_uid_t uid;
++ rsbac_time_t ttl;
++ };
++
++struct rsys_mac_remove_f_tru_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ char __user * filename;
++ rsbac_uid_t uid;
++ };
++
++struct rsys_mac_get_f_trulist_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ char __user * filename;
++ rsbac_uid_t __user * trulist;
++ rsbac_time_t __user * ttllist;
++ u_int maxnum;
++ };
++
++struct rsys_mac_get_p_trulist_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_upid_t pid;
++ rsbac_uid_t __user * trulist;
++ rsbac_time_t __user * ttllist;
++ u_int maxnum;
++ };
++
++struct rsys_pm_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t function;
++ union rsbac_pm_function_param_t __user * param_p;
++ rsbac_pm_tkt_id_t ticket;
++ };
++
++struct rsys_pm_change_current_task_t
++ {
++ rsbac_pm_task_id_t task;
++ };
++
++struct rsys_pm_create_file_t
++ {
++ const char __user * filename;
++ int mode;
++ rsbac_pm_object_class_id_t object_class;
++ };
++
++struct rsys_rc_copy_role_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_rc_role_id_t from_role;
++ rsbac_rc_role_id_t to_role;
++ };
++
++struct rsys_rc_copy_type_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t target;
++ rsbac_rc_type_id_t from_type;
++ rsbac_rc_type_id_t to_type;
++ };
++
++struct rsys_rc_get_item_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t target;
++ union rsbac_rc_target_id_t __user * tid_p;
++ union rsbac_rc_target_id_t __user * subtid_p;
++ rsbac_enum_t item;
++ union rsbac_rc_item_value_t __user * value_p;
++ rsbac_time_t __user * ttl_p;
++ };
++
++struct rsys_rc_set_item_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t target;
++ union rsbac_rc_target_id_t __user * tid_p;
++ union rsbac_rc_target_id_t __user * subtid_p;
++ rsbac_enum_t item;
++ union rsbac_rc_item_value_t __user * value_p;
++ rsbac_time_t ttl;
++ };
++
++struct rsys_rc_get_list_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t target;
++ union rsbac_rc_target_id_t __user * tid_p;
++ rsbac_enum_t item;
++ u_int maxnum;
++ __u32 __user * array_p;
++ rsbac_time_t __user * ttl_array_p;
++ };
++
++struct rsys_rc_change_role_t
++ {
++ rsbac_rc_role_id_t role;
++ char __user * pass;
++ };
++
++struct rsys_rc_get_eff_rights_n_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t target;
++ char __user * t_name;
++ rsbac_rc_request_vector_t __user * request_vector_p;
++ rsbac_time_t __user * ttl_p;
++ };
++
++struct rsys_rc_get_current_role_t
++ {
++ rsbac_rc_role_id_t __user * role_p;
++ };
++
++struct rsys_auth_add_p_cap_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_upid_t pid;
++ rsbac_enum_t cap_type;
++ struct rsbac_auth_cap_range_t cap_range;
++ rsbac_time_t ttl;
++ };
++
++struct rsys_auth_remove_p_cap_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_upid_t pid;
++ rsbac_enum_t cap_type;
++ struct rsbac_auth_cap_range_t cap_range;
++ };
++
++struct rsys_auth_add_f_cap_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ char __user * filename;
++ rsbac_enum_t cap_type;
++ struct rsbac_auth_cap_range_t cap_range;
++ rsbac_time_t ttl;
++ };
++
++struct rsys_auth_remove_f_cap_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ char __user * filename;
++ rsbac_enum_t cap_type;
++ struct rsbac_auth_cap_range_t cap_range;
++ };
++
++struct rsys_auth_get_f_caplist_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ char __user * filename;
++ rsbac_enum_t cap_type;
++ struct rsbac_auth_cap_range_t __user * caplist;
++ rsbac_time_t __user * ttllist;
++ u_int maxnum;
++ };
++
++struct rsys_auth_get_p_caplist_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_upid_t pid;
++ rsbac_enum_t cap_type;
++ struct rsbac_auth_cap_range_t __user * caplist;
++ rsbac_time_t __user * ttllist;
++ u_int maxnum;
++ };
++
++struct rsys_acl_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t call;
++ struct rsbac_acl_syscall_arg_t __user * arg;
++ };
++
++struct rsys_acl_n_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t call;
++ struct rsbac_acl_syscall_n_arg_t __user * arg;
++ };
++
++struct rsys_acl_get_rights_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ struct rsbac_acl_syscall_arg_t __user * arg;
++ rsbac_acl_rights_vector_t __user * rights_p;
++ u_int effective;
++ };
++
++struct rsys_acl_get_rights_n_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ struct rsbac_acl_syscall_n_arg_t __user * arg;
++ rsbac_acl_rights_vector_t __user * rights_p;
++ u_int effective;
++ };
++
++struct rsys_acl_get_tlist_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t target;
++ union rsbac_target_id_t __user * tid;
++ struct rsbac_acl_entry_t __user * entry_array;
++ rsbac_time_t __user * ttl_array;
++ u_int maxnum;
++ };
++
++struct rsys_acl_get_tlist_n_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t target;
++ char __user * t_name;
++ struct rsbac_acl_entry_t __user * entry_array;
++ rsbac_time_t __user * ttl_array;
++ u_int maxnum;
++ };
++
++struct rsys_acl_get_mask_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t target;
++ union rsbac_target_id_t __user * tid;
++ rsbac_acl_rights_vector_t __user * mask_p;
++ };
++
++struct rsys_acl_get_mask_n_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t target;
++ char __user * t_name;
++ rsbac_acl_rights_vector_t __user * mask_p;
++ };
++
++struct rsys_acl_group_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_enum_t call;
++ union rsbac_acl_group_syscall_arg_t __user * arg_p;
++ };
++
++struct rsys_reg_t
++ {
++ long handle;
++ void __user * arg;
++ };
++
++struct rsys_jail_t
++ {
++ rsbac_version_t version;
++ char __user * path;
++ rsbac_jail_ip_t ip;
++ rsbac_jail_flags_t flags;
++ rsbac_cap_vector_t max_caps;
++ rsbac_jail_scd_vector_t scd_get;
++ rsbac_jail_scd_vector_t scd_modify;
++ };
++
++struct rsys_init_t
++ {
++ char __user * root_dev;
++ };
++
++struct rsys_um_auth_name_t
++ {
++ char __user * name;
++ char __user * pass;
++ };
++
++struct rsys_um_auth_uid_t
++ {
++ rsbac_uid_t uid;
++ char __user * pass;
++ };
++
++struct rsys_um_add_user_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_uid_t uid;
++ struct rsbac_um_user_entry_t __user * entry_p;
++ char __user * pass;
++ rsbac_time_t ttl;
++ };
++
++struct rsys_um_add_group_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_gid_t gid;
++ struct rsbac_um_group_entry_t __user * entry_p;
++ char __user * pass;
++ rsbac_time_t ttl;
++ };
++
++struct rsys_um_add_gm_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_uid_t uid;
++ rsbac_gid_num_t gid;
++ rsbac_time_t ttl;
++ };
++
++struct rsys_um_mod_user_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_uid_t uid;
++ rsbac_enum_t mod;
++ union rsbac_um_mod_data_t __user * data_p;
++ };
++
++struct rsys_um_mod_group_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_gid_t gid;
++ rsbac_enum_t mod;
++ union rsbac_um_mod_data_t __user * data_p;
++ };
++
++struct rsys_um_get_user_item_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_uid_t uid;
++ rsbac_enum_t mod;
++ union rsbac_um_mod_data_t __user * data_p;
++ };
++
++struct rsys_um_get_group_item_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_gid_t gid;
++ rsbac_enum_t mod;
++ union rsbac_um_mod_data_t __user * data_p;
++ };
++
++struct rsys_um_remove_user_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_uid_t uid;
++ };
++
++struct rsys_um_remove_group_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_gid_t gid;
++ };
++
++struct rsys_um_remove_gm_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_uid_t uid;
++ rsbac_gid_num_t gid;
++ };
++
++struct rsys_um_user_exists_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_uid_t uid;
++ };
++
++struct rsys_um_group_exists_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_gid_t gid;
++ };
++
++struct rsys_um_get_next_user_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_uid_t old_user;
++ rsbac_uid_t __user * next_user_p;
++ };
++
++struct rsys_um_get_user_list_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_um_set_t vset;
++ rsbac_uid_t __user * user_array;
++ u_int maxnum;
++ };
++
++struct rsys_um_get_gm_list_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_uid_t user;
++ rsbac_gid_num_t __user * group_array;
++ u_int maxnum;
++ };
++
++struct rsys_um_get_gm_user_list_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_gid_t group;
++ rsbac_uid_num_t __user * user_array;
++ u_int maxnum;
++ };
++
++struct rsys_um_get_group_list_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_um_set_t vset;
++ rsbac_gid_t __user * group_array;
++ u_int maxnum;
++ };
++
++struct rsys_um_get_uid_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ char __user * name;
++ rsbac_uid_t __user * uid_p;
++ };
++
++struct rsys_um_get_gid_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ char __user * name;
++ rsbac_gid_t __user * gid_p;
++ };
++
++struct rsys_um_set_pass_t
++ {
++ rsbac_uid_t uid;
++ char __user * old_pass;
++ char __user * new_pass;
++ };
++
++struct rsys_um_set_pass_name_t
++ {
++ char __user * name;
++ char __user * old_pass;
++ char __user * new_pass;
++ };
++
++struct rsys_um_add_onetime_t
++ {
++ rsbac_uid_t uid;
++ char __user * old_pass;
++ char __user * new_pass;
++ rsbac_time_t ttl;
++ };
++
++struct rsys_um_add_onetime_name_t
++ {
++ char __user * name;
++ char __user * old_pass;
++ char __user * new_pass;
++ rsbac_time_t ttl;
++ };
++
++struct rsys_um_remove_all_onetime_t
++ {
++ rsbac_uid_t uid;
++ char __user * old_pass;
++ };
++
++struct rsys_um_remove_all_onetime_name_t
++ {
++ char __user * name;
++ char __user * old_pass;
++ };
++
++struct rsys_um_count_onetime_t
++ {
++ rsbac_uid_t uid;
++ char __user * old_pass;
++ };
++
++struct rsys_um_count_onetime_name_t
++ {
++ char __user * name;
++ char __user * old_pass;
++ };
++
++struct rsys_um_set_group_pass_t
++ {
++ rsbac_gid_t gid;
++ char __user * new_pass;
++ };
++
++struct rsys_um_check_account_t
++ {
++ rsbac_uid_t uid;
++ };
++
++struct rsys_um_check_account_name_t
++ {
++ char __user * name;
++ };
++
++struct rsys_um_get_max_history_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_uid_t uid;
++ };
++
++struct rsys_um_get_max_history_name_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ char __user * name;
++ };
++
++struct rsys_um_set_max_history_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_uid_t uid;
++ __u8 max_history;
++ };
++
++struct rsys_um_set_max_history_name_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ char __user * name;
++ __u8 max_history;
++ };
++
++struct rsys_um_select_vset_t
++ {
++ rsbac_um_set_t vset;
++ };
++
++struct rsys_list_ta_begin_t
++ {
++ rsbac_time_t ttl;
++ rsbac_list_ta_number_t __user * ta_number_p;
++ rsbac_uid_t commit_uid;
++ char __user * password;
++ };
++
++struct rsys_list_ta_begin_name_t
++ {
++ rsbac_time_t ttl;
++ rsbac_list_ta_number_t __user * ta_number_p;
++ rsbac_uid_t commit_uid;
++ char __user * name;
++ char __user * password;
++ };
++
++struct rsys_list_ta_refresh_t
++ {
++ rsbac_time_t ttl;
++ rsbac_list_ta_number_t ta_number;
++ char __user * password;
++ };
++
++struct rsys_list_ta_commit_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ char __user * password;
++ };
++
++struct rsys_list_ta_forget_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ char __user * password;
++ };
++
++struct rsys_list_all_dev_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ struct rsbac_dev_desc_t __user * id_p;
++ u_long maxnum;
++ };
++
++struct rsys_acl_list_all_dev_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ struct rsbac_dev_desc_t __user * id_p;
++ u_long maxnum;
++ };
++
++struct rsys_list_all_user_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_uid_t __user * id_p;
++ u_long maxnum;
++ };
++
++struct rsys_acl_list_all_user_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_uid_t __user * id_p;
++ u_long maxnum;
++ };
++
++struct rsys_list_all_group_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_gid_t __user * id_p;
++ u_long maxnum;
++ };
++
++struct rsys_acl_list_all_group_t
++ {
++ rsbac_list_ta_number_t ta_number;
++ rsbac_gid_t __user * id_p;
++ u_long maxnum;
++ };
++
++struct rsys_list_all_ipc_t {
++ rsbac_list_ta_number_t ta_number;
++ struct rsbac_ipc_t __user *id_p;
++ u_long maxnum;
++};
++
++struct rsys_rc_select_fd_create_type_t {
++ rsbac_rc_type_id_t type;
++};
++
++
++union rsbac_syscall_arg_t
++ {
++ struct rsys_check_t check;
++ struct rsys_get_attr_t get_attr;
++ struct rsys_get_attr_n_t get_attr_n;
++ struct rsys_set_attr_t set_attr;
++ struct rsys_set_attr_n_t set_attr_n;
++ struct rsys_remove_target_t remove_target;
++ struct rsys_remove_target_n_t remove_target_n;
++ struct rsys_net_list_all_netdev_t net_list_all_netdev;
++ struct rsys_net_template_t net_template;
++ struct rsys_net_list_all_template_t net_list_all_template;
++ struct rsys_switch_t switch_module;
++ struct rsys_get_switch_t get_switch_module;
++ struct rsys_adf_log_switch_t adf_log_switch;
++ struct rsys_get_adf_log_t get_adf_log;
++ struct rsys_log_t log;
++ struct rsys_mac_set_curr_level_t mac_set_curr_level;
++ struct rsys_mac_get_curr_level_t mac_get_curr_level;
++ struct rsys_mac_get_max_level_t mac_get_max_level;
++ struct rsys_mac_get_min_level_t mac_get_min_level;
++ struct rsys_mac_add_p_tru_t mac_add_p_tru;
++ struct rsys_mac_remove_p_tru_t mac_remove_p_tru;
++ struct rsys_mac_add_f_tru_t mac_add_f_tru;
++ struct rsys_mac_remove_f_tru_t mac_remove_f_tru;
++ struct rsys_mac_get_f_trulist_t mac_get_f_trulist;
++ struct rsys_mac_get_p_trulist_t mac_get_p_trulist;
++ struct rsys_pm_t pm;
++ struct rsys_pm_change_current_task_t pm_change_current_task;
++ struct rsys_pm_create_file_t pm_create_file;
++ struct rsys_rc_copy_role_t rc_copy_role;
++ struct rsys_rc_copy_type_t rc_copy_type;
++ struct rsys_rc_get_item_t rc_get_item;
++ struct rsys_rc_set_item_t rc_set_item;
++ struct rsys_rc_get_list_t rc_get_list;
++ struct rsys_rc_change_role_t rc_change_role;
++ struct rsys_rc_get_eff_rights_n_t rc_get_eff_rights_n;
++ struct rsys_rc_get_current_role_t rc_get_current_role;
++ struct rsys_auth_add_p_cap_t auth_add_p_cap;
++ struct rsys_auth_remove_p_cap_t auth_remove_p_cap;
++ struct rsys_auth_add_f_cap_t auth_add_f_cap;
++ struct rsys_auth_remove_f_cap_t auth_remove_f_cap;
++ struct rsys_auth_get_f_caplist_t auth_get_f_caplist;
++ struct rsys_auth_get_p_caplist_t auth_get_p_caplist;
++ struct rsys_acl_t acl;
++ struct rsys_acl_n_t acl_n;
++ struct rsys_acl_get_rights_t acl_get_rights;
++ struct rsys_acl_get_rights_n_t acl_get_rights_n;
++ struct rsys_acl_get_tlist_t acl_get_tlist;
++ struct rsys_acl_get_tlist_n_t acl_get_tlist_n;
++ struct rsys_acl_get_mask_t acl_get_mask;
++ struct rsys_acl_get_mask_n_t acl_get_mask_n;
++ struct rsys_acl_group_t acl_group;
++ struct rsys_reg_t reg;
++ struct rsys_jail_t jail;
++ struct rsys_init_t init;
++ struct rsys_um_auth_name_t um_auth_name;
++ struct rsys_um_auth_uid_t um_auth_uid;
++ struct rsys_um_add_user_t um_add_user;
++ struct rsys_um_add_group_t um_add_group;
++ struct rsys_um_add_gm_t um_add_gm;
++ struct rsys_um_mod_user_t um_mod_user;
++ struct rsys_um_mod_group_t um_mod_group;
++ struct rsys_um_get_user_item_t um_get_user_item;
++ struct rsys_um_get_group_item_t um_get_group_item;
++ struct rsys_um_remove_user_t um_remove_user;
++ struct rsys_um_remove_group_t um_remove_group;
++ struct rsys_um_remove_gm_t um_remove_gm;
++ struct rsys_um_user_exists_t um_user_exists;
++ struct rsys_um_group_exists_t um_group_exists;
++ struct rsys_um_get_next_user_t um_get_next_user;
++ struct rsys_um_get_user_list_t um_get_user_list;
++ struct rsys_um_get_gm_list_t um_get_gm_list;
++ struct rsys_um_get_gm_user_list_t um_get_gm_user_list;
++ struct rsys_um_get_group_list_t um_get_group_list;
++ struct rsys_um_get_uid_t um_get_uid;
++ struct rsys_um_get_gid_t um_get_gid;
++ struct rsys_um_set_pass_t um_set_pass;
++ struct rsys_um_set_pass_name_t um_set_pass_name;
++ struct rsys_um_add_onetime_t um_add_onetime;
++ struct rsys_um_add_onetime_name_t um_add_onetime_name;
++ struct rsys_um_remove_all_onetime_t um_remove_all_onetime;
++ struct rsys_um_remove_all_onetime_name_t um_remove_all_onetime_name;
++ struct rsys_um_count_onetime_t um_count_onetime;
++ struct rsys_um_count_onetime_name_t um_count_onetime_name;
++ struct rsys_um_set_group_pass_t um_set_group_pass;
++ struct rsys_um_check_account_t um_check_account;
++ struct rsys_um_check_account_name_t um_check_account_name;
++ struct rsys_um_get_max_history_t um_get_max_history;
++ struct rsys_um_get_max_history_name_t um_get_max_history_name;
++ struct rsys_um_set_max_history_t um_set_max_history;
++ struct rsys_um_set_max_history_name_t um_set_max_history_name;
++ struct rsys_list_ta_begin_t list_ta_begin;
++ struct rsys_list_ta_begin_name_t list_ta_begin_name;
++ struct rsys_list_ta_refresh_t list_ta_refresh;
++ struct rsys_list_ta_commit_t list_ta_commit;
++ struct rsys_list_ta_forget_t list_ta_forget;
++ struct rsys_list_all_dev_t list_all_dev;
++ struct rsys_acl_list_all_dev_t acl_list_all_dev;
++ struct rsys_list_all_user_t list_all_user;
++ struct rsys_acl_list_all_user_t acl_list_all_user;
++ struct rsys_list_all_group_t list_all_group;
++ struct rsys_acl_list_all_group_t acl_list_all_group;
++ struct rsys_list_all_ipc_t list_all_ipc;
++ struct rsys_rc_select_fd_create_type_t rc_select_fd_create_type;
++ struct rsys_um_select_vset_t um_select_vset;
++ int dummy;
++ };
++
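++/*
++ * Editor's illustrative sketch, not part of the original RSBAC patch: every
++ * library call packs its parameters into exactly one member of
++ * union rsbac_syscall_arg_t. The kernel entry point that consumes the union
++ * is not part of this hunk, so only the argument packing is shown; the
++ * function and variable names below are hypothetical.
++ */
++#if 0 /* editor's example only */
++static void example_pack_um_auth_uid(union rsbac_syscall_arg_t * arg,
++ rsbac_uid_t uid, char * pass)
++{
++ arg->um_auth_uid.uid = uid;
++ arg->um_auth_uid.pass = pass;
++}
++#endif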
++#ifndef __KERNEL__
++int rsbac_version(void);
++
++int rsbac_stats(void);
++
++int rsbac_check(int correct, int check_inode);
++
++int rsbac_write(void);
++
++int rsbac_get_attr(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t __user * tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t __user * value,
++ int inherit);
++
++int rsbac_get_attr_n(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ char __user * t_name,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t __user * value,
++ int inherit);
++
++int rsbac_set_attr(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t __user * tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t __user * value);
++
++
++int rsbac_set_attr_n(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ char __user * t_name,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t __user * value);
++
++int rsbac_remove_target(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t __user * tid);
++
++int rsbac_remove_target_n(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ char __user * t_name);
++
++int rsbac_net_list_all_netdev(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_netdev_id_t __user * id_p,
++ u_long maxnum);
++
++int rsbac_net_template(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_net_temp_syscall_t call,
++ rsbac_net_temp_id_t id,
++ union rsbac_net_temp_syscall_data_t __user * data_p);
++
++int rsbac_net_list_all_template(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_net_temp_id_t __user * id_p,
++ u_long maxnum);
++
++int rsbac_switch(enum rsbac_switch_target_t module, int value);
++
++int rsbac_get_switch(enum rsbac_switch_target_t module, int __user * value_p, int __user * switchable_p);
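++
++/*
++ * Editor's illustrative sketch, not part of the original RSBAC patch: querying
++ * the state of one module switch with the prototype above. The function name
++ * is hypothetical and error handling is left to the caller.
++ */
++#if 0 /* editor's example only */
++static int example_query_softmode(int * value_p, int * switchable_p)
++{
++ return rsbac_get_switch(SW_SOFTMODE, value_p, switchable_p);
++}
++#endif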
++
++/************** MAC ***************/
++
++int rsbac_mac_set_curr_level(rsbac_security_level_t level,
++ rsbac_mac_category_vector_t __user * categories_p);
++
++int rsbac_mac_get_curr_level(rsbac_security_level_t __user * level_p,
++ rsbac_mac_category_vector_t __user * categories_p);
++
++int rsbac_mac_get_max_level(rsbac_security_level_t __user * level_p,
++ rsbac_mac_category_vector_t __user * categories_p);
++
++int rsbac_mac_get_min_level(rsbac_security_level_t __user * level_p,
++ rsbac_mac_category_vector_t __user * categories_p);
++
++int rsbac_mac_add_p_tru(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_upid_t pid,
++ rsbac_uid_t uid,
++ rsbac_time_t ttl);
++
++int rsbac_mac_remove_p_tru(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_upid_t pid,
++ rsbac_uid_t uid);
++
++int rsbac_mac_add_f_tru(
++ rsbac_list_ta_number_t ta_number,
++ char __user * filename,
++ rsbac_uid_t uid,
++ rsbac_time_t ttl);
++
++int rsbac_mac_remove_f_tru(
++ rsbac_list_ta_number_t ta_number,
++ char __user * filename,
++ rsbac_uid_t uid);
++
++/* trulist must have space for maxnum rsbac_uid_t entries! */
++int rsbac_mac_get_f_trulist(
++ rsbac_list_ta_number_t ta_number,
++ char __user * filename,
++ rsbac_uid_t trulist[],
++ rsbac_time_t ttllist[],
++ u_int maxnum);
++
++int rsbac_mac_get_p_trulist(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_upid_t pid,
++ rsbac_uid_t trulist[],
++ rsbac_time_t ttllist[],
++ u_int maxnum);
++
++/************** PM ***************/
++
++int rsbac_stats_pm(void);
++
++int rsbac_pm(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_pm_function_type_t function,
++ union rsbac_pm_function_param_t __user * param_p,
++ rsbac_pm_tkt_id_t ticket);
++
++int rsbac_pm_change_current_task(rsbac_pm_task_id_t task);
++
++int rsbac_pm_create_file(const char __user * filename,
++ int mode,
++ rsbac_pm_object_class_id_t object_class);
++
++/************** DAZ **************/
++
++int rsbac_daz_flush_cache(void);
++
++/************** RC ***************/
++
++int rsbac_rc_copy_role(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_rc_role_id_t from_role,
++ rsbac_rc_role_id_t to_role);
++
++int rsbac_rc_copy_type(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ rsbac_rc_type_id_t from_type,
++ rsbac_rc_type_id_t to_type);
++
++int rsbac_rc_get_item(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ union rsbac_rc_target_id_t __user * tid_p,
++ union rsbac_rc_target_id_t __user * subtid_p,
++ enum rsbac_rc_item_t item,
++ union rsbac_rc_item_value_t __user * value_p,
++ rsbac_time_t __user * ttl_p);
++
++/* Setting values */
++int rsbac_rc_set_item(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ union rsbac_rc_target_id_t __user * tid_p,
++ union rsbac_rc_target_id_t __user * subtid_p,
++ enum rsbac_rc_item_t item,
++ union rsbac_rc_item_value_t __user * value_p,
++ rsbac_time_t ttl);
++
++int rsbac_rc_get_list(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ union rsbac_rc_target_id_t __user * tid_p,
++ enum rsbac_rc_item_t item,
++ u_int maxnum,
++ __u32 __user * array_p,
++ rsbac_time_t __user * ttl_array_p);
++
++int rsbac_rc_change_role (rsbac_rc_role_id_t role, char __user * pass);
++
++int rsbac_rc_get_eff_rights_n(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ char __user * t_name,
++ rsbac_rc_request_vector_t __user * request_vector_p,
++ rsbac_time_t __user * ttl_p);
++
++int rsbac_rc_get_current_role (rsbac_rc_role_id_t __user * role_p);
++
++int rsbac_rc_select_fd_create_type(rsbac_rc_type_id_t type);
++
++/************** AUTH ***************/
++
++/* Provide means for adding and removing of capabilities */
++int rsbac_auth_add_p_cap(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_upid_t pid,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range,
++ rsbac_time_t ttl);
++
++int rsbac_auth_remove_p_cap(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_upid_t pid,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range);
++
++int rsbac_auth_add_f_cap(
++ rsbac_list_ta_number_t ta_number,
++ char __user * filename,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range,
++ rsbac_time_t ttl);
++
++int rsbac_auth_remove_f_cap(
++ rsbac_list_ta_number_t ta_number,
++ char __user * filename,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range);
++
++/* caplist must have space for maxnum cap_range entries - first and last each! */
++int rsbac_auth_get_f_caplist(
++ rsbac_list_ta_number_t ta_number,
++ char __user * filename,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t caplist[],
++ rsbac_time_t ttllist[],
++ u_int maxnum);
++
++int rsbac_auth_get_p_caplist(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_upid_t pid,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t caplist[],
++ rsbac_time_t ttllist[],
++ u_int maxnum);
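++/*
++ * Editor's illustrative sketch, not part of the original RSBAC patch: as the
++ * comment above says, the caller supplies caplist and ttllist arrays sized
++ * for maxnum entries. The array size, the use of ta_number 0 (assumed here
++ * to mean "no transaction") and the function name are assumptions.
++ */
++#if 0 /* editor's example only */
++static int example_get_real_caps(rsbac_upid_t pid)
++{
++ struct rsbac_auth_cap_range_t caps[32];
++ rsbac_time_t ttls[32];
++
++ return rsbac_auth_get_p_caplist(0, pid, ACT_real, caps, ttls, 32);
++}
++#endif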
++
++/**********************************/
++/************** REG ***************/
++
++int rsbac_reg(rsbac_reg_handle_t handle,
++ void __user * arg);
++
++
++/**********************************/
++/************** ACL ***************/
++
++int rsbac_acl(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_acl_syscall_type_t call,
++ struct rsbac_acl_syscall_arg_t __user * arg);
++
++int rsbac_acl_n(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_acl_syscall_type_t call,
++ struct rsbac_acl_syscall_n_arg_t __user * arg);
++
++int rsbac_acl_get_rights(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_acl_syscall_arg_t __user * arg,
++ rsbac_acl_rights_vector_t __user * rights_p,
++ u_int effective);
++
++
++int rsbac_acl_get_rights_n(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_acl_syscall_n_arg_t __user * arg,
++ rsbac_acl_rights_vector_t __user * rights_p,
++ u_int effective);
++
++int rsbac_acl_get_tlist (
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t __user * tid,
++ struct rsbac_acl_entry_t entry_array[],
++ rsbac_time_t ttl_array[],
++ u_int maxnum);
++
++int rsbac_acl_get_tlist_n(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ char __user * t_name,
++ struct rsbac_acl_entry_t entry_array[],
++ rsbac_time_t ttl_array[],
++ u_int maxnum);
++
++int rsbac_acl_get_mask (
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t __user * tid,
++ rsbac_acl_rights_vector_t __user * mask_p);
++
++int rsbac_acl_get_mask_n(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ char __user * t_name,
++ rsbac_acl_rights_vector_t __user * mask_p);
++
++/******** ACL groups *********/
++
++int rsbac_acl_group(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_acl_group_syscall_type_t call,
++ union rsbac_acl_group_syscall_arg_t __user * arg_p);
++
++
++/**********************************/
++/************** JAIL **************/
++
++int rsbac_jail(rsbac_version_t version,
++ char __user * path,
++ rsbac_jail_ip_t ip,
++ rsbac_jail_flags_t flags,
++ rsbac_cap_vector_t max_caps,
++ rsbac_jail_scd_vector_t scd_get,
++ rsbac_jail_scd_vector_t scd_modify
++ );
++
++int rsbac_list_all_ipc(rsbac_list_ta_number_t ta_number,
++ struct rsbac_ipc_t __user * id_p, u_long maxnum);
++
++/**********************************/
++/************** UM **************/
++
++int rsbac_um_auth_name(char __user * name,
++ char __user * pass);
++
++int rsbac_um_auth_uid(rsbac_uid_t uid,
++ char __user * pass);
++
++int rsbac_um_add_user(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t uid,
++ struct rsbac_um_user_entry_t __user * entry_p,
++ char __user * pass,
++ rsbac_time_t ttl);
++
++int rsbac_um_add_group(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t gid,
++ struct rsbac_um_group_entry_t __user * entry_p,
++ char __user * pass,
++ rsbac_time_t ttl);
++
++int rsbac_um_add_gm(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t uid,
++ rsbac_gid_num_t gid,
++ rsbac_time_t ttl);
++
++int rsbac_um_mod_user(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t uid,
++ enum rsbac_um_mod_t mod,
++ union rsbac_um_mod_data_t __user * data_p);
++
++int rsbac_um_mod_group(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t gid,
++ enum rsbac_um_mod_t mod,
++ union rsbac_um_mod_data_t __user * data_p);
++
++int rsbac_um_get_user_item(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t uid,
++ enum rsbac_um_mod_t mod,
++ union rsbac_um_mod_data_t __user * data_p);
++
++int rsbac_um_get_group_item(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t gid,
++ enum rsbac_um_mod_t mod,
++ union rsbac_um_mod_data_t __user * data_p);
++
++int rsbac_um_remove_user(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t uid);
++
++int rsbac_um_remove_group(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t gid);
++
++int rsbac_um_remove_gm(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t uid,
++ rsbac_gid_num_t gid);
++
++int rsbac_um_user_exists(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t uid);
++
++int rsbac_um_group_exists(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t gid);
++
++int rsbac_um_get_next_user(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t old_user,
++ rsbac_uid_t __user * next_user_p);
++
++int rsbac_um_get_user_list(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_um_set_t vset,
++ rsbac_uid_t user_array[],
++ u_int maxnum);
++
++int rsbac_um_get_gm_list(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user,
++ rsbac_gid_num_t group_array[],
++ u_int maxnum);
++
++int rsbac_um_get_gm_user_list(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t group,
++ rsbac_uid_num_t user_array[],
++ u_int maxnum);
++
++int rsbac_um_get_group_list(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_um_set_t vset,
++ rsbac_gid_t group_array[],
++ u_int maxnum);
++
++int rsbac_um_get_uid(
++ rsbac_list_ta_number_t ta_number,
++ char __user * name,
++ rsbac_uid_t __user * uid_p);
++
++int rsbac_um_get_gid(
++ rsbac_list_ta_number_t ta_number,
++ char __user * name,
++ rsbac_gid_t __user * gid_p);
++
++int rsbac_um_set_pass(rsbac_uid_t uid,
++ char __user * old_pass,
++ char __user * new_pass);
++
++int rsbac_um_set_pass_name(char __user * name,
++ char __user * old_pass,
++ char __user * new_pass);
++
++int rsbac_um_add_onetime(rsbac_uid_t uid,
++ char __user * old_pass,
++ char __user * new_pass,
++ rsbac_time_t ttl);
++
++int rsbac_um_add_onetime_name(char __user * name,
++ char __user * old_pass,
++ char __user * new_pass,
++ rsbac_time_t ttl);
++
++int rsbac_um_remove_all_onetime(rsbac_uid_t uid,
++ char __user * old_pass);
++
++int rsbac_um_remove_all_onetime_name(char __user * name,
++ char __user * old_pass);
++
++int rsbac_um_count_onetime(rsbac_uid_t uid,
++ char __user * old_pass);
++
++int rsbac_um_count_onetime_name(char __user * name,
++ char __user * old_pass);
++
++int rsbac_um_set_group_pass(rsbac_gid_t gid,
++ char __user * new_pass);
++
++int rsbac_um_check_account(rsbac_uid_t uid);
++
++int rsbac_um_check_account_name(char __user * name);
++
++int rsbac_um_get_max_history(rsbac_list_ta_number_t ta_number, rsbac_uid_t uid);
++
++int rsbac_um_get_max_history_name(rsbac_list_ta_number_t ta_number, char __user * name);
++
++int rsbac_um_set_max_history(rsbac_list_ta_number_t ta_number, rsbac_uid_t uid, __u8 max_history);
++
++int rsbac_um_set_max_history_name(rsbac_list_ta_number_t ta_number, char __user * name, __u8 max_history);
++
++int rsbac_um_select_vset(rsbac_um_set_t vset);
++
++int rsbac_list_ta_begin(rsbac_time_t ttl,
++ rsbac_list_ta_number_t __user * ta_number_p,
++ rsbac_uid_t commit_uid,
++ char __user * password);
++
++int rsbac_list_ta_begin_name(rsbac_time_t ttl,
++ rsbac_list_ta_number_t __user * ta_number_p,
++ rsbac_uid_t commit_uid,
++ char __user * name,
++ char __user * password);
++
++int rsbac_list_ta_refresh(rsbac_time_t ttl,
++ rsbac_list_ta_number_t ta_number,
++ char __user * password);
++
++int rsbac_list_ta_commit(rsbac_list_ta_number_t ta_number,
++ char __user * password);
++
++int rsbac_list_ta_forget(rsbac_list_ta_number_t ta_number,
++ char __user * password);
++
++int rsbac_list_all_dev(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_dev_desc_t __user * id_p,
++ u_long maxnum);
++
++int rsbac_acl_list_all_dev(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_dev_desc_t __user * id_p,
++ u_long maxnum);
++
++int rsbac_list_all_user(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t __user * id_p,
++ u_long maxnum);
++
++int rsbac_acl_list_all_user(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t __user * id_p,
++ u_long maxnum);
++
++int rsbac_list_all_group(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t __user * id_p,
++ u_long maxnum);
++
++int rsbac_acl_list_all_group(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t __user * id_p,
++ u_long maxnum);
++
++/************************************************* */
++/* DEBUG/LOG functions */
++/************************************************* */
++
++int rsbac_adf_log_switch(enum rsbac_adf_request_t request,
++ enum rsbac_target_t target,
++ u_int value);
++
++int rsbac_get_adf_log(enum rsbac_adf_request_t request,
++ enum rsbac_target_t target,
++ u_int __user * value_p);
++
++/*
++ * Commands to rsbac_log:
++ *
++ * 0 -- Close the log. Currently a NOP.
++ * 1 -- Open the log. Currently a NOP.
++ * 2 -- Read from the log.
++ * 3 -- Read up to the last 4k of messages in the ring buffer.
++ * 4 -- Read and clear the last 4k of messages in the ring buffer.
++ * 5 -- Clear the ring buffer.
++ */
++int rsbac_log(int type,
++ char __user * buf,
++ int len);
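++
++/*
++ * Editor's illustrative sketch, not part of the original RSBAC patch: reading
++ * the last 4k of messages with command 3 from the table above. The function
++ * name is hypothetical; a negative return value is an RSBAC error code.
++ */
++#if 0 /* editor's example only */
++static int example_read_rsbac_log(char * buf, int len)
++{
++ return rsbac_log(3, buf, len);
++}
++#endif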
++
++int rsbac_init(char __user * root_dev);
++
++#endif /* ifndef __KERNEL__ */
++
++#endif
+diff --git a/include/rsbac/types.h b/include/rsbac/types.h
+new file mode 100644
+index 0000000..d0dc84f
+--- /dev/null
++++ b/include/rsbac/types.h
+@@ -0,0 +1,1015 @@
++/*********************************** */
++/* Rule Set Based Access Control */
++/* Author and (c)1999-2011: */
++/* Amon Ott <ao@rsbac.org> */
++/* API: Data types for attributes */
++/* and standard module calls */
++/* Last modified: 12/Jul/2011 */
++/*********************************** */
++
++#ifndef __RSBAC_TYPES_H
++#define __RSBAC_TYPES_H
++
++
++/* trigger module dependency for EXPORT_SYMBOL */
++#ifdef CONFIG_MODULES
++#endif
++
++#define RSBAC_VERSION "1.4.6"
++#define RSBAC_VERSION_MAJOR 1
++#define RSBAC_VERSION_MID 4
++#define RSBAC_VERSION_MINOR 6
++#define RSBAC_VERSION_NR \
++ ((RSBAC_VERSION_MAJOR << 16) | (RSBAC_VERSION_MID << 8) | RSBAC_VERSION_MINOR)
++#define RSBAC_VERSION_MAKE_NR(x,y,z) \
++ ((x << 16) | (y << 8) | z)
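++/*
++ * Editor's note, not part of the original patch: for this release the packed
++ * version number is RSBAC_VERSION_NR = (1 << 16) | (4 << 8) | 6 = 0x010406
++ * = 66566, and RSBAC_VERSION_MAKE_NR(1,4,6) yields the same value.
++ */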
++
++#ifdef __KERNEL__
++#include <linux/types.h>
++#include <linux/capability.h>
++#include <linux/resource.h>
++#else
++#include <asm/types.h>
++#include <sys/types.h>
++#endif
++
++typedef __u32 rsbac_version_t;
++typedef __u64 rsbac_uid_t; /* High 32 Bit virtual set, low uid */
++typedef __u64 rsbac_gid_t; /* High 32 Bit virtual set, low gid */
++typedef __u32 rsbac_old_uid_t; /* Same as user in Linux kernel */
++typedef __u32 rsbac_uid_num_t; /* Same as user in Linux kernel */
++typedef __u32 rsbac_old_gid_t; /* Same as group in Linux kernel */
++typedef __u32 rsbac_gid_num_t; /* Same as group in Linux kernel */
++typedef __u32 rsbac_um_set_t;
++typedef __u32 rsbac_time_t; /* Same as time_t in Linux kernel */
++typedef kernel_cap_t rsbac_cap_vector_t; /* Same as kernel_cap_t in Linux kernel */
++typedef __u32 rsbac_cap_old_vector_t; /* Same as kernel_cap_t in Linux kernel */
++
++#define RSBAC_UID_SET(x) ((rsbac_um_set_t) (x >> 32))
++#define RSBAC_UID_NUM(x) ((rsbac_uid_num_t) (x & (rsbac_uid_num_t) -1))
++#define RSBAC_GEN_UID(x,y) ((rsbac_uid_t) x << 32 | RSBAC_UID_NUM(y))
++#define RSBAC_GID_SET(x) ((rsbac_um_set_t) (x >> 32))
++#define RSBAC_GID_NUM(x) ((rsbac_gid_num_t) (x & (rsbac_gid_num_t) -1))
++#define RSBAC_GEN_GID(x,y) ((rsbac_gid_t) x << 32 | RSBAC_GID_NUM(y))
++#define RSBAC_UM_VIRTUAL_KEEP ((rsbac_um_set_t) -1)
++#define RSBAC_UM_VIRTUAL_ALL ((rsbac_um_set_t) -2)
++#define RSBAC_UM_VIRTUAL_MAX ((rsbac_um_set_t) -10)
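++
++/*
++ * Editor's note, not part of the original patch: worked example of the 64-bit
++ * layout above, using virtual set 2 and numeric uid 1000:
++ * RSBAC_GEN_UID(2, 1000) == ((rsbac_uid_t) 2 << 32) | 1000
++ * RSBAC_UID_SET(RSBAC_GEN_UID(2, 1000)) == 2
++ * RSBAC_UID_NUM(RSBAC_GEN_UID(2, 1000)) == 1000
++ */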
++
++typedef __u32 rsbac_list_ta_number_t;
++
++struct rsbac_nanotime_t
++ {
++ rsbac_time_t sec;
++ __u32 nsec;
++ };
++
++#ifdef __KERNEL__
++#include <linux/fs.h>
++#include <linux/socket.h>
++#include <linux/pipe_fs_i.h>
++#include <linux/kdev_t.h>
++
++/* version checks */
++#ifndef LINUX_VERSION_CODE
++#include <linux/version.h>
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19)
++#error "RSBAC: unsupported kernel version"
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++#include <linux/pid.h>
++#endif
++#define RSBAC_MAJOR MAJOR
++#define RSBAC_MINOR MINOR
++#define RSBAC_MKDEV(major,minor) MKDEV(major,minor)
++static inline rsbac_time_t rsbac_current_time(void)
++ {
++ struct timespec ts = CURRENT_TIME;
++ return ts.tv_sec;
++ }
++static inline void rsbac_get_current_nanotime(struct rsbac_nanotime_t * nanotime)
++ {
++ struct timespec ts = CURRENT_TIME;
++ nanotime->sec = ts.tv_sec;
++ nanotime->nsec = ts.tv_nsec;
++ }
++#ifndef kdev_t
++#define kdev_t dev_t
++#endif
++#define RSBAC_CURRENT_TIME (rsbac_current_time())
++
++
++#define RSBAC_ZERO_DEV RSBAC_MKDEV(0,0)
++#define RSBAC_AUTO_DEV RSBAC_MKDEV(99,99)
++#define RSBAC_IS_ZERO_DEV(kdev) (!RSBAC_MAJOR(kdev) && !RSBAC_MINOR(kdev))
++#define RSBAC_IS_AUTO_DEV(kdev) ((RSBAC_MAJOR(kdev) == 99) && (RSBAC_MINOR(kdev) == 99))
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++#define R_INIT
++#else
++#define R_INIT __init
++#endif
++
++#endif
++
++/* General */
++
++#ifndef NULL
++#define NULL ((void *) 0)
++#endif
++
++#define rsbac_min(a,b) (((a)<(b))?(a):(b))
++#define rsbac_max(a,b) (((a)>(b))?(a):(b))
++
++#define RSBAC_OLD_NO_USER 65533
++#define RSBAC_OLD_ALL_USERS 65532
++#define RSBAC_NO_USER ((rsbac_uid_num_t) -3)
++#define RSBAC_ALL_USERS ((rsbac_uid_num_t) -4)
++#define RSBAC_NO_GROUP ((rsbac_gid_num_t) -3)
++#define RSBAC_ALL_GROUPS ((rsbac_gid_num_t) -4)
++
++#ifndef FALSE
++#define FALSE 0
++#endif
++#ifndef TRUE
++#define TRUE 1
++#endif
++
++typedef u_int rsbac_boolean_t;
++
++typedef __u8 rsbac_boolean_int_t;
++
++#define RSBAC_IFNAMSIZ 16
++typedef u_char rsbac_netdev_id_t[RSBAC_IFNAMSIZ + 1];
++
++#define RSBAC_SEC_DEL_CHUNK_SIZE 65536
++
++/* Adjust these, if you have to, but if you do, adjust them all! */
++/* Note: no / allowed, file must be exactly in second level! */
++#define RSBAC_AUTH_LOGIN_PATH "/bin/login"
++#define RSBAC_AUTH_LOGIN_PATH_DIR "bin"
++#define RSBAC_AUTH_LOGIN_PATH_FILE "login"
++
++/* These data structures work in parallel with the Linux data structures, */
++/* so all data for RSBAC decisions is maintained separately. */
++/* Any change to RSBAC data will NOT modify any other Linux data, */
++/* e.g. user lists, process lists or inodes. */
++
++/* Special generic lists time-to-live (ttl) value to keep old setting */
++#define RSBAC_LIST_TTL_KEEP ((rsbac_time_t) -1)
++
++typedef __u8 rsbac_enum_t; /* internally used for all enums */
++
++#define RSBAC_SYSADM_UID 0
++#define RSBAC_BIN_UID 1
++#ifdef CONFIG_RSBAC_SECOFF_UID
++#define RSBAC_SECOFF_UID CONFIG_RSBAC_SECOFF_UID
++#else
++#define RSBAC_SECOFF_UID 400
++#endif
++#define RSBAC_DATAPROT_UID (RSBAC_SECOFF_UID+1)
++#define RSBAC_TPMAN_UID (RSBAC_SECOFF_UID+2)
++#define RSBAC_AUDITOR_UID (RSBAC_SECOFF_UID+4)
++
++typedef __u32 rsbac_pseudo_t; /* For Pseudonymic Logging */
++typedef __kernel_pid_t rsbac_upid_t; /* Same as pid in Linux < 2.6.24 */
++
++typedef struct pid * rsbac_pid_t; /* use new pid struct */
++
++typedef __u32 rsbac_ta_number_t;
++
++typedef __u8 rsbac_security_level_t;
++#define SL_max 252
++#define SL_min 0
++// #define SL_rsbac_internal 253
++#define SL_inherit 254
++#define SL_none 255
++enum rsbac_old_security_level_t {SL_unclassified, SL_confidential, SL_secret,
++ SL_top_secret, SL_old_rsbac_internal,
++ SL_old_inherit, SL_old_none};
++ /* MAC security levels */
++typedef __u64 rsbac_mac_category_vector_t; /* MAC category sets */
++#define RSBAC_MAC_GENERAL_CATEGORY 0
++#define RSBAC_MAC_DEF_CAT_VECTOR ((rsbac_mac_category_vector_t) 1)
++ /* 1 << GENERAL_CAT */
++#define RSBAC_MAC_MAX_CAT_VECTOR ((rsbac_mac_category_vector_t) -1)
++ /* all bits set */
++#define RSBAC_MAC_MIN_CAT_VECTOR ((rsbac_mac_category_vector_t) 0)
++ /* no bits set */
++#define RSBAC_MAC_INHERIT_CAT_VECTOR ((rsbac_mac_category_vector_t) 0)
++ /* for fd: no bits set */
++#define RSBAC_MAC_NR_CATS 64
++#define RSBAC_MAC_MAX_CAT 63
++
++#define RSBAC_MAC_CAT_VECTOR(x) ((rsbac_mac_category_vector_t) 1 << (x))
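++
++/*
++ * Editor's note, not part of the original patch: each of the 64 categories
++ * maps to one bit, e.g. RSBAC_MAC_CAT_VECTOR(0) == 1 (the GENERAL category,
++ * identical to RSBAC_MAC_DEF_CAT_VECTOR) and RSBAC_MAC_CAT_VECTOR(5) == 32.
++ */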
++
++typedef u_int rsbac_cwi_relation_id_t;
++
++/* For MAC, FF, AUTH */
++enum rsbac_system_role_t {SR_user, SR_security_officer, SR_administrator,
++ SR_auditor, SR_none};
++typedef rsbac_enum_t rsbac_system_role_int_t;
++
++/* For all models */
++enum rsbac_fake_root_uid_t {FR_off, FR_uid_only, FR_euid_only, FR_both,
++ FR_none};
++typedef rsbac_enum_t rsbac_fake_root_uid_int_t;
++
++enum rsbac_scd_type_t {ST_time_strucs, ST_clock, ST_host_id,
++ ST_net_id, ST_ioports, ST_rlimit,
++ ST_swap, ST_syslog, ST_rsbac, ST_rsbac_log,
++ ST_other, ST_kmem, ST_network, ST_firewall,
++ ST_priority, ST_sysfs, ST_rsbac_remote_log,
++ ST_quota, ST_sysctl, ST_nfsd, ST_ksyms,
++ ST_mlock, ST_capability, ST_kexec, ST_videomem,
++ ST_none};
++
++typedef __u32 rsbac_scd_vector_t;
++#define RSBAC_SCD_VECTOR(x) ((rsbac_scd_vector_t) 1 << (x))
++
++enum rsbac_dev_type_t {D_block, D_char, D_block_major, D_char_major, D_none};
++
++
++enum rsbac_ipc_type_t {I_sem, I_msg, I_shm, I_anonpipe, I_mqueue,
++ I_anonunix, I_none};
++union rsbac_ipc_id_t
++ {
++ u_long id_nr;
++ };
++
++typedef __u32 rsbac_inode_nr_t;
++
++enum rsbac_linux_dac_disable_t {LDD_false, LDD_true, LDD_inherit, LDD_none};
++typedef rsbac_enum_t rsbac_linux_dac_disable_int_t;
++
++#ifdef __KERNEL__
++/* We need unique identifiers for each file/dir. inode means inode in */
++/* the file system. */
++struct rsbac_fs_file_t
++ {
++ kdev_t device;
++ rsbac_inode_nr_t inode;
++ struct dentry * dentry_p; /* used for inheritance recursion */
++ };
++
++struct rsbac_dev_t
++ {
++ enum rsbac_dev_type_t type;
++ kdev_t id;
++ };
++#endif /* __KERNEL */
++
++/* We need unique ids for dev objects */
++struct rsbac_dev_desc_t
++ {
++ __u32 type;
++ __u32 major;
++ __u32 minor;
++ };
++
++static inline struct rsbac_dev_desc_t
++ rsbac_mkdev_desc(__u32 type, __u32 major, __u32 minor)
++ {
++ struct rsbac_dev_desc_t dev_desc;
++
++ dev_desc.type = type;
++ dev_desc.major = major;
++ dev_desc.minor = minor;
++ return dev_desc;
++ }
++
++#define RSBAC_ZERO_DEV_DESC rsbac_mkdev_desc(D_none, 0, 0)
++#define RSBAC_AUTO_DEV_DESC rsbac_mkdev_desc(D_none, 99, 99)
++#define RSBAC_IS_ZERO_DEV_DESC(dev) ((dev.type == D_none) && !dev.major && !dev.minor)
++#define RSBAC_IS_AUTO_DEV_DESC(dev) ((dev.type == D_none) && (dev.major == 99) && (dev.minor == 99))
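++
++/*
++ * Editor's illustrative sketch, not part of the original RSBAC patch: building
++ * a device descriptor and testing the special markers above. The function
++ * name and the major/minor numbers are arbitrary example values.
++ */
++#if 0 /* editor's example only */
++static int example_dev_desc_is_special(void)
++{
++ struct rsbac_dev_desc_t dev = rsbac_mkdev_desc(D_block, 8, 1);
++
++ return RSBAC_IS_ZERO_DEV_DESC(dev) || RSBAC_IS_AUTO_DEV_DESC(dev);
++}
++#endif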
++
++/* And we need unique ids for ipc objects */
++struct rsbac_ipc_t
++ {
++ enum rsbac_ipc_type_t type;
++ union rsbac_ipc_id_t id;
++ };
++
++/* log levels: nothing, denied requests only, all, refer to request log level */
++enum rsbac_log_level_t {LL_none, LL_denied, LL_full, LL_request, LL_invalid};
++typedef __u64 rsbac_log_array_t;
++
++/* request bitvectors */
++typedef __u64 rsbac_request_vector_t;
++#define RSBAC_REQUEST_VECTOR(x) ((rsbac_request_vector_t) 1 << (x))
++
++/* The max length of each filename is kept in a macro */
++#define RSBAC_MAXNAMELEN 256
++
++#define RSBAC_LIST_TA_MAX_NAMELEN 32
++#define RSBAC_LIST_TA_MAX_PASSLEN 36
++
++/* MAC */
++
++typedef __u8 rsbac_mac_user_flags_t;
++typedef __u16 rsbac_mac_process_flags_t;
++typedef __u8 rsbac_mac_file_flags_t;
++typedef struct rsbac_fs_file_t rsbac_mac_file_t;
++#define RSBAC_MAC_MAX_MAXNUM 1000000
++
++#define MAC_override 1
++#define MAC_auto 2
++#define MAC_trusted 4
++#define MAC_write_up 8
++#define MAC_read_up 16
++#define MAC_write_down 32
++#define MAC_allow_auto 64
++#define MAC_prop_trusted 128
++#define MAC_program_auto 256
++
++#define RSBAC_MAC_U_FLAGS (MAC_override | MAC_trusted | MAC_write_up | MAC_read_up | MAC_write_down | MAC_allow_auto)
++#define RSBAC_MAC_P_FLAGS (MAC_override | MAC_auto | MAC_trusted | MAC_write_up | MAC_read_up | MAC_write_down | MAC_prop_trusted | MAC_program_auto)
++#define RSBAC_MAC_F_FLAGS (MAC_auto | MAC_trusted | MAC_write_up | MAC_read_up | MAC_write_down)
++
++#define RSBAC_MAC_DEF_U_FLAGS 0
++#define RSBAC_MAC_DEF_SYSADM_U_FLAGS MAC_allow_auto
++#define RSBAC_MAC_DEF_SECOFF_U_FLAGS MAC_override
++
++#define RSBAC_MAC_DEF_P_FLAGS 0
++#define RSBAC_MAC_DEF_INIT_P_FLAGS MAC_auto
++
++typedef rsbac_enum_t rsbac_mac_auto_int_t;
++enum rsbac_mac_auto_t {MA_no, MA_yes, MA_inherit};
++
++/* PM */
++
++#include <rsbac/pm_types.h>
++
++/* DAZ */
++typedef __u8 rsbac_daz_scanned_t;
++#define DAZ_unscanned 0
++#define DAZ_infected 1
++#define DAZ_clean 2
++#define DAZ_max 2
++#define DEFAULT_DAZ_FD_SCANNED DAZ_unscanned
++typedef __u8 rsbac_daz_scanner_t;
++typedef __u8 rsbac_daz_do_scan_t;
++#define DAZ_never 0
++#define DAZ_registered 1
++#define DAZ_always 2
++#define DAZ_inherit 3
++#define DAZ_max_do_scan 3
++#define DEFAULT_DAZ_FD_DO_SCAN DAZ_inherit
++#define DEFAULT_DAZ_FD_ROOT_DO_SCAN DAZ_registered
++
++/* FF */
++
++typedef __u16 rsbac_ff_flags_t;
++#define FF_read_only 1
++#define FF_execute_only 2
++#define FF_search_only 4
++#define FF_write_only 8
++#define FF_secure_delete 16
++#define FF_no_execute 32
++#define FF_no_delete_or_rename 64
++#define FF_append_only 256
++#define FF_no_mount 512
++#define FF_no_search 1024
++
++#define FF_add_inherited 128
++
++#define RSBAC_FF_DEF FF_add_inherited
++#define RSBAC_FF_ROOT_DEF 0
++
++/***** RC *****/
++
++#include <rsbac/rc_types.h>
++
++/**** AUTH ****/
++/* special cap value, replaced by process owner at execute time */
++#define RSBAC_AUTH_MAX_MAXNUM 1000000
++#define RSBAC_AUTH_OWNER_F_CAP ((rsbac_uid_num_t) -3)
++#define RSBAC_AUTH_DAC_OWNER_F_CAP ((rsbac_uid_num_t) -4)
++#define RSBAC_AUTH_MAX_RANGE_UID ((rsbac_uid_num_t) -10)
++#define RSBAC_AUTH_GROUP_F_CAP ((rsbac_uid_num_t) -3)
++#define RSBAC_AUTH_DAC_GROUP_F_CAP ((rsbac_uid_num_t) -4)
++#define RSBAC_AUTH_MAX_RANGE_GID ((rsbac_uid_num_t) -10)
++typedef struct rsbac_fs_file_t rsbac_auth_file_t;
++struct rsbac_auth_cap_range_t
++ {
++ rsbac_uid_t first;
++ rsbac_uid_t last;
++ };
++struct rsbac_auth_old_cap_range_t
++ {
++ rsbac_old_uid_t first;
++ rsbac_old_uid_t last;
++ };
++enum rsbac_auth_cap_type_t {ACT_real, ACT_eff, ACT_fs,
++ ACT_group_real, ACT_group_eff, ACT_group_fs,
++ ACT_none};
++typedef rsbac_enum_t rsbac_auth_cap_type_int_t;
++
++enum rsbac_auth_may_setuid_t {AMS_off, AMS_full, AMS_last_auth_only,
++ AMS_last_auth_and_gid, AMS_none};
++
++typedef rsbac_enum_t rsbac_auth_may_setuid_int_t;
++
++/**** ACL ****/
++/* include at end of types.h */
++
++/**** CAP ****/
++enum rsbac_cap_process_hiding_t {PH_off, PH_from_other_users, PH_full,
++ PH_none};
++typedef rsbac_enum_t rsbac_cap_process_hiding_int_t;
++
++enum rsbac_cap_ld_env_t { LD_deny, LD_allow, LD_keep, LD_inherit };
++typedef rsbac_enum_t rsbac_cap_ld_env_int_t;
++
++#define RSBAC_CAP_DEFAULT_MIN (__u32) 0
++#define RSBAC_CAP_DEFAULT_MAX (__u32) -1
++
++#include <linux/capability.h>
++#define CAP_NONE 34
++#define RSBAC_CAP_MAX CAP_NONE
++
++/**** JAIL ****/
++
++#define RSBAC_JAIL_VERSION 1
++
++typedef __u32 rsbac_jail_id_t;
++#define RSBAC_JAIL_DEF_ID 0
++typedef __u32 rsbac_jail_ip_t;
++typedef __u32 rsbac_jail_scd_vector_t;
++
++typedef __u32 rsbac_jail_flags_t;
++#define JAIL_allow_external_ipc 1
++#define JAIL_allow_all_net_family 2
++#define JAIL_allow_inet_raw 8
++#define JAIL_auto_adjust_inet_any 16
++#define JAIL_allow_inet_localhost 32
++#define JAIL_allow_dev_get_status 128
++#define JAIL_allow_dev_mod_system 256
++#define JAIL_allow_dev_read 512
++#define JAIL_allow_dev_write 1024
++#define JAIL_allow_tty_open 2048
++#define JAIL_allow_parent_ipc 4096
++#define JAIL_allow_suid_files 8192
++#define JAIL_allow_mount 16384
++#define JAIL_this_is_syslog 32768
++#define JAIL_allow_ipc_to_syslog 65536
++#define JAIL_allow_netlink 131072
++
++#define RSBAC_JAIL_LOCALHOST ((1 << 24) | 127)
++
++/**** PAX ****/
++
++typedef unsigned long rsbac_pax_flags_t;
++
++/* for PaX defines */
++#ifdef __KERNEL__
++#include <linux/elf.h>
++#include <linux/random.h>
++#endif
++#ifndef PF_PAX_PAGEEXEC
++#define PF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
++#define PF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
++#define PF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
++#define PF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
++#define PF_PAX_RANDEXEC 0x10000000 /* Randomize ET_EXEC base */
++#define PF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
++#endif
++
++#define RSBAC_PAX_DEF_FLAGS (PF_PAX_SEGMEXEC | PF_PAX_PAGEEXEC | PF_PAX_MPROTECT | PF_PAX_RANDMMAP)
++#define RSBAC_PAX_ALL_FLAGS ((rsbac_pax_flags_t) 255 << 24)
++
++/**** UM User management ****/
++/* Included from um_types.h */
++
++/**** RES ****/
++
++typedef __u32 rsbac_res_limit_t;
++#define RSBAC_RES_UNSET 0
++
++#define RSBAC_RES_MAX 10 /* RLIMIT_LOCKS in 2.4.x kernels */
++#define RSBAC_RES_NONE 11
++
++typedef rsbac_res_limit_t rsbac_res_array_t[RSBAC_RES_MAX + 1];
++
++/**** REG ****/
++typedef __s32 rsbac_reg_handle_t;
++
++
++/****************************************************************************/
++/* ADF types */
++/****************************************************************************/
++
++#include <rsbac/network_types.h>
++
++#ifdef __KERNEL__
++ typedef struct socket * rsbac_net_obj_id_t;
++#else
++ typedef void * rsbac_net_obj_id_t;
++#endif
++
++struct rsbac_net_obj_desc_t
++ {
++ rsbac_net_obj_id_t sock_p;
++ void * local_addr;
++ u_int local_len;
++ void * remote_addr;
++ u_int remote_len;
++ rsbac_net_temp_id_t local_temp;
++ rsbac_net_temp_id_t remote_temp;
++ };
++
++#define RSBAC_ADF_REQUEST_ARRAY_VERSION 2
++
++enum rsbac_adf_request_t {
++ R_ADD_TO_KERNEL,
++ R_ALTER,
++ R_APPEND_OPEN,
++ R_CHANGE_GROUP,
++ R_CHANGE_OWNER,
++ R_CHDIR,
++ R_CLONE,
++ R_CLOSE,
++ R_CREATE,
++ R_DELETE,
++ R_EXECUTE,
++ R_GET_PERMISSIONS_DATA,
++ R_GET_STATUS_DATA,
++ R_LINK_HARD,
++ R_MODIFY_ACCESS_DATA,
++ R_MODIFY_ATTRIBUTE,
++ R_MODIFY_PERMISSIONS_DATA,
++ R_MODIFY_SYSTEM_DATA,
++ R_MOUNT,
++ R_READ,
++ R_READ_ATTRIBUTE,
++ R_READ_WRITE_OPEN,
++ R_READ_OPEN,
++ R_REMOVE_FROM_KERNEL,
++ R_RENAME,
++ R_SEARCH,
++ R_SEND_SIGNAL,
++ R_SHUTDOWN,
++ R_SWITCH_LOG,
++ R_SWITCH_MODULE,
++ R_TERMINATE,
++ R_TRACE,
++ R_TRUNCATE,
++ R_UMOUNT,
++ R_WRITE,
++ R_WRITE_OPEN,
++ R_MAP_EXEC,
++ R_BIND,
++ R_LISTEN,
++ R_ACCEPT,
++ R_CONNECT,
++ R_SEND,
++ R_RECEIVE,
++ R_NET_SHUTDOWN,
++ R_CHANGE_DAC_EFF_OWNER,
++ R_CHANGE_DAC_FS_OWNER,
++ R_CHANGE_DAC_EFF_GROUP,
++ R_CHANGE_DAC_FS_GROUP,
++ R_IOCTL,
++ R_LOCK,
++ R_AUTHENTICATE,
++ R_NONE
++ };
++
++typedef rsbac_enum_t rsbac_adf_request_int_t;
++
++#include <rsbac/request_groups.h>
++
++/* This type is returned by the rsbac_adf_request() function. Since a */
++/* decision of UNDEFINED indicates an error, it is never returned. */
++
++enum rsbac_adf_req_ret_t {NOT_GRANTED,GRANTED,DO_NOT_CARE,UNDEFINED};
++
++/****************************************************************************/
++/* ACI types */
++/****************************************************************************/
++
++/* For switching adf-modules */
++enum rsbac_switch_target_t {SW_GEN,SW_MAC,SW_PM,SW_DAZ,SW_FF,SW_RC,SW_AUTH,
++ SW_REG,SW_ACL,SW_CAP,SW_JAIL,SW_RES,SW_PAX,SW_SOFTMODE,
++ SW_DAC_DISABLE,SW_UM,SW_FREEZE,SW_NONE};
++#define RSBAC_MAX_MOD (SW_SOFTMODE - 1)
++typedef rsbac_enum_t rsbac_switch_target_int_t;
++
++/****************************************************************************/
++/* For objects, users and processes all manipulation is encapsulated by the */
++/* function calls rsbac_set_attr, rsbac_get_attr and rsbac_remove_target. */
++
++/* For those, we declare some extra types to specify target and attribute. */
++
++enum rsbac_target_t {T_FILE, T_DIR, T_FIFO, T_SYMLINK, T_DEV, T_IPC, T_SCD, T_USER, T_PROCESS,
++ T_NETDEV, T_NETTEMP, T_NETOBJ, T_NETTEMP_NT, T_GROUP,
++ T_FD, T_UNIXSOCK,
++ T_NONE};
++
++union rsbac_target_id_t
++ {
++#ifdef __KERNEL__
++ struct rsbac_fs_file_t file;
++ struct rsbac_fs_file_t dir;
++ struct rsbac_fs_file_t fifo;
++ struct rsbac_fs_file_t symlink;
++ struct rsbac_fs_file_t unixsock;
++#endif
++ struct rsbac_dev_desc_t dev;
++ struct rsbac_ipc_t ipc;
++ rsbac_enum_t scd;
++ rsbac_uid_t user;
++ rsbac_gid_t group;
++ rsbac_pid_t process; /* new struct pid * */
++ rsbac_upid_t uprocess; /* old fashioned pid from user space */
++ rsbac_netdev_id_t netdev;
++ rsbac_net_temp_id_t nettemp;
++ struct rsbac_net_obj_desc_t netobj;
++ int dummy;
++ };
++
++#ifdef __KERNEL__
++typedef rsbac_enum_t rsbac_log_entry_t[T_NONE+1];
++typedef rsbac_enum_t rsbac_old_log_entry_t[T_NONE];
++
++struct rsbac_create_data_t
++ {
++ enum rsbac_target_t target;
++ struct dentry * dentry_p;
++ int mode;
++ kdev_t device; /* for mknod etc. */
++ };
++
++struct rsbac_rlimit_t
++ {
++ u_int resource;
++ struct rlimit limit;
++ };
++#endif
++
++enum rsbac_attribute_t
++ {
++ A_pseudo,
++ A_security_level,
++ A_initial_security_level,
++ A_local_sec_level,
++ A_remote_sec_level,
++ A_min_security_level,
++ A_mac_categories,
++ A_mac_initial_categories,
++ A_local_mac_categories,
++ A_remote_mac_categories,
++ A_mac_min_categories,
++ A_mac_user_flags,
++ A_mac_process_flags,
++ A_mac_file_flags,
++ A_system_role,
++ A_mac_role,
++ A_daz_role,
++ A_ff_role,
++ A_auth_role,
++ A_cap_role,
++ A_jail_role,
++ A_pax_role,
++ A_current_sec_level,
++ A_mac_curr_categories,
++ A_min_write_open,
++ A_min_write_categories,
++ A_max_read_open,
++ A_max_read_categories,
++ A_mac_auto,
++ A_mac_check,
++ A_mac_prop_trusted,
++ A_pm_role,
++ A_pm_process_type,
++ A_pm_current_task,
++ A_pm_object_class,
++ A_local_pm_object_class,
++ A_remote_pm_object_class,
++ A_pm_ipc_purpose,
++ A_local_pm_ipc_purpose,
++ A_remote_pm_ipc_purpose,
++ A_pm_object_type,
++ A_local_pm_object_type,
++ A_remote_pm_object_type,
++ A_pm_program_type,
++ A_pm_tp,
++ A_pm_task_set,
++ A_daz_scanned,
++ A_daz_scanner,
++ A_ff_flags,
++ A_rc_type,
++ A_rc_select_type,
++ A_local_rc_type,
++ A_remote_rc_type,
++ A_rc_type_fd,
++ A_rc_type_nt,
++ A_rc_force_role,
++ A_rc_initial_role,
++ A_rc_role,
++ A_rc_def_role,
++ A_auth_may_setuid,
++ A_auth_may_set_cap,
++ A_auth_learn,
++ A_min_caps,
++ A_max_caps,
++ A_max_caps_user,
++ A_max_caps_program,
++ A_jail_id,
++ A_jail_parent,
++ A_jail_ip,
++ A_jail_flags,
++ A_jail_max_caps,
++ A_jail_scd_get,
++ A_jail_scd_modify,
++ A_pax_flags,
++ A_res_role,
++ A_res_min,
++ A_res_max,
++ A_log_array_low,
++ A_local_log_array_low,
++ A_remote_log_array_low,
++ A_log_array_high,
++ A_local_log_array_high,
++ A_remote_log_array_high,
++ A_log_program_based,
++ A_log_user_based,
++ A_symlink_add_remote_ip,
++ A_symlink_add_uid,
++ A_symlink_add_mac_level,
++ A_symlink_add_rc_role,
++ A_linux_dac_disable,
++ A_cap_process_hiding,
++ A_fake_root_uid,
++ A_audit_uid,
++ A_auid_exempt,
++ A_auth_last_auth,
++ A_remote_ip,
++ A_cap_ld_env,
++ A_daz_do_scan,
++ A_vset,
++#ifdef __KERNEL__
++ /* adf-request helpers */
++ A_owner,
++ A_group,
++ A_signal,
++ A_mode,
++ A_nlink,
++ A_switch_target,
++ A_mod_name,
++ A_request,
++ A_trace_request,
++ A_auth_add_f_cap,
++ A_auth_remove_f_cap,
++ A_auth_get_caplist,
++ A_prot_bits,
++ A_internal,
++ /* used with CREATE on DIR */
++ A_create_data,
++ A_new_object,
++ A_rlimit,
++ A_new_dir_dentry_p,
++ A_program_file,
++ A_auth_start_uid,
++ A_auth_start_euid,
++ A_auth_start_gid,
++ A_auth_start_egid,
++ A_acl_learn,
++ A_priority,
++ A_pgid,
++ A_kernel_thread,
++ A_open_flag,
++ A_reboot_cmd,
++ A_setsockopt_level,
++ A_ioctl_cmd,
++ A_f_mode,
++ A_process,
++ A_sock_type,
++ A_pagenr,
++ A_cap_learn,
++ A_rc_learn,
++#endif
++ A_none};
++
++union rsbac_attribute_value_t
++ {
++ rsbac_uid_t owner; /* process owner */
++ rsbac_pseudo_t pseudo;
++ rsbac_system_role_int_t system_role;
++#if !defined(__KERNEL__) || defined(CONFIG_RSBAC_MAC)
++ rsbac_security_level_t security_level;
++ rsbac_mac_category_vector_t mac_categories;
++ rsbac_security_level_t current_sec_level;
++ rsbac_security_level_t min_write_open;
++ rsbac_security_level_t max_read_open;
++ rsbac_mac_user_flags_t mac_user_flags;
++ rsbac_mac_process_flags_t mac_process_flags;
++ rsbac_mac_file_flags_t mac_file_flags;
++ rsbac_mac_auto_int_t mac_auto;
++ rsbac_boolean_t mac_check;
++ rsbac_boolean_t mac_prop_trusted;
++#endif
++#if !defined(__KERNEL__) || defined(CONFIG_RSBAC_PM)
++ rsbac_pm_role_int_t pm_role;
++ rsbac_pm_process_type_int_t pm_process_type;
++ rsbac_pm_task_id_t pm_current_task;
++ rsbac_pm_object_class_id_t pm_object_class;
++ rsbac_pm_purpose_id_t pm_ipc_purpose;
++ rsbac_pm_object_type_int_t pm_object_type;
++ rsbac_pm_program_type_int_t pm_program_type;
++ rsbac_pm_tp_id_t pm_tp;
++ rsbac_pm_task_set_id_t pm_task_set;
++#endif
++#if !defined(__KERNEL__) || defined(CONFIG_RSBAC_DAZ)
++ rsbac_daz_scanned_t daz_scanned;
++ rsbac_daz_scanner_t daz_scanner;
++ rsbac_daz_do_scan_t daz_do_scan;
++#endif
++#if !defined(__KERNEL__) || defined(CONFIG_RSBAC_FF)
++ rsbac_ff_flags_t ff_flags;
++#endif
++#if !defined(__KERNEL__) || defined(CONFIG_RSBAC_RC)
++ rsbac_rc_type_id_t rc_type;
++ rsbac_rc_type_id_t rc_type_fd;
++ rsbac_rc_role_id_t rc_force_role;
++ rsbac_rc_role_id_t rc_initial_role;
++ rsbac_rc_role_id_t rc_role;
++ rsbac_rc_role_id_t rc_def_role;
++ rsbac_rc_type_id_t rc_select_type;
++#endif
++#if !defined(__KERNEL__) || defined(CONFIG_RSBAC_AUTH)
++ rsbac_auth_may_setuid_int_t auth_may_setuid;
++ rsbac_boolean_t auth_may_set_cap;
++ rsbac_pid_t auth_p_capset;
++ rsbac_inode_nr_t auth_f_capset;
++ rsbac_boolean_t auth_learn;
++ rsbac_uid_t auth_last_auth;
++#endif
++#if !defined(__KERNEL__) || defined(CONFIG_RSBAC_CAP)
++ rsbac_cap_vector_t min_caps;
++ rsbac_cap_vector_t max_caps;
++ rsbac_cap_vector_t max_caps_user;
++ rsbac_cap_vector_t max_caps_program;
++ rsbac_cap_process_hiding_int_t cap_process_hiding;
++ rsbac_cap_ld_env_int_t cap_ld_env;
++#endif
++#if !defined(__KERNEL__) || defined(CONFIG_RSBAC_JAIL)
++ rsbac_jail_id_t jail_id;
++ rsbac_jail_id_t jail_parent;
++ rsbac_jail_ip_t jail_ip;
++ rsbac_jail_flags_t jail_flags;
++ rsbac_jail_scd_vector_t jail_scd_get;
++ rsbac_jail_scd_vector_t jail_scd_modify;
++ rsbac_cap_vector_t jail_max_caps;
++#endif
++#if !defined(__KERNEL__) || defined(CONFIG_RSBAC_PAX)
++ rsbac_pax_flags_t pax_flags;
++#endif
++#if !defined(__KERNEL__) || defined(CONFIG_RSBAC_RES)
++ rsbac_res_array_t res_array;
++#endif
++ rsbac_log_array_t log_array_low;
++ rsbac_log_array_t log_array_high;
++ rsbac_request_vector_t log_program_based;
++ rsbac_request_vector_t log_user_based;
++ rsbac_enum_t symlink_add_remote_ip;
++ rsbac_boolean_t symlink_add_uid;
++ rsbac_boolean_t symlink_add_mac_level;
++ rsbac_boolean_t symlink_add_rc_role;
++ rsbac_linux_dac_disable_int_t linux_dac_disable;
++// rsbac_net_temp_id_t net_temp;
++ rsbac_fake_root_uid_int_t fake_root_uid;
++ rsbac_uid_t audit_uid;
++ rsbac_uid_t auid_exempt;
++ __u32 remote_ip;
++ rsbac_um_set_t vset;
++#ifdef __KERNEL__
++ rsbac_gid_t group; /* process/fd group */
++ struct sockaddr * sockaddr_p; /* socket address */
++ long signal; /* signal for kill */
++ int mode; /* mode for create/mount */
++ int nlink; /* for DELETE/unlink */
++ enum rsbac_switch_target_t switch_target; /* for SWITCH_MODULE */
++ char * mod_name; /* for ADD_TO_KERNEL */
++ enum rsbac_adf_request_t request; /* for SWITCH_LOG */
++ long trace_request; /* request for sys_trace */
++ struct rsbac_auth_cap_range_t auth_cap_range;
++ int prot_bits;/* prot bits for mmap()/mprotect() */
++ rsbac_boolean_t internal;
++ /* used with CREATE on DIR */
++ struct rsbac_create_data_t create_data;
++ /* newly created object in OPEN requests? */
++ rsbac_boolean_t new_object;
++ struct rsbac_rlimit_t rlimit;
++ struct dentry * new_dir_dentry_p;
++ struct rsbac_fs_file_t program_file; /* for learning mode */
++ rsbac_uid_t auth_start_uid;
++ rsbac_uid_t auth_start_euid;
++ rsbac_gid_t auth_start_gid;
++ rsbac_gid_t auth_start_egid;
++ rsbac_boolean_t acl_learn;
++ int priority;
++ rsbac_pid_t pgid;
++ rsbac_boolean_t kernel_thread;
++ u_int open_flag;
++ u_int reboot_cmd;
++ int setsockopt_level;
++ u_int ioctl_cmd;
++ mode_t f_mode;
++ rsbac_pid_t process;
++ short sock_type;
++ u_int pagenr;
++ rsbac_boolean_t cap_learn;
++ rsbac_boolean_t rc_learn;
++#endif
++ u_char u_char_dummy;
++ u_short u_short_dummy;
++ int dummy;
++ u_int u_dummy;
++ long long_dummy;
++ u_long u_long_dummy;
++ };
++
++/* List all values possibly used in FD Cache to find data size */
++
++#ifdef CONFIG_RSBAC_FD_CACHE
++union rsbac_attribute_value_cache_t
++ {
++ rsbac_uid_t owner; /* process owner */
++ rsbac_pseudo_t pseudo;
++ rsbac_system_role_int_t system_role;
++#if !defined(__KERNEL__) || defined(CONFIG_RSBAC_MAC)
++ rsbac_security_level_t security_level;
++ rsbac_mac_category_vector_t mac_categories;
++ rsbac_security_level_t current_sec_level;
++ rsbac_security_level_t min_write_open;
++ rsbac_security_level_t max_read_open;
++ rsbac_mac_user_flags_t mac_user_flags;
++ rsbac_mac_process_flags_t mac_process_flags;
++ rsbac_mac_file_flags_t mac_file_flags;
++ rsbac_mac_auto_int_t mac_auto;
++ rsbac_boolean_t mac_check;
++ rsbac_boolean_t mac_prop_trusted;
++#endif
++#if !defined(__KERNEL__) || defined(CONFIG_RSBAC_DAZ)
++ rsbac_daz_scanned_t daz_scanned;
++ rsbac_daz_scanner_t daz_scanner;
++ rsbac_daz_do_scan_t daz_do_scan;
++#endif
++#if !defined(__KERNEL__) || defined(CONFIG_RSBAC_FF)
++ rsbac_ff_flags_t ff_flags;
++#endif
++#if !defined(__KERNEL__) || defined(CONFIG_RSBAC_RC)
++ rsbac_rc_type_id_t rc_type;
++ rsbac_rc_type_id_t rc_type_fd;
++ rsbac_rc_role_id_t rc_force_role;
++ rsbac_rc_role_id_t rc_initial_role;
++ rsbac_rc_role_id_t rc_role;
++ rsbac_rc_role_id_t rc_def_role;
++ rsbac_rc_type_id_t rc_select_type;
++#endif
++ rsbac_log_array_t log_array_low;
++ rsbac_log_array_t log_array_high;
++ rsbac_request_vector_t log_program_based;
++ rsbac_request_vector_t log_user_based;
++ rsbac_enum_t symlink_add_remote_ip;
++ rsbac_boolean_t symlink_add_uid;
++ rsbac_boolean_t symlink_add_mac_level;
++ rsbac_boolean_t symlink_add_rc_role;
++ rsbac_linux_dac_disable_int_t linux_dac_disable;
++// rsbac_net_temp_id_t net_temp;
++ rsbac_fake_root_uid_int_t fake_root_uid;
++ rsbac_uid_t audit_uid;
++ rsbac_uid_t auid_exempt;
++ __u32 remote_ip;
++ rsbac_um_set_t vset;
++ u_char u_char_dummy;
++ u_short u_short_dummy;
++ int dummy;
++ u_int u_dummy;
++ long long_dummy;
++ u_long u_long_dummy;
++ };
++#endif
++
++/**** ACL + UM ****/
++
++#include <rsbac/acl_types.h>
++#include <rsbac/um_types.h>
++
++/* not aligned, yet */
++struct rsbac_rw_req {
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ enum rsbac_attribute_t rsbac_attribute;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ enum rsbac_adf_request_t rsbac_request;
++};
++
++int rsbac_handle_rw_req(const struct file *file, struct rsbac_rw_req *rsbac_rw_req_obj);
++int rsbac_handle_rw_up(struct rsbac_rw_req *rsbac_rw_req_obj);
++
++#endif
++
+diff --git a/include/rsbac/um.h b/include/rsbac/um.h
+new file mode 100644
+index 0000000..b7b6e6e
+--- /dev/null
++++ b/include/rsbac/um.h
+@@ -0,0 +1,178 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2011: */
++/* Amon Ott <ao@rsbac.org> */
++/* API: Data structures */
++/* and functions for User Management */
++/* Last modified: 19/Apr/2011 */
++/************************************ */
++
++#ifndef __RSBAC_UM_H
++#define __RSBAC_UM_H
++
++#include <linux/init.h>
++#include <rsbac/types.h>
++#include <rsbac/um_types.h>
++
++/***************************************************/
++/* General Prototypes */
++/***************************************************/
++
++/* All functions return 0 if no error occurred, and a negative error code */
++/* otherwise. The error codes are defined in rsbac_error.h. */
++
++/****************************************************************************/
++/* Initialization, including ACI restoration for all mounted devices from */
++/* disk. After this call, all ACI is kept in memory for performance reasons,*/
++/* but user and file/dir object ACI are written to disk on every change. */
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++extern int rsbac_init_um(void);
++#else
++extern int rsbac_init_um(void) __init;
++#endif
++
++/* Some information about the current status is also available */
++extern int rsbac_stats_um(void);
++
++/************************************************* */
++/* Access functions */
++/************************************************* */
++
++/* Trying to access a user entry that was never created or has been removed returns an error! */
++
++/* rsbac_um_add_user (fills *user_p with new uid) */
++
++int rsbac_um_add_user(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t * user_p,
++ struct rsbac_um_user_entry_t * entry_p,
++ char * pass,
++ rsbac_time_t ttl);
++
++int rsbac_um_add_group(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t * group_p,
++ struct rsbac_um_group_entry_t * entry_p,
++ char * pass,
++ rsbac_time_t ttl);
++
++int rsbac_um_add_gm(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user,
++ rsbac_gid_num_t group,
++ rsbac_time_t ttl);
++
++int rsbac_um_mod_user(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user,
++ enum rsbac_um_mod_t mod,
++ union rsbac_um_mod_data_t * data_p);
++
++int rsbac_um_mod_group(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t group,
++ enum rsbac_um_mod_t mod,
++ union rsbac_um_mod_data_t * data_p);
++
++int rsbac_um_get_user_item(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user,
++ enum rsbac_um_mod_t mod,
++ union rsbac_um_mod_data_t * data_p);
++
++int rsbac_um_get_group_item(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t group,
++ enum rsbac_um_mod_t mod,
++ union rsbac_um_mod_data_t * data_p);
++
++int rsbac_um_user_exists(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user);
++
++int rsbac_um_group_exists(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t group);
++
++int rsbac_um_remove_user(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user);
++
++int rsbac_um_remove_group(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t group);
++
++int rsbac_um_remove_gm(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user,
++ rsbac_gid_num_t group);
++
++int rsbac_um_get_next_user(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t old_user,
++ rsbac_uid_t * next_user_p);
++
++int rsbac_um_get_user_list(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_um_set_t vset,
++ rsbac_uid_t ** list_pp);
++
++int rsbac_um_get_gm_list(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user,
++ rsbac_gid_num_t ** list_pp);
++
++int rsbac_um_get_gm_user_list(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t group,
++ rsbac_uid_num_t ** list_pp);
++
++int rsbac_um_get_group_list(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_um_set_t vset,
++ rsbac_gid_t ** list_pp);
++
++int rsbac_um_get_user_entry(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user,
++ struct rsbac_um_user_entry_t * entry_p,
++ rsbac_time_t * ttl_p);
++
++int rsbac_um_get_uid(
++ rsbac_list_ta_number_t ta_number,
++ char * name,
++ rsbac_uid_t * uid_p);
++
++int rsbac_um_get_gid(
++ rsbac_list_ta_number_t ta_number,
++ char * name,
++ rsbac_gid_t * gid_p);
++
++int rsbac_um_check_pass(rsbac_uid_t uid,
++ char * pass);
++
++/* Check for good password (min length etc.) */
++int rsbac_um_good_pass(rsbac_uid_t uid, char * pass);
++
++#ifdef CONFIG_RSBAC_UM_ONETIME
++int rsbac_um_add_onetime(rsbac_uid_t uid, char * pass, rsbac_time_t ttl);
++
++int rsbac_um_remove_all_onetime(rsbac_uid_t uid);
++
++int rsbac_um_count_onetime(rsbac_uid_t uid);
++#endif
++
++int rsbac_um_set_pass(rsbac_uid_t uid,
++ char * pass);
++
++int rsbac_um_set_group_pass(rsbac_gid_t gid,
++ char * pass);
++
++int rsbac_um_check_account(rsbac_uid_t user);
++
++int rsbac_um_get_max_history(rsbac_list_ta_number_t ta_number, rsbac_uid_t uid);
++
++int rsbac_um_set_max_history(rsbac_list_ta_number_t ta_number, rsbac_uid_t uid, __u8 max_history);
++
++#endif
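/*
 * Illustrative sketch, not part of the patch: creating an account through
 * the user management API above, relying on the documented convention that
 * every call returns 0 on success and a negative rsbac_error.h code
 * otherwise. ta_number 0 (no transaction), the NULL initial password, the
 * ttl of 0 and the assumption that the initial *user_p value does not
 * matter are all guesses; rsbac_um_add_user() fills *user_p with the new uid.
 */
static int example_create_account(void)
{
	struct rsbac_um_user_entry_t entry = DEFAULT_UM_U_ENTRY;
	rsbac_uid_t uid = 0;             /* filled in by rsbac_um_add_user() */
	int err;

	strncpy(entry.name, "alice", RSBAC_UM_NAME_LEN - 1);
	err = rsbac_um_add_user(0, &uid, &entry, NULL, 0);
	if (err)
		return err;

	err = rsbac_um_set_pass(uid, "initial-secret");
	if (err)
		return err;

	/* the new entry can now be looked up again by name */
	return rsbac_um_get_uid(0, "alice", &uid);
}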
+diff --git a/include/rsbac/um_types.h b/include/rsbac/um_types.h
+new file mode 100644
+index 0000000..866bdd1
+--- /dev/null
++++ b/include/rsbac/um_types.h
+@@ -0,0 +1,139 @@
++/**************************************/
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2008: Amon Ott */
++/* User Management Data structures */
++/* Last modified: 28/Oct/2008 */
++/**************************************/
++
++#ifndef __RSBAC_UM_TYPES_H
++#define __RSBAC_UM_TYPES_H
++
++//#include <rsbac/types.h>
++
++#if 0
++#ifdef __KERNEL__ /* only include in kernel code */
++#include <rsbac/debug.h>
++#include <rsbac/lists.h>
++#endif /* __KERNEL__ */
++#endif
++
++#define RSBAC_UM_MAX_MAXNUM 1000000
++
++#define RSBAC_UM_USER_LIST_NAME "um_user"
++#define RSBAC_UM_GROUP_LIST_NAME "um_grp"
++#define RSBAC_UM_USER_PWHISTORY_LIST_NAME "um_pwh"
++#define RSBAC_UM_ONETIME_LIST_NAME "um_pwot"
++#define RSBAC_UM_OLD_USER_LIST_NAME "um_u."
++#define RSBAC_UM_OLD_GROUP_LIST_NAME "um_g."
++#define RSBAC_UM_OLD_USER_PWHISTORY_LIST_NAME "um_pwh."
++
++#define RSBAC_UM_NR_USER_LISTS 8
++#define RSBAC_UM_NR_GROUP_LISTS 8
++#define RSBAC_UM_NR_USER_PWHISTORY_LISTS 8
++
++#define RSBAC_UM_USER_LIST_VERSION 3
++#define RSBAC_UM_GROUP_LIST_VERSION 3
++#define RSBAC_UM_USER_PWHISTORY_LIST_VERSION 2
++#define RSBAC_UM_ONETIME_LIST_VERSION 1
++
++#define RSBAC_UM_USER_OLD_LIST_VERSION 2
++#define RSBAC_UM_USER_OLD_OLD_LIST_VERSION 1
++#define RSBAC_UM_GROUP_OLD_LIST_VERSION 2
++#define RSBAC_UM_GROUP_OLD_OLD_LIST_VERSION 1
++#define RSBAC_UM_USER_PWHISTORY_OLD_LIST_VERSION 1
++
++#define RSBAC_UM_USER_LIST_KEY 6363636
++#define RSBAC_UM_GROUP_LIST_KEY 9847298
++#define RSBAC_UM_USER_PWHISTORY_LIST_KEY 8854687
++#define RSBAC_UM_ONETIME_LIST_KEY 63273279
++
++#define RSBAC_UM_NAME_LEN 65
++#define RSBAC_UM_OLD_NAME_LEN 16
++#define RSBAC_UM_PASS_LEN 24
++#define RSBAC_UM_FULLNAME_LEN 65
++#define RSBAC_UM_OLD_FULLNAME_LEN 30
++#define RSBAC_UM_HOMEDIR_LEN 101
++#define RSBAC_UM_OLD_HOMEDIR_LEN 50
++#define RSBAC_UM_SHELL_LEN 45
++#define RSBAC_UM_OLD_SHELL_LEN 24
++
++typedef __s32 rsbac_um_days_t;
++
++typedef char rsbac_um_password_t[RSBAC_UM_PASS_LEN];
++
++enum rsbac_um_mod_t { UM_name, UM_pass, UM_fullname, UM_homedir, UM_shell,
++ UM_group, UM_lastchange, UM_minchange, UM_maxchange,
++ UM_warnchange, UM_inactive, UM_expire, UM_ttl,
++ UM_cryptpass, UM_none
++};
++
++union rsbac_um_mod_data_t {
++ char string[RSBAC_MAXNAMELEN];
++ rsbac_gid_num_t group;
++ rsbac_um_days_t days;
++ rsbac_time_t ttl;
++};
++
++struct rsbac_um_user_entry_t {
++ rsbac_gid_num_t group;
++ rsbac_um_days_t lastchange;
++ rsbac_um_days_t minchange;
++ rsbac_um_days_t maxchange;
++ rsbac_um_days_t warnchange;
++ rsbac_um_days_t inactive;
++ rsbac_um_days_t expire;
++ char name[RSBAC_UM_NAME_LEN];
++ char pass[RSBAC_UM_PASS_LEN];
++ char fullname[RSBAC_UM_FULLNAME_LEN];
++ char homedir[RSBAC_UM_HOMEDIR_LEN];
++ char shell[RSBAC_UM_SHELL_LEN];
++};
++
++struct rsbac_um_old_user_entry_t {
++ char name[RSBAC_UM_OLD_NAME_LEN];
++ char pass[RSBAC_UM_PASS_LEN];
++ char fullname[RSBAC_UM_OLD_FULLNAME_LEN];
++ char homedir[RSBAC_UM_OLD_HOMEDIR_LEN];
++ char shell[RSBAC_UM_OLD_SHELL_LEN];
++ rsbac_gid_num_t group;
++ rsbac_um_days_t lastchange;
++ rsbac_um_days_t minchange;
++ rsbac_um_days_t maxchange;
++ rsbac_um_days_t warnchange;
++ rsbac_um_days_t inactive;
++ rsbac_um_days_t expire;
++};
++
++#define DEFAULT_UM_U_ENTRY \
++ { \
++ 65534, /* group */ \
++ 100000, /* lastchange */ \
++ 0, /* minchange */ \
++ 365, /* maxchange */ \
++ 10, /* warnchange */ \
++ 3, /* inactive */ \
++ 100000, /* expire */ \
++ "", /* name */ \
++ "", /* pass */ \
++ "", /* fullname */ \
++ "/home", /* homedir */ \
++ "/bin/sh" /* shell */ \
++ }
++
++struct rsbac_um_group_entry_t {
++ char name[RSBAC_UM_NAME_LEN];
++ char pass[RSBAC_UM_PASS_LEN];
++};
++
++struct rsbac_um_old_group_entry_t {
++ char name[RSBAC_UM_OLD_NAME_LEN];
++ char pass[RSBAC_UM_PASS_LEN];
++};
++
++#define DEFAULT_UM_G_ENTRY \
++ { \
++ "", /* name */ \
++ "" /* pass */ \
++ }
++
++#endif
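/*
 * Illustrative sketch, not part of the patch: union rsbac_um_mod_data_t is
 * meant to carry exactly one field per modification request. Pairing
 * UM_shell with the string member, and ta_number 0, are assumptions drawn
 * from the enum and union layout; rsbac_um_mod_user() itself is declared in
 * um.h.
 */
static int example_change_shell(rsbac_uid_t uid)
{
	union rsbac_um_mod_data_t data;

	memset(&data, 0, sizeof(data));
	strncpy(data.string, "/bin/bash", sizeof(data.string) - 1);
	return rsbac_um_mod_user(0, uid, UM_shell, &data);
}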
+diff --git a/include/rsbac/unistd-alpha.h b/include/rsbac/unistd-alpha.h
+new file mode 100644
+index 0000000..d6014f2
+--- /dev/null
++++ b/include/rsbac/unistd-alpha.h
+@@ -0,0 +1,16 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2002: Amon Ott */
++/* System Call definitions - alpha */
++/* Last modified: 20/Mar/2002 */
++/************************************ */
++
++#ifndef __RSBAC_UNISTD_ALPHA_H
++#define __RSBAC_UNISTD_ALPHA_H
++
++#ifndef __NR_security
++#define __NR_security 380
++#endif
++#define __NR_rsbac __NR_security
++
++#endif
+diff --git a/include/rsbac/unistd-i386.h b/include/rsbac/unistd-i386.h
+new file mode 100644
+index 0000000..5d600fb
+--- /dev/null
++++ b/include/rsbac/unistd-i386.h
+@@ -0,0 +1,16 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2002: Amon Ott */
++/* System Call definitions - i386 */
++/* Last modified: 20/Mar/2002 */
++/************************************ */
++
++#ifndef __RSBAC_UNISTD_I386_H
++#define __RSBAC_UNISTD_I386_H
++
++#ifndef __NR_security
++#define __NR_security 223
++#endif
++#define __NR_rsbac __NR_security
++
++#endif
+diff --git a/include/rsbac/unistd-ppc.h b/include/rsbac/unistd-ppc.h
+new file mode 100644
+index 0000000..cab4714
+--- /dev/null
++++ b/include/rsbac/unistd-ppc.h
+@@ -0,0 +1,16 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2002: Amon Ott */
++/* System Call definitions - ppc */
++/* Last modified: 07/Apr/2002 */
++/************************************ */
++
++#ifndef __RSBAC_UNISTD_PPC_H
++#define __RSBAC_UNISTD_PPC_H
++
++#ifndef __NR_security
++#define __NR_security 220
++#endif
++#define __NR_rsbac __NR_security
++
++#endif
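/*
 * Illustrative sketch, not part of the patch: userspace reaches the RSBAC
 * multiplexer through the per-architecture number defined in these headers
 * (__NR_rsbac aliases __NR_security: 380 on alpha, 223 on i386, 220 on ppc).
 * The include path and the placeholder argument list are assumptions; the
 * real calling convention is set by the rsbac syscall dispatcher, not by
 * these headers.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <rsbac/unistd-i386.h>   /* assumed install path of the matching arch header */

long rsbac_raw_call(long a, long b, long c)
{
	return syscall(__NR_rsbac, a, b, c);   /* placeholder arguments */
}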
+diff --git a/init/do_mounts.c b/init/do_mounts.c
+index 42b0707..3717f8f 100644
+--- a/init/do_mounts.c
++++ b/init/do_mounts.c
+@@ -23,6 +23,14 @@
+
+ #include "do_mounts.h"
+
++#ifdef CONFIG_RSBAC
++#include <rsbac/aci.h>
++#include <rsbac/debug.h>
++#ifdef CONFIG_BLK_DEV_INITRD
++#include <linux/initrd.h>
++#endif
++#endif
++
+ int __initdata rd_doload; /* 1 = load RAM disk, 0 = don't load */
+
+ int root_mountflags = MS_RDONLY | MS_SILENT;
+@@ -557,4 +565,12 @@ out:
+ devtmpfs_mount("dev");
+ sys_mount(".", "/", NULL, MS_MOVE, NULL);
+ sys_chroot((const char __user __force *)".");
++
++#ifdef CONFIG_RSBAC
++#ifdef CONFIG_RSBAC_INIT_DELAY
++ if(rsbac_no_delay_init)
++#endif
++ rsbac_init(ROOT_DEV);
++#endif
++
+ }
+diff --git a/init/main.c b/init/main.c
+index cb54cd3..5e3ec81 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -75,6 +75,8 @@
+ #include <asm/sections.h>
+ #include <asm/cacheflush.h>
+
++#include <rsbac/hooks.h>
++
+ #ifdef CONFIG_X86_LOCAL_APIC
+ #include <asm/smp.h>
+ #endif
+@@ -613,6 +615,9 @@ asmlinkage void __init start_kernel(void)
+ key_init();
+ security_init();
+ dbg_late_init();
++#ifdef CONFIG_RSBAC
++ rsbac_kthreads_init();
++#endif
+ vfs_caches_init(totalram_pages);
+ signals_init();
+ /* rootfs populating might need page-writeback */
+diff --git a/ipc/msg.c b/ipc/msg.c
+index 7385de2..00e92b5 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -41,6 +41,7 @@
+ #include <asm/current.h>
+ #include <asm/uaccess.h>
+ #include "util.h"
++#include <rsbac/hooks.h>
+
+ /*
+ * one msg_receiver structure for each sleeping receiver:
+@@ -184,6 +185,12 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
+ key_t key = params->key;
+ int msgflg = params->flg;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ msq = ipc_rcu_alloc(sizeof(*msq));
+ if (!msq)
+ return -ENOMEM;
+@@ -191,6 +198,23 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
+ msq->q_perm.mode = msgflg & S_IRWXUGO;
+ msq->q_perm.key = key;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_msgget()]: calling ADF\n");
++ rsbac_target_id.ipc.type = I_msg;
++ rsbac_target_id.ipc.id.id_nr = 0;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_CREATE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ ipc_rcu_putref(msq);
++ return -EPERM;
++ }
++#endif
++
+ msq->q_perm.security = NULL;
+ retval = security_msg_queue_alloc(msq);
+ if (retval) {
+@@ -217,6 +241,24 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
+ INIT_LIST_HEAD(&msq->q_receivers);
+ INIT_LIST_HEAD(&msq->q_senders);
+
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.ipc.type = I_msg;
++ rsbac_target_id.ipc.id.id_nr = msq->q_perm.id;
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_CREATE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "newque() [sys_msgget()]: rsbac_adf_set_attr() returned error");
++ }
++#endif
++
++
+ msg_unlock(msq);
+
+ return msq->q_perm.id;
+@@ -415,6 +457,11 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
+ struct msqid64_ds uninitialized_var(msqid64);
+ struct msg_queue *msq;
+ int err;
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
+
+ if (cmd == IPC_SET) {
+ if (copy_msqid_from_user(&msqid64, buf, version))
+@@ -434,7 +481,36 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
+
+ switch (cmd) {
+ case IPC_RMID:
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.ipc.type = I_msg;
++ rsbac_target_id.ipc.id.id_nr = msqid;
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_DELETE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out_unlock;
++ }
++#endif
+ freeque(ns, ipcp);
++#ifdef CONFIG_RSBAC
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_DELETE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_msgctl(): rsbac_adf_set_attr() returned error");
++ }
++#endif
+ goto out_up;
+ case IPC_SET:
+ if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
+@@ -443,6 +519,50 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
+ goto out_unlock;
+ }
+
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.ipc.type = I_msg;
++ rsbac_target_id.ipc.id.id_nr = msqid;
++ if (ipcp->uid != msqid64.msg_perm.uid) {
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_attribute_value.owner = msqid64.msg_perm.uid;
++ if (!rsbac_adf_request(R_CHANGE_OWNER,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out_unlock;
++ }
++ }
++ if (ipcp->gid != msqid64.msg_perm.gid) {
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_attribute_value.group = msqid64.msg_perm.gid;
++ if (!rsbac_adf_request(R_CHANGE_GROUP,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_group,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out_unlock;
++ }
++ }
++ if (ipcp->mode != ((ipcp->mode & ~S_IRWXUGO) | (S_IRWXUGO & msqid64.msg_perm.mode))) {
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_attribute_value.mode = (S_IRWXUGO & msqid64.msg_perm.mode);
++ if (!rsbac_adf_request(R_ALTER,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_mode,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out_unlock;
++ }
++ }
++#endif
++
+ msq->q_qbytes = msqid64.msg_qbytes;
+
+ ipc_update_perm(&msqid64.msg_perm, ipcp);
+@@ -640,6 +760,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
+ int err;
+ struct ipc_namespace *ns;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ ns = current->nsproxy->ipc_ns;
+
+ if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
+@@ -647,6 +773,21 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
+ if (mtype < 1)
+ return -EINVAL;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.ipc.type = I_msg;
++ rsbac_target_id.ipc.id.id_nr = msqid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_SEND,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ msg = load_msg(mtext, msgsz);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+@@ -712,6 +853,21 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
+ atomic_inc(&ns->msg_hdrs);
+ }
+
++#ifdef CONFIG_RSBAC
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_SEND,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_msgsnd(): rsbac_adf_set_attr() returned error");
++ }
++#endif
++
+ err = 0;
+ msg = NULL;
+
+@@ -760,11 +916,32 @@ long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
+ int mode;
+ struct ipc_namespace *ns;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (msqid < 0 || (long) msgsz < 0)
+ return -EINVAL;
+ mode = convert_mode(&msgtyp, msgflg);
+ ns = current->nsproxy->ipc_ns;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.ipc.type = I_msg;
++ rsbac_target_id.ipc.id.id_nr = msqid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_RECEIVE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ msq = msg_lock_check(ns, msqid);
+ if (IS_ERR(msq))
+ return PTR_ERR(msq);
+@@ -808,6 +985,23 @@ long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
+ msg = ERR_PTR(-E2BIG);
+ goto out_unlock;
+ }
++
++ /* RSBAC: notify ADF of opened ipc */
++#ifdef CONFIG_RSBAC
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_RECEIVE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_msgrcv(): rsbac_adf_set_attr() returned error");
++ }
++#endif
++
+ list_del(&msg->m_list);
+ msq->q_qnum--;
+ msq->q_rtime = get_seconds();
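/*
 * Illustrative sketch, not part of the patch: the two-phase hook pattern the
 * IPC hunks above and below repeat for every object. rsbac_adf_request()
 * runs first and can veto the operation; rsbac_adf_set_attr() runs after the
 * operation succeeded and is only logged on failure. do_protected_ipc_op()
 * is a made-up stand-in for the real syscall body; the kernel context and
 * <rsbac/hooks.h> include are assumed as in the surrounding hunks.
 */
#ifdef CONFIG_RSBAC
static int example_ipc_hook(int ipc_id)
{
	union rsbac_target_id_t tid;
	union rsbac_target_id_t new_tid;
	union rsbac_attribute_value_t attr;
	int err;

	tid.ipc.type = I_msg;
	tid.ipc.id.id_nr = ipc_id;
	attr.dummy = 0;

	/* phase 1: ask the ADF; a zero return means NOT_GRANTED */
	if (!rsbac_adf_request(R_SEND, task_pid(current), T_IPC,
			       tid, A_none, attr))
		return -EPERM;

	err = do_protected_ipc_op(ipc_id);       /* hypothetical operation */
	if (err)
		return err;

	/* phase 2: notify the ADF so modules can update their state */
	new_tid.dummy = 0;
	if (rsbac_adf_set_attr(R_SEND, task_pid(current), T_IPC, tid,
			       T_NONE, new_tid, A_none, attr))
		rsbac_printk(KERN_WARNING
			     "example_ipc_hook(): rsbac_adf_set_attr() returned error\n");
	return 0;
}
#endif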
+diff --git a/ipc/sem.c b/ipc/sem.c
+index 5215a81..5be561c 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -89,6 +89,7 @@
+
+ #include <asm/uaccess.h>
+ #include "util.h"
++#include <rsbac/hooks.h>
+
+ /* One semaphore structure for each semaphore in the system. */
+ struct sem {
+@@ -292,6 +293,12 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
+ int semflg = params->flg;
+ int i;
+
++#ifdef CONFIG_RSBAC_IPC_SEM
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!nsems)
+ return -EINVAL;
+ if (ns->used_sems + nsems > ns->sc_semmns)
+@@ -307,6 +314,22 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
+ sma->sem_perm.mode = (semflg & S_IRWXUGO);
+ sma->sem_perm.key = key;
+
++#ifdef CONFIG_RSBAC_IPC_SEM
++ rsbac_pr_debug(aef, "[sys_semget()]: calling ADF\n");
++ rsbac_target_id.ipc.type = I_sem;
++ rsbac_target_id.ipc.id.id_nr = 0;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_CREATE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ ipc_rcu_putref(sma);
++ return -EPERM;
++ }
++#endif
++
+ sma->sem_perm.security = NULL;
+ retval = security_sem_alloc(sma);
+ if (retval) {
+@@ -334,6 +357,21 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
+ sma->sem_ctime = get_seconds();
+ sem_unlock(sma);
+
++/* RSBAC: notify ADF of new sem */
++#ifdef CONFIG_RSBAC_IPC_SEM
++ rsbac_target_id.ipc.id.id_nr = sma->sem_perm.id;
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_CREATE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value))
++ rsbac_printk(KERN_WARNING
++ "newary() [sys_semget()]: rsbac_adf_set_attr() returned error\n");
++#endif
+ return sma->sem_perm.id;
+ }
+
+@@ -894,6 +932,12 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
+ int nsems;
+ struct list_head tasks;
+
++#ifdef CONFIG_RSBAC_IPC_SEM
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ sma = sem_lock_check(ns, semid);
+ if (IS_ERR(sma))
+ return PTR_ERR(sma);
+@@ -934,12 +978,48 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
+ }
+ }
+
++#ifdef CONFIG_RSBAC_IPC_SEM
++ rsbac_target_id.ipc.type = I_sem;
++ rsbac_target_id.ipc.id.id_nr = semid;
++ rsbac_pr_debug(aef, "[sys_semctl()]: calling ADF\n");
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_READ,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ sem_unlock(sma);
++ err = -EPERM;
++ goto out_free;
++ }
++#endif
++
+ for (i = 0; i < sma->sem_nsems; i++)
+ sem_io[i] = sma->sem_base[i].semval;
+ sem_unlock(sma);
+ err = 0;
+ if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
+ err = -EFAULT;
++
++ /* RSBAC: notify ADF of read sem */
++#ifdef CONFIG_RSBAC_IPC_SEM
++ if(!err) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_READ,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "semctl_main() [sys_semctl()]: rsbac_adf_set_attr() returned error");
++ }
++ }
++#endif
++
+ goto out_free;
+ }
+ case SETALL:
+@@ -963,6 +1043,23 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
+ goto out_free;
+ }
+
++#ifdef CONFIG_RSBAC_IPC_SEM
++ rsbac_target_id.ipc.type = I_sem;
++ rsbac_target_id.ipc.id.id_nr = semid;
++ rsbac_pr_debug(aef, "[sys_semctl()]: calling ADF\n");
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_WRITE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ sem_putref(sma);
++ goto out_free;
++ }
++#endif
++
+ for (i = 0; i < nsems; i++) {
+ if (sem_io[i] > SEMVMX) {
+ sem_putref(sma);
+@@ -986,6 +1083,23 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
+ un->semadj[i] = 0;
+ }
+ sma->sem_ctime = get_seconds();
++
++ /* RSBAC: notify ADF of written sem */
++#ifdef CONFIG_RSBAC_IPC_SEM
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_WRITE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "semctl_main() [sys_semctl()]: rsbac_adf_set_attr() returned error");
++ }
++#endif
++
+ /* maybe some queued-up processes were waiting for this */
+ do_smart_update(sma, NULL, 0, 0, &tasks);
+ err = 0;
+@@ -1021,6 +1135,23 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
+ if (val > SEMVMX || val < 0)
+ goto out_unlock;
+
++#ifdef CONFIG_RSBAC_IPC_SEM
++ rsbac_target_id.ipc.type = I_sem;
++ rsbac_target_id.ipc.id.id_nr = semid;
++ rsbac_pr_debug(aef, "[sys_semctl()]: calling ADF\n");
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_WRITE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out_unlock;
++ }
++#endif
++
+ assert_spin_locked(&sma->sem_perm.lock);
+ list_for_each_entry(un, &sma->list_id, list_id)
+ un->semadj[semnum] = 0;
+@@ -1083,6 +1214,12 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
+ struct semid64_ds semid64;
+ struct kern_ipc_perm *ipcp;
+
++#ifdef CONFIG_RSBAC_IPC_SEM
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if(cmd == IPC_SET) {
+ if (copy_semid_from_user(&semid64, arg.buf, version))
+ return -EFAULT;
+@@ -1101,9 +1238,85 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
+
+ switch(cmd){
+ case IPC_RMID:
++#ifdef CONFIG_RSBAC_IPC_SEM
++ rsbac_target_id.ipc.type = I_sem;
++ rsbac_target_id.ipc.id.id_nr = semid;
++ rsbac_pr_debug(aef, "[sys_semctl()]: calling ADF\n");
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_DELETE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out_unlock;
++ }
++#endif
++
+ freeary(ns, ipcp);
++
++ /* RSBAC: notify ADF of deleted sem */
++#ifdef CONFIG_RSBAC_IPC_SEM
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_DELETE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "semctl_down() [sys_semctl()]: rsbac_adf_set_attr() returned error");
++ }
++#endif
++
+ goto out_up;
+ case IPC_SET:
++#ifdef CONFIG_RSBAC_IPC_SEM
++ rsbac_target_id.ipc.type = I_sem;
++ rsbac_target_id.ipc.id.id_nr = semid;
++ if (ipcp->uid != semid64.sem_perm.uid) {
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_attribute_value.owner = semid64.sem_perm.uid;
++ if (!rsbac_adf_request(R_CHANGE_OWNER,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out_unlock;
++ }
++ }
++ if (ipcp->gid != semid64.sem_perm.gid) {
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_attribute_value.group = semid64.sem_perm.gid;
++ if (!rsbac_adf_request(R_CHANGE_GROUP,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_group,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out_unlock;
++ }
++ }
++ if (ipcp->mode != ((ipcp->mode & ~S_IRWXUGO) | (S_IRWXUGO & semid64.sem_perm.mode))) {
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_attribute_value.mode = (S_IRWXUGO & semid64.sem_perm.mode);
++ if (!rsbac_adf_request(R_ALTER,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_mode,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out_unlock;
++ }
++ }
++#endif
+ ipc_update_perm(&semid64.sem_perm, ipcp);
+ sma->sem_ctime = get_seconds();
+ break;
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 406c5b2..62c8cd5 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -44,6 +44,8 @@
+
+ #include "util.h"
+
++#include <rsbac/hooks.h>
++
+ struct shm_file_data {
+ int id;
+ struct ipc_namespace *ns;
+@@ -85,6 +87,11 @@ void shm_init_ns(struct ipc_namespace *ns)
+ */
+ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
+ {
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
+ struct shmid_kernel *shp;
+ shp = container_of(ipcp, struct shmid_kernel, shm_perm);
+
+@@ -93,8 +100,25 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
+ /* Do not find it any more */
+ shp->shm_perm.key = IPC_PRIVATE;
+ shm_unlock(shp);
+- } else
++ } else {
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.ipc.type = I_shm;
++ rsbac_target_id.ipc.id.id_nr = shp->shm_perm.id;
++ rsbac_attribute_value.dummy = 0;
++#endif
++ shm_destroy(ns, shp);
++
++ /* RSBAC: notify ADF of deleted shm */
++#ifdef CONFIG_RSBAC
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_DELETE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_shmctl(): rsbac_adf_set_attr() returned error");
++ }
++#endif
++ }
+ }
+
+ #ifdef CONFIG_IPC_NS
+@@ -187,6 +211,10 @@ static void shm_open(struct vm_area_struct *vma)
+ */
+ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
+ {
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++#endif
++
+ ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ shm_rmid(ns, shp);
+ shm_unlock(shp);
+@@ -197,6 +225,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
+ shp->mlock_user);
+ fput (shp->shm_file);
+ security_shm_free(shp);
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ACI remove_target()\n");
++ rsbac_target_id.ipc.type = I_shm;
++ rsbac_target_id.ipc.id.id_nr = shp->shm_perm.id;
++ rsbac_remove_target(T_IPC, rsbac_target_id);
++#endif
++
+ ipc_rcu_putref(shp);
+ }
+
+@@ -230,6 +266,27 @@ static void shm_close(struct vm_area_struct *vma)
+ struct shmid_kernel *shp;
+ struct ipc_namespace *ns = sfd->ns;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_shmdt() et al.]: calling ADF\n");
++ rsbac_target_id.ipc.type = I_shm;
++ rsbac_target_id.ipc.id.id_nr = sfd->id;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_CLOSE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "shm_close() [sys_shmdt() et al.]: rsbac_adf_request() for CLOSE returned NOT_GRANTED\n");
++ }
++#endif
++
+ down_write(&shm_ids(ns).rw_mutex);
+ /* remove from the list of attaches of the shm segment */
+ shp = shm_lock(ns, sfd->id);
+@@ -456,6 +513,12 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+ int id;
+ vm_flags_t acctflag = 0;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (size < SHMMIN || size > ns->shm_ctlmax)
+ return -EINVAL;
+
+@@ -466,6 +529,22 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+ if (!shp)
+ return -ENOMEM;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_shmget()]: calling ADF\n");
++ rsbac_target_id.ipc.type = I_shm;
++ rsbac_target_id.ipc.id.id_nr = 0;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_CREATE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ ipc_rcu_putref(shp);
++ return -EPERM;
++ }
++#endif
++
+ shp->shm_perm.key = key;
+ shp->shm_perm.mode = (shmflg & S_IRWXUGO);
+ shp->mlock_user = NULL;
+@@ -521,6 +600,24 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+ ns->shm_tot += numpages;
+ error = shp->shm_perm.id;
+ shm_unlock(shp);
++
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.ipc.id.id_nr = file->f_dentry->d_inode->i_ino;
++ rsbac_new_target_id.ipc.type = I_shm;
++ rsbac_new_target_id.ipc.id.id_nr = file->f_dentry->d_inode->i_ino;
++ if (rsbac_adf_set_attr(R_CREATE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_IPC,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "newseg() [sys_shmget()]: rsbac_adf_set_attr() returned error");
++ }
++#endif
++
+ return error;
+
+ no_id:
+@@ -765,6 +862,11 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+ int err, version;
+ struct ipc_namespace *ns;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (cmd < 0 || shmid < 0) {
+ err = -EINVAL;
+ goto out;
+@@ -922,6 +1024,23 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+ }
+ case IPC_RMID:
+ case IPC_SET:
++
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.ipc.type = I_shm;
++ rsbac_target_id.ipc.id.id_nr = shmid;
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_DELETE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out;
++ }
++#endif
++
+ err = shmctl_down(ns, shmid, cmd, buf, version);
+ return err;
+ default:
+@@ -957,6 +1076,13 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
+ struct path path;
+ fmode_t f_mode;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_adf_request_t rsbac_request = R_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ err = -EINVAL;
+ if (shmid < 0)
+ goto out;
+@@ -1011,6 +1137,26 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
+ if (err)
+ goto out_unlock;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ if ((shmflg & SHM_RDONLY))
++ rsbac_request = R_READ_OPEN;
++ else
++ rsbac_request = R_READ_WRITE_OPEN;
++ rsbac_target_id.ipc.type = I_shm;
++ rsbac_target_id.ipc.id.id_nr = shp->shm_perm.id;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(rsbac_request,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out_unlock;
++ }
++#endif
++
+ path = shp->shm_file->f_path;
+ path_get(&path);
+ shp->shm_nattch++;
+@@ -1072,6 +1218,25 @@ out_nattch:
+ up_write(&shm_ids(ns).rw_mutex);
+
+ out:
++
++/* RSBAC: notify ADF of attached shm */
++#ifdef CONFIG_RSBAC
++ if(!err) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(rsbac_request,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_shmat(): rsbac_adf_set_attr() returned error");
++ }
++ }
++#endif
++
+ return err;
+
+ out_unlock:
+diff --git a/kernel/capability.c b/kernel/capability.c
+index 3f1adb6..8a85f33 100644
+--- a/kernel/capability.c
++++ b/kernel/capability.c
+@@ -17,6 +17,8 @@
+ #include <linux/user_namespace.h>
+ #include <asm/uaccess.h>
+
++#include <rsbac/hooks.h>
++
+ /*
+ * Leveraged for setting/resetting capabilities
+ */
+@@ -128,6 +130,11 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
+ {
+ int ret;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (pid && (pid != task_pid_vnr(current))) {
+ struct task_struct *target;
+
+@@ -136,12 +143,34 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
+ target = find_task_by_vpid(pid);
+ if (!target)
+ ret = -ESRCH;
+- else
++ else {
+ ret = security_capget(target, pEp, pIp, pPp);
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.process = task_pid(target);
++#endif
++ }
+
+ rcu_read_unlock();
+- } else
++ } else {
+ ret = security_capget(current, pEp, pIp, pPp);
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.process = task_pid(current);
++#endif
++ }
++#ifdef CONFIG_RSBAC
++ if(!ret) {
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_attribute_value.dummy = 0;
++ if(!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ ret = -EPERM;
++ }
++ }
++#endif
+
+ return ret;
+ }
+@@ -238,6 +267,12 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
+ int ret;
+ pid_t pid;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ ret = cap_validate_magic(header, &tocopy);
+ if (ret != 0)
+ return ret;
+@@ -272,6 +307,25 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
+ if (!new)
+ return -ENOMEM;
+
++#ifdef CONFIG_RSBAC
++ if (!cap_issubset(effective, new->cap_effective)
++ || !cap_issubset(permitted, new->cap_permitted)
++ || !cap_issubset(inheritable, new->cap_inheritable)) {
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_capability;
++ rsbac_attribute_value.dummy = 0;
++ if(!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ ret = -EPERM;
++ goto error;
++ }
++ }
++#endif
++
+ ret = security_capset(new, current_cred(),
+ &effective, &inheritable, &permitted);
+ if (ret < 0)
+@@ -279,7 +333,26 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
+
+ audit_log_capset(pid, new, current_cred());
+
+- return commit_creds(new);
++ ret = commit_creds(new);
++
++#ifdef CONFIG_RSBAC
++ if (!ret) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_setcap(): rsbac_adf_set_attr() returned error");
++ }
++ }
++#endif
++
++ return ret;
+
+ error:
+ abort_creds(new);
+@@ -388,6 +461,9 @@ bool ns_capable(struct user_namespace *ns, int cap)
+ current->flags |= PF_SUPERPRIV;
+ return true;
+ }
++#if defined(CONFIG_RSBAC_CAP_LOG_MISSING) || defined(CONFIG_RSBAC_JAIL_LOG_MISSING)
++ rsbac_log_missing_cap(cap);
++#endif
+ return false;
+ }
+ EXPORT_SYMBOL(ns_capable);
+diff --git a/kernel/exit.c b/kernel/exit.c
+index d8bd3b42..03a6ff3 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -59,6 +59,8 @@
+ #include <asm/pgtable.h>
+ #include <asm/mmu_context.h>
+
++#include <rsbac/hooks.h>
++
+ static void exit_mm(struct task_struct * tsk);
+
+ static void __unhash_process(struct task_struct *p, bool group_dead)
+@@ -900,6 +902,11 @@ void do_exit(long code)
+ struct task_struct *tsk = current;
+ int group_dead;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ profile_task_exit(tsk);
+
+ WARN_ON(blk_needs_flush_plug(tsk));
+@@ -987,6 +994,23 @@ void do_exit(long code)
+ exit_shm(tsk);
+ exit_files(tsk);
+ exit_fs(tsk);
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_exit()]: calling ADF\n");
++ rsbac_target_id.process = task_pid(tsk);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_TERMINATE,
++ rsbac_target_id.process,
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ rsbac_printk(KERN_WARNING
++ "do_exit() [sys_exit()]: ADF request for TERMINATE returned NOT_GRANTED!\n");
++ }
++#endif
++
+ check_stack_usage();
+ exit_thread();
+
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 687a15d..8c41258 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -76,6 +76,8 @@
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
+
++#include <rsbac/hooks.h>
++
+ #include <trace/events/sched.h>
+
+ #define CREATE_TRACE_POINTS
+@@ -588,6 +590,27 @@ void mmput(struct mm_struct *mm)
+ }
+ EXPORT_SYMBOL_GPL(mmput);
+
++#ifdef CONFIG_RSBAC
++/* Yes, I hate putting new functions here as much as ao does. We seem to
++ * have no choice because mmput() is being used from rsbac_adf_request_int(),
++ * which in turn cannot sleep when called from do_exit(). michal. */
++void mmput_nosleep(struct mm_struct *mm)
++{
++ if (atomic_dec_and_test(&mm->mm_users)) {
++ exit_aio(mm);
++ exit_mmap(mm);
++ if (!list_empty(&mm->mmlist)) {
++ spin_lock(&mmlist_lock);
++ list_del(&mm->mmlist);
++ spin_unlock(&mmlist_lock);
++ }
++ put_swap_token(mm);
++ mmdrop(mm);
++ }
++}
++EXPORT_SYMBOL_GPL(mmput_nosleep);
++#endif
++
+ /*
+ * We added or removed a vma mapping the executable. The vmas are only mapped
+ * during exec and are not mapped with the mmap system call.
+@@ -1541,7 +1564,12 @@ struct task_struct * __cpuinit fork_idle(int cpu)
+ * It copies the process, and if successful kick-starts
+ * it and waits for it to finish using the VM if required.
+ */
++
++#ifdef CONFIG_RSBAC
++long do_fork(unsigned long long clone_flags,
++#else
+ long do_fork(unsigned long clone_flags,
++#endif
+ unsigned long stack_start,
+ struct pt_regs *regs,
+ unsigned long stack_size,
+@@ -1552,6 +1580,31 @@ long do_fork(unsigned long clone_flags,
+ int trace = 0;
+ long nr;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ enum rsbac_attribute_t rsbac_attribute;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_attribute = A_none;
++ rsbac_attribute_value.dummy = 0;
++ if(current->pid) {
++ rsbac_pr_debug(aef, "[sys_fork(),sys_clone(),sys_vfork]: calling ADF\n");
++ rsbac_target_id.process = task_pid(current);
++ if (!rsbac_adf_request(R_CLONE,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++ }
++#endif
++
+ /*
+ * Do some preliminary argument and permissions checking before we
+ * actually start allocating stuff
+@@ -1607,6 +1660,33 @@ long do_fork(unsigned long clone_flags,
+ get_task_struct(p);
+ }
+
++#ifdef CONFIG_RSBAC
++ if (clone_flags & CLONE_KTHREAD) {
++ rsbac_attribute = A_kernel_thread;
++ rsbac_attribute_value.kernel_thread = 1;
++ rsbac_mark_kthread(task_pid(p));
++ rsbac_kthread_notify(task_pid(p));
++ }
++
++ if (current->pid)
++ {
++ rsbac_pr_debug(aef, "[sys_fork(),sys_clone(),sys_vfork()]: calling ADF_set_attr\n");
++ rsbac_new_target_id.process = task_pid(p);
++ if (rsbac_adf_set_attr(R_CLONE,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ T_PROCESS,
++ rsbac_new_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value))
++ {
++ rsbac_printk(KERN_WARNING
++ "do_fork() [sys_fork(), sys_clone()]: rsbac_adf_set_attr() returned error!\n");
++ }
++ }
++#endif
++
+ wake_up_new_task(p);
+
+ /* forking complete and child started to run, tell ptracer */
+diff --git a/kernel/groups.c b/kernel/groups.c
+index 99b53d1..506b5fb 100644
+--- a/kernel/groups.c
++++ b/kernel/groups.c
+@@ -8,6 +8,8 @@
+ #include <linux/syscalls.h>
+ #include <asm/uaccess.h>
+
++#include <rsbac/hooks.h>
++
+ /* init to 2 - one for init_task, one to ensure it is never freed */
+ struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
+
+@@ -233,6 +235,12 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
+ struct group_info *group_info;
+ int retval;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ int i;
++#endif
++
+ if (!nsown_capable(CAP_SETGID))
+ return -EPERM;
+ if ((unsigned)gidsetsize > NGROUPS_MAX)
+@@ -247,6 +255,25 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
+ return retval;
+ }
+
++#ifdef CONFIG_RSBAC
++ if (gidsetsize > 0) {
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.process = task_pid(current);
++ for (i=0; i < gidsetsize; i++) {
++ rsbac_attribute_value.group = RSBAC_GEN_GID(RSBAC_UM_VIRTUAL_KEEP, group_info->blocks[i / NGROUPS_PER_BLOCK][i]);
++ if(!rsbac_adf_request(R_CHANGE_GROUP,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_group,
++ rsbac_attribute_value)) {
++ put_group_info(group_info);
++ return -EPERM;
++ }
++ }
++ }
++#endif
++
+ retval = set_current_groups(group_info);
+ put_group_info(group_info);
+
+diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
+index 079f1d3..5279edbb 100644
+--- a/kernel/kallsyms.c
++++ b/kernel/kallsyms.c
+@@ -26,6 +26,8 @@
+
+ #include <asm/sections.h>
+
++#include <rsbac/hooks.h>
++
+ #ifdef CONFIG_KALLSYMS_ALL
+ #define all_var 1
+ #else
+@@ -540,6 +542,26 @@ static int kallsyms_open(struct inode *inode, struct file *file)
+ struct kallsym_iter *iter;
+ int ret;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.scd = ST_ksyms;
++ rsbac_attribute_value.dummy = 0;
++ rsbac_pr_debug(aef, "calling ADF\n");
++ if(!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return -ENOMEM;
+diff --git a/kernel/kexec.c b/kernel/kexec.c
+index 4e2e472..204b478 100644
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -39,6 +39,8 @@
+ #include <asm/io.h>
+ #include <asm/sections.h>
+
++#include <rsbac/hooks.h>
++
+ /* Per cpu memory for storing cpu states in case of system crash. */
+ note_buf_t __percpu *crash_notes;
+
+@@ -943,10 +945,30 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
+ struct kimage **dest_image, *image;
+ int result;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ /* We only trust the superuser with rebooting the system. */
+ if (!capable(CAP_SYS_BOOT))
+ return -EPERM;
+
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.scd = ST_kexec;
++ rsbac_attribute_value.dummy = 0;
++ rsbac_pr_debug(aef, "calling ADF\n");
++ if(!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ /*
+ * Verify we have a legal set of flags
+ * This leaves us room for future extensions.
+diff --git a/kernel/module.c b/kernel/module.c
+index 78ac6ec..df694ee 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -59,6 +59,8 @@
+ #include <linux/pfn.h>
+ #include <linux/bsearch.h>
+
++#include <rsbac/hooks.h>
++
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/module.h>
+
+@@ -772,6 +774,11 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
+ char name[MODULE_NAME_LEN];
+ int ret, forced = 0;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!capable(CAP_SYS_MODULE) || modules_disabled)
+ return -EPERM;
+
+@@ -781,6 +788,19 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
+
+ if (mutex_lock_interruptible(&module_mutex) != 0)
+ return -EINTR;
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.dummy = 0;
++ rsbac_attribute_value.mod_name = name;
++ if (!rsbac_adf_request(R_REMOVE_FROM_KERNEL,
++ task_pid(current),
++ T_NONE,
++ rsbac_target_id,
++ A_mod_name,
++ rsbac_attribute_value)) {
++ ret = -EPERM;
++ goto out;
++ }
++#endif
+
+ mod = find_module(name);
+ if (!mod) {
+@@ -3012,10 +3032,28 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
+ struct module *mod;
+ int ret = 0;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ /* Must have permission */
+ if (!capable(CAP_SYS_MODULE) || modules_disabled)
+ return -EPERM;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.dummy = 0;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_ADD_TO_KERNEL,
++ task_pid(current),
++ T_NONE,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
+ /* Do all the hard work */
+ mod = load_module(umod, len, uargs);
+ if (IS_ERR(mod))
+diff --git a/kernel/printk.c b/kernel/printk.c
+index b663c2c..faec19e 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -47,6 +47,8 @@
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/printk.h>
+
++#include <rsbac/hooks.h>
++
+ /*
+ * Architectures can override it:
+ */
+@@ -339,6 +341,11 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
+ char c;
+ int error;
+
++#ifdef CONFIG_RSBAC_SYSLOG
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ error = check_syslog_permissions(type, from_file);
+ if (error)
+ goto out;
+@@ -347,6 +354,44 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
+ if (error)
+ return error;
+
++#ifdef CONFIG_RSBAC_SYSLOG
++ rsbac_pr_debug(aef, "[sys_syslog()]: calling ADF\n");
++ rsbac_target_id.scd = ST_syslog;
++ rsbac_attribute_value.dummy = 0;
++ switch(type) {
++ case 2:
++ case 3:
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ error = -EPERM;
++ goto out;
++ }
++ break;
++ case 4:
++ case 5:
++ case 6:
++ case 7:
++ case 8:
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ error = -EPERM;
++ goto out;
++ }
++ break;
++
++ default:
++ break;
++ }
++#endif
++
+ switch (type) {
+ case SYSLOG_ACTION_CLOSE: /* Close log */
+ break;
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index ee8d49b..d8324b2 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -32,6 +32,8 @@ static int ptrace_trapping_sleep_fn(void *flags)
+ return 0;
+ }
+
++#include <rsbac/hooks.h>
++
+ /*
+ * ptrace a task: make the debugger its new parent and
+ * move it to the ptrace list.
+@@ -861,7 +863,26 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
+ struct task_struct *child;
+ long ret;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (request == PTRACE_TRACEME) {
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_ptrace] calling ADF\n");
++ rsbac_target_id.process = task_pid(current);
++ rsbac_attribute_value.trace_request = PTRACE_TRACEME;
++ if (!rsbac_adf_request(R_TRACE,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_trace_request,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ ret = ptrace_traceme();
+ if (!ret)
+ arch_ptrace_attach(current);
+@@ -874,6 +895,23 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
+ goto out;
+ }
+
++#ifdef CONFIG_RSBAC
++ if (request != PTRACE_DETACH) {
++ rsbac_pr_debug(aef, "[sys_ptrace] calling ADF\n");
++ rsbac_target_id.process = task_pid(child);
++ rsbac_attribute_value.trace_request = request;
++ if (!rsbac_adf_request(R_TRACE,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_trace_request,
++ rsbac_attribute_value)) {
++ ret = -EPERM;
++ goto out_put_task_struct;
++ }
++ }
++#endif
++
+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
+ ret = ptrace_attach(child, request, addr, data);
+ /*
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index e5212ae..bd4a927 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -81,6 +81,8 @@
+ #include <asm/paravirt.h>
+ #endif
+
++#include <rsbac/hooks.h>
++
+ #include "sched.h"
+ #include "../workqueue_sched.h"
+
+@@ -3923,6 +3925,10 @@ int can_nice(const struct task_struct *p, const int nice)
+ SYSCALL_DEFINE1(nice, int, increment)
+ {
+ long nice, retval;
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
+
+ /*
+ * Setpriority might change our priority at the same moment.
+@@ -3943,6 +3949,22 @@ SYSCALL_DEFINE1(nice, int, increment)
+ if (increment < 0 && !can_nice(current, nice))
+ return -EPERM;
+
++#ifdef CONFIG_RSBAC
++ if (increment < 0) {
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_priority;
++ rsbac_attribute_value.priority = nice;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++ }
++#endif
++
+ retval = security_task_setnice(current, nice);
+ if (retval)
+ return retval;
+@@ -4245,6 +4267,12 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+ struct task_struct *p;
+ int retval;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!param || pid < 0)
+ return -EINVAL;
+ if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
+@@ -4253,8 +4281,31 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+ rcu_read_lock();
+ retval = -ESRCH;
+ p = find_process_by_pid(pid);
+- if (p != NULL)
++ if (p != NULL) {
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_sched_setscheduler, sys_sched_setparam]: calling ADF\n");
++ if (!pid || (pid == current->pid)) {
++ rsbac_target = T_SCD;
++ rsbac_target_id.scd = ST_priority;
++ } else {
++ rsbac_target = T_PROCESS;
++ rsbac_target_id.process = task_pid(p);
++ }
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rcu_read_unlock();
++ return -EPERM;
++ }
++#endif
++
+ retval = sched_setscheduler(p, policy, &lparam);
++ }
+ rcu_read_unlock();
+
+ return retval;
+@@ -4295,9 +4346,35 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
+ struct task_struct *p;
+ int retval;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (pid < 0)
+ return -EINVAL;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_sched_getscheduler]: calling ADF\n");
++ if (!pid || (pid == current->pid)) {
++ rsbac_target = T_SCD;
++ rsbac_target_id.scd = ST_priority;
++ } else {
++ rsbac_target = T_PROCESS;
++ rsbac_target_id.process = find_pid_ns(pid, &init_pid_ns);
++ }
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ retval = -ESRCH;
+ rcu_read_lock();
+ p = find_process_by_pid(pid);
+@@ -4322,9 +4399,35 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
+ struct task_struct *p;
+ int retval;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!param || pid < 0)
+ return -EINVAL;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_sched_getparam]: calling ADF\n");
++ if (!pid || (pid == current->pid)) {
++ rsbac_target = T_SCD;
++ rsbac_target_id.scd = ST_priority;
++ } else {
++ rsbac_target = T_PROCESS;
++ rsbac_target_id.process = find_pid_ns(pid, &init_pid_ns);
++ }
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ rcu_read_lock();
+ p = find_process_by_pid(pid);
+ retval = -ESRCH;
+@@ -4352,6 +4455,12 @@ out_unlock:
+
+ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+ {
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ cpumask_var_t cpus_allowed, new_mask;
+ struct task_struct *p;
+ int retval;
+@@ -4386,6 +4495,27 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+ if (retval)
+ goto out_unlock;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_sched_setaffinity]: calling ADF\n");
++ if (p == current) {
++ rsbac_target = T_SCD;
++ rsbac_target_id.scd = ST_priority;
++ } else {
++ rsbac_target = T_PROCESS;
++ rsbac_target_id.process = task_pid(p);
++ }
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ retval = -EPERM;
++ goto out_unlock;
++ }
++#endif
++
+ cpuset_cpus_allowed(p, cpus_allowed);
+ cpumask_and(new_mask, in_mask, cpus_allowed);
+ again:
+@@ -4451,6 +4581,31 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
+ struct task_struct *p;
+ unsigned long flags;
+ int retval;
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sched_getaffinity]: calling ADF\n");
++ if (!pid || (pid == current->pid)) {
++ rsbac_target = T_SCD;
++ rsbac_target_id.scd = ST_priority;
++ } else {
++ rsbac_target = T_PROCESS;
++ rsbac_target_id.process = find_pid_ns(pid, &init_pid_ns);
++ }
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
+
+ get_online_cpus();
+ rcu_read_lock();
+@@ -4799,9 +4954,35 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
+ int retval;
+ struct timespec t;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (pid < 0)
+ return -EINVAL;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_sched_rr_get_interval]: calling ADF\n");
++ if (!pid || (pid == current->pid)) {
++ rsbac_target = T_SCD;
++ rsbac_target_id.scd = ST_priority;
++ } else {
++ rsbac_target = T_PROCESS;
++ rsbac_target_id.process = find_pid_ns(pid, &init_pid_ns);
++ }
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ retval = -ESRCH;
+ rcu_read_lock();
+ p = find_process_by_pid(pid);
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 17afcaf..73ddc9f 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -39,6 +39,8 @@
+ #include <asm/cacheflush.h>
+ #include "audit.h" /* audit_signal_info() */
+
++#include <rsbac/hooks.h>
++
+ /*
+ * SLAB caches for signal bits.
+ */
+@@ -790,6 +792,11 @@ static int check_kill_permission(int sig, struct siginfo *info,
+ struct pid *sid;
+ int error;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!valid_signal(sig))
+ return -EINVAL;
+
+@@ -816,6 +823,23 @@ static int check_kill_permission(int sig, struct siginfo *info,
+ }
+ }
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[group_send_sig_info(), sys_tgkill(),sys_tkill()]: calling ADF\n");
++ rsbac_target_id.process = task_pid(t);
++ rsbac_attribute_value.dummy = 0;
++ if ((!info || ((unsigned long)info != 1
++ && (unsigned long)info != 2 && SI_FROMUSER(info)))
++ && ((sig != SIGCONT) || (task_session(current) != task_session(t)))
++ && !rsbac_adf_request(R_SEND_SIGNAL,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)
++ )
++ return -EPERM;
++#endif
++
+ return security_task_kill(t, info, sig, 0);
+ }
+
+diff --git a/kernel/sys.c b/kernel/sys.c
+index e7006eb..eceeec1 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -54,6 +54,8 @@
+ #include <asm/io.h>
+ #include <asm/unistd.h>
+
++#include <rsbac/hooks.h>
++
+ #ifndef SET_UNALIGN_CTL
+ # define SET_UNALIGN_CTL(a,b) (-EINVAL)
+ #endif
+@@ -178,6 +180,12 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
+ int error = -EINVAL;
+ struct pid *pgrp;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (which > PRIO_USER || which < PRIO_PROCESS)
+ goto out;
+
+@@ -188,6 +196,37 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
+ if (niceval > 19)
+ niceval = 19;
+
++#ifdef CONFIG_RSBAC
++ if ((niceval < (current->static_prio - MAX_RT_PRIO - 20)) || ((which == PRIO_PROCESS)
++ && (who != 0)
++ && (who != current->pid))
++ || ((which == PRIO_PGRP)
++ && (who != 0)
++ && (who != current->pid))) {
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rcu_read_lock();
++ if (niceval < (current->static_prio - MAX_RT_PRIO - 20)) {
++ rsbac_target = T_SCD;
++ rsbac_target_id.scd = ST_priority;
++ } else {
++ rsbac_target = T_PROCESS;
++ rsbac_target_id.process = find_pid_ns(who, &init_pid_ns);
++ }
++ rsbac_attribute_value.priority = niceval;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_priority,
++ rsbac_attribute_value)) {
++ rcu_read_unlock();
++ error = -EPERM;
++ goto out;
++ }
++ rcu_read_unlock();
++ }
++#endif
++
+ rcu_read_lock();
+ read_lock(&tasklist_lock);
+ switch (which) {
+@@ -432,6 +471,11 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
+ char buffer[256];
+ int ret = 0;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ /* We only trust the superuser with rebooting the system. */
+ if (!capable(CAP_SYS_BOOT))
+ return -EPERM;
+@@ -459,6 +503,20 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
+ if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
+ cmd = LINUX_REBOOT_CMD_HALT;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.dummy = 0;
++ rsbac_attribute_value.reboot_cmd = cmd;
++ if (!rsbac_adf_request(R_SHUTDOWN,
++ task_pid(current),
++ T_NONE,
++ rsbac_target_id,
++ A_reboot_cmd,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ mutex_lock(&reboot_mutex);
+ switch (cmd) {
+ case LINUX_REBOOT_CMD_RESTART:
+@@ -564,18 +622,66 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
+
+ retval = -EPERM;
+ if (rgid != (gid_t) -1) {
+- if (old->gid == rgid ||
++
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.process = task_pid(current);
++ rsbac_attribute_value.long_dummy = 0;
++ rsbac_attribute_value.group = RSBAC_GEN_GID(RSBAC_UM_VIRTUAL_KEEP, rgid);
++#endif
++
++ if ((old->gid == rgid ||
+ old->egid == rgid ||
+ nsown_capable(CAP_SETGID))
++
++#ifdef CONFIG_RSBAC
++ && rsbac_adf_request(R_CHANGE_GROUP,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_group,
++ rsbac_attribute_value)
++#endif
++ )
+ new->gid = rgid;
+ else
+ goto error;
+ }
+ if (egid != (gid_t) -1) {
+- if (old->gid == egid ||
++
++#ifdef CONFIG_RSBAC_DAC_GROUP
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.process = task_pid(current);
++ rsbac_attribute_value.long_dummy = 0;
++ rsbac_attribute_value.group = RSBAC_GEN_GID(RSBAC_UM_VIRTUAL_KEEP, egid);
++#endif
++
++ if ((old->gid == egid ||
+ old->egid == egid ||
+ old->sgid == egid ||
+ nsown_capable(CAP_SETGID))
++
++#ifdef CONFIG_RSBAC_DAC_GROUP
++ && rsbac_adf_request(R_CHANGE_DAC_EFF_GROUP,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_group,
++ rsbac_attribute_value)
++ && rsbac_adf_request(R_CHANGE_DAC_FS_GROUP,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_group,
++ rsbac_attribute_value)
++#endif
++ )
+ new->egid = egid;
+ else
+ goto error;
+@@ -604,16 +710,68 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
+ struct cred *new;
+ int retval;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+ old = current_cred();
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.process = task_pid(current);
++ rsbac_attribute_value.group = RSBAC_GEN_GID(RSBAC_UM_VIRTUAL_KEEP, gid);
++#endif
++
+ retval = -EPERM;
+- if (nsown_capable(CAP_SETGID))
++ if ((nsown_capable(CAP_SETGID))
++#ifdef CONFIG_RSBAC
++ && rsbac_adf_request(R_CHANGE_GROUP,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_group,
++ rsbac_attribute_value)
++#ifdef CONFIG_RSBAC_DAC_GROUP
++ && rsbac_adf_request(R_CHANGE_DAC_EFF_GROUP,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_group,
++ rsbac_attribute_value)
++ && rsbac_adf_request(R_CHANGE_DAC_FS_GROUP,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_group,
++ rsbac_attribute_value)
++#endif
++#endif
++ )
++ {
+ new->gid = new->egid = new->sgid = new->fsgid = gid;
+- else if (gid == old->gid || gid == old->sgid)
++ } else if ((gid == old->gid || gid == old->sgid)
++#ifdef CONFIG_RSBAC_DAC_GROUP
++ && rsbac_adf_request(R_CHANGE_DAC_EFF_GROUP,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_group,
++ rsbac_attribute_value)
++ && rsbac_adf_request(R_CHANGE_DAC_FS_GROUP,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_group,
++ rsbac_attribute_value)
++#endif
++ )
++ {
+ new->egid = new->fsgid = gid;
++ }
+ else
+ goto error;
+
+@@ -674,6 +832,12 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
+ struct cred *new;
+ int retval;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+@@ -707,11 +871,106 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
+ new->suid = new->euid;
+ new->fsuid = new->euid;
+
++#ifdef CONFIG_RSBAC
++ if (ruid != (uid_t) -1) {
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.process = task_pid(current);
++ rsbac_attribute_value.long_dummy = 0;
++ rsbac_attribute_value.owner = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, ruid);
++ if (!rsbac_adf_request(R_CHANGE_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ retval = -EPERM;
++ goto error;
++ }
++ }
++#ifdef CONFIG_RSBAC_DAC_OWNER
++ if (euid != (uid_t) -1) {
++ rsbac_pr_debug(aef, "calling ADF for euid\n");
++ rsbac_target_id.process = task_pid(current);
++ rsbac_attribute_value.long_dummy = 0;
++ rsbac_attribute_value.owner = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, euid);
++ if (!rsbac_adf_request(R_CHANGE_DAC_EFF_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ retval = -EPERM;
++ goto error;
++ }
++ if (!rsbac_adf_request(R_CHANGE_DAC_FS_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ retval = -EPERM;
++ goto error;
++ }
++ }
++#endif
++#endif
++
+ retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
+ if (retval < 0)
+ goto error;
+
+- return commit_creds(new);
++ retval = commit_creds(new);
++
++#ifdef CONFIG_RSBAC
++ if(!retval) {
++ if(ruid != (uid_t) -1) {
++ rsbac_set_audit_uid(old->uid);
++ rsbac_attribute_value.owner = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, current_uid());
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_CHANGE_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_setreuid(): rsbac_adf_set_attr() returned error");
++ }
++ }
++#ifdef CONFIG_RSBAC_DAC_OWNER
++ if(euid != (uid_t) -1) {
++ rsbac_attribute_value.owner = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, current_euid());
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_CHANGE_DAC_EFF_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_setreuid(): rsbac_adf_set_attr() for euid returned error");
++ }
++ if (rsbac_adf_set_attr(R_CHANGE_DAC_FS_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_setreuid(): rsbac_adf_set_attr() for fsuid returned error");
++ }
++ }
++#endif
++ }
++#endif
++
++ return retval;
+
+ error:
+ abort_creds(new);
+@@ -735,11 +994,60 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
+ struct cred *new;
+ int retval;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC_FAKE_ROOT_UID
++ if(!uid && rsbac_uid_faked())
++ return 0;
++#endif
++
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+ old = current_cred();
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.process = task_pid(current);
++ rsbac_attribute_value.long_dummy = 0;
++ rsbac_attribute_value.owner = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, uid);
++ if(!rsbac_adf_request(R_CHANGE_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ retval = -EPERM;
++ goto error;
++ }
++#ifdef CONFIG_RSBAC_DAC_OWNER
++ rsbac_pr_debug(aef, "calling ADF for euid\n");
++ if (!rsbac_adf_request(R_CHANGE_DAC_EFF_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ retval = -EPERM;
++ goto error;
++ }
++ rsbac_pr_debug(aef, "calling ADF for fsuid\n");
++ if (!rsbac_adf_request(R_CHANGE_DAC_FS_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ retval = -EPERM;
++ goto error;
++ }
++#endif
++#endif
++
+ retval = -EPERM;
+ if (nsown_capable(CAP_SETUID)) {
+ new->suid = new->uid = uid;
+@@ -758,7 +1066,53 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
+ if (retval < 0)
+ goto error;
+
+- return commit_creds(new);
++ retval = commit_creds(new);
++
++#ifdef CONFIG_RSBAC
++ if (!retval) {
++ rsbac_set_audit_uid(old->uid);
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_CHANGE_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_setuid(): rsbac_adf_set_attr() returned error");
++ }
++#ifdef CONFIG_RSBAC_DAC_OWNER
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_CHANGE_DAC_EFF_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_setuid(): rsbac_adf_set_attr() for euid returned error");
++ }
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_CHANGE_DAC_FS_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_setuid(): rsbac_adf_set_attr() for fsuid returned error");
++ }
++#endif
++ }
++#endif
++
++ return retval;
+
+ error:
+ abort_creds(new);
+@@ -776,6 +1130,12 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
+ struct cred *new;
+ int retval;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+@@ -795,6 +1155,51 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
+ goto error;
+ }
+
++#ifdef CONFIG_RSBAC
++ if(ruid != (uid_t) -1) {
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.process = task_pid(current);
++ rsbac_attribute_value.long_dummy = 0;
++ rsbac_attribute_value.owner = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, ruid);
++ if(!rsbac_adf_request(R_CHANGE_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ retval = -EPERM;
++ goto error;
++ }
++ }
++#ifdef CONFIG_RSBAC_DAC_OWNER
++ if(euid != (uid_t) -1) {
++ rsbac_pr_debug(aef, "calling ADF for euid\n");
++ rsbac_target_id.process = task_pid(current);
++ rsbac_attribute_value.long_dummy = 0;
++ rsbac_attribute_value.owner = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, euid);
++ if(!rsbac_adf_request(R_CHANGE_DAC_EFF_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ retval = -EPERM;
++ goto error;
++ }
++ rsbac_pr_debug(aef, "calling ADF for fsuid\n");
++ if(!rsbac_adf_request(R_CHANGE_DAC_FS_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ retval = -EPERM;
++ goto error;
++ }
++ }
++#endif
++#endif
++
+ if (ruid != (uid_t) -1) {
+ new->uid = ruid;
+ if (ruid != old->uid) {
+@@ -813,7 +1218,58 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
+ if (retval < 0)
+ goto error;
+
+- return commit_creds(new);
++ retval = commit_creds(new);
++
++#ifdef CONFIG_RSBAC
++ if (!retval) {
++ if(ruid != (uid_t) -1) {
++ rsbac_set_audit_uid(old->uid);
++ rsbac_new_target_id.dummy = 0;
++ rsbac_attribute_value.owner = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, current_uid());
++ if (rsbac_adf_set_attr(R_CHANGE_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_setresuid(): rsbac_adf_set_attr() returned error");
++ }
++ }
++#ifdef CONFIG_RSBAC_DAC_OWNER
++ if(euid != (uid_t) -1) {
++ rsbac_new_target_id.dummy = 0;
++ rsbac_attribute_value.owner = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, current_euid());
++ if (rsbac_adf_set_attr(R_CHANGE_DAC_EFF_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_setreuid(): rsbac_adf_set_attr() for euid returned error\n");
++ }
++ if (rsbac_adf_set_attr(R_CHANGE_DAC_FS_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_setreuid(): rsbac_adf_set_attr() for fsuid returned error\n");
++ }
++ }
++#endif
++ }
++#endif
++
++ return retval;
+
+ error:
+ abort_creds(new);
+@@ -841,6 +1297,11 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
+ struct cred *new;
+ int retval;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+@@ -859,10 +1320,50 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
+ goto error;
+ }
+
+- if (rgid != (gid_t) -1)
++ if (rgid != (gid_t) -1) {
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.process = task_pid(current);
++ rsbac_attribute_value.group = RSBAC_GEN_GID(RSBAC_UM_VIRTUAL_KEEP, rgid);
++ if(!rsbac_adf_request(R_CHANGE_GROUP,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_group,
++ rsbac_attribute_value)) {
++ retval = -EPERM;
++ goto error;
++ }
++#endif
+ new->gid = rgid;
+- if (egid != (gid_t) -1)
++ }
++
++ if (egid != (gid_t) -1) {
++#ifdef CONFIG_RSBAC_DAC_GROUP
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.process = task_pid(current);
++ rsbac_attribute_value.group = RSBAC_GEN_GID(RSBAC_UM_VIRTUAL_KEEP, egid);
++ if (!rsbac_adf_request(R_CHANGE_DAC_EFF_GROUP,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_group,
++ rsbac_attribute_value)) {
++ retval = -EPERM;
++ goto error;
++ }
++ if (!rsbac_adf_request(R_CHANGE_DAC_FS_GROUP,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_group,
++ rsbac_attribute_value)) {
++ retval = -EPERM;
++ goto error;
++ }
++#endif
+ new->egid = egid;
++ }
+ if (sgid != (gid_t) -1)
+ new->sgid = sgid;
+ new->fsgid = new->egid;
+@@ -899,12 +1400,32 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
+ struct cred *new;
+ uid_t old_fsuid;
+
++#ifdef CONFIG_RSBAC_DAC_OWNER
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ new = prepare_creds();
+ if (!new)
+ return current_fsuid();
+ old = current_cred();
+ old_fsuid = old->fsuid;
+
++#ifdef CONFIG_RSBAC_DAC_OWNER
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.process = task_pid(current);
++ rsbac_attribute_value.owner = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, uid);
++ if (!rsbac_adf_request(R_CHANGE_DAC_FS_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ abort_creds(new);
++ return old_fsuid;
++ }
++#endif
+ if (uid == old->uid || uid == old->euid ||
+ uid == old->suid || uid == old->fsuid ||
+ nsown_capable(CAP_SETUID)) {
+@@ -920,6 +1441,24 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
+
+ change_okay:
+ commit_creds(new);
++
++#ifdef CONFIG_RSBAC_DAC_OWNER
++ rsbac_target_id.process = task_pid(current);
++ rsbac_new_target_id.dummy = 0;
++ rsbac_attribute_value.owner = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, uid);
++ if (rsbac_adf_set_attr(R_CHANGE_DAC_FS_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_owner,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_setfsuid(): rsbac_adf_set_attr() returned error\n");
++ }
++#endif
++
+ return old_fsuid;
+ }
+
+@@ -932,12 +1471,31 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
+ struct cred *new;
+ gid_t old_fsgid;
+
++#ifdef CONFIG_RSBAC_DAC_GROUP
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ new = prepare_creds();
+ if (!new)
+ return current_fsgid();
+ old = current_cred();
+ old_fsgid = old->fsgid;
+
++#ifdef CONFIG_RSBAC_DAC_GROUP
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.process = task_pid(current);
++ rsbac_attribute_value.group = RSBAC_GEN_GID(RSBAC_UM_VIRTUAL_KEEP, gid);
++ if (!rsbac_adf_request(R_CHANGE_DAC_FS_GROUP,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_group,
++ rsbac_attribute_value)) {
++ abort_creds(new);
++ return old_fsgid;
++ }
++#endif
+ if (gid == old->gid || gid == old->egid ||
+ gid == old->sgid || gid == old->fsgid ||
+ nsown_capable(CAP_SETGID)) {
+@@ -1002,6 +1560,11 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
+ struct pid *pgrp;
+ int err;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!pid)
+ pid = task_pid_vnr(group_leader);
+ if (!pgid)
+@@ -1010,6 +1573,21 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
+ return -EINVAL;
+ rcu_read_lock();
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.process = find_pid_ns(pid, &init_pid_ns);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ rcu_read_unlock();
++ return -EPERM;
++ }
++#endif
++
+ /* From this point forward we keep holding onto the tasklist lock
+ * so that our parent does not change from under us. -DaveM
+ */
+@@ -1076,6 +1654,11 @@ SYSCALL_DEFINE1(getpgid, pid_t, pid)
+ if (!pid)
+ grp = task_pgrp(current);
+ else {
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ retval = -ESRCH;
+ p = find_task_by_vpid(pid);
+ if (!p)
+@@ -1084,6 +1667,22 @@ SYSCALL_DEFINE1(getpgid, pid_t, pid)
+ if (!grp)
+ goto out;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.process = task_pid(p);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ retval = -EPERM;
++ goto out;
++ }
++#endif
++
+ retval = security_task_getpgid(p);
+ if (retval)
+ goto out;
+@@ -1113,6 +1712,11 @@ SYSCALL_DEFINE1(getsid, pid_t, pid)
+ if (!pid)
+ sid = task_session(current);
+ else {
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ retval = -ESRCH;
+ p = find_task_by_vpid(pid);
+ if (!p)
+@@ -1121,6 +1725,21 @@ SYSCALL_DEFINE1(getsid, pid_t, pid)
+ if (!sid)
+ goto out;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.process = task_pid(p);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ retval = -EPERM;
++ goto out;
++ }
++#endif
++
+ retval = security_task_getsid(p);
+ if (retval)
+ goto out;
+@@ -1282,11 +1901,31 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
+ int errno;
+ char tmp[__NEW_UTS_LEN];
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (len < 0 || len > __NEW_UTS_LEN)
+ return -EINVAL;
++
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_host_id;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ down_write(&uts_sem);
+ errno = -EFAULT;
+ if (!copy_from_user(tmp, name, len)) {
+@@ -1333,11 +1972,30 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
+ int errno;
+ char tmp[__NEW_UTS_LEN];
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
+ return -EPERM;
+ if (len < 0 || len > __NEW_UTS_LEN)
+ return -EINVAL;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_net_id;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ down_write(&uts_sem);
+ errno = -EFAULT;
+ if (!copy_from_user(tmp, name, len)) {
+@@ -1429,6 +2087,12 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
+ struct rlimit *rlim;
+ int retval = 0;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (resource >= RLIM_NLIMITS)
+ return -EINVAL;
+ if (new_rlim) {
+@@ -1454,6 +2118,23 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
+ if (new_rlim->rlim_max > rlim->rlim_max &&
+ !capable(CAP_SYS_RESOURCE))
+ retval = -EPERM;
++
++#ifdef CONFIG_RSBAC
++ if (!retval) {
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_rlimit;
++ rsbac_attribute_value.rlimit.resource = resource;
++ rsbac_attribute_value.rlimit.limit = *new_rlim;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_rlimit,
++ rsbac_attribute_value))
++ retval = -EPERM;
++ }
++#endif
++
+ if (!retval)
+ retval = security_task_setrlimit(tsk->group_leader,
+ resource, new_rlim);
+@@ -1475,6 +2156,21 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
+ }
+ task_unlock(tsk->group_leader);
+
++#ifdef CONFIG_RSBAC
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_rlimit,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_setrlimit(): rsbac_adf_set_attr() returned error");
++ }
++#endif
++
+ /*
+ * RLIMIT_CPU handling. Note that the kernel fails to return an error
+ * code if it rejected the user's attempt to set RLIMIT_CPU. This is a
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 4ab1187..ad89e44 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -64,6 +64,8 @@
+ #include <asm/uaccess.h>
+ #include <asm/processor.h>
+
++#include <rsbac/hooks.h>
++
+ #ifdef CONFIG_X86
+ #include <asm/nmi.h>
+ #include <asm/stacktrace.h>
+diff --git a/kernel/time.c b/kernel/time.c
+index ba744cf..5717b98 100644
+--- a/kernel/time.c
++++ b/kernel/time.c
+@@ -41,6 +41,8 @@
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+
++#include <rsbac/hooks.h>
++
+ #include "timeconst.h"
+
+ /*
+@@ -83,9 +85,29 @@ SYSCALL_DEFINE1(stime, time_t __user *, tptr)
+ struct timespec tv;
+ int err;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (get_user(tv.tv_sec, tptr))
+ return -EFAULT;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_clock;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ tv.tv_nsec = 0;
+
+ err = security_settime(&tv, NULL);
+@@ -155,6 +177,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
+ static int firsttime = 1;
+ int error = 0;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (tv && !timespec_valid(tv))
+ return -EINVAL;
+
+@@ -162,6 +189,21 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
+ if (error)
+ return error;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_settimeofday()]: calling ADF\n");
++ rsbac_target_id.scd = ST_clock;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ if (tz) {
+ sys_tz = *tz;
+ update_vsyscall_tz();
+diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
+index f03fd83..9026d13 100644
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -18,6 +18,8 @@
+
+ #include "tick-internal.h"
+
++#include <rsbac/hooks.h>
++
+ /*
+ * NTP timekeeping variables:
+ */
+@@ -620,6 +622,11 @@ int do_adjtimex(struct timex *txc)
+ struct timespec ts;
+ int result;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ /* Validate the data before disabling interrupts */
+ if (txc->modes & ADJ_ADJTIME) {
+ /* singleshot must not be used with any other mode bits */
+@@ -633,6 +640,19 @@ int do_adjtimex(struct timex *txc)
+ if (txc->modes && !capable(CAP_SYS_TIME))
+ return -EPERM;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_time_strucs;
++ rsbac_attribute_value.dummy = 0;
++ if (txc->modes && !rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ return -EPERM;
++#endif
++
+ /*
+ * if the quartz is off by more than 10% then
+ * something is VERY wrong!
+diff --git a/kernel/timer.c b/kernel/timer.c
+index a297ffc..a917355 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -47,6 +47,8 @@
+ #include <asm/timex.h>
+ #include <asm/io.h>
+
++#include <rsbac/hooks.h>
++
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/timer.h>
+
+@@ -1426,14 +1428,22 @@ SYSCALL_DEFINE0(getppid)
+
+ SYSCALL_DEFINE0(getuid)
+ {
++#ifdef CONFIG_RSBAC_FAKE_ROOT_UID
++ return rsbac_fake_uid();
++#else
+ /* Only we change this so SMP safe */
+ return current_uid();
++#endif
+ }
+
+ SYSCALL_DEFINE0(geteuid)
+ {
++#ifdef CONFIG_RSBAC_FAKE_ROOT_UID
++ return rsbac_fake_euid();
++#else
+ /* Only we change this so SMP safe */
+ return current_euid();
++#endif
+ }
+
+ SYSCALL_DEFINE0(getgid)
+diff --git a/kernel/uid16.c b/kernel/uid16.c
+index 51c6e89..b47e71b 100644
+--- a/kernel/uid16.c
++++ b/kernel/uid16.c
+@@ -16,6 +16,8 @@
+
+ #include <asm/uaccess.h>
+
++#include <rsbac/hooks.h>
++
+ SYSCALL_DEFINE3(chown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)
+ {
+ long ret = sys_chown(filename, low2highuid(user), low2highgid(group));
+@@ -189,6 +191,12 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
+ struct group_info *group_info;
+ int retval;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ int i;
++#endif
++
+ if (!nsown_capable(CAP_SETGID))
+ return -EPERM;
+ if ((unsigned)gidsetsize > NGROUPS_MAX)
+@@ -203,6 +211,26 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
+ return retval;
+ }
+
++#ifdef CONFIG_RSBAC
++ if (gidsetsize > 0) {
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.process = task_pid(current);
++ for (i=0; i < gidsetsize; i++) {
++ rsbac_attribute_value.group = RSBAC_GEN_GID(RSBAC_UM_VIRTUAL_KEEP, group_info->blocks[i / NGROUPS_PER_BLOCK][i]);
++ if(!rsbac_adf_request(R_CHANGE_GROUP,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_group,
++ rsbac_attribute_value))
++ {
++ put_group_info(group_info);
++ return -EPERM;
++ }
++ }
++ }
++#endif
++
+ retval = set_current_groups(group_info);
+ put_group_info(group_info);
+
+@@ -211,12 +239,20 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
+
+ SYSCALL_DEFINE0(getuid16)
+ {
++#ifdef CONFIG_RSBAC_FAKE_ROOT_UID
++ return high2lowuid(rsbac_fake_uid());
++#else
+ return high2lowuid(current_uid());
++#endif
+ }
+
+ SYSCALL_DEFINE0(geteuid16)
+ {
++#ifdef CONFIG_RSBAC_FAKE_ROOT_UID
++ return high2lowuid(rsbac_fake_euid());
++#else
+ return high2lowuid(current_euid());
++#endif
+ }
+
+ SYSCALL_DEFINE0(getgid16)
+diff --git a/mm/mlock.c b/mm/mlock.c
+index ef726e8..cab4f5e 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -13,6 +13,7 @@
+ #include <linux/pagemap.h>
+ #include <linux/mempolicy.h>
+ #include <linux/syscalls.h>
++#include <rsbac/hooks.h>
+ #include <linux/sched.h>
+ #include <linux/export.h>
+ #include <linux/rmap.h>
+@@ -485,9 +486,29 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
+ unsigned long lock_limit;
+ int error = -ENOMEM;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!can_do_mlock())
+ return -EPERM;
+
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.scd = ST_mlock;
++ rsbac_attribute_value.dummy = 0;
++ rsbac_pr_debug(aef, "calling ADF\n");
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ lru_add_drain_all(); /* flush pagevec */
+
+ down_write(&current->mm->mmap_sem);
+@@ -551,6 +572,11 @@ SYSCALL_DEFINE1(mlockall, int, flags)
+ unsigned long lock_limit;
+ int ret = -EINVAL;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
+ goto out;
+
+@@ -558,6 +584,21 @@ SYSCALL_DEFINE1(mlockall, int, flags)
+ if (!can_do_mlock())
+ goto out;
+
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.scd = ST_mlock;
++ rsbac_attribute_value.dummy = 0;
++ rsbac_pr_debug(aef, "calling ADF\n");
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ ret = -EPERM;
++ goto out;
++ }
++#endif
++
+ if (flags & MCL_CURRENT)
+ lru_add_drain_all(); /* flush pagevec */
+
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 848ef52..388d04c 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -34,6 +34,7 @@
+ #include <asm/uaccess.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlb.h>
++#include <rsbac/hooks.h>
+ #include <asm/mmu_context.h>
+
+ #include "internal.h"
+@@ -963,6 +964,12 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+ int error;
+ unsigned long reqprot = prot;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ /*
+ * Does the application expect PROT_READ to imply PROT_EXEC?
+ *
+@@ -1087,6 +1094,33 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+ if (error)
+ return error;
+
++#ifdef CONFIG_RSBAC
++ if (prot & PROT_EXEC) {
++ rsbac_pr_debug(aef, "[do_mmap() [sys_mmap()]]: calling ADF\n");
++ if (file) {
++ rsbac_target = T_FILE;
++ rsbac_target_id.file.device = file->f_dentry->d_inode->i_sb->s_dev;
++ rsbac_target_id.file.inode = file->f_dentry->d_inode->i_ino;
++ rsbac_target_id.file.dentry_p = file->f_dentry;
++ } else {
++ rsbac_target = T_NONE;
++ rsbac_target_id.dummy = 0;
++ }
++ rsbac_attribute_value.prot_bits = prot;
++ if (!rsbac_adf_request(R_MAP_EXEC,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_prot_bits,
++ rsbac_attribute_value))
++ {
++ rsbac_pr_debug(aef, "[do_mmap() [sys_mmap()]]: request not granted, my PID: %i\n",
++ current->pid);
++ return -EPERM;
++ }
++ }
++#endif
++
+ return mmap_region(file, addr, len, flags, vm_flags, pgoff);
+ }
+
+diff --git a/mm/mprotect.c b/mm/mprotect.c
+index a409926..e3dfa32 100644
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -27,6 +27,7 @@
+ #include <asm/pgtable.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
++#include <rsbac/hooks.h>
+
+ #ifndef pgprot_modify
+ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+@@ -236,6 +237,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+ struct vm_area_struct *vma, *prev;
+ int error = -EINVAL;
+ const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
++
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ int need_notify = FALSE;
++#endif
++
+ prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
+ if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
+ return -EINVAL;
+@@ -305,6 +314,34 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+ if (error)
+ goto out;
+
++#ifdef CONFIG_RSBAC
++ if ((prot & PROT_EXEC) && !(vma->vm_flags & PROT_EXEC)) {
++ rsbac_pr_debug(aef, "calling ADF\n");
++ if (vma->vm_file) {
++ rsbac_target = T_FILE;
++ rsbac_target_id.file.device = vma->vm_file->f_dentry->d_inode->i_sb->s_dev;
++ rsbac_target_id.file.inode = vma->vm_file->f_dentry->d_inode->i_ino;
++ rsbac_target_id.file.dentry_p = vma->vm_file->f_dentry;
++ } else {
++ rsbac_target = T_NONE;
++ rsbac_target_id.dummy = 0;
++ }
++ rsbac_attribute_value.prot_bits = prot;
++ if (!rsbac_adf_request(R_MAP_EXEC,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_prot_bits,
++ rsbac_attribute_value))
++ {
++ rsbac_pr_debug(aef, "request NOT_GRANTED\n");
++ error = -EPERM;
++ goto out;
++ } else
++ need_notify = TRUE;
++ }
++#endif
++
+ tmp = vma->vm_end;
+ if (tmp > end)
+ tmp = end;
+@@ -326,5 +363,28 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+ }
+ out:
+ up_write(&current->mm->mmap_sem);
++
++ /* RSBAC: notify ADF of mapped segment */
++#ifdef CONFIG_RSBAC
++ if (need_notify && !error) {
++ union rsbac_target_id_t rsbac_new_target_id;
++
++ rsbac_pr_debug(aef, "calling ADF_set_attr\n");
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_MAP_EXEC,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_mprotect: rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++
+ return error;
+ }
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index fafc26d..76ab648 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -35,6 +35,7 @@
+ #include <asm/pgtable.h>
+ #include <asm/tlbflush.h>
+ #include <linux/swapops.h>
++#include <rsbac/hooks.h>
+ #include <linux/page_cgroup.h>
+
+ static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
+@@ -1558,9 +1559,29 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ int i, type, prev;
+ int err;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_swap;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++}
++#endif
++
+ BUG_ON(!current->mm);
+
+ pathname = getname(specialfile);
+@@ -1574,6 +1595,35 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ if (IS_ERR(victim))
+ goto out;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF for DEV / FILE\n");
++ if (S_ISBLK(victim->f_dentry->d_inode->i_mode)) {
++ rsbac_target = T_DEV;
++ rsbac_target_id.dev.type = D_block;
++ rsbac_target_id.dev.major = RSBAC_MAJOR(victim->f_dentry->d_inode->i_rdev);
++ rsbac_target_id.dev.minor = RSBAC_MINOR(victim->f_dentry->d_inode->i_rdev);
++ } else
++ if (S_ISREG(victim->f_dentry->d_inode->i_mode)) {
++ rsbac_target = T_FILE;
++ rsbac_target_id.file.device = victim->f_dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = victim->f_dentry->d_inode->i_ino;
++ rsbac_target_id.file.dentry_p = victim->f_dentry;
++ } else {
++ rsbac_target = T_NONE;
++ rsbac_target_id.dummy = 0;
++ }
++ rsbac_attribute_value.dummy = 0;
++ if ((rsbac_target != T_NONE) && !rsbac_adf_request(R_REMOVE_FROM_KERNEL,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out_dput;
++ }
++#endif
++
+ mapping = victim->f_mapping;
+ prev = -1;
+ spin_lock(&swap_lock);
+@@ -2022,12 +2072,31 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+ struct page *page = NULL;
+ struct inode *inode = NULL;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (swap_flags & ~SWAP_FLAGS_VALID)
+ return -EINVAL;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_swap;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ return -EPERM;
++#endif
++
+ p = alloc_swap_info();
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+@@ -2060,6 +2129,38 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+ }
+
+ inode = mapping->host;
++
++/* RSBAC */
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "calling ADF for DEV / FILE\n");
++ if(S_ISBLK(inode->i_mode)) {
++ rsbac_target = T_DEV;
++ rsbac_target_id.dev.type = D_block;
++ rsbac_target_id.dev.major = RSBAC_MAJOR(inode->i_rdev);
++ rsbac_target_id.dev.minor = RSBAC_MINOR(inode->i_rdev);
++ } else if(S_ISREG(inode->i_mode)) {
++ rsbac_target = T_FILE;
++ rsbac_target_id.file.device = swap_file->f_dentry->d_sb->s_dev;
++ rsbac_target_id.file.inode = inode->i_ino;
++ rsbac_target_id.file.dentry_p = swap_file->f_dentry;
++ } else {
++ rsbac_target = T_NONE;
++ rsbac_target_id.dummy = 0;
++ }
++ rsbac_attribute_value.dummy = 0;
++ if( (rsbac_target != T_NONE)
++ && !rsbac_adf_request(R_ADD_TO_KERNEL,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)
++ ) {
++ error = -EPERM;
++ goto bad_swap;
++ }
++#endif
++
+ /* If S_ISREG(inode->i_mode) will do mutex_lock(&inode->i_mutex); */
+ error = claim_swapfile(p, inode);
+ if (unlikely(error))
+diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
+index 0a942fb..751747b 100644
+--- a/net/bridge/br_if.c
++++ b/net/bridge/br_if.c
+@@ -24,6 +24,8 @@
+ #include <linux/slab.h>
+ #include <net/sock.h>
+
++#include <rsbac/hooks.h>
++
+ #include "br_private.h"
+
+ /*
+@@ -323,6 +325,11 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
+ int err = 0;
+ bool changed_addr;
+
++#ifdef CONFIG_RSBAC_NET
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ /* Don't allow bridging non-ethernet like devices */
+ if ((dev->flags & IFF_LOOPBACK) ||
+ dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
+@@ -337,6 +344,34 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
+ if (br_port_exists(dev))
+ return -EBUSY;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_pr_debug(aef, "calling ADF\n");
++ strncpy(rsbac_target_id.netdev, dev->name, RSBAC_IFNAMSIZ);
++ rsbac_target_id.netdev[RSBAC_IFNAMSIZ] = 0;
++#ifndef CONFIG_RSBAC_NET_DEV_VIRT
++ {
++ char * p = rsbac_target_id.netdev;
++ while (*p) {
++ if (*p == ':') {
++ *p=' ';
++ break;
++ }
++ p++;
++ }
++ }
++#endif
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_NETDEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ /* No bridging devices that dislike that (e.g. wireless) */
+ if (dev->priv_flags & IFF_DONT_BRIDGE)
+ return -EOPNOTSUPP;
+@@ -422,10 +457,45 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
+ struct net_bridge_port *p;
+ bool changed_addr;
+
++#ifdef CONFIG_RSBAC_NET
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ p = br_port_get_rtnl(dev);
+ if (!p || p->br != br)
+ return -EINVAL;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_pr_debug(aef, "calling ADF\n");
++ strncpy(rsbac_target_id.netdev, dev->name, RSBAC_IFNAMSIZ);
++ rsbac_target_id.netdev[RSBAC_IFNAMSIZ] = 0;
++#ifndef CONFIG_RSBAC_NET_DEV_VIRT
++ {
++ char * p = rsbac_target_id.netdev;
++ while (*p)
++ {
++ if (*p == ':')
++ {
++ *p=' ';
++ break;
++ }
++ p++;
++ }
++ }
++#endif
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_NETDEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ del_nbp(p);
+
+ spin_lock_bh(&br->lock);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 99e1d75..cb9462a 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -136,6 +136,8 @@
+ #include <linux/static_key.h>
+ #include <net/flow_keys.h>
+
++#include <rsbac/hooks.h>
++
+ #include "net-sysfs.h"
+
+ /* Instead of increasing this, you should create a hash table. */
+@@ -4989,6 +4991,11 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+ int ret;
+ char *colon;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ /* One special case: SIOCGIFCONF takes ifconf argument
+ and requires shared lock, because it sleeps writing
+ to user space.
+@@ -5008,10 +5015,20 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+
+ ifr.ifr_name[IFNAMSIZ-1] = 0;
+
++#ifdef CONFIG_RSBAC_NET_DEV_VIRT
++ strncpy(rsbac_target_id.netdev, ifr.ifr_name, RSBAC_IFNAMSIZ);
++ rsbac_target_id.netdev[RSBAC_IFNAMSIZ] = 0;
++#endif
++
+ colon = strchr(ifr.ifr_name, ':');
+ if (colon)
+ *colon = 0;
+
++#if defined(CONFIG_RSBAC_NET_DEV) && !defined(CONFIG_RSBAC_NET_DEV_VIRT)
++ strncpy(rsbac_target_id.netdev, ifr.ifr_name, RSBAC_IFNAMSIZ);
++ rsbac_target_id.netdev[RSBAC_IFNAMSIZ] = 0;
++#endif
++
+ /*
+ * See which interface the caller is talking about.
+ */
+@@ -5031,6 +5048,21 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+ case SIOCGIFMAP:
+ case SIOCGIFINDEX:
+ case SIOCGIFTXQLEN:
++
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_NETDEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ dev_load(net, ifr.ifr_name);
+ rcu_read_lock();
+ ret = dev_ifsioc_locked(net, &ifr, cmd);
+@@ -5108,6 +5140,21 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+ case SIOCSHWTSTAMP:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
++
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_NETDEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ /* fall through */
+ case SIOCBONDSLAVEINFOQUERY:
+ case SIOCBONDINFOQUERY:
+@@ -5133,6 +5180,21 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+ if (cmd == SIOCWANDEV ||
+ (cmd >= SIOCDEVPRIVATE &&
+ cmd <= SIOCDEVPRIVATE + 15)) {
++
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_NETDEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ dev_load(net, ifr.ifr_name);
+ rtnl_lock();
+ ret = dev_ifsioc(net, &ifr, cmd);
+diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
+index c02e63c..8cde146 100644
+--- a/net/core/fib_rules.c
++++ b/net/core/fib_rules.c
+@@ -16,6 +16,7 @@
+ #include <net/net_namespace.h>
+ #include <net/sock.h>
+ #include <net/fib_rules.h>
++#include <rsbac/hooks.h>
+
+ int fib_default_rule_add(struct fib_rules_ops *ops,
+ u32 pref, u32 table, u32 flags)
+@@ -272,6 +273,10 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+ struct fib_rule *rule, *r, *last = NULL;
+ struct nlattr *tb[FRA_MAX+1];
+ int err = -EINVAL, unresolved = 0;
++#ifdef CONFIG_RSBAC_NET
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
+ goto errout;
+@@ -290,6 +295,22 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+ if (err < 0)
+ goto errout;
+
++#ifdef CONFIG_RSBAC_NET
++ rsbac_pr_debug(aef, "fib_nl_newrule(): calling ADF\n");
++ rsbac_target_id.scd = ST_network;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto errout;
++ }
++#endif
++
+ rule = kzalloc(ops->rule_size, GFP_KERNEL);
+ if (rule == NULL) {
+ err = -ENOMEM;
+@@ -421,6 +442,10 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+ struct fib_rule *rule, *tmp;
+ struct nlattr *tb[FRA_MAX+1];
+ int err = -EINVAL;
++#ifdef CONFIG_RSBAC_NET
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
+ goto errout;
+@@ -439,6 +464,22 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+ if (err < 0)
+ goto errout;
+
++#ifdef CONFIG_RSBAC_NET
++ rsbac_pr_debug(aef, "fib_nl_delrule(): calling ADF\n");
++ rsbac_target_id.scd = ST_network;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto errout;
++ }
++#endif
++
+ list_for_each_entry(rule, &ops->rules_list, list) {
+ if (frh->action && (frh->action != rule->action))
+ continue;
+@@ -534,6 +575,25 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
+ {
+ struct nlmsghdr *nlh;
+ struct fib_rule_hdr *frh;
++#ifdef CONFIG_RSBAC_NET
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC_NET
++ rsbac_pr_debug(aef, "fib_nl_fill_rule(): calling ADF\n");
++ rsbac_target_id.scd = ST_network;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
+
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
+ if (nlh == NULL)
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index 18d9b81..8feb23b 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -117,6 +117,8 @@
+
+ #include <linux/netfilter_arp.h>
+
++#include <rsbac/hooks.h>
++
+ /*
+ * Interface to generic neighbour cache.
+ */
+@@ -1168,15 +1170,32 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+ struct arpreq r;
+ struct net_device *dev = NULL;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ enum rsbac_adf_request_t rsbac_request = R_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ switch (cmd) {
+ case SIOCDARP:
+ case SIOCSARP:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
++
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_request = R_MODIFY_SYSTEM_DATA;
++#endif
++
+ case SIOCGARP:
+ err = copy_from_user(&r, arg, sizeof(struct arpreq));
+ if (err)
+ return -EFAULT;
++
++#ifdef CONFIG_RSBAC_NET_DEV
++ if (rsbac_request == R_NONE)
++ rsbac_request = R_GET_STATUS_DATA;
++#endif
++
+ break;
+ default:
+ return -EINVAL;
+@@ -1204,6 +1223,24 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+ err = -EINVAL;
+ if ((r.arp_flags & ATF_COM) && r.arp_ha.sa_family != dev->type)
+ goto out;
++
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_pr_debug(aef, "calling ADF\n");
++ strncpy(rsbac_target_id.netdev, r.arp_dev, RSBAC_IFNAMSIZ);
++ rsbac_target_id.netdev[RSBAC_IFNAMSIZ] = 0;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(rsbac_request,
++ task_pid(current),
++ T_NETDEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++#endif
++
+ } else if (cmd == SIOCGARP) {
+ err = -ENODEV;
+ goto out;
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index 6e447ff..682c211 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -65,6 +65,8 @@
+
+ #include "fib_lookup.h"
+
++#include <rsbac/hooks.h>
++
+ static struct ipv4_devconf ipv4_devconf = {
+ .data = {
+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
+@@ -540,6 +542,11 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
+ struct in_ifaddr *ifa, **ifap;
+ int err = -EINVAL;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ ASSERT_RTNL();
+
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
+@@ -553,6 +560,37 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
+ goto errout;
+ }
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_pr_debug(aef, "calling ADF\n");
++ strncpy(rsbac_target_id.netdev, in_dev->dev->name, RSBAC_IFNAMSIZ);
++ rsbac_target_id.netdev[RSBAC_IFNAMSIZ] = 0;
++#ifndef CONFIG_RSBAC_NET_DEV_VIRT
++ {
++ char * p = rsbac_target_id.netdev;
++
++ while (*p)
++ {
++ if (*p == ':')
++ {
++ *p=' ';
++ break;
++ }
++ p++;
++ }
++ }
++#endif
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_BIND,
++ task_pid(current),
++ T_NETDEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto errout;
++ }
++#endif
++
+ for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
+ ifap = &ifa->ifa_next) {
+ if (tb[IFA_LOCAL] &&
+@@ -585,6 +623,11 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh)
+ struct in_device *in_dev;
+ int err;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
+ if (err < 0)
+ goto errout;
+@@ -604,6 +647,37 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh)
+ if (in_dev == NULL)
+ goto errout;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_pr_debug(aef, "calling ADF\n");
++ strncpy(rsbac_target_id.netdev, in_dev->dev->name, RSBAC_IFNAMSIZ);
++ rsbac_target_id.netdev[RSBAC_IFNAMSIZ] = 0;
++#ifndef CONFIG_RSBAC_NET_DEV_VIRT
++ {
++ char * p = rsbac_target_id.netdev;
++ while (*p)
++ {
++ if (*p == ':')
++ {
++ *p=' ';
++ break;
++ }
++ p++;
++ }
++ }
++#endif
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_BIND,
++ task_pid(current),
++ T_NETDEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto errout;
++ }
++#endif
++
+ ifa = inet_alloc_ifa();
+ if (ifa == NULL)
+ /*
+@@ -694,6 +768,12 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+ int ret = -EFAULT;
+ int tryaddrmatch = 0;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ enum rsbac_adf_request_t rsbac_request = R_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ /*
+ * Fetch the caller's info block into kernel space
+ */
+@@ -702,6 +782,11 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+ goto out;
+ ifr.ifr_name[IFNAMSIZ - 1] = 0;
+
++#ifdef CONFIG_RSBAC_NET_DEV_VIRT
++ strncpy(rsbac_target_id.netdev, ifr.ifr_name, RSBAC_IFNAMSIZ);
++ rsbac_target_id.netdev[RSBAC_IFNAMSIZ] = 0;
++#endif
++
+ /* save original address for comparison */
+ memcpy(&sin_orig, sin, sizeof(*sin));
+
+@@ -709,6 +794,11 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+ if (colon)
+ *colon = 0;
+
++#if defined(CONFIG_RSBAC_NET_DEV) && !defined(CONFIG_RSBAC_NET_DEV_VIRT)
++ strncpy(rsbac_target_id.netdev, ifr.ifr_name, RSBAC_IFNAMSIZ);
++ rsbac_target_id.netdev[RSBAC_IFNAMSIZ] = 0;
++#endif
++
+ dev_load(net, ifr.ifr_name);
+
+ switch (cmd) {
+@@ -723,12 +813,19 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+ tryaddrmatch = (sin_orig.sin_family == AF_INET);
+ memset(sin, 0, sizeof(*sin));
+ sin->sin_family = AF_INET;
++
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_request = R_GET_STATUS_DATA;
++#endif
+ break;
+
+ case SIOCSIFFLAGS:
+ ret = -EACCES;
+ if (!capable(CAP_NET_ADMIN))
+ goto out;
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_request = R_MODIFY_SYSTEM_DATA;
++#endif
+ break;
+ case SIOCSIFADDR: /* Set interface address (and family) */
+ case SIOCSIFBRDADDR: /* Set the broadcast address */
+@@ -740,6 +837,9 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+ ret = -EINVAL;
+ if (sin->sin_family != AF_INET)
+ goto out;
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_request = R_BIND;
++#endif
+ break;
+ default:
+ ret = -EINVAL;
+@@ -753,6 +853,21 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+ if (!dev)
+ goto done;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(rsbac_request,
++ task_pid(current),
++ T_NETDEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ ret = -EPERM;
++ goto done;
++ }
++#endif
++
+ if (colon)
+ *colon = ':';
+
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index cbe3a68..bb1323f 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -45,6 +45,8 @@
+ #include <net/rtnetlink.h>
+ #include <net/xfrm.h>
+
++#include <rsbac/hooks.h>
++
+ #ifndef CONFIG_IP_MULTIPLE_TABLES
+
+ static int __net_init fib4_rules_init(struct net *net)
+@@ -433,6 +435,11 @@ int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+ struct rtentry rt;
+ int err;
+
++#ifdef CONFIG_RSBAC_NET
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ switch (cmd) {
+ case SIOCADDRT: /* Add a route */
+ case SIOCDELRT: /* Delete a route */
+@@ -442,6 +449,20 @@ int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+ if (copy_from_user(&rt, arg, sizeof(rt)))
+ return -EFAULT;
+
++#ifdef CONFIG_RSBAC_NET
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_network;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ rtnl_lock();
+ err = rtentry_to_fib_config(net, cmd, &rt, &cfg);
+ if (err == 0) {
+@@ -561,10 +582,30 @@ static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *ar
+ struct fib_table *tb;
+ int err;
+
++#ifdef CONFIG_RSBAC_NET
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ err = rtm_to_fib_config(net, skb, nlh, &cfg);
+ if (err < 0)
+ goto errout;
+
++#ifdef CONFIG_RSBAC_NET
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_network;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto errout;
++ }
++#endif
++
+ tb = fib_get_table(net, cfg.fc_table);
+ if (tb == NULL) {
+ err = -ESRCH;
+@@ -583,10 +624,30 @@ static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *ar
+ struct fib_table *tb;
+ int err;
+
++#ifdef CONFIG_RSBAC_NET
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ err = rtm_to_fib_config(net, skb, nlh, &cfg);
+ if (err < 0)
+ goto errout;
+
++#ifdef CONFIG_RSBAC_NET
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_network;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto errout;
++ }
++#endif
++
+ tb = fib_new_table(net, cfg.fc_table);
+ if (tb == NULL) {
+ err = -ENOBUFS;
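Illustrative sketch, not a hunk of 4500_patch-linux-3.4.1-rsbac-1.4.6.diff: the devinet.c and
fib_frontend.c hunks above repeat one hook pattern for privileged network configuration -
build a target id, ask the RSBAC decision facility (ADF), and fail with -EPERM when the
request is not granted. A minimal consolidated sketch, assuming only the declarations this
patch pulls in via <rsbac/hooks.h> (the patch treats a zero return from rsbac_adf_request()
as NOT_GRANTED):

	#ifdef CONFIG_RSBAC_NET
		union rsbac_target_id_t rsbac_target_id;
		union rsbac_attribute_value_t rsbac_attribute_value;

		/* routing changes are modelled as writes to the "network" SCD object */
		rsbac_target_id.scd = ST_network;
		rsbac_attribute_value.dummy = 0;
		if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA, /* R_GET_STATUS_DATA on read-only paths */
				       task_pid(current),
				       T_SCD,
				       rsbac_target_id,
				       A_none,
				       rsbac_attribute_value))
			return -EPERM;	/* ADF denied the request */
	#endif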
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index 8f8db72..07324a4 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -35,6 +35,8 @@
+ #include <linux/inet_diag.h>
+ #include <linux/sock_diag.h>
+
++#include <rsbac/hooks.h>
++
+ static const struct inet_diag_handler **inet_diag_table;
+
+ struct inet_diag_entry {
+@@ -943,6 +945,11 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
+
+ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
+ {
++#ifdef CONFIG_RSBAC_NET
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ int hdrlen = sizeof(struct inet_diag_req);
+
+ if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
+@@ -953,6 +960,19 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
+ if (nlmsg_attrlen(nlh, hdrlen)) {
+ struct nlattr *attr;
+
++#ifdef CONFIG_RSBAC_NET
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_network;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ return -EPERM;
++#endif
++
+ attr = nlmsg_find_attr(nlh, hdrlen,
+ INET_DIAG_REQ_BYTECODE);
+ if (attr == NULL ||
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index 960fbfc..a2dd277 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -66,6 +66,8 @@
+ #include <net/netlink.h>
+ #include <net/fib_rules.h>
+
++#include <rsbac/hooks.h>
++
+ #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
+ #define CONFIG_IP_PIMSM 1
+ #endif
+@@ -1203,9 +1205,28 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
+ return -ENOENT;
+
+ if (optname != MRT_INIT) {
++#ifdef CONFIG_RSBAC_NET_DEV
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (sk != rcu_access_pointer(mrt->mroute_sk) &&
+ !capable(CAP_NET_ADMIN))
+ return -EACCES;
++
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_network;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
+ }
+
+ switch (optname) {
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index 24e556e..27c35d3 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -30,6 +30,8 @@
+ #include <net/netfilter/nf_log.h>
+ #include "../../netfilter/xt_repldata.h"
+
++#include <rsbac/hooks.h>
++
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
+ MODULE_DESCRIPTION("IPv4 packet filter");
+@@ -1137,6 +1139,24 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
+ struct ipt_get_entries get;
+ struct xt_table *t;
+
++#ifdef CONFIG_RSBAC_NET
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_firewall;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ if (*len < sizeof(get)) {
+ duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
+ return -EINVAL;
+@@ -1847,9 +1867,29 @@ compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
+ {
+ int ret;
+
++#ifdef CONFIG_RSBAC_NET
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
++#ifdef CONFIG_RSBAC_NET
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_firewall;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ switch (cmd) {
+ case IPT_SO_SET_REPLACE:
+ ret = compat_do_replace(sock_net(sk), user, len);
+@@ -1916,6 +1956,26 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
+ struct compat_ipt_get_entries get;
+ struct xt_table *t;
+
++#ifdef CONFIG_RSBAC_NET
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC_NET
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_firewall;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ if (*len < sizeof(get)) {
+ duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
+ return -EINVAL;
+@@ -1984,9 +2044,29 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
+ {
+ int ret;
+
++#ifdef CONFIG_RSBAC_NET
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
++#ifdef CONFIG_RSBAC_NET
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_firewall;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++
+ switch (cmd) {
+ case IPT_SO_SET_REPLACE:
+ ret = do_replace(sock_net(sk), user, len);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 167ea10..00fb09b 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -112,6 +112,8 @@
+ #endif
+ #include <net/secure_seq.h>
+
++#include <rsbac/hooks.h>
++
+ #define RT_FL_TOS(oldflp4) \
+ ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
+
+@@ -3072,6 +3074,26 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
+ int mark;
+ struct sk_buff *skb;
+
++#ifdef CONFIG_RSBAC_NET
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC_NET
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_network;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto errout;
++ }
++#endif
++
+ err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
+ if (err < 0)
+ goto errout;
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index a69d44f..0ad5d09 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -31,6 +31,8 @@
+ #include <net/pkt_sched.h>
+ #include <net/pkt_cls.h>
+
++#include <rsbac/hooks.h>
++
+ /* The list of all installed classifier types */
+
+ static struct tcf_proto_ops *tcf_proto_base __read_mostly;
+@@ -139,6 +141,28 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
+ int err;
+ int tp_created = 0;
+
++#ifdef CONFIG_RSBAC_NET
++ enum rsbac_adf_request_t rsbac_request;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ if (n->nlmsg_type == RTM_GETTFILTER)
++ rsbac_request = R_GET_STATUS_DATA;
++ else
++ rsbac_request = R_MODIFY_SYSTEM_DATA;
++ rsbac_target_id.scd = ST_network;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(rsbac_request,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ replay:
+ t = NLMSG_DATA(n);
+ protocol = TC_H_MIN(t->tcm_info);
+@@ -422,6 +446,25 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
+ const struct Qdisc_class_ops *cops;
+ struct tcf_dump_args arg;
+
++#ifdef CONFIG_RSBAC_NET
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC_NET
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_network;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
+ return skb->len;
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 3d8981f..7d7c75f 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -35,6 +35,8 @@
+ #include <net/netlink.h>
+ #include <net/pkt_sched.h>
+
++#include <rsbac/hooks.h>
++
+ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
+ struct nlmsghdr *n, u32 clid,
+ struct Qdisc *old, struct Qdisc *new);
+@@ -980,10 +982,43 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
+ struct Qdisc *p = NULL;
+ int err;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ enum rsbac_adf_request_t rsbac_request;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#if !defined(CONFIG_RSBAC_NET_DEV_VIRT)
++ char * rsbac_colon;
++#endif
++#endif
++
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
+ return -ENODEV;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_pr_debug(aef, "calling ADF\n");
++ if (n->nlmsg_type == RTM_DELQDISC)
++ rsbac_request = R_MODIFY_SYSTEM_DATA;
++ else
++ rsbac_request = R_GET_STATUS_DATA;
++ strncpy(rsbac_target_id.netdev, dev->name, RSBAC_IFNAMSIZ);
++ rsbac_target_id.netdev[RSBAC_IFNAMSIZ] = 0;
++#if !defined(CONFIG_RSBAC_NET_DEV_VIRT)
++ rsbac_colon = strchr(rsbac_target_id.netdev, ':');
++ if (rsbac_colon)
++ *rsbac_colon = 0;
++#endif
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(rsbac_request,
++ task_pid(current),
++ T_NETDEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
+ if (err < 0)
+ return err;
+@@ -1043,6 +1078,13 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
+ struct Qdisc *q, *p;
+ int err;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#if !defined(CONFIG_RSBAC_NET_DEV_VIRT)
++ char * rsbac_colon;
++#endif
++#endif
+ replay:
+ /* Reinit, just in case something touches this. */
+ tcm = NLMSG_DATA(n);
+@@ -1053,6 +1095,26 @@ replay:
+ if (!dev)
+ return -ENODEV;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_pr_debug(aef, "tc_modify_qdisc(): calling ADF\n");
++ strncpy(rsbac_target_id.netdev, dev->name, RSBAC_IFNAMSIZ);
++ rsbac_target_id.netdev[RSBAC_IFNAMSIZ] = 0;
++#if !defined(CONFIG_RSBAC_NET_DEV_VIRT)
++ rsbac_colon = strchr(rsbac_target_id.netdev, ':');
++ if (rsbac_colon)
++ *rsbac_colon = 0;
++#endif
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_NETDEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
+ if (err < 0)
+ return err;
+@@ -1317,6 +1379,13 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
+ int s_idx, s_q_idx;
+ struct net_device *dev;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#if !defined(CONFIG_RSBAC_NET_DEV_VIRT)
++ char * rsbac_colon;
++#endif
++#endif
+ s_idx = cb->args[0];
+ s_q_idx = q_idx = cb->args[1];
+
+@@ -1329,6 +1398,27 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
+ goto cont;
+ if (idx > s_idx)
+ s_q_idx = 0;
++
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_pr_debug(aef, "tc_dump_qdisc(): calling ADF\n");
++ strncpy(rsbac_target_id.netdev, dev->name, RSBAC_IFNAMSIZ);
++ rsbac_target_id.netdev[RSBAC_IFNAMSIZ] = 0;
++#if !defined(CONFIG_RSBAC_NET_DEV_VIRT)
++ rsbac_colon = strchr(rsbac_target_id.netdev, ':');
++		if (rsbac_colon)
++ *rsbac_colon = 0;
++#endif
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_NETDEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ continue;
++ }
++#endif
++
+ q_idx = 0;
+
+ if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
+@@ -1376,10 +1466,38 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
+ u32 qid = TC_H_MAJ(clid);
+ int err;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#if !defined(CONFIG_RSBAC_NET_DEV_VIRT)
++ char * rsbac_colon;
++#endif
++#endif
++
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
+ return -ENODEV;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_pr_debug(aef, "tc_ctl_tclass(): calling ADF\n");
++ strncpy(rsbac_target_id.netdev, dev->name, RSBAC_IFNAMSIZ);
++ rsbac_target_id.netdev[RSBAC_IFNAMSIZ] = 0;
++#if !defined(CONFIG_RSBAC_NET_DEV_VIRT)
++ rsbac_colon = strchr(rsbac_target_id.netdev, ':');
++ if (rsbac_colon)
++ *rsbac_colon = 0;
++#endif
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_NETDEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#endif
++
+ err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
+ if (err < 0)
+ return err;
+@@ -1619,12 +1737,40 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
+ struct net_device *dev;
+ int t, s_t;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#if !defined(CONFIG_RSBAC_NET_DEV_VIRT)
++ char * rsbac_colon;
++#endif
++#endif
+ if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
+ return 0;
+ dev = dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
+ return 0;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_pr_debug(aef, "calling ADF\n");
++ strncpy(rsbac_target_id.netdev, dev->name, RSBAC_IFNAMSIZ);
++ rsbac_target_id.netdev[RSBAC_IFNAMSIZ] = 0;
++#if !defined(CONFIG_RSBAC_NET_DEV_VIRT)
++ rsbac_colon = strchr(rsbac_target_id.netdev, ':');
++ if (rsbac_colon)
++ *rsbac_colon = 0;
++#endif
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_NETDEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value)) {
++ dev_put(dev);
++ return -EPERM;
++ }
++#endif
++
+ s_t = cb->args[0];
+ t = 0;
+
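Illustrative sketch, not a hunk of 4500_patch-linux-3.4.1-rsbac-1.4.6.diff: the sch_api.c hunks
above target the network device itself (T_NETDEV) rather than an SCD object. The recurring
variant, assuming only what this patch provides through <rsbac/hooks.h>; dev stands for the
struct net_device of the surrounding kernel function, and without CONFIG_RSBAC_NET_DEV_VIRT
the alias suffix after ':' is stripped so the check applies to the real interface:

	#ifdef CONFIG_RSBAC_NET_DEV
		union rsbac_target_id_t rsbac_target_id;
		union rsbac_attribute_value_t rsbac_attribute_value;
	#if !defined(CONFIG_RSBAC_NET_DEV_VIRT)
		char *rsbac_colon;
	#endif

		/* name-based NETDEV target id, always NUL terminated */
		strncpy(rsbac_target_id.netdev, dev->name, RSBAC_IFNAMSIZ);
		rsbac_target_id.netdev[RSBAC_IFNAMSIZ] = 0;
	#if !defined(CONFIG_RSBAC_NET_DEV_VIRT)
		rsbac_colon = strchr(rsbac_target_id.netdev, ':');
		if (rsbac_colon)
			*rsbac_colon = 0;	/* check the base device, not the alias */
	#endif
		rsbac_attribute_value.dummy = 0;
		if (!rsbac_adf_request(R_GET_STATUS_DATA,	/* R_MODIFY_SYSTEM_DATA for changes */
				       task_pid(current),
				       T_NETDEV,
				       rsbac_target_id,
				       A_none,
				       rsbac_attribute_value))
			return -EPERM;
	#endif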
+diff --git a/net/socket.c b/net/socket.c
+index 851edcd..285bfba 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -89,6 +89,14 @@
+ #include <linux/magic.h>
+ #include <linux/slab.h>
+
++#ifdef CONFIG_RSBAC
++#include <net/af_unix.h>
++#include <net/scm.h>
++#include <rsbac/hooks.h>
++#define rsbac_unix_peer(sk) (unix_sk(sk)->peer)
++#define rsbac_unix_sk_peer(sk) (unix_sk(unix_sk(sk)->peer))
++#endif
++
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+
+@@ -548,6 +556,49 @@ static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t size)
+ {
+ struct sock_iocb *si = kiocb_to_siocb(iocb);
++ int err;
++
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ enum rsbac_attribute_t rsbac_attribute = A_none;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ rsbac_pr_debug(aef, "[sys_send(), sys_sendto(), sys_sendmsg()]: calling ADF\n");
++ if (sock->ops->family != AF_UNIX) {
++#if !defined(CONFIG_RSBAC_NET_OBJ_RW)
++		if (sock->type != SOCK_STREAM)
++#endif
++ {
++ rsbac_target = T_NETOBJ;
++ rsbac_target_id.netobj.sock_p = sock;
++ rsbac_target_id.netobj.local_addr = NULL;
++ rsbac_target_id.netobj.local_len = 0;
++ rsbac_target_id.netobj.remote_addr = msg->msg_name;
++ rsbac_target_id.netobj.remote_len = msg->msg_namelen;
++ if ( sock->sk->sk_peer_pid
++ && (rsbac_attribute_value.process = sock->sk->sk_peer_pid)
++ && (pid_nr(rsbac_attribute_value.process) > 0)
++ ) {
++ rsbac_attribute = A_process;
++ } else {
++ rsbac_attribute = A_sock_type;
++ rsbac_attribute_value.sock_type = sock->type;
++ }
++ if (!rsbac_adf_request(R_SEND,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value)) {
++ return -EPERM;
++ }
++ }
++ }
++#endif
+
+ sock_update_classid(sock->sk);
+
+@@ -558,7 +609,26 @@ static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
+ si->msg = msg;
+ si->size = size;
+
+- return sock->ops->sendmsg(iocb, sock, msg, size);
++ err = sock->ops->sendmsg(iocb, sock, msg, size);
++
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ if (!err && (rsbac_target != T_NONE)) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_SEND,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sock_sendmsg() [sys_send(), sys_sendto(), sys_sendmsg()]: rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++
++ return err;
+ }
+
+ static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
+@@ -729,8 +799,72 @@ static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock,
+ static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t size, int flags)
+ {
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ enum rsbac_attribute_t rsbac_attribute = A_none;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ int err = security_socket_recvmsg(sock, msg, size, flags);
+
++
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ if (err > 0) {
++ rsbac_pr_debug(aef, "[sys_recv(), sys_recvfrom(), sys_recvmsg()]: calling ADF\n");
++ if (sock->ops->family != AF_UNIX) {
++#if !defined(CONFIG_RSBAC_NET_OBJ_RW)
++ if (sock->type != SOCK_STREAM)
++#endif
++ {
++ rsbac_target = T_NETOBJ;
++ rsbac_target_id.netobj.sock_p = sock;
++ rsbac_target_id.netobj.local_addr = NULL;
++ rsbac_target_id.netobj.local_len = 0;
++ rsbac_target_id.netobj.remote_addr = msg->msg_name;
++ rsbac_target_id.netobj.remote_len = msg->msg_namelen;
++ if ( sock->sk->sk_peer_pid
++ && (rsbac_attribute_value.process = sock->sk->sk_peer_pid)
++ && (pid_nr(rsbac_attribute_value.process) > 0)
++ ) {
++ rsbac_attribute = A_process;
++ } else {
++ rsbac_attribute = A_sock_type;
++ rsbac_attribute_value.sock_type = sock->type;
++ }
++ if (!rsbac_adf_request(R_RECEIVE,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value)) {
++ /* clear buffer */
++ memset(msg->msg_iov->iov_base - err, 0, err);
++ return -EPERM;
++ }
++ }
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ if ((err > 0) && (rsbac_target != T_NONE)) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_RECEIVE,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sock_recvmsg() [sys_recv(), sys_recvfrom(), sys_recvmsg()]: rsbac_adf_set_attr() for RECEIVE returned error\n");
++ }
++ }
++#endif
++
+ return err ?: __sock_recvmsg_nosec(iocb, sock, msg, size, flags);
+ }
+
+@@ -1325,6 +1459,13 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
+ struct socket *sock;
+ int flags;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ /* Check the SOCK_* constants for consistency. */
+ BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC);
+ BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK);
+@@ -1343,10 +1484,58 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
+ if (retval < 0)
+ goto out;
+
++#ifdef CONFIG_RSBAC
++ if (family == AF_UNIX) {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = SOCK_INODE(sock)->i_ino;
++ }
++#ifdef CONFIG_RSBAC_NET_OBJ
++ else {
++ rsbac_pr_debug(aef, "[sys_socket()]: calling ADF\n");
++ rsbac_target = T_NETOBJ;
++ rsbac_target_id.netobj.sock_p = sock;
++ rsbac_target_id.netobj.local_addr = NULL;
++ rsbac_target_id.netobj.local_len = 0;
++ rsbac_target_id.netobj.remote_addr = NULL;
++ rsbac_target_id.netobj.remote_len = 0;
++ }
++#endif
++ rsbac_attribute_value.sock_type = type;
++ if ((rsbac_target != T_NONE)
++ && !rsbac_adf_request(R_CREATE,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_sock_type,
++ rsbac_attribute_value)) {
++ rsbac_pr_debug(aef, "[sys_socket()]: ADF returned NOT_GRANTED\n");
++ retval = -EPERM;
++ goto out_release;
++ }
++#endif
++
+ retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
+ if (retval < 0)
+ goto out_release;
+
++#ifdef CONFIG_RSBAC
++	if (rsbac_target != T_NONE) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_CREATE,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_sock_type,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_socket(): rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++
+ out:
+ /* It may be already another descriptor 8) Not kernel problem. */
+ return retval;
+@@ -1368,6 +1557,12 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
+ struct file *newfile1, *newfile2;
+ int flags;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ flags = type & ~SOCK_TYPE_MASK;
+ if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
+ return -EINVAL;
+@@ -1385,10 +1580,43 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
+ if (err < 0)
+ goto out;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_socketcall()]: calling ADF\n");
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = SOCK_INODE(sock1)->i_ino;
++ rsbac_attribute_value.sock_type = type;
++ if (!rsbac_adf_request(R_CREATE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_sock_type,
++ rsbac_attribute_value)) {
++ rsbac_pr_debug(aef, "[sys_socketcall()]: ADF returned NOT_GRANTED\n");
++ err = -EPERM;
++ goto out_release_1;
++ }
++#endif
++
+ err = sock_create(family, type, protocol, &sock2);
+ if (err < 0)
+ goto out_release_1;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_socketcall()]: calling ADF\n");
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = SOCK_INODE(sock2)->i_ino;
++ if (!rsbac_adf_request(R_CREATE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_sock_type,
++ rsbac_attribute_value)) {
++ rsbac_pr_debug(aef, "[sys_socketcall()]: ADF returned NOT_GRANTED\n");
++ err = -EPERM;
++ goto out_release_both;
++ }
++#endif
++
+ err = sock1->ops->socketpair(sock1, sock2);
+ if (err < 0)
+ goto out_release_both;
+@@ -1418,8 +1646,39 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
+ err = put_user(fd1, &usockvec[0]);
+ if (!err)
+ err = put_user(fd2, &usockvec[1]);
+- if (!err)
++
++ if (!err) {
++#ifdef CONFIG_RSBAC
++ rsbac_target_id.ipc.id.id_nr = SOCK_INODE(sock1)->i_ino;
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_CREATE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_sock_type,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_socketpair() [sys_socketcall()]: rsbac_adf_set_attr() for sock1 returned error\n");
++ }
++ rsbac_target_id.ipc.id.id_nr = SOCK_INODE(sock2)->i_ino;
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_CREATE,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_sock_type,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_socketpair() [sys_socketcall()]: rsbac_adf_set_attr() for sock2 returned error\n");
++ }
++#endif
++
+ return 0;
++ }
+
+ sys_close(fd2);
+ sys_close(fd1);
+@@ -1447,10 +1706,38 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
+ struct sockaddr_storage address;
+ int err, fput_needed;
+
++#ifdef CONFIG_RSBAC_NET_OBJ
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock) {
+ err = move_addr_to_kernel(umyaddr, addrlen, &address);
+ if (err >= 0) {
++#ifdef CONFIG_RSBAC_NET_OBJ
++ if (sock->ops->family != AF_UNIX) {
++ rsbac_target_id.netobj.sock_p = sock;
++ rsbac_target_id.netobj.local_addr = (struct sockaddr *)&address;
++ rsbac_target_id.netobj.local_len = addrlen;
++ rsbac_target_id.netobj.remote_addr = NULL;
++ rsbac_target_id.netobj.remote_len = 0;
++ rsbac_attribute_value.sock_type = sock->type;
++				rsbac_pr_debug(aef, "[sys_socketcall()]: calling ADF\n");
++ if(!rsbac_adf_request(R_BIND,
++ task_pid(current),
++ T_NETOBJ,
++ rsbac_target_id,
++ A_sock_type,
++ rsbac_attribute_value)) {
++ rsbac_pr_debug(aef, "[sys_socketcall()]: ADF returned NOT_GRANTED\n");
++ fput_light(sock->file, fput_needed);
++ return -EPERM;
++ }
++ }
++#endif
++
+ err = security_socket_bind(sock,
+ (struct sockaddr *)&address,
+ addrlen);
+@@ -1458,6 +1745,23 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
+ err = sock->ops->bind(sock,
+ (struct sockaddr *)
+ &address, addrlen);
++
++#ifdef CONFIG_RSBAC_NET_OBJ
++ if (!err && sock->ops && (sock->ops->family != AF_UNIX)) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_BIND,
++ task_pid(current),
++ T_NETOBJ,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_sock_type,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_bind() [sys_socketcall()]: rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
+ }
+ fput_light(sock->file, fput_needed);
+ }
+@@ -1474,10 +1778,60 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
+ {
+ struct socket *sock;
+ int err, fput_needed;
++
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ int somaxconn;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock) {
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_socketcall()]: calling ADF\n");
++ if (sock->ops->family == AF_UNIX) {
++ if (sock->file
++ && sock->file->f_dentry
++ && sock->file->f_dentry->d_inode) {
++ if (sock->file->f_dentry->d_sb->s_magic == SOCKFS_MAGIC) {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = sock->file->f_dentry->d_inode->i_ino;
++ } else {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = sock->file->f_dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = sock->file->f_dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = sock->file->f_dentry;
++ }
++ }
++ }
++#ifdef CONFIG_RSBAC_NET_OBJ
++ else {
++ rsbac_target = T_NETOBJ;
++ rsbac_target_id.netobj.sock_p = sock;
++ rsbac_target_id.netobj.local_addr = NULL;
++ rsbac_target_id.netobj.local_len = 0;
++ rsbac_target_id.netobj.remote_addr = NULL;
++ rsbac_target_id.netobj.remote_len = 0;
++ }
++#endif
++ rsbac_attribute_value.sock_type = sock->type;
++ if ((rsbac_target != T_NONE)
++ && !rsbac_adf_request(R_LISTEN,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_sock_type,
++ rsbac_attribute_value)) {
++ rsbac_pr_debug(aef, "sys_listen() [sys_socketcall()]: ADF returned NOT_GRANTED\n");
++ fput_light(sock->file, fput_needed);
++ return -EPERM;
++ }
++#endif
++
+ somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
+ if ((unsigned)backlog > somaxconn)
+ backlog = somaxconn;
+@@ -1486,6 +1840,23 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
+ if (!err)
+ err = sock->ops->listen(sock, backlog);
+
++
++#ifdef CONFIG_RSBAC
++ if (!err && (rsbac_target != T_NONE)) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_LISTEN,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_sock_type,
++ rsbac_attribute_value))
++ rsbac_printk(KERN_WARNING
++ "sys_listen() [sys_socketcall()]: rsbac_adf_set_attr() returned error\n");
++ }
++#endif
++
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+@@ -1511,6 +1882,14 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
+ int err, len, newfd, fput_needed;
+ struct sockaddr_storage address;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ enum rsbac_attribute_t rsbac_attribute = A_none;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
+ return -EINVAL;
+
+@@ -1550,6 +1929,100 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
+ if (err < 0)
+ goto out_fd;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_socketcall()]: calling ADF\n");
++ if (sock->ops->family == AF_UNIX) {
++ if (sock->sk) {
++ if (unix_sk(unix_sk(sock->sk)->peer)) {
++ if (unix_sk(unix_sk(sock->sk)->peer)->path.dentry
++ && unix_sk(unix_sk(sock->sk)->peer)->path.dentry->d_inode) {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = unix_sk(unix_sk(sock->sk)->peer)->path.dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = unix_sk(unix_sk(sock->sk)->peer)->path.dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = unix_sk(unix_sk(sock->sk)->peer)->path.dentry;
++ } else {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ if (unix_sk(unix_sk(sock->sk)->peer)->path.dentry
++ && unix_sk(unix_sk(sock->sk)->peer)->path.dentry->d_inode
++ && SOCKET_I(unix_sk(unix_sk(sock->sk)->peer)->path.dentry->d_inode)->file
++ && SOCKET_I(unix_sk(unix_sk(sock->sk)->peer)->path.dentry->d_inode)->file->f_dentry
++ && SOCKET_I(unix_sk(unix_sk(sock->sk)->peer)->path.dentry->d_inode)->file->f_dentry->d_inode)
++ rsbac_target_id.ipc.id.id_nr = SOCKET_I(unix_sk(unix_sk(sock->sk)->peer)->path.dentry->d_inode)->file->f_dentry->d_inode->i_ino;
++ else
++ if (sock->file
++ && sock->file->f_dentry
++ && sock->file->f_dentry->d_inode)
++ rsbac_target_id.ipc.id.id_nr = sock->file->f_dentry->d_inode->i_ino;
++ else
++ rsbac_target_id.ipc.id.id_nr = 0;
++ }
++ } else {
++			if (unix_sk(sock->sk)->path.dentry
++ && unix_sk(sock->sk)->path.dentry->d_inode) {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = unix_sk(sock->sk)->path.dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = unix_sk(sock->sk)->path.dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = unix_sk(sock->sk)->path.dentry;
++ } else {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ if (sock->file
++ && sock->file->f_dentry
++ && sock->file->f_dentry->d_inode)
++ rsbac_target_id.ipc.id.id_nr = sock->file->f_dentry->d_inode->i_ino;
++ else
++ rsbac_target_id.ipc.id.id_nr = 0;
++ }
++ }
++ }
++ if ( sock->sk
++ && sock->sk->sk_peer_pid
++ && (rsbac_attribute_value.process = sock->sk->sk_peer_pid)
++ && (pid_nr(rsbac_attribute_value.process) > 0)
++ ) {
++ rsbac_attribute = A_process;
++ } else {
++ rsbac_attribute = A_sock_type;
++ rsbac_attribute_value.sock_type = sock->type;
++ }
++ }
++#ifdef CONFIG_RSBAC_NET_OBJ
++ else {
++ rsbac_target = T_NETOBJ;
++ rsbac_target_id.netobj.sock_p = newsock;
++ rsbac_target_id.netobj.local_addr = NULL;
++ rsbac_target_id.netobj.local_len = 0;
++		if (newsock->ops->getname(newsock, (struct sockaddr *)&address, &len, 2) < 0) {
++ rsbac_target_id.netobj.remote_addr = NULL;
++ rsbac_target_id.netobj.remote_len = 0;
++ } else {
++ rsbac_target_id.netobj.remote_addr = (struct sockaddr *)&address;
++ rsbac_target_id.netobj.remote_len = len;
++ }
++ if (sock->sk
++ && sock->sk->sk_peer_pid) {
++ rsbac_attribute = A_process;
++ rsbac_attribute_value.process = sock->sk->sk_peer_pid;
++ } else {
++ rsbac_attribute = A_sock_type;
++ rsbac_attribute_value.sock_type = sock->type;
++ }
++ }
++#endif
++ if ((rsbac_target != T_NONE)
++ && !rsbac_adf_request(R_ACCEPT,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value)) {
++ rsbac_pr_debug(aef, "[sys_socketcall()]: ADF returned NOT_GRANTED\n");
++ err = -EPERM;
++ goto out_fd;
++ }
++#endif
++
+ if (upeer_sockaddr) {
+ if (newsock->ops->getname(newsock, (struct sockaddr *)&address,
+ &len, 2) < 0) {
+@@ -1567,6 +2040,22 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
+ fd_install(newfd, newfile);
+ err = newfd;
+
++#ifdef CONFIG_RSBAC
++ if (rsbac_target != T_NONE) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_ACCEPT,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value))
++ rsbac_printk(KERN_WARNING
++ "sys_accept() [sys_socketcall()]: rsbac_adf_set_attr() returned error\n");
++ }
++#endif
++
+ out_put:
+ fput_light(sock->file, fput_needed);
+ out:
+@@ -1602,6 +2091,12 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
+ struct sockaddr_storage address;
+ int err, fput_needed;
+
++#ifdef CONFIG_RSBAC_NET_OBJ
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+@@ -1614,8 +2109,50 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
+ if (err)
+ goto out_put;
+
++ /* RSBAC UNIX socket connects get intercepted in unix/af_unix.c */
++#ifdef CONFIG_RSBAC_NET_OBJ
++ if (sock->ops->family != AF_UNIX) {
++ rsbac_pr_debug(aef, "[sys_socketcall()]: calling ADF\n");
++ rsbac_target_id.netobj.sock_p = sock;
++ rsbac_target_id.netobj.local_addr = NULL;
++ rsbac_target_id.netobj.local_len = 0;
++ rsbac_target_id.netobj.remote_addr = (struct sockaddr *)&address;
++ rsbac_target_id.netobj.remote_len = addrlen;
++ rsbac_attribute_value.sock_type = sock->type;
++ if (!rsbac_adf_request(R_CONNECT,
++ task_pid(current),
++ T_NETOBJ,
++ rsbac_target_id,
++ A_sock_type,
++ rsbac_attribute_value)) {
++ rsbac_pr_debug(aef, "[sys_socketcall()]: ADF returned NOT_GRANTED\n");
++ err = -EPERM;
++ goto out_put;
++ }
++ }
++#endif
++
+ err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen,
+ sock->file->f_flags);
++
++ /* RSBAC: notify ADF of opened socket connection */
++#ifdef CONFIG_RSBAC_NET_OBJ
++ if (!err
++ && (sock->ops->family != AF_UNIX)) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_CONNECT,
++ task_pid(current),
++ T_NETOBJ,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_sock_type,
++ rsbac_attribute_value))
++ rsbac_printk(KERN_WARNING
++ "sys_connect() [sys_socketcall()]: rsbac_adf_set_attr() returned error\n");
++ }
++#endif
++
+ out_put:
+ fput_light(sock->file, fput_needed);
+ out:
+@@ -1634,6 +2171,12 @@ SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
+ struct sockaddr_storage address;
+ int len, err, fput_needed;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+@@ -1642,6 +2185,47 @@ SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
+ if (err)
+ goto out_put;
+
++#if defined(CONFIG_RSBAC)
++ rsbac_pr_debug(aef, "calling ADF\n");
++ if (sock->ops->family == AF_UNIX) {
++ if (sock->file
++ && sock->file->f_dentry
++ && sock->file->f_dentry->d_inode) {
++ if (sock->file->f_dentry->d_sb->s_magic == SOCKFS_MAGIC) {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = sock->file->f_dentry->d_inode->i_ino;
++ } else {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = sock->file->f_dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = sock->file->f_dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = sock->file->f_dentry;
++ }
++ }
++ }
++#ifdef CONFIG_RSBAC_NET_OBJ
++ else {
++ rsbac_target = T_NETOBJ;
++ rsbac_target_id.netobj.sock_p = sock;
++ rsbac_target_id.netobj.local_addr = NULL;
++ rsbac_target_id.netobj.local_len = 0;
++ rsbac_target_id.netobj.remote_addr = NULL;
++ rsbac_target_id.netobj.remote_len = 0;
++ }
++#endif
++ rsbac_attribute_value.sock_type = sock->type;
++ if ((rsbac_target != T_NONE)
++ && !rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_sock_type,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out_put;
++ }
++#endif
++
+ err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0);
+ if (err)
+ goto out_put;
+@@ -1665,6 +2249,12 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
+ struct sockaddr_storage address;
+ int len, err, fput_needed;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock != NULL) {
+ err = security_socket_getpeername(sock);
+@@ -1673,6 +2263,47 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
+ return err;
+ }
+
++#if defined(CONFIG_RSBAC)
++ rsbac_pr_debug(aef, "calling ADF\n");
++ if (sock->ops->family == AF_UNIX) {
++ if (sock->file
++ && sock->file->f_dentry
++ && sock->file->f_dentry->d_inode) {
++ if (sock->file->f_dentry->d_sb->s_magic == SOCKFS_MAGIC) {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = sock->file->f_dentry->d_inode->i_ino;
++ } else {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = sock->file->f_dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = sock->file->f_dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = sock->file->f_dentry;
++ }
++ }
++ }
++#ifdef CONFIG_RSBAC_NET_OBJ
++ else {
++ rsbac_target = T_NETOBJ;
++ rsbac_target_id.netobj.sock_p = sock;
++ rsbac_target_id.netobj.local_addr = NULL;
++ rsbac_target_id.netobj.local_len = 0;
++ rsbac_target_id.netobj.remote_addr = NULL;
++ rsbac_target_id.netobj.remote_len = 0;
++ }
++#endif
++ rsbac_attribute_value.sock_type = sock->type;
++ if ((rsbac_target != T_NONE)
++ && !rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_sock_type,
++ rsbac_attribute_value)) {
++ fput_light(sock->file, fput_needed);
++ return -EPERM;
++ }
++#endif
++
+ err =
+ sock->ops->getname(sock, (struct sockaddr *)&address, &len,
+ 1);
+@@ -1811,6 +2442,12 @@ SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
+ int err, fput_needed;
+ struct socket *sock;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (optlen < 0)
+ return -EINVAL;
+
+@@ -1820,6 +2457,47 @@ SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
+ if (err)
+ goto out_put;
+
++#if defined(CONFIG_RSBAC)
++ rsbac_pr_debug(aef, "calling ADF\n");
++ if (sock->ops->family == AF_UNIX) {
++ if (sock->file
++ && sock->file->f_dentry
++ && sock->file->f_dentry->d_inode) {
++ if (sock->file->f_dentry->d_sb->s_magic == SOCKFS_MAGIC) {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = sock->file->f_dentry->d_inode->i_ino;
++ } else {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = sock->file->f_dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = sock->file->f_dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = sock->file->f_dentry;
++ }
++ }
++ }
++#ifdef CONFIG_RSBAC_NET_OBJ
++ else {
++ rsbac_target = T_NETOBJ;
++ rsbac_target_id.netobj.sock_p = sock;
++ rsbac_target_id.netobj.local_addr = NULL;
++ rsbac_target_id.netobj.local_len = 0;
++ rsbac_target_id.netobj.remote_addr = NULL;
++ rsbac_target_id.netobj.remote_len = 0;
++ }
++#endif
++ rsbac_attribute_value.setsockopt_level = level;
++ if ((rsbac_target != T_NONE)
++ && !rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_setsockopt_level,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out_put;
++ }
++#endif
++
+ if (level == SOL_SOCKET)
+ err =
+ sock_setsockopt(sock, level, optname, optval,
+@@ -1845,12 +2523,62 @@ SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
+ int err, fput_needed;
+ struct socket *sock;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock != NULL) {
+ err = security_socket_getsockopt(sock, level, optname);
+ if (err)
+ goto out_put;
+
++#if defined(CONFIG_RSBAC)
++ rsbac_pr_debug(aef, "calling ADF\n");
++ if (sock->ops->family == AF_UNIX) {
++ if (sock->file
++ && sock->file->f_dentry
++ && sock->file->f_dentry->d_inode
++ && sock->file->f_dentry->d_inode->i_ino
++ && sock->file->f_dentry->d_sb
++ && sock->file->f_dentry->d_sb->s_dev) {
++ if (sock->file->f_dentry->d_sb->s_magic == SOCKFS_MAGIC) {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = sock->file->f_dentry->d_inode->i_ino;
++ } else {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = sock->file->f_dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = sock->file->f_dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = sock->file->f_dentry;
++ }
++ }
++ }
++#ifdef CONFIG_RSBAC_NET_OBJ
++ else {
++ rsbac_target = T_NETOBJ;
++ rsbac_target_id.netobj.sock_p = sock;
++ rsbac_target_id.netobj.local_addr = NULL;
++ rsbac_target_id.netobj.local_len = 0;
++ rsbac_target_id.netobj.remote_addr = NULL;
++ rsbac_target_id.netobj.remote_len = 0;
++ }
++#endif
++ rsbac_attribute_value.sock_type = sock->type;
++ if ((rsbac_target != T_NONE)
++ && !rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_sock_type,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out_put;
++ }
++#endif
++
+ if (level == SOL_SOCKET)
+ err =
+ sock_getsockopt(sock, level, optname, optval,
+@@ -1874,11 +2602,83 @@ SYSCALL_DEFINE2(shutdown, int, fd, int, how)
+ int err, fput_needed;
+ struct socket *sock;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++#ifdef CONFIG_RSBAC_NET_OBJ
++ union rsbac_target_id_t rsbac_new_target_id;
++#endif
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock != NULL) {
+ err = security_socket_shutdown(sock, how);
++
++#ifdef CONFIG_RSBAC
++ if (!err) {
++ rsbac_pr_debug(aef, "[sys_socketcall()]: calling ADF\n");
++ if (sock->ops->family == AF_UNIX) {
++ if (sock->file
++ && sock->file->f_dentry
++ && sock->file->f_dentry->d_inode) {
++ if (sock->file->f_dentry->d_sb->s_magic == SOCKFS_MAGIC) {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = sock->file->f_dentry->d_inode->i_ino;
++ } else {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = sock->file->f_dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = sock->file->f_dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = sock->file->f_dentry;
++ }
++ }
++ }
++#ifdef CONFIG_RSBAC_NET_OBJ
++ else {
++ rsbac_target = T_NETOBJ;
++ rsbac_target_id.netobj.sock_p = sock;
++ rsbac_target_id.netobj.local_addr = NULL;
++ rsbac_target_id.netobj.local_len = 0;
++ rsbac_target_id.netobj.remote_addr = NULL;
++ rsbac_target_id.netobj.remote_len = 0;
++ }
++#endif
++ rsbac_attribute_value.sock_type = sock->type;
++ if ((rsbac_target != T_NONE)
++ && !rsbac_adf_request(R_NET_SHUTDOWN,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_sock_type,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ }
++ }
++#endif
++
+ if (!err)
+ err = sock->ops->shutdown(sock, how);
++
++#ifdef CONFIG_RSBAC_NET_OBJ
++ if (!err && (rsbac_target != T_NONE)) {
++ rsbac_pr_debug(aef, "calling rsbac_adf_set_attr() for NET_SHUTDOWN on netobj\n");
++ rsbac_new_target_id.dummy = 0;
++ rsbac_attribute_value.dummy = 0;
++ if (rsbac_adf_set_attr(R_NET_SHUTDOWN,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_sock_type,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "sys_shutdown(): rsbac_adf_set_attr() for NET_SHUTDOWN on socket returned error\n");
++ }
++ }
++#endif
++
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
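Illustrative sketch, not a hunk of 4500_patch-linux-3.4.1-rsbac-1.4.6.diff: the net/socket.c
hunks above pair each pre-operation rsbac_adf_request() with a post-operation
rsbac_adf_set_attr() notification once the socket call has succeeded, so the ADF can update
its attribute state. A minimal sketch of that pairing for a non-AF_UNIX socket, assuming only
the calls used throughout this patch; sock, err and the R_LISTEN request stand in for the
local variables and request type of the surrounding syscall:

	#ifdef CONFIG_RSBAC_NET_OBJ
		union rsbac_target_id_t rsbac_target_id;
		union rsbac_target_id_t rsbac_new_target_id;
		union rsbac_attribute_value_t rsbac_attribute_value;

		rsbac_target_id.netobj.sock_p = sock;
		rsbac_target_id.netobj.local_addr = NULL;
		rsbac_target_id.netobj.local_len = 0;
		rsbac_target_id.netobj.remote_addr = NULL;
		rsbac_target_id.netobj.remote_len = 0;
		rsbac_attribute_value.sock_type = sock->type;

		/* 1: decision request before the operation */
		if (!rsbac_adf_request(R_LISTEN,
				       task_pid(current),
				       T_NETOBJ,
				       rsbac_target_id,
				       A_sock_type,
				       rsbac_attribute_value))
			return -EPERM;
	#endif

		/* ... the socket operation itself runs here, err == 0 on success ... */

	#ifdef CONFIG_RSBAC_NET_OBJ
		/* 2: attribute update after success; a non-zero return only logs a warning */
		if (!err) {
			rsbac_new_target_id.dummy = 0;
			if (rsbac_adf_set_attr(R_LISTEN,
					       task_pid(current),
					       T_NETOBJ,
					       rsbac_target_id,
					       T_NONE,
					       rsbac_new_target_id,
					       A_sock_type,
					       rsbac_attribute_value))
				rsbac_printk(KERN_WARNING
					     "rsbac_adf_set_attr() returned error\n");
		}
	#endif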
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index d510353..99dfdca 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -115,6 +115,8 @@
+ #include <net/checksum.h>
+ #include <linux/security.h>
+
++#include <rsbac/hooks.h>
++
+ struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
+ EXPORT_SYMBOL_GPL(unix_socket_table);
+ DEFINE_SPINLOCK(unix_table_lock);
+@@ -691,11 +693,26 @@ static int unix_release(struct socket *sock)
+ {
+ struct sock *sk = sock->sk;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++#endif
++
+ if (!sk)
+ return 0;
+
+ sock->sk = NULL;
+
++#ifdef CONFIG_RSBAC
++ if ( sock->file
++ && sock->file->f_dentry
++ && sock->file->f_dentry->d_inode
++ ) {
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = sock->file->f_dentry->d_inode->i_ino;
++ rsbac_remove_target(T_IPC, rsbac_target_id);
++ }
++#endif
++
+ return unix_release_sock(sk, 0);
+ }
+
+@@ -709,12 +726,41 @@ static int unix_autobind(struct socket *sock)
+ int err;
+ unsigned int retries = 0;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ mutex_lock(&u->readlock);
+
+ err = 0;
+ if (u->addr)
+ goto out;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "unix_autobind() [sys_bind()]: calling ADF\n");
++ rsbac_target_id.ipc.type = I_anonunix;
++ if ( sock->file
++ && sock->file->f_dentry
++ && sock->file->f_dentry->d_inode
++ )
++ rsbac_target_id.ipc.id.id_nr = sock->file->f_dentry->d_inode->i_ino;
++ else
++ rsbac_target_id.ipc.id.id_nr = 0;
++ rsbac_attribute_value.sock_type = sock->type;
++ if (!rsbac_adf_request(R_BIND,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_sock_type,
++ rsbac_attribute_value)) {
++ rsbac_pr_debug(aef, "unix_autobind() [sys_bind() etc.]: ADF returned NOT_GRANTED\n");
++ err = -EPERM;
++ goto out;
++ }
++#endif
++
+ err = -ENOMEM;
+ addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
+ if (!addr)
+@@ -754,6 +800,20 @@ retry:
+ spin_unlock(&unix_table_lock);
+ err = 0;
+
++#ifdef CONFIG_RSBAC
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_BIND,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_sock_type,
++ rsbac_attribute_value))
++ rsbac_printk(KERN_WARNING
++ "unix_autobind() [sys_bind() etc.]: rsbac_adf_set_attr() returned error\n");
++#endif
++
+ out: mutex_unlock(&u->readlock);
+ return err;
+ }
+@@ -828,6 +888,12 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ struct unix_address *addr;
+ struct hlist_head *list;
+
++#ifdef CONFIG_RSBAC
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ err = -EINVAL;
+ if (sunaddr->sun_family != AF_UNIX)
+ goto out;
+@@ -848,6 +914,31 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ if (u->addr)
+ goto out_up;
+
++#ifdef CONFIG_RSBAC
++ if (!sunaddr->sun_path[0]) {
++ rsbac_pr_debug(aef, "unix_bind() [sys_bind()]: calling ADF\n");
++ rsbac_target_id.ipc.type = I_anonunix;
++ if ( sock->file
++ && sock->file->f_dentry
++ && sock->file->f_dentry->d_inode
++ )
++ rsbac_target_id.ipc.id.id_nr = sock->file->f_dentry->d_inode->i_ino;
++ else
++ rsbac_target_id.ipc.id.id_nr = 0;
++ rsbac_attribute_value.sock_type = sock->type;
++ if (!rsbac_adf_request(R_BIND,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ A_sock_type,
++ rsbac_attribute_value)) {
++ rsbac_pr_debug(aef, "unix_bind() [sys_bind()]: ADF returned NOT_GRANTED\n");
++ err = -EPERM;
++ goto out_up;
++ }
++ }
++#endif
++
+ err = -ENOMEM;
+ addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
+ if (!addr)
+@@ -870,6 +961,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ if (IS_ERR(dentry))
+ goto out_mknod_parent;
+
++#ifdef CONFIG_RSBAC
++ /* RSBAC add: set credentials so connect and send can copy them */
++ init_peercred(sk);
++#endif
++
+ /*
+ * All right, let's create it.
+ */
+@@ -904,6 +1000,21 @@ out_mknod_drop_write:
+ }
+
+ list = &unix_socket_table[addr->hash];
++
++#ifdef CONFIG_RSBAC
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_BIND,
++ task_pid(current),
++ T_IPC,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_sock_type,
++ rsbac_attribute_value))
++ rsbac_printk(KERN_WARNING
++ "unix_bind() [sys_bind()]: rsbac_adf_set_attr() returned error\n");
++#endif
++
+ } else {
+ list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
+ u->path = path;
+@@ -967,6 +1078,14 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
+ unsigned hash;
+ int err;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ enum rsbac_attribute_t rsbac_attribute = A_none;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (addr->sa_family != AF_UNSPEC) {
+ err = unix_mkname(sunaddr, alen, &hash);
+ if (err < 0)
+@@ -982,6 +1101,46 @@ restart:
+ if (!other)
+ goto out;
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "[sys_connect() [sys_socketcall()]]: calling ADF\n");
++ /* Named socket? */
++	if (sunaddr->sun_path[0]) {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = unix_sk(other)->path.dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = unix_sk(other)->path.dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = unix_sk(other)->path.dentry;
++ } else {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = unix_sk(other)->path.dentry->d_inode->i_ino;
++ }
++ if ( other->sk_peer_pid
++ && (rsbac_attribute_value.process = other->sk_peer_pid)
++ && (pid_nr(rsbac_attribute_value.process) > 0)
++ ) {
++ rsbac_attribute = A_process;
++ } else if ( sk->sk_peer_pid
++ && (rsbac_attribute_value.process = sk->sk_peer_pid)
++ && (pid_nr(rsbac_attribute_value.process) > 0)
++ ) {
++ rsbac_attribute = A_process;
++ } else {
++ rsbac_attribute = A_sock_type;
++ rsbac_attribute_value.sock_type = sock->type;
++ }
++ if (!rsbac_adf_request(R_CONNECT,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value)) {
++ rsbac_pr_debug(aef, "[sys_connect() [sys_socketcall()]]: ADF returned NOT_GRANTED\n");
++ err = -EPERM;
++ sock_put(other);
++ goto out;
++ }
++#endif
++
+ unix_state_double_lock(sk, other);
+
+ /* Apparently VFS overslept socket death. Retry. */
+@@ -1022,6 +1181,23 @@ restart:
+ unix_peer(sk) = other;
+ unix_state_double_unlock(sk, other);
+ }
++
++#ifdef CONFIG_RSBAC
++ if (rsbac_target != T_NONE) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_CONNECT,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value))
++ rsbac_printk(KERN_WARNING
++ "unix_dgram_connect() [sys_connect() [sys_socketcall()]]: rsbac_adf_set_attr() returned error\n");
++ }
++#endif
++
+ return 0;
+
+ out_unlock:
+@@ -1067,6 +1243,15 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ int err;
+ long timeo;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target = T_NONE;
++ enum rsbac_target_t rsbac_new_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ enum rsbac_attribute_t rsbac_attribute = A_none;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ err = unix_mkname(sunaddr, addr_len, &hash);
+ if (err < 0)
+ goto out;
+@@ -1101,6 +1286,48 @@ restart:
+ if (!other)
+ goto out;
+
++#ifdef CONFIG_RSBAC
++ if (unix_sk(other)->path.dentry && unix_sk(other)->path.dentry->d_inode) {
++ rsbac_pr_debug(aef, "[sys_connect() [sys_socketcall()]]: calling ADF\n");
++ /* Named socket? */
++ if (sunaddr->sun_path[0]) {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = unix_sk(other)->path.dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = unix_sk(other)->path.dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = unix_sk(other)->path.dentry;
++ } else {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = unix_sk(other)->path.dentry->d_inode->i_ino;
++ }
++ if ( other->sk_peer_pid
++ && (rsbac_attribute_value.process = other->sk_peer_pid)
++ && (pid_nr(rsbac_attribute_value.process) > 0)
++ ) {
++ rsbac_attribute = A_process;
++ } else if ( sk->sk_peer_pid
++ && (rsbac_attribute_value.process = sk->sk_peer_pid)
++ && (pid_nr(rsbac_attribute_value.process) > 0)
++ ) {
++ rsbac_attribute = A_process;
++ } else {
++ rsbac_attribute = A_sock_type;
++ rsbac_attribute_value.sock_type = sock->type;
++ }
++ if (!rsbac_adf_request(R_CONNECT,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value)) {
++ rsbac_pr_debug(aef, "[sys_connect() [sys_socketcall()]]:"
++ " ADF returned NOT_GRANTED\n");
++ err = -EPERM;
++ goto out;
++ }
++ }
++#endif
++
+ /* Latch state of peer */
+ unix_state_lock(other);
+
+@@ -1172,6 +1399,55 @@ restart:
+ goto out_unlock;
+ }
+
++#ifdef CONFIG_RSBAC
++ rsbac_pr_debug(aef, "unix_stream_connect() [sys_connect()]: calling ADF\n");
++ /* Named socket? */
++	if (unix_sk(other)->path.dentry && unix_sk(other)->path.dentry->d_inode) {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = unix_sk(other)->path.dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = unix_sk(other)->path.dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = unix_sk(other)->path.dentry;
++ } else {
++ if (other->sk_socket
++ && other->sk_socket->file
++ && other->sk_socket->file->f_dentry
++ && other->sk_socket->file->f_dentry->d_inode
++ ) {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ rsbac_target_id.ipc.id.id_nr = other->sk_socket->file->f_dentry->d_inode->i_ino;
++ }
++ }
++ if (rsbac_target != T_NONE) {
++ if ( other->sk_peer_pid
++ && (rsbac_attribute_value.process = other->sk_peer_pid)
++ && (pid_nr(rsbac_attribute_value.process) > 0)
++ ) {
++ rsbac_attribute = A_process;
++ } else if ( sk->sk_peer_pid
++ && (rsbac_attribute_value.process = sk->sk_peer_pid)
++ && (pid_nr(rsbac_attribute_value.process) > 0)
++ ) {
++ rsbac_attribute = A_process;
++ } else {
++ rsbac_attribute = A_sock_type;
++ rsbac_attribute_value.sock_type = sock->type;
++ }
++ if (!rsbac_adf_request(R_CONNECT,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value)) {
++ rsbac_pr_debug(aef, "[sys_connect() [sys_socketcall()]]:"
++ " ADF returned NOT_GRANTED\n");
++ err = -EPERM;
++ unix_state_unlock(sk);
++ goto out_unlock;
++ }
++ }
++#endif
++
+ /* The way is open! Fastly set all the necessary fields... */
+
+ sock_hold(sk);
+@@ -1211,6 +1487,53 @@ restart:
+ spin_unlock(&other->sk_receive_queue.lock);
+ unix_state_unlock(other);
+ other->sk_data_ready(other, 0);
++
++#ifdef CONFIG_RSBAC
++ if (rsbac_target != T_NONE) {
++		if (newu->path.dentry && newu->path.dentry->d_inode) {
++ rsbac_new_target = T_UNIXSOCK;
++ rsbac_new_target_id.unixsock.device = newu->path.dentry->d_sb->s_dev;
++ rsbac_new_target_id.unixsock.inode = newu->path.dentry->d_inode->i_ino;
++ rsbac_new_target_id.unixsock.dentry_p = newu->path.dentry;
++ } else {
++ if (newsk->sk_socket
++ && newsk->sk_socket->file
++ && newsk->sk_socket->file->f_dentry
++ && newsk->sk_socket->file->f_dentry->d_inode
++ ) {
++ rsbac_new_target = T_IPC;
++ rsbac_new_target_id.ipc.type = I_anonunix;
++ rsbac_new_target_id.ipc.id.id_nr = newsk->sk_socket->file->f_dentry->d_inode->i_ino;
++ }
++ }
++ if (rsbac_adf_set_attr(R_CONNECT,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ rsbac_new_target,
++ rsbac_new_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value))
++ rsbac_printk(KERN_WARNING
++ "unix_stream_connect() [sys_connect() [sys_socketcall()]]: rsbac_adf_set_attr() returned error\n");
++#ifdef CONFIG_RSBAC_NET
++#ifdef CONFIG_RSBAC_DEBUG
++ if ( rsbac_debug_aef_net
++ && sk->sk_socket
++ && newsk->sk_socket
++ && other->sk_socket
++ ) {
++			rsbac_printk("unix_stream_connect() [sys_connect()]: connected from %lu to %lu (type %u), orig %lu\n",
++				sk->sk_socket->file->f_dentry->d_inode->i_ino,
++				newsk->sk_socket->file->f_dentry->d_inode->i_ino,
++				rsbac_target,
++				other->sk_socket->file->f_dentry->d_inode->i_ino);
++ }
++#endif
++#endif
++ }
++#endif
++
+ sock_put(other);
+ return 0;
+
+@@ -1263,6 +1586,11 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
+ if (sk->sk_state != TCP_LISTEN)
+ goto out;
+
++#ifdef CONFIG_RSBAC
++	/* set credentials again, because the accepting process might be a different one */
++ init_peercred(sk);
++#endif
++
+ /* If socket state is TCP_LISTEN it cannot change (for now...),
+ * so that no locks are necessary.
+ */
+@@ -1282,6 +1610,21 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
+ /* attach accepted sock to socket */
+ unix_state_lock(tsk);
+ newsock->state = SS_CONNECTED;
++
++#ifdef CONFIG_RSBAC
++ /* copy dentry and mnt, if there */
++ if (unix_sk(sk)->path.dentry) {
++ if (!unix_sk(tsk)->path.dentry) {
++ unix_sk(tsk)->path.dentry = dget(unix_sk(sk)->path.dentry);
++ unix_sk(tsk)->path.mnt = mntget(unix_sk(sk)->path.mnt);
++ }
++ if (newsock->sk && !unix_sk(newsock->sk)->path.dentry) {
++ unix_sk(newsock->sk)->path.dentry = dget(unix_sk(sk)->path.dentry);
++ unix_sk(newsock->sk)->path.mnt = mntget(unix_sk(sk)->path.mnt);
++ }
++ }
++#endif
++
+ sock_graft(tsk, newsock);
+ unix_state_unlock(tsk);
+ return 0;
+@@ -1443,6 +1786,14 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ struct scm_cookie tmp_scm;
+ int max_level;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ enum rsbac_attribute_t rsbac_attribute = A_none;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (NULL == siocb->scm)
+ siocb->scm = &tmp_scm;
+ wait_for_unix_gc();
+@@ -1552,6 +1903,55 @@ restart:
+ goto out_unlock;
+ }
+
++#if defined(CONFIG_RSBAC)
++ if (other->sk_socket) {
++ rsbac_pr_debug(aef, "unix_dgram_sendmsg() [sys_send(), sys_sendto(), sys_sendmsg()]: calling ADF\n");
++ if ( other->sk_socket->sk
++ && unix_sk(other->sk_socket->sk)->path.dentry
++ && unix_sk(other->sk_socket->sk)->path.dentry->d_inode
++ ) {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = unix_sk(other->sk_socket->sk)->path.dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = unix_sk(other->sk_socket->sk)->path.dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = unix_sk(other->sk_socket->sk)->path.dentry;
++ } else {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ if ( other->sk_socket->file
++ && other->sk_socket->file->f_dentry
++ && other->sk_socket->file->f_dentry->d_inode
++ )
++ rsbac_target_id.ipc.id.id_nr = other->sk_socket->file->f_dentry->d_inode->i_ino;
++ else
++ rsbac_target_id.ipc.id.id_nr = 0;
++ }
++ if ( sk->sk_peer_pid
++ && (rsbac_attribute_value.process = sk->sk_peer_pid)
++ && (pid_nr(rsbac_attribute_value.process) > 0)
++ ) {
++ rsbac_attribute = A_process;
++ } else if ( other->sk_socket->sk
++ && other->sk_socket->sk->sk_peer_pid
++ && (rsbac_attribute_value.process = other->sk_socket->sk->sk_peer_pid)
++ && (pid_nr(rsbac_attribute_value.process) > 0)
++ ) {
++ rsbac_attribute = A_process;
++ } else {
++ rsbac_attribute = A_sock_type;
++ rsbac_attribute_value.sock_type = sock->type;
++ }
++		if (!rsbac_adf_request(R_SEND,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out_unlock;
++ }
++ }
++#endif
++
+ if (unix_peer(other) != sk && unix_recvq_full(other)) {
+ if (!timeo) {
+ err = -EAGAIN;
+@@ -1577,6 +1977,24 @@ restart:
+ other->sk_data_ready(other, len);
+ sock_put(other);
+ scm_destroy(siocb->scm);
++
++#if defined(CONFIG_RSBAC)
++ if (len > 0 && (rsbac_target != T_NONE)) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_SEND,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "unix_dgram_sendmsg() [sys_send(), sys_sendto(), sys_sendmsg()]: rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++
+ return len;
+
+ out_unlock:
+@@ -1604,6 +2022,14 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ bool fds_sent = false;
+ int max_level;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ enum rsbac_attribute_t rsbac_attribute = A_none;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ if (NULL == siocb->scm)
+ siocb->scm = &tmp_scm;
+ wait_for_unix_gc();
+@@ -1628,6 +2054,55 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
+ goto pipe_err;
+
++#if defined(CONFIG_RSBAC)
++ if (other->sk_socket) {
++ rsbac_pr_debug(aef, "unix_stream_sendmsg() [sys_send(), sys_sendto(), sys_sendmsg()]: calling ADF\n");
++ if ( other->sk_socket->sk
++ && unix_sk(other->sk_socket->sk)->path.dentry
++ && unix_sk(other->sk_socket->sk)->path.dentry->d_inode
++ ) {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = unix_sk(other->sk_socket->sk)->path.dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = unix_sk(other->sk_socket->sk)->path.dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = unix_sk(other->sk_socket->sk)->path.dentry;
++ } else {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ if ( other->sk_socket->file
++ && other->sk_socket->file->f_dentry
++ && other->sk_socket->file->f_dentry->d_inode
++ )
++ rsbac_target_id.ipc.id.id_nr = other->sk_socket->file->f_dentry->d_inode->i_ino;
++ else
++ rsbac_target_id.ipc.id.id_nr = 0;
++ }
++ if ( sk->sk_peer_pid
++ && (rsbac_attribute_value.process = sk->sk_peer_pid)
++ && (pid_nr(rsbac_attribute_value.process) > 0)
++ ) {
++ rsbac_attribute = A_process;
++ } else if ( other->sk_socket->sk
++ && other->sk_socket->sk->sk_peer_pid
++ && (rsbac_attribute_value.process = other->sk_socket->sk->sk_peer_pid)
++ && (pid_nr(rsbac_attribute_value.process) > 0)
++ ) {
++ rsbac_attribute = A_process;
++ } else {
++ rsbac_attribute = A_sock_type;
++ rsbac_attribute_value.sock_type = sock->type;
++ }
++		if (!rsbac_adf_request(R_SEND,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out_err;
++ }
++ }
++#endif
++
+ while (sent < len) {
+ /*
+ * Optimisation for the fact that under 0.01% of X
+@@ -1696,6 +2171,23 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ scm_destroy(siocb->scm);
+ siocb->scm = NULL;
+
++#if defined(CONFIG_RSBAC)
++ if (sent && (rsbac_target != T_NONE)) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_SEND,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "unix_stream_sendmsg() [sys_send(), sys_sendto(), sys_sendmsg()]: rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++
+ return sent;
+
+ pipe_err_free:
+@@ -1708,6 +2200,24 @@ pipe_err:
+ out_err:
+ scm_destroy(siocb->scm);
+ siocb->scm = NULL;
++
++#if defined(CONFIG_RSBAC)
++ if (sent) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_SEND,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "unix_stream_sendmsg() [sys_send(), sys_sendto(), sys_sendmsg()]: rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++
+ return sent ? : err;
+ }
+
+@@ -1766,10 +2276,78 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
+ int err;
+ int peeked, skip;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ enum rsbac_attribute_t rsbac_attribute = A_none;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ err = -EOPNOTSUPP;
+ if (flags&MSG_OOB)
+ goto out;
+
++#if defined(CONFIG_RSBAC)
++ rsbac_pr_debug(aef, "unix_dgram_recvmsg() [sys_recv(), sys_recvfrom(), sys_recvmsg()]: calling ADF\n");
++ if (unix_peer(sk)) {
++ if ( unix_sk(unix_peer(sk))->path.dentry
++ && unix_sk(unix_peer(sk))->path.dentry->d_inode
++ ) {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = unix_sk(unix_peer(sk))->path.dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = unix_sk(unix_peer(sk))->path.dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = unix_sk(unix_peer(sk))->path.dentry;
++ } else {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ if ( unix_peer(sk)->sk_socket
++ && unix_peer(sk)->sk_socket->file
++ && unix_peer(sk)->sk_socket->file->f_dentry
++ && unix_peer(sk)->sk_socket->file->f_dentry->d_inode
++ )
++ rsbac_target_id.ipc.id.id_nr = unix_peer(sk)->sk_socket->file->f_dentry->d_inode->i_ino;
++ }
++ } else {
++ if ( unix_sk(sk)->path.dentry
++ && unix_sk(sk)->path.dentry->d_inode
++ ) {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = unix_sk(sk)->path.dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = unix_sk(sk)->path.dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = unix_sk(sk)->path.dentry;
++ } else {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ if ( sock->file
++ && sock->file->f_dentry
++ && sock->file->f_dentry->d_inode
++ )
++ rsbac_target_id.ipc.id.id_nr = sock->file->f_dentry->d_inode->i_ino;
++ else
++ rsbac_target_id.ipc.id.id_nr = 0;
++ }
++ }
++ if ( sk->sk_peer_pid
++ && (rsbac_attribute_value.process = sk->sk_peer_pid)
++ && (pid_nr(rsbac_attribute_value.process) > 0)
++ ) {
++ rsbac_attribute = A_process;
++ } else {
++ rsbac_attribute = A_sock_type;
++ rsbac_attribute_value.sock_type = sock->type;
++ }
++	if (!rsbac_adf_request(R_RECEIVE,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out;
++ }
++#endif
++
+ msg->msg_namelen = 0;
+
+ err = mutex_lock_interruptible(&u->readlock);
+@@ -1849,6 +2427,24 @@ out_free:
+ out_unlock:
+ mutex_unlock(&u->readlock);
+ out:
++
++#if defined(CONFIG_RSBAC)
++ if (err > 0) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_RECEIVE,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "unix_dgram_recvmsg() [sys_recv(), sys_recvfrom(), sys_recvmsg()]: rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++
+ return err;
+ }
+
+@@ -1902,6 +2498,14 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ long timeo;
+ int skip;
+
++#ifdef CONFIG_RSBAC
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ enum rsbac_attribute_t rsbac_attribute = A_none;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
+ err = -EINVAL;
+ if (sk->sk_state != TCP_ESTABLISHED)
+ goto out;
+@@ -1910,6 +2514,66 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (flags&MSG_OOB)
+ goto out;
+
++#if defined(CONFIG_RSBAC)
++ rsbac_pr_debug(aef, "unix_stream_recvmsg() [sys_recv(), sys_recvfrom(), sys_recvmsg()]: calling ADF\n");
++ if (unix_peer(sk)) {
++ if ( unix_sk(unix_peer(sk))->path.dentry
++ && unix_sk(unix_peer(sk))->path.dentry->d_inode
++ ) {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = unix_sk(unix_peer(sk))->path.dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = unix_sk(unix_peer(sk))->path.dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = unix_sk(unix_peer(sk))->path.dentry;
++ } else {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ if ( unix_peer(sk)->sk_socket
++ && unix_peer(sk)->sk_socket->file
++ && unix_peer(sk)->sk_socket->file->f_dentry
++ && unix_peer(sk)->sk_socket->file->f_dentry->d_inode
++ )
++ rsbac_target_id.ipc.id.id_nr = unix_peer(sk)->sk_socket->file->f_dentry->d_inode->i_ino;
++ }
++ } else {
++ if ( unix_sk(sk)->path.dentry
++ && unix_sk(sk)->path.dentry->d_inode
++ ) {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = unix_sk(sk)->path.dentry->d_sb->s_dev;
++ rsbac_target_id.unixsock.inode = unix_sk(sk)->path.dentry->d_inode->i_ino;
++ rsbac_target_id.unixsock.dentry_p = unix_sk(sk)->path.dentry;
++ } else {
++ rsbac_target = T_IPC;
++ rsbac_target_id.ipc.type = I_anonunix;
++ if ( sock->file
++ && sock->file->f_dentry
++ && sock->file->f_dentry->d_inode
++ )
++ rsbac_target_id.ipc.id.id_nr = sock->file->f_dentry->d_inode->i_ino;
++ else
++ rsbac_target_id.ipc.id.id_nr = 0;
++ }
++ }
++ if ( sk->sk_peer_pid
++ && (rsbac_attribute_value.process = sk->sk_peer_pid)
++ && (pid_nr(rsbac_attribute_value.process) > 0)
++ ) {
++ rsbac_attribute = A_process;
++ } else {
++ rsbac_attribute = A_sock_type;
++ rsbac_attribute_value.sock_type = sock->type;
++ }
++	if (!rsbac_adf_request(R_RECEIVE,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out;
++ }
++#endif
++
+ target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
+ timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
+
+@@ -2040,6 +2704,23 @@ again:
+ mutex_unlock(&u->readlock);
+ scm_recv(sock, msg, siocb->scm, flags);
+ out:
++#if defined(CONFIG_RSBAC)
++ if (copied > 0) {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_RECEIVE,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value)) {
++ rsbac_printk(KERN_WARNING
++ "unix_stream_recvmsg() [sys_recv(), sys_recvfrom(), sys_recvmsg()]: rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++
+ return copied ? : err;
+ }
+
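/*
 * Editor's sketch (not part of the RSBAC patch): every af_unix hunk above
 * repeats the same hook pattern - classify the peer, ask the ADF before the
 * operation, notify it afterwards. The identifiers used below
 * (rsbac_adf_request, rsbac_adf_set_attr, T_IPC, I_anonunix, A_sock_type,
 * R_SEND, T_NONE, rsbac_printk) are taken from the patch itself; the wrapper
 * function is hypothetical and assumes the includes already present in
 * af_unix.c plus the <rsbac/hooks.h> header added by this patch. It only
 * illustrates the pattern for an anonymous UNIX socket peer.
 */
#ifdef CONFIG_RSBAC
static int rsbac_example_send_hook(struct socket *sock, struct sock *other,
				   int len)
{
	enum rsbac_target_t target = T_IPC;
	union rsbac_target_id_t tid;
	union rsbac_target_id_t new_tid;
	enum rsbac_attribute_t attr = A_sock_type;
	union rsbac_attribute_value_t val;

	/* Anonymous peer: identify the IPC object by the inode of the
	 * peer socket's file, if any, otherwise fall back to 0. */
	tid.ipc.type = I_anonunix;
	tid.ipc.id.id_nr = 0;
	if (other->sk_socket && other->sk_socket->file
	    && other->sk_socket->file->f_dentry
	    && other->sk_socket->file->f_dentry->d_inode)
		tid.ipc.id.id_nr =
			other->sk_socket->file->f_dentry->d_inode->i_ino;
	val.sock_type = sock->type;

	/* 1. Decision call before the access is performed. */
	if (!rsbac_adf_request(R_SEND, task_pid(current),
			       target, tid, attr, val))
		return -EPERM;

	/* ... the caller would perform the actual send here ... */

	/* 2. Notification call after the access succeeded. */
	new_tid.dummy = 0;
	if (rsbac_adf_set_attr(R_SEND, task_pid(current), target, tid,
			       T_NONE, new_tid, attr, val))
		rsbac_printk(KERN_WARNING
			     "rsbac_example_send_hook(): rsbac_adf_set_attr() returned error\n");
	return len;
}
#endif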
+diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
+index af648e0..0374431 100644
+--- a/net/wireless/wext-core.c
++++ b/net/wireless/wext-core.c
+@@ -20,6 +20,8 @@
+ #include <net/wext.h>
+ #include <net/net_namespace.h>
+
++#include <rsbac/hooks.h>
++
+ typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *,
+ unsigned int, struct iw_request_info *,
+ iw_handler);
+@@ -950,9 +952,37 @@ static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr,
+ {
+ int ret = wext_permission_check(cmd);
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#ifndef CONFIG_RSBAC_NET_DEV_VIRT
++ char * rsbac_colon;
++#endif
++#endif
++
+ if (ret)
+ return ret;
+
++#ifdef CONFIG_RSBAC_NET_DEV
++ rsbac_pr_debug(aef, " calling ADF\n");
++ strncpy(rsbac_target_id.netdev, ifr->ifr_name, RSBAC_IFNAMSIZ);
++ rsbac_target_id.netdev[RSBAC_IFNAMSIZ] = 0;
++#ifndef CONFIG_RSBAC_NET_DEV_VIRT
++ rsbac_colon = strchr(rsbac_target_id.netdev, ':');
++ if (rsbac_colon)
++ *rsbac_colon = 0;
++#endif
++ rsbac_attribute_value.dummy = 0;
++
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_NETDEV,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ return -EPERM;
++#endif
++
+ dev_load(net, ifr->ifr_name);
+ rtnl_lock();
+ ret = wireless_process_ioctl(net, ifr, cmd, info, standard, private);
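/*
 * Editor's sketch (not part of the RSBAC patch): the wext-core.c hook above
 * identifies the NETDEV target purely by interface name. Unless
 * CONFIG_RSBAC_NET_DEV_VIRT is set, a virtual interface name such as
 * "eth0:1" is cut at the colon, so it is checked under its base device
 * "eth0". The helper name below is hypothetical; the field and constant
 * names (netdev, RSBAC_IFNAMSIZ) come from the patch.
 */
#ifdef CONFIG_RSBAC_NET_DEV
static void rsbac_example_fill_netdev_id(union rsbac_target_id_t *tid,
					 const char *ifname)
{
	strncpy(tid->netdev, ifname, RSBAC_IFNAMSIZ);
	tid->netdev[RSBAC_IFNAMSIZ] = 0;
#ifndef CONFIG_RSBAC_NET_DEV_VIRT
	{
		char *colon = strchr(tid->netdev, ':');

		if (colon)	/* "eth0:1" is treated as "eth0" */
			*colon = 0;
	}
#endif
}
#endif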
+diff --git a/rsbac/Kconfig b/rsbac/Kconfig
+new file mode 100644
+index 0000000..04238fd
+--- /dev/null
++++ b/rsbac/Kconfig
+@@ -0,0 +1,2342 @@
++#
++# RSBAC configuration
++# (c) 1999-2010 Amon Ott <ao@rsbac.org>
++#
++
++menuconfig RSBAC
++ bool "Rule Set Based Access Control (RSBAC)"
++ default y
++ ---help---
++	  RSBAC adds 'real' access control to the kernel. Currently the
++	  following policies are supported: Mandatory Access Control,
++ Functional Control, Privacy Model, Dazuko, File Flags,
++ Role Compatibility, Authentication Enforcement, User Management,
++ Access Control Lists, System Resources, Linux Capabilities and Jail.
++
++ Additionally, you can add your own policies, e.g. as a kernel module
++ (see Module Registration (REG)).
++
++ Since RSBAC exclusively uses the virtual file system, files on all
++ mounted filesystems are monitored. Still, on some filesystems, no
++	  access control information is stored on disk; it is instead kept in kernel
++	  memory. These filesystem types are currently PROC, NFS, CODAFS, SMBFS,
++	  NCPFS, ISO9660 and (configurable below) all MSDOS types (inodes may
++ change between boots). You can make attribute backups and restore them
++ on each boot to keep them persistent.
++
++ RSBAC will increase your kernel size by up to 520K with all options
++	  turned on, plus allocated memory for dynamic data structures.
++ If you are tight on memory, it is generally a good idea to turn off
++ those features which you do not need.
++
++ Please read the RSBAC docs in Documentation/rsbac or
++ http://www.rsbac.org/documentation before installing a RSBAC enabled
++	  kernel, since you can easily make your system inaccessible when
++ changing the RSBAC configuration...
++
++if RSBAC
++
++menu "General RSBAC options"
++ depends on RSBAC=y
++
++config RSBAC_INIT_THREAD
++ bool 'Initialize RSBAC in separate kernel thread'
++ ---help---
++ Some people experienced system hangs, oopses etc. during RSBAC
++ initialization. If you experience this, enabling this option might
++ help.
++
++ At least, init can be timed out and the system more or less comes up -
++ if you are in softmode or maintenance mode, that is. The timeout value
++ is indicated by the RSBAC_MAX_INIT_TIME configure option below.
++
++config RSBAC_MAX_INIT_TIME
++ int 'Initialization timeout in seconds'
++ default 60
++ depends on RSBAC_INIT_THREAD=y
++ ---help---
++ Maximum time in seconds the init process waits for rsbac_initd to
++ complete the RSBAC initialization process via rsbac_do_init(). After
++ this time, RSBAC initialization is considered as failed and
++ rsbac_initd is killed.
++
++config RSBAC_PROC
++ bool 'RSBAC proc support'
++ depends on PROC_FS
++ default y
++ ---help---
++ If enabled, RSBAC adds one directory to the main proc dir, called
++ rsbac-info.
++ The files in rsbac-info give read access to RSBAC statistics, and
++ read and write access to many RSBAC settings.
++
++ If unsure, say Y.
++
++ See <file:Documentation/rsbac/README-proc> for details.
++
++config RSBAC_INIT_CHECK
++ bool 'Check on init'
++ default y
++ ---help---
++ Check RSBAC attribute consistency at boot time.
++
++	  NOTE: This can only check attributes on the root filesystem.
++ You should run rsbac_check regularly, e.g. once per day from
++ cron.
++
++config RSBAC_NO_WRITE
++ bool 'Disable RSBAC writing to disk'
++ ---help---
++ If enabled, RSBAC treats all file system types as read-only. No
++ attributes are saved and all settings are instead kept in memory.
++ If off, only PROC, NFS, CODAFS, SMBFS, NCPFS, ISO9660 and all MSDOS
++ filesystems are read-only.
++
++	  If you only want to turn off automatic writing, but keep the
++ syscall sys_rsbac_write() to write by hand, set auto write to 0
++ instead.
++
++ This switch is intended for testing purposes only and off by
++ default.
++
++ If unsure, say N.
++
++config RSBAC_MSDOS_WRITE
++ bool 'Allow attribute writing on MSDOS filesystems'
++ depends on RSBAC_NO_WRITE=n
++ ---help---
++ This setting allows attribute writing on MSDOS filesystems, like FAT,
++ VFAT, UMSDOS. Turning it on makes these attributes persistent through
++ reboots.
++	  Unfortunately, MSDOS filesystems do not guarantee reproducible inode
++	  numbers across reboots - so attributes might be applied to the wrong files or
++ get lost. Watch for warning messages from rsbac_check.
++
++ As a more reliable alternative, disable this feature, and make regular
++ attribute backups, which you can restore each time after mounting
++ the disk.
++
++ If unsure, say N.
++
++config RSBAC_AUTO_WRITE
++ int 'RSBAC auto write to disk interval in seconds'
++ default 5
++ depends on RSBAC_NO_WRITE=n
++ ---help---
++ If not 0, a kernel daemon saves all changed RSBAC attributes to disk
++ every n seconds. You can also initiate each saving through the
++ sys_rsbac_write() syscall. This interval can alternatively be changed
++ via the proc interface.
++
++config RSBAC_RCU_RATE
++ int 'RSBAC attribute change burst per second'
++ default 1000
++ ---help---
++ Normally, RSBAC removes list items in the background while the
++ process continues without delay. Keeping the items longer needs
++ extra memory, so a rate limit ensures that memory does not get
++ exhausted.
++
++ When the limit has been reached, deleting items is still
++	  possible, but the process is delayed until all possible
++	  readers of the deleted item have finished, so the item
++	  can be freed immediately. This slows down attribute changes
++	  somewhat, but should only be noticeable on mass changes such as
++	  a restore.
++
++ The default value of 1000 should be fine for normal system use
++ even with FD cache. Valid values are 100 to 100000.
++ The value can later be changed at boot time with
++ rsbac_list_rcu_rate=n kernel parameter and at runtime through
++ /proc/rsbac-info/debug.
++
++config RSBAC_LIST_MAX_HASHES
++ int 'Maximum number of list hashes'
++ default 128
++ ---help---
++ IMPORTANT: Use values of 2^n, like 64, 128, 512. If you do not,
++ the value will be automatically reduced to the next lower 2^n.
++	  (e.g. a value of 127 would be reduced to 64 hashes per list.)
++
++ Define the maximum number of internal hashes per generic list.
++	  Bigger values may need more memory, but allow distributing many
++	  list items over more, and thus smaller, lists behind the hashes.
++ It is safe to change this value at any time, but it may not
++ go below 8 or above 2048. The system will not use more hashes
++ per list than necessary. Default is 128.
++
++config RSBAC_LIST_CHECK_INTERVAL
++ int 'Interval of automatic list check and cleanup'
++ default 1800
++	depends on RSBAC_AUTO_WRITE!=0
++ ---help---
++ Check all lists and remove items over ttl at this interval.
++ Uses rsbacd and thus uses CONFIG_RSBAC_AUTO_WRITE granularity.
++
++ Default is 1800, one half hour.
++
++config RSBAC_LIST_STATS
++ bool 'List access statistics'
++ default n
++ ---help---
++ Count read and write accesses to each list for optimization.
++ The values are shown in /proc/rsbac-info/gen_list_counts.
++
++config RSBAC_LIST_TRANS
++ bool 'Support transactions'
++ default y
++ ---help---
++ This option enables support for RSBAC configuration transactions.
++ RSBAC transactions are a set of temporary changes within a list that
++	  are either committed or forgotten.
++
++ Any user can start a transaction with the rsbac_list_ta() syscall
++ (e.g. via the rsbac_list_ta admin tool), and use the returned
++ transaction number for further administration calls which collect a
++ set of allowed changes.
++
++ When finished, a simple commit through rsbac_list_ta applies all these
++ changes atomically to the system, while the forget command or a
++ transaction timeout (see next option) remove the complete set of
++	  transaction timeout (see next option) removes the complete set of
++	  proposed changes.
++ The number of transactions in parallel is not limited, but each list,
++ which has been changed by one transaction, is locked against changes
++ by all other transactions. Such change attempts return the error
++ -RSBAC_EBUSY, while using invalid transaction numbers returns the
++ error -RSBAC_EINVALIDTRANSACTION. This means that when using
++ transactions, both error codes should be checked for each call. When
++ a list is BUSY, it is the user's choice to retry later, forget all
++ changes or leave it as it is.
++
++ Changes with transaction number 0 (no transaction) and automatic
++ changes by the system always affect both the real lists and all
++ transaction lists.
++
++config RSBAC_LIST_TRANS_MAX_TTL
++ int 'Maximum transaction time in seconds'
++ default 3600
++ depends on RSBAC_LIST_TRANS
++ ---help---
++ When starting a transaction, the ttl parameter sets its maximum
++	  lifetime, after which it will be automatically removed if
++ it has not been renewed in time.
++ This option sets the maximum allowed lifetime for any transaction in
++ seconds. The default value is 3600 (one hour).
++
++config RSBAC_LIST_TRANS_RANDOM_TA
++ bool 'Randomize transaction numbers'
++ default y
++ depends on RSBAC_LIST_TRANS
++ ---help---
++ Usually, transaction numbers start with one and increase with every
++ new transaction. With this option, they will be randomized to make
++ it a bit more difficult to tamper with other admins' transactions.
++
++ However, this is no real protection and makes transactions less human
++ friendly because of large numbers - use the transaction key or the
++ user limit to get more security for transaction handling.
++
++config RSBAC_FD_CACHE
++ bool 'Cache FD attribute values'
++ default n
++ ---help---
++ This option allows to cache inherited attribute values for some
++	  This option allows caching inherited attribute values for some
++ usage significantly.
++
++config RSBAC_FD_CACHE_TTL
++ int 'Time to live for FD cache items'
++ default 1800
++ depends on RSBAC_FD_CACHE
++ ---help---
++ After the time given here in seconds, FD cache items will be
++ removed at the next list cleanup. You can change the value at
++ boottime with kernel parameter rsbac_fd_cache_ttl= and at
++	  boot time with the kernel parameter rsbac_fd_cache_ttl= and at
++ Please also adjust CONFIG_RSBAC_LIST_CHECK_INTERVAL
++ accordingly to get items over ttl expunged timely.
++
++ Default is 1800, one half hour.
++
++config RSBAC_FD_CACHE_MAX_ITEMS
++ int 'Maximum number of FD cache items per hash'
++ default 1000
++ depends on RSBAC_FD_CACHE
++ ---help---
++	  Specify the maximum number of FD cache items allowed per
++	  hashed list. Multiply by the max number of hashes per list to get
++ total maximum number of cache entries.
++
++ Default is 1000.
++
++config RSBAC_DEBUG
++ bool 'RSBAC debugging support'
++ default y
++ ---help---
++ This option enables many debugging switches to examine RSBAC internals
++ as well as request sanity checks.
++ Most of the debugging switches can be set with rsbac_debug_* kernel
++ parameters or via proc interface at /proc/rsbac-info/debug.
++ See <file:Documentation/rsbac/README-proc> for details.
++
++ This option is recommended to be on, but you may as well turn it off,
++ if performance really matters and your RSBAC system runs without
++ problems.
++
++ If unsure, say Y.
++
++config RSBAC_DEV_USER_BACKUP
++ bool 'Provide DEV and USER backup files'
++ depends on RSBAC_PROC=y
++ ---help---
++ If enabled, you will find images of the USER and DEV target general
++ attribute list files in /proc/rsbac-info/backup. Since attribute
++ backup should be done with tools, this is usually not needed and
++ thus off by default.
++
++ If unsure, say N.
++
++config RSBAC_SECOFF_UID
++ int 'RSBAC default security officer user ID'
++ default 400
++ ---help---
++ The number n given here specifies which user IDs should be used for
++ the Security Officer/Role Admin/Supervisor (n), the PM model Data
++ Protection Officer (n+1) and the PM model TP Manager (n+2) in the
++ default configuration at first boot.
++
++ WARNING: This value should only be changed if you have a severe
++ conflict with the default values of 400 to 402. After the
++ first boot, the defaults are set and saved, and changes to
++ this option will not have any effect.
++
++config RSBAC_INIT_DELAY
++ bool 'Delayed init for initial ramdisk'
++ ---help---
++	  This option allows delaying RSBAC initialization until the first mount
++ of a real disk partition (major number > 1). It is intended to be used
++ with initial ramdisks, which mount the final root partition during
++ boot.
++
++ You can trigger initialization at a specific partition mount with the
++ kernel parameter rsbac_delayed_root=major:minor. If the given
++ partition is not mounted and thus RSBAC not initialized, you can also
++ call the rsbac_init() system call at any time, e.g. with the
++ rsbac_init utility.
++
++ To disable delayed init, you have to use the kernel parameter
++ rsbac_no_delay_init. This will force the standard initialization after
++ the first root mount. If this is your initrd, the RSBAC setup in there
++ will be used instead of the configuration on your real root device.
++
++ WARNING: The delayed init option requires the RSBAC init code to be
++ kept in memory all the time, which increases your kernel
++ memory usage by a few 10s of KB. It should only be used in
++ combination with an initial ramdisk.
++
++config RSBAC_GEN_NR_P_LISTS
++ int 'Number of GEN process lists'
++ default 4
++ ---help---
++ When using network support, every process in the system accepting a
++ network connection and all its sub-processes will get individual
++ attributes set. This means that with many active processes, the list
++ lookups will become slower.
++
++ To speed them up, RSBAC uses a hash table to split the GENeral process
++ attribute lists into several shorter ones. This option sets the number
++ of these lists.
++
++ In most cases, the default of 4 will be sufficient. However, if you
++ plan to have very many processes, a higher value will reduce lookup
++ time at the cost of additional list headers.
++endmenu
++
++menuconfig RSBAC_UM
++ depends on RSBAC=y
++ bool 'User Management'
++ ---help---
++ Enable RSBAC User Management, a fully passwd/shadow compatible, but
++ kernel based Linux user and group database. All changes are access
++ controlled with USER and GROUP targets.
++
++ You will need the PAM and NSS modules from the RSBAC admin tools
++ contrib section to make transparent use of this feature.
++
++ If the SHA1 algorithm is available through the crypto API, passwords
++ can also optionally be encrypted (next option).
++
++if RSBAC_UM
++config RSBAC_UM_DIGEST
++ bool 'Use Crypto API Digest SHA1'
++ depends on RSBAC_UM=y
++ select CRYPTO
++ select CRYPTO_SHA1
++ default y
++ ---help---
++ If enabled, all passwords are hashed with SHA1 digests. To make the
++ resulting hash values unique, the password functions add a 32 Bit
++ salt value to the password string before hashing.
++
++config RSBAC_UM_USER_MIN
++ int 'Minimum auto user ID'
++ depends on RSBAC_UM=y
++ default 2000
++ ---help---
++ When users get added without giving a desired ID, the system picks the
++ lowest available number starting from the set value.
++
++config RSBAC_UM_GROUP_MIN
++ int 'Minimum auto group ID'
++ depends on RSBAC_UM=y
++ default 2000
++ ---help---
++ When groups get added without giving a desired ID, the system picks
++ the lowest available number starting from the set value.
++
++config RSBAC_UM_EXCL
++ bool 'Exclusive user management'
++ depends on RSBAC_UM=y
++ ---help---
++ With this option, RSBAC makes sure that only user and group IDs it
++ knows about can be used within the system. The User Management
++ component will only make consistency checks, but the AUTH module will
++ enforce the exclusive use.
++
++config RSBAC_UM_MIN_PASS_LEN
++ int 'Minimum password length'
++ default 6
++ depends on RSBAC_UM=y
++ ---help---
++ Set this to the minimum length a password set by the user must have.
++ The default minimum length is 6, but using at least 8 is recommended
++ for production systems.
++
++ Passwords set by admins with MODIFY_PERMISSIONS_DATA right to the user
++ will not be restricted!
++
++config RSBAC_UM_NON_ALPHA
++ bool 'Require non-alphabetic character in password'
++ default y
++ depends on RSBAC_UM=y
++ ---help---
++ This option requires that a password set by the user must have at
++ least one non-alphabetic character.
++
++ Passwords set by admins with MODIFY_PERMISSIONS_DATA right on the
++ user will not be restricted!
++
++config RSBAC_UM_PWHISTORY
++ bool 'Remember password history'
++ default y
++ depends on RSBAC_UM=y
++ ---help---
++ This option requires that the password set by the user must not be
++	  the same as the RSBAC_UM_PWHISTORY_MAX previous ones.
++
++ Passwords set by admins with MODIFY_PERMISSIONS_DATA right on the
++ user will not be restricted!
++
++config RSBAC_UM_PWHISTORY_MAX
++ int 'Number of successive passwords to remember'
++ default 8
++ depends on RSBAC_UM_PWHISTORY=y
++ ---help---
++ This is the number of passwords RSBAC User Management will
++ remember and check against when changing a password.
++
++config RSBAC_UM_ONETIME
++ bool 'Support one-time passwords'
++ default n
++ depends on RSBAC_UM=y
++ ---help---
++	  With this option you can add additional one-time passwords to every
++	  user account; each of these passwords can only be used once.
++
++config RSBAC_UM_ONETIME_MAX
++ int 'Max number of one-time passwords per account'
++ default 100
++ depends on RSBAC_UM_ONETIME=y
++ ---help---
++	  Set the maximum number of one-time passwords that can be set
++	  on each account.
++
++config RSBAC_UM_VIRTUAL
++ bool 'Support virtual users'
++ default n
++ depends on RSBAC_UM=y
++ ---help---
++ If enabled, RSBAC User Management supports virtual users,
++ which are organized in sets with 32 Bit ID numbers. ID 0 is
++ the main set.
++
++config RSBAC_UM_VIRTUAL_ISOLATE
++ bool 'Isolate virtual user sets'
++ default y
++ depends on RSBAC_UM_VIRTUAL=y
++ ---help---
++ Select this option to ensure that users in virtual sets > 0
++ never see users and groups in other virtual sets.
++endif
++
++if NET
++menu 'RSBAC networking options'
++ depends on RSBAC
++
++config RSBAC_NET
++ bool 'RSBAC network support'
++ depends on NET
++ default y
++ ---help---
++ The net support switch adds generic network device, network template
++ and network object attribute support.
++
++ Also, general settings of IPv4 (INET) networks are controlled through
++ the SCD targets 'network' and 'firewall'.
++
++ To get network device or object access control, you have to enable
++ the conditional switches below, as well as the individual model
++ switches for network access control.
++
++config RSBAC_NET_DEV
++ bool 'Net device control'
++ default y
++ depends on RSBAC_NET
++ ---help---
++ With this option turned on, reading and modifying network device
++ settings, like binding addresses to devices etc., are controlled as
++ NETDEV targets. NETDEV objects are identified by their device name.
++
++config RSBAC_NET_DEV_VIRT
++ bool 'Treat virtual devices as individuals'
++ depends on RSBAC_NET_DEV
++ ---help---
++	  Turn this on if you want to provide access control over virtual
++	  devices independently of their base device. Due to the possible
++ number of virtual devices, be careful with this option.
++
++config RSBAC_IND_NETDEV_LOG
++ bool 'Individual network device logging'
++ default y
++ depends on RSBAC_NET_DEV
++ ---help---
++ Enable individual log levels for every request type for network
++ devices. Log levels are none, denied requests, full, request based.
++ Default value is request based for all request types.
++
++ If this option is off, only general log levels for requests are used
++ (same as individual logging for all objects set to request based).
++
++config RSBAC_NET_OBJ
++ bool 'Net object control (sockets)'
++ default y
++ depends on RSBAC_NET
++ depends on INET
++ ---help---
++ This option enables access control for all socket based
++ communication, except the UNIX address family (controlled by extra
++ option).
++
++ Access control is based on network object (NETOBJ) targets. Default
++ values for NETOBJ attributes are derived from the network template
++ (NETTEMP object), whose description matches this particular network
++ object.
++
++ Matching is performed from lowest to highest template number. If no
++ template matches, general NETOBJ default values will be used.
++ NOTE: The behaviour in this case is model dependent!
++
++ Socket system calls are matched to special request types with
++ matching names.
++
++ NETTEMP objects themselves are protected as NETTEMP targets with
++	  respective requests.
++
++config RSBAC_NET_OBJ_RW
++ bool 'Also intercept network object read and write'
++ depends on RSBAC_NET_OBJ
++ ---help---
++ If on, READ and WRITE requests on sockets are also checked.
++
++config RSBAC_IND_NETOBJ_LOG
++ bool 'Individual network object logging'
++ default y
++ depends on RSBAC_NET_OBJ
++ ---help---
++ Enable individual log levels for every request type for network
++ objects.
++ Log levels are none, denied requests, full, request based.
++ Default value is request based for all request types.
++
++ For easier setup, the log levels are set on the network templates,
++ not the individual network objects.
++
++ If this option is off, only general log levels for requests are used
++ (same as individual logging for all objects set to request based).
++endmenu
++endif
++
++config RSBAC_MAINT
++ bool 'RSBAC Maintenance Kernel (Use with care!)'
++ ---help---
++	  A maintenance kernel is useful if the system has become inaccessible,
++ e.g. because the user attributes for Administrator (root), Security
++ Officer (400) or Data Protection Officer (401, PM only) got lost and
++ have to be reset.
++ However, in most cases enabling softmode can have the same effect for
++ you, but gives the additional benefit of logging the usually denied
++ accesses.
++
++ This option turns some of the RSBAC configuration options below
++ off and disables all access control. Still, you should keep those
++ modules turned on, which you would like to administrate in
++ maintenance mode, because only then their data structures are
++ accessible.
++
++menu 'Decision modules (policy) options'
++ depends on RSBAC
++
++config RSBAC_REG
++ bool 'Support for Registration of decision modules (REG)'
++ default y
++ ---help---
++ If enabled, RSBAC supports runtime registering and unregistering of
++ additional decision module functions, e.g. from kernel modules.
++
++ Possible functions are for decision, notification and file contents
++ overwrite decisions and for write-to-disk notifications.
++
++ Additionally, syscall functions can be registered to the REG syscall
++ dispatcher.
++
++ See <file:Documentation/rsbac/README-reg>,
++ and the module examples in <file:Documentation/rsbac/reg_sample>
++ for details.
++
++config RSBAC_REG_SAMPLES
++ bool 'Build REG sample modules'
++ depends on RSBAC_REG && USB
++ ---help---
++ Build the REG sample kernel modules. These modules show how to use
++ the RSBAC infrastructure, but do not perform any access control.
++
++ The modules will be named reg_sample1, reg_sample2 and reg_sample3.
++
++menuconfig RSBAC_AUTH
++ bool 'AUTH policy support'
++ default y
++	---help---
++ This module can be seen as a support module for all others. It
++ restricts CHANGE_OWNER on process targets (setuid) for a process: the
++	  request is only granted if the process has either the
++ auth_may_setuid flag set or the target user ID is in its capability
++ set.
++ The auth_may_setuid flag and the capability set are inherited on
++ execute from the program file.
++
++ Those file capabilities can be set, if all modules grant a
++ MODIFY_ATTRIBUTE request for A_auth_add_f_cap or A_auth_remove_f_cap.
++ Process capabilities can only be added by other processes that have
++ the auth_may_set_cap flag set, which is also inherited from the
++ executed file.
++
++	  This way an enforcement of daemon-based authentication is possible,
++ as well as a restriction of system daemons to a set of user IDs.
++
++ WARNING: If enabled without a login program having auth_may_setuid or
++ a capability set and without a capability setting daemon,
++	           you will not be able to log in to your system!
++ Use kernel parameter rsbac_auth_enable_login in emergencies
++ or at the first boot to set auth_may_setuid for /bin/login.
++
++ Also see AUTH model description in
++ <http://www.rsbac.org/documentation> for details.
++
++ If unsure, say Y.
++
++if RSBAC_AUTH
++config RSBAC_AUTH_AUTH_PROT
++ bool 'AUTH module and attribute protection'
++ default y
++ ---help---
++	  Only if this option is on does the AUTH module care for its own
++	  protection; otherwise it fully depends on other modules
++ (CONFIG_RSBAC_XX_AUTH_PROT).
++ This is meant for more sophisticated access control than a simple
++ system_role setting to security_officer.
++
++ As a special effect, capability sets are cleared on every write
++ access to reduce system access after tampering.
++
++ See AUTH model description for details.
++
++config RSBAC_AUTH_OTHER_PROT
++ bool 'Protect switching of other modules'
++ depends on RSBAC_SWITCH
++ ---help---
++ This option makes AUTH care for the switching of other modules.
++ Useful if you want to prevent switching a module back on,
++ because it cannot protect itself in this case.
++
++config RSBAC_AUTH_UM_PROT
++ bool 'AUTH protection for User Management'
++ depends on RSBAC_UM
++ default y
++ ---help---
++ This option makes AUTH care for User Management settings, e.g.
++ creation, change or deletion of users or groups.
++
++ See User Management description for details.
++
++config RSBAC_AUTH_DAC_OWNER
++ bool 'AUTH support for effective and fs owner control'
++ ---help---
++ If enabled, AUTH also controls the requests CHANGE_DAC_EFF_OWNER
++ (change process effective owner) and CHANGE_DAC_FS_OWNER (change
++ process filesystem owner) on process targets. Changes to these Linux
++	  DAC (Discretionary Access Control) model owner settings do not affect
++ RSBAC, so this option is off by default.
++
++ This option also requires the 'Control DAC process owner (seteuid,
++ setfsuid)' option from the 'Other options', which enables the
++ requests mentioned above.
++
++config RSBAC_AUTH_ALLOW_SAME
++ bool 'Always allow setting to same id'
++ ---help---
++ Normally, AUTH restricts all setuid and setgid calls, including those
++ to the same uid. Enabling this option allows these calls to be
++ unrestricted by the AUTH module if the same id is given.
++
++config RSBAC_AUTH_GROUP
++ bool 'AUTH support for Linux group control'
++ ---help---
++ If enabled, AUTH also controls the request CHANGE_GROUP
++ (change process group) on process targets. Changes to these Linux
++	  DAC (Discretionary Access Control) model group settings do not affect
++ RSBAC, so this option is off by default.
++
++config RSBAC_AUTH_DAC_GROUP
++ bool 'AUTH support for effective and fs group control'
++ depends on RSBAC_AUTH_GROUP
++ ---help---
++ If enabled, AUTH also controls the requests CHANGE_DAC_EFF_GROUP
++ (change process effective group) and CHANGE_DAC_FS_GROUP (change
++ process filesystem group) on process targets. Changes to these Linux
++	  DAC (Discretionary Access Control) model group settings do not affect
++ RSBAC, so this option is off by default.
++
++ This option also requires the 'Control DAC process group (setegid,
++ setfsgid)' option from the 'Other options', which enables the
++ requests mentioned above.
++
++config RSBAC_AUTH_LEARN
++ bool 'AUTH learning mode support'
++ ---help---
++ If set, you can enable AUTH learning mode with the rsbac_auth_learn
++ kernel parameter. In learning mode, the AUTH module will automatically
++ add all missing auth capabilities.
++
++ WARNING: This option is useful, but dangerous, so it should be off on
++	           production systems.
++
++config RSBAC_AUTH_LEARN_TA
++ int 'Learning mode transaction number'
++ default 0
++ depends on RSBAC_AUTH_LEARN
++ depends on RSBAC_LIST_TRANS
++ ---help---
++ Put learned items into transaction with this number. The
++	  transaction is created if it does not exist. The default
++	  value 0 means do not use transactions; all extra rights
++ get added immediately.
++
++ Note: As the additional rights only appear in the transaction,
++ the same rights may seem to be added repeatedly, until
++ the transaction is committed.
++ Note: All transactions have a maximum lifetime, after which
++ all data is lost, unless you commit or refresh in time,
++ e.g. with the rsbac_list_ta command line tool.
++ Increase RSBAC_LIST_TRANS_MAX_TTL as desired.
++
++endif
++
++menuconfig RSBAC_RC
++ bool 'RC policy support'
++ default y
++ ---help---
++ The Role Compatibility model is a powerful and flexible role based
++ model. It supports an unlimited number of roles and types. Types are
++ grouped per target type. Each role definition has compatibility
++ vectors for all types and other roles.
++
++ Additionally, there are default create/chown/execute types and
++ several special values for inheritance options.
++ Roles can also be forced by executable file attributes, similar to
++ the setuid/setgid mechanism in Unix file systems.
++
++ See <http://www.rsbac.org/documentation> for details.
++
++ If unsure, say Y.
++
++if RSBAC_RC
++config RSBAC_RC_AUTH_PROT
++ bool 'RC protection for AUTH module'
++ default y
++ ---help---
++ This option makes RC care for AUTH module settings, e.g. attributes
++ auth_may_setuid, auth_may_set_cap and the kernel-only pseudo
++ attributes auth_add_f_cap, auth_remove_f_cap, auth_get_caplist.
++
++ These settings are protected by SCD type compatibility with type
++ auth_administration, which is set for default role 1 (Role Admin).
++
++ See AUTH model description for details.
++
++config RSBAC_RC_UM_PROT
++ bool 'RC protection for User Management'
++ depends on RSBAC_UM
++ default y
++ ---help---
++ This option makes RC care for User Management settings, e.g. creation,
++ change or deletion of users or groups.
++
++ See User Management description for details.
++
++config RSBAC_RC_GEN_PROT
++ bool 'RC protection for GENeral attributes'
++ default y
++ ---help---
++ If on, RC protects general attributes (GEN module) like its own, e.g.,
++ in default setup only Role Admins may change them.
++
++config RSBAC_RC_BACKUP
++ bool 'Provide RC backup files'
++ depends on RSBAC_PROC
++ ---help---
++ If enabled, RC provides its binary ACI data files in
++ /proc/rsbac-info/backup.
++
++ Because of better backup options with admin tools, this is off by
++ default.
++
++ If unsure, say N.
++
++config RSBAC_RC_NET_DEV_PROT
++ bool 'RC network device protection'
++ default y
++ depends on RSBAC_NET_DEV
++ ---help---
++ If on, protect network devices based on RC NETDEV type
++ compatibilities.
++
++config RSBAC_RC_NET_OBJ_PROT
++ bool 'RC network object protection'
++ default y
++ depends on RSBAC_NET_OBJ
++ ---help---
++ Turn this on to have real RC access control on network objects based
++ on RC type compatibilities.
++
++ The NETOBJ default type values are derived from those of the matching
++ network template.
++
++ Templates themselves are protected through their own template type
++ in attribute rc_type_nt and the nettemp type compatibility settings.
++
++config RSBAC_RC_NET_OBJ_UNIX_PROCESS
++ bool 'RC check access to UNIX partner process'
++ default n
++ ---help---
++ This option enables additional checks for UNIX sockets: If a peer
++	  process is known (via its peer credential), the requesting process also
++	  needs the right matching the request on the RC type of the
++	  other process.
++
++ Affected request types are CONNECT, ACCEPT, SEND and RECEIVE. If
++ READ and WRITE checking of network sockets is enabled, these request
++ types are checked as RECEIVE and SEND to avoid possible confusion
++ about read and write accesses to processes.
++
++config RSBAC_RC_LEARN
++ bool 'Enable learning mode for missing role rights to types'
++ depends on RSBAC_DEBUG
++ ---help---
++	  This option allows enabling a learning mode via a global switch.
++ In learning mode, missing role rights to types will be added
++ automatically. However, it will never add new roles or types
++ even when this would be a much better solution.
++	  Enable it temporarily for all roles with the rsbac_rc_learn
++	  kernel parameter or at runtime via the proc interface.
++
++ Please check <http://www.rsbac.org/documentation>
++ for more info about how the RC model works.
++
++config RSBAC_RC_LEARN_TA
++ int 'Learning mode transaction number'
++ default 0
++ depends on RSBAC_RC_LEARN
++ depends on RSBAC_LIST_TRANS
++ ---help---
++	  transaction is created if it does not exist. The default
++	  value 0 means do not use transactions; all extra rights
++ value 0 means do not use transactions, all extra rights
++ get added immediately.
++
++ Note: As the additional rights only appear in the transaction,
++ the same rights may seem to be added repeatedly, until
++ the transaction is committed.
++ Note: All transactions have a maximum lifetime, after which
++ all data is lost, unless you commit or refresh in time,
++ e.g. with the rsbac_list_ta command line tool.
++ Increase RSBAC_LIST_TRANS_MAX_TTL as desired.
++
++config RSBAC_RC_NR_P_LISTS
++ int 'RC number of process lists'
++ default 8
++ ---help---
++ When using RC model, every process in the system will get individual
++ attributes set. This means that with many active processes, the list
++ lookups will become slower.
++
++ To speed them up, RSBAC uses a hash table to split the RC process
++ attribute lists into several shorter ones. This option sets the
++	  initial number of these lists; it will automatically grow when
++ needed.
++
++config RSBAC_RC_KERNEL_PROCESS_TYPE
++ int 'RC kernel process type'
++ default 999999
++ ---help---
++	  This is the type assigned to all kernel processes, regardless of the
++ initiating process owner role's def_process_create_type.
++
++	  The default value is 999999. It should only be changed if you have
++	  type number conflicts with your existing configuration.
++endif
++
++menuconfig RSBAC_ACL
++ bool 'ACL policy support'
++ default y
++ ---help---
++ This turns on the Access Control List module. ACLs are kept on all
++ targets but those of type USER. For the IPC and PROCESS targets
++ there is only one default ACL each.
++
++ Each ACL entry contains subject type (user, rc_role, acl_group),
++ subject id and the rights this subject has. Also, rights are inherited
++ from parents and from a target specific default ACL.
++
++ Most settings have a time-to-live (TTL) option, which automatically
++ removes them after a certain amount of time.
++
++ See <http://www.rsbac.org/documentation> for details.
++
++if RSBAC_ACL
++config RSBAC_ACL_SUPER_FILTER
++ bool 'Allow masking out of SUPERVISOR right'
++ ---help---
++ Normally, inheritance masks can never filter out the SUPERVISOR right
++ (which contains all other rights) - it is always inherited.
++
++ If this switch is on, SUPERVISOR *can* be filtered out. This allows
++ separation of duties and privacy, but is also dangerous, because
++ administration can easily become impossible. In this case, you have to
++ reboot into a maintenance kernel to regain access.
++
++ For safety reasons, you must have a USER ACL entry at the target
++ containing the SUPERVISOR right to set a new mask without SUPERVISOR.
++
++config RSBAC_ACL_AUTH_PROT
++ bool 'ACL protection for AUTH module'
++ default y
++ ---help---
++ This option makes ACL care for AUTH module settings, e.g. attributes
++ auth_may_setuid, auth_may_set_cap and the kernel-only pseudo
++ attributes auth_add_f_cap, auth_remove_f_cap, auth_get_caplist. Those
++ settings are protected by SCD 'auth_administration' ACL.
++
++ See AUTH model description for AUTH details.
++
++config RSBAC_ACL_UM_PROT
++ bool 'ACL protection for User Management'
++ depends on RSBAC_UM
++ default y
++ ---help---
++ This option makes ACL care for User Management settings, e.g.
++ creation, change or deletion of users or groups.
++
++ See User Management description for details.
++
++config RSBAC_ACL_GEN_PROT
++ bool 'ACL protection for GENeral attributes'
++ default y
++ ---help---
++ If on, ACL protects general attributes (GEN module) through
++ the MODIFY_ATTRIBUTE right.
++ In default setup, only user 400 may change them.
++
++config RSBAC_ACL_BACKUP
++ bool 'Provide ACL backup files'
++ depends on RSBAC_PROC
++ ---help---
++ If on, ACL provides its binary group and group membership data files
++ in /proc/rsbac-info/backup.
++
++config RSBAC_ACL_LEARN
++ bool 'ACL learning mode support'
++ ---help---
++	  If set, you can enable ACL learning mode with the rsbac_acl_learn or
++	  rsbac_acl_learn_fd kernel parameters. In learning mode, the ACL module
++	  will automatically add all missing ACL entries for filesystem objects.
++
++ WARNING: This option is useful, but dangerous, so it should be off on
++ production systems.
++
++config RSBAC_ACL_LEARN_TA
++ int 'Learning mode transaction number'
++ default 0
++ depends on RSBAC_ACL_LEARN
++ depends on RSBAC_LIST_TRANS
++ ---help---
++	  transaction is created if it does not exist. The default
++	  value 0 means do not use transactions; all extra rights
++ value 0 means do not use transactions, all extra rights
++ get added immediately.
++
++ Note: As the additional rights only appear in the transaction,
++ the same rights may seem to be added repeatedly, until
++ the transaction is committed.
++ Note: All transactions have a maximum lifetime, after which
++ all data is lost, unless you commit or refresh in time,
++ e.g. with the rsbac_list_ta command line tool.
++ Increase RSBAC_LIST_TRANS_MAX_TTL as desired.
++
++config RSBAC_ACL_NET_DEV_PROT
++ bool 'ACL network device protection'
++ default y
++ depends on RSBAC_NET_DEV
++ ---help---
++ If on, protect network devices based on individual and default ACLs.
++
++config RSBAC_ACL_NET_OBJ_PROT
++ bool 'ACL network object protection'
++ default y
++ depends on RSBAC_NET_OBJ
++ ---help---
++ Turn this on to have real ACL access control on network objects based
++ on inherited ACLs.
++
++ When determining a subject's right to a network object (NETOBJ), the
++ following inheritance scheme is used:
++ - If there is an ACL entry at the NETOBJ itself, use it, else
++ - If there is an ACL entry at the matching template, use that, but
++ filter through individual mask, else
++ - If there is an ACL entry in the NETOBJ default ACL, use that, but
++ filter through individual mask and matching template's mask.
++
++	  Of course, user, role and group rights are accumulated as usual.
++
++ Templates themselves are protected through their own individual and
++ default ACLs, which are configured using the NETTEMP_NT target.
++endif
++
++menuconfig RSBAC_MAC
++ bool 'MAC policy support'
++ ---help---
++ Mandatory Access Control follows the Bell-LaPadula security model,
++ in which all users and resources are classified in levels of
++ confidentiality. Additionally, each subject and object has a set
++ drawn from 64 possible categories.
++
++ To read from a resource, a user's level must be at least as high as
++ that of the resource, and the user's category set must be a superset
++ of the category set of the resource.
++ To write to a resource, it must be at least as confidential as the
++ user, and its category set must be a superset of the user's.
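++
++ In short, with L() denoting the level and C() the category set:
++
++ read allowed: L(user) >= L(resource) and C(user) includes C(resource)
++ write allowed: L(resource) >= L(user) and C(resource) includes C(user)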
++
++ See <http://www.rsbac.org/documentation> for details.
++
++if RSBAC_MAC
++config RSBAC_MAC_DEF_INHERIT
++ bool 'MAC inherit as default'
++ default y
++ ---help---
++ If enabled, the inheritable attributes security_level and
++ mac_categories for files, fifos and directories get the default value
++ 'inherit' instead of the old style real value. This reduces the amount
++ of attributes to be set significantly, because files, fifos and dirs
++ inherit their parent dir's attribute values automatically. Inheritance
++ ends at root dir /.
++
++ This setting should be kept constant between different RSBAC kernels
++ in use to avoid confusion for administrators/security officers,
++ rsbac_check() and backup.
++
++ Please note that inheritance is not conforming to the Bell-LaPadula
++ model, where all objects must be individually labeled.
++
++config RSBAC_MAC_SMART_INHERIT
++ bool 'Smart inherit'
++ default y
++ depends on RSBAC_MAC_DEF_INHERIT
++ ---help---
++ If enabled, the MAC model checks whether the values of attributes
++ security_level and mac_categories for new objects would already be in
++ effect via inheritance. Only if the inherited value differs, the new
++ values are set explicitly. Otherwise the default value 'inherit' is
++ automatically applied.
++
++ This option largely reduces the number of new attribute objects
++ needed when whole directory trees are created with the same
++ values, saving memory and CPU cycles.
++
++ However, inheritance is not conforming to the Bell-LaPadula model,
++ where all objects must be individually labeled - here we are even
++ denying explicit labeling of new objects. Use with care.
++
++config RSBAC_MAC_AUTH_PROT
++ bool 'MAC protection for AUTH module'
++ ---help---
++ This option makes MAC care for AUTH module settings, e.g. attributes
++ auth_may_setuid, auth_may_set_cap and the kernel-only pseudo
++ attributes auth_add_f_cap, auth_remove_f_cap, auth_get_caplist. These
++ settings are treated like MAC settings.
++
++ See AUTH model description for details.
++
++config RSBAC_MAC_UM_PROT
++ bool 'MAC protection for User Management'
++ depends on RSBAC_UM
++ default y
++ ---help---
++ This option makes MAC care for User Management settings, e.g.
++ creation, change or deletion of users or groups.
++
++ See User Management description for details.
++
++config RSBAC_MAC_GEN_PROT
++ bool 'MAC protection for GENeral attributes'
++ ---help---
++ If on, MAC protects general attributes (GEN module) like its own,
++ i.e., only security officers may change them.
++
++config RSBAC_MAC_LIGHT
++ bool 'Light MAC edition'
++ ---help---
++ This option makes MAC easier to use, but a bit less conforming to the
++ Bell-LaPadula model.
++ 1. Allow R_CREATE of new files WITHOUT any checking. This way, higher
++ level objects can be created in a lower level directory.
++ 2. Allow R_MOUNT and R_UMOUNT to ANY user (only Administrator in base
++ MAC version).
++
++config RSBAC_MAC_TRUSTED_READ
++ bool 'Give trusted processes full read access'
++ ---help---
++ Normally, a mac_trusted process may only violate *-property, i.e., it
++ may write to any level within its owner's level range from
++ min_security_level to security_level, regardless of its current level
++ and the max_read boundary. This makes a user's trusted flag equivalent
++ to the combination of write_up and write_down flag.
++
++ With this option turned on, a trusted process may also read from any
++ such level despite its current level and the min_write boundary. This
++ adds the meaning of the read_up flag to the trusted flag.
++
++ Please note that the mac_auto privilege with automatic current level
++ and read/write boundary adjustment as well as the object mac_shared
++ flag are always tried before trusted, write_up, write_down and
++ read_up.
++
++config RSBAC_MAC_RESET_CURR
++ bool 'Reset current level on each execute'
++ ---help---
++ If enabled, the current process level is reset to the user's initial
++ level on every execute.
++
++config RSBAC_MAC_LOG_LEVEL_CHANGE
++ bool 'Log all automatic changes to current level'
++ default y
++ ---help---
++ If both the effective mac_auto flag at an executable and the
++ mac_allow_auto flag at the user executing it are set, current levels
++ may be automatically adjusted to allow access, where it would
++ otherwise be denied.
++
++ This option logs each such automatic change to the process current
++ level, because it means a change to the current access rights.
++
++config RSBAC_MAC_NET_DEV_PROT
++ bool 'MAC network device protection'
++ depends on RSBAC_NET_DEV
++ ---help---
++ If enabled, the MAC module protects network devices so that only
++ System Administrators may configure them.
++
++config RSBAC_MAC_NET_OBJ_PROT
++ bool 'MAC network object protection'
++ depends on RSBAC_NET_OBJ
++ ---help---
++ Turn this on to have real MAC access control on network objects based
++ on security levels and categories.
++
++ The default attribute values are derived from those of the matching
++ network template.
++
++config RSBAC_MAC_NR_P_LISTS
++ int 'MAC number of process lists'
++ default 4
++ ---help---
++ When using MAC model, every process in the system will get individual
++ attributes set. This means that with many active processes, the list
++ lookups will become slower.
++
++ To speed them up, RSBAC uses a hash table to split the MAC process
++ attribute lists into several shorter ones. This option sets the number
++ of these lists.
++
++ In most cases, the default of 4 will be sufficient. However, if you
++ plan to have very many processes, a higher value will reduce lookup
++ time at the cost of additional list headers.
++endif
++
++menuconfig RSBAC_PAX
++ bool 'PAX policy support'
++ default y
++ depends on PAX
++ ---help---
++ The PAX module allows you to administer the PaX flags of programs and
++ processes. To have these flags enforced, you need to enable
++ "direct" MAC integration in the PaX control menu under the security
++ menu (CONFIG_PAX_NO_ACL_FLAGS).
++
++ PaX is a separate Linux kernel patch available at
++ <http://pax.grsecurity.net>.
++ Please have a look at the homepage to get some more information.
++
++if RSBAC_PAX
++config RSBAC_PAX_DEFAULT
++ bool 'Change PAX default flags (PeMRxS)'
++ ---help---
++ This option allows you to change the PaX default flags for all
++ files from PeMRxS to any other value.
++ Please be careful if you change this setting, especially with
++ existing configurations: unexpected failures of previously running
++ programs might happen. Nothing you could not fix by
++ reconfiguration, though.
++
++config RSBAC_PAX_PAGEEXEC
++ bool 'PAX Default P: Enable paging based non-exec pages'
++ depends on RSBAC_PAX_DEFAULT
++ default y
++
++config RSBAC_PAX_EMUTRAMP
++ bool 'PAX Default E: Emulate Trampolines'
++ depends on RSBAC_PAX_DEFAULT
++
++config RSBAC_PAX_MPROTECT
++ bool 'PAX Default M: Restrict mprotect'
++ depends on RSBAC_PAX_DEFAULT
++ default y
++
++config RSBAC_PAX_RANDMMAP
++ bool 'PAX Default R: Randomize mmap() base'
++ depends on RSBAC_PAX_DEFAULT
++ default y
++
++config RSBAC_PAX_RANDEXEC
++ bool 'PAX Default X: Randomize ET_EXEC base'
++ depends on RSBAC_PAX_DEFAULT
++
++config RSBAC_PAX_SEGMEXEC
++ bool 'PAX Default S: Segmentation based non-exec pages'
++ depends on RSBAC_PAX_DEFAULT
++ default y
++endif
++
++menuconfig RSBAC_DAZ
++ bool 'DAZuko policy support'
++ ---help---
++ The Dazuko policy provides the Dazuko malware scanning interface.
++ Scanning results may optionally be cached, see CONFIG_RSBAC_DAZ_CACHE
++ below.
++
++ Only programs marked as scanners may connect to the Dazuko interface,
++ and only DAZ security administrators are allowed to modify daz_scanner
++ or daz_scanned.
++
++if RSBAC_DAZ
++config RSBAC_DAZ_SELECT
++ bool 'Let scanners subselect paths'
++ default y
++ ---help---
++ Through the Dazuko interface, scanners can define a set of paths
++ they are interested in. Without this option, RSBAC will ignore
++ these paths for the sake of speed and full mandatory control.
++
++ Please note that with RSBAC the daz_do_scan attribute on FD
++ objects controls which paths get scanned or not. The scanner
++ selection only reduces the set of paths, never increases.
++
++ In previous RSBAC versions, subselection by scanners was always
++ on. It now defaults to on and will default to off in the next
++ RSBAC version.
++
++config RSBAC_DAZ_CACHE
++ bool 'Cache scanning results'
++ default y
++ ---help---
++ With this option, all scanning results get cached for the time
++ specified below.
++
++config RSBAC_DAZ_TTL
++ int 'Scanning result lifetime in seconds'
++ default 86400
++ depends on RSBAC_DAZ_CACHE
++ ---help---
++ Specify the time in seconds a scanning result is kept. After this time
++ the object will be rescanned on the next access.
++
++ Use 0 for unlimited, default is 86400 (1 day). Set to 1 to force a
++ fast rescan.
++
++config RSBAC_DAZ_PERSIST
++ bool 'Keep scanning results over reboot'
++ depends on RSBAC_DAZ_CACHE
++ ---help---
++ If on, file scanning results that are younger than their lifetime
++ limit are kept persistently across reboots.
++
++ Using this option can reduce the amount of scanning, but it cannot
++ protect against file modifications while another kernel is booted.
++
++config RSBAC_DAZ_DEV_MAJOR
++ int 'Dazuko device major number'
++ default 250
++ ---help---
++ Specify the major char device number for /dev/dazuko,
++ which is used for scanner registration and communication.
++
++ Use 0 to let the system pick one.
++endif
++
++menuconfig RSBAC_CAP
++ bool 'CAP (Linux CAP) policy support'
++ default y
++ ---help---
++ The Linux Capability (CAP) module allows you to set minimum and
++ maximum Linux capability sets for individual users and programs.
++ These boundaries
++ are applied at CHANGE_OWNER on processes (setuid) and EXECUTE.
++
++ Minimum settings have precedence over maximums, and program settings
++ have precedence over user settings.
++
++ Use this module to run programs that need root privileges from normal
++ user accounts, e.g. file server daemons, or to restrict programs run
++ by root, e.g. certain mail daemons.
++
++ If softmode is enabled and turned on, only the minimum sets are
++ applied.
++
++
++if RSBAC_CAP
++config RSBAC_CAP_PROC_HIDE
++ bool 'Support CAP process hiding'
++ default y
++ ---help---
++ If enabled, you can hide the process properties shown in /proc from
++ other users, e.g. command line and current state. The hiding level is
++ set with the cap_process_hiding process attribute. There are three
++ possible values:
++ 0 / off: no hiding.
++ 1 / from other users: only processes running for the same user, a CAP
++ security officer or a CAP system admin may read the properties.
++ 2 / full: only this process and CAP security officers may read the
++ properties.
++
++ The kernel command line switch rsbac_cap_process_hiding changes the
++ default value from 0 to 1. Thus, every normal user can only see her
++ own process properties.
++
++config RSBAC_CAP_AUTH_PROT
++ bool 'CAP protection for AUTH module'
++ ---help---
++ This option makes CAP care for AUTH module settings, e.g. attributes
++ auth_may_setuid, auth_may_set_cap and the kernel-only pseudo
++ attributes auth_add_f_cap, auth_remove_f_cap, auth_get_caplist. Those
++ settings are protected by the cap_roles admin (read) and security
++ officer (rw).
++
++ See AUTH model description for AUTH details.
++
++config RSBAC_CAP_LOG_MISSING
++ bool 'Log missing capabilities in capable()'
++ ---help---
++ If checked, the Linux capability check function capable() can log
++ all queries for capabilities that are missing from the effective
++ set. Enable with the rsbac_cap_log_missing kernel parameter, or at
++ runtime via proc interface.
++ The only exception is CAP_SYS_ADMIN, which fails much too often
++ and thus never gets logged.
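++
++ For example, to enable the logging from boot, append the
++ parameter named above to the kernel command line:
++
++ rsbac_cap_log_missing
++
++ The runtime switch goes through /proc/rsbac-info/debug like the
++ other debug switches in this file; the exact keyword here is an
++ assumption:
++
++ echo debug cap_log_missing 1 > /proc/rsbac-info/debug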
++
++ Background: If you limit Linux capabilities for users or programs
++ with the CAP module, some programs fail in secure mode without
++ "NOT_GRANTED" RSBAC log messages, because this Linux internal check
++ failed.
++
++ Please check <http://www.rsbac.org/documentation>
++ for more info about how the CAP module works.
++
++config RSBAC_CAP_LEARN
++ bool 'Learn missing capabilities in capable()'
++ ---help---
++ If checked, the Linux capability check function capable() will
++ add to the max_caps set of the user or program all capabilities
++ that are missing from the current process's effective set because
++ of RSBAC max_caps restrictions.
++ Enable with the rsbac_cap_learn kernel parameter or at runtime
++ via proc interface.
++ Warning: In learning mode, a program with reduced max_caps might
++ temporarily get more effective caps assigned than a standard
++ kernel would grant!
++
++ Background: If you limit Linux capabilities for users or programs
++ with the CAP module, some programs fail in secure mode without
++ "NOT_GRANTED" RSBAC log messages, because this Linux internal check
++ failed.
++
++ Please check <http://www.rsbac.org/documentation>
++ for more info about how the CAP module works.
++
++config RSBAC_CAP_LEARN_TA
++ int 'Learning mode transaction number'
++ default 0
++ depends on RSBAC_CAP_LEARN
++ depends on RSBAC_LIST_TRANS
++ ---help---
++ Put learned items into the transaction with this number. The
++ transaction is created if it does not exist. The default value 0
++ means that no transaction is used and all extra rights are added
++ immediately.
++
++ Note: As the additional rights only appear in the transaction,
++ the same rights may seem to be added repeatedly, until
++ the transaction is committed.
++ Note: All transactions have a maximum lifetime, after which
++ all data is lost, unless you commit or refresh in time,
++ e.g. with the rsbac_list_ta command line tool.
++ Increase RSBAC_LIST_TRANS_MAX_TTL as desired.
++endif
++
++menuconfig RSBAC_JAIL
++ bool 'JAIL policy support'
++ default y
++ ---help---
++ The JAIL module gives you an extended chroot facility, similar to
++ FreeBSD Jails. To put a process into a jail, start it with the
++ rsbac_jail wrapper or make it call the sys_rsbac_jail syscall
++ directly.
++
++ With RSBAC network object support, jailed processes can only use a
++ designated IP address (if designated address is not 0.0.0.0), and
++ UNIX or INET sockets of type STREAM, DGRAM or RDM. All other families
++ and types, e.g. RAW network access, are generally prohibited.
++
++ From within a jail, only processes and IPC objects of the same jail
++ can be accessed. Jails can be created from within jails, but get
++ limited to the parent jail's filesystem root, IP and flags.
++
++ Additionally, most kernel based administration tasks are forbidden,
++ e.g. creating device special files, setting network addresses,
++ getting or setting RSBAC attributes, changing system settings like
++ name or rlimits etc.
++
++ Several sys_rsbac_jail flags change the jail behaviour:
++ - allow_external_ipc: allow to access IPC objects outside this jail
++ - allow_rlimit: allow to change rlimit
++ - allow_all_net_family: allow to use all network families, not only
++ UNIX and INET (IPv4)
++ - allow_inet_raw: allow to use RAW INET sockets, e.g. for ping
++ - auto_adjust_inet_any: also allow to bind to INET ANY address
++ (0.0.0.0), but always change it to specified address
++ (requires CONFIG_RSBAC_JAIL_NET_ADJUST)
++
++if RSBAC_JAIL
++config RSBAC_JAIL_NET_ADJUST
++ bool 'JAIL allow to auto-adjust INET ANY (0.0.0.0) address'
++ default y
++ depends on RSBAC_NET_OBJ
++ ---help---
++ Turn this option on to allow automatic adjusting of the INET ANY
++ address 0.0.0.0 to the specified address with the
++ auto_adjust_inet_any syscall flag.
++
++config RSBAC_JAIL_NET_DEV_PROT
++ bool 'JAIL network device protection'
++ default y
++ depends on RSBAC_NET_DEV
++ ---help---
++ Only with this option enabled can the JAIL module prevent network
++ device configuration from within a jail. Recommended.
++
++config RSBAC_JAIL_NR_P_LISTS
++ int 'JAIL number of process lists'
++ default 4
++ ---help---
++ When using JAIL model, every process in the system will get individual
++ attributes set. This means that with many active processes, the list
++ lookups will become slower.
++
++ To speed them up, RSBAC uses a hash table to split the JAIL process
++ attribute lists into several shorter ones. This option sets the number
++ of these lists.
++
++ In most cases, the default of 4 will be sufficient. However, if you
++ plan to have very many processes, a higher value will reduce lookup
++ time at the cost of additional list headers.
++
++config RSBAC_JAIL_LOG_MISSING
++ bool 'Log missing capabilities in capable()'
++ ---help---
++ If checked, the Linux capability check function capable() can log
++ all queries for capabilities that are missing from the effective
++ set. Enable with the rsbac_jail_log_missing kernel parameter, or at
++ runtime via proc interface.
++
++ Background: If you limit Linux capabilities for users or programs
++ within a JAIL, some programs fail in secure mode without
++ "NOT_GRANTED" RSBAC log messages, because this Linux internal check
++ failed.
++
++ Please check
++ <http://www.rsbac.org/documentation>
++ for more info about how the JAIL module works.
++endif
++
++config RSBAC_RES
++ bool 'RES (System Resources) policy support'
++ default y
++ ---help---
++ The Linux Resources (RES) module allows you to set minimum and
++ maximum Linux resource sets for individual users and programs.
++ These boundaries
++ are applied at CHANGE_OWNER on processes (setuid) and EXECUTE.
++
++ Minimum settings have precedence over maximums, and program settings
++ have precedence over user settings.
++
++ Default values for all users can be set at user RSBAC_ALL_USER with
++ uid 4294967292 ((rsbac_uid_t) -4).
++
++ If softmode is enabled and turned on, only the minimum sets are
++ applied.
++
++menuconfig RSBAC_FF
++ bool 'FF policy support'
++ default y
++ ---help---
++ The File Flag module adds some flags for files and dirs to the
++ system. Current flags are:
++
++ execute_only (files): Only request EXECUTE is granted
++
++ read_only (files and dirs): Only non-modifying requests are granted
++
++ search_only (dirs): All file/subdir accesses need the full path;
++ no listing or modification of the dir is granted.
++
++ write_only (files): Only writing requests are granted, useful for
++ logging etc. Specially good if inherited to new files from a dir.
++
++ no_execute (files): No execution of this file allowed.
++
++ add_inherited (files and dirs): OR the parent dir's flags into the
++ object's own flags. This last flag makes the file flags powerful:
++ simply set a file flag for a dir (e.g. no_execute on /home), and
++ the whole subtree is affected.
++
++ no_rename_or_delete (files and dirs, not inherited): Prevents renaming
++ or deleting an object, e.g. to keep a directory structure fixed.
++
++ append_only (files): the only write access allowed is APPEND_OPEN.
++ Good for log files.
++
++if RSBAC_FF
++config RSBAC_FF_AUTH_PROT
++ bool 'FF protection for AUTH module'
++ ---help---
++ This option makes FF care for AUTH module settings, e.g. attributes
++ auth_may_setuid, auth_may_set_cap and the kernel-only pseudo
++ attributes auth_add_f_cap, auth_remove_f_cap, auth_get_caplist. Those
++ settings are treated like FF settings.
++
++ See AUTH model description for details.
++
++config RSBAC_FF_UM_PROT
++ bool 'FF protection for User Management'
++ depends on RSBAC_UM
++ default y
++ ---help---
++ This option makes FF care for User Management settings, e.g. creation,
++ change or deletion of users or groups.
++
++ See User Management description for details.
++
++config RSBAC_FF_GEN_PROT
++ bool 'FF protection for GENeral attributes'
++ ---help---
++ If on, FF protects general attributes (GEN module) like its own, i.e.,
++ only security officers may change them.
++endif
++
++menuconfig RSBAC_PM
++ bool 'PM policy support'
++ ---help---
++ The Privacy Model defines tasks for which personal data of
++ certain classes has been collected. Similar to the Clark-Wilson
++ model, it defines Transformation Procedures to be applied by
++ authorised users to data classes while performing tasks they are
++ authorised for. In addition, the TP's task must match the tasks
++ for which the data in this class was collected.
++ Also, users are classified as general user, security
++ administrator and privacy agent, with appropriate access rights.
++
++ See <file:Documentation/rsbac> for details.
++
++if RSBAC_PM
++config RSBAC_PM_AUTH_PROT
++ bool 'PM protection for AUTH module'
++ ---help---
++ This option makes PM care for AUTH module settings, e.g. attributes
++ auth_may_setuid, auth_may_set_cap and the kernel-only pseudo
++ attributes auth_add_f_cap, auth_remove_f_cap, auth_get_caplist. Those
++ settings are treated like PM settings.
++
++ Real AUTH attributes can only be set with PM tickets and sys_rsbac_pm
++ system call, which calls all other decision modules before setting.
++
++ See AUTH model description for details.
++
++config RSBAC_PM_GEN_PROT
++ bool 'PM protection for GENeral attributes'
++ ---help---
++ If on, PM protects general attributes (GEN module) so that only
++ security officers may change them.
++endif
++
++endmenu
++# Policies
++
++
++if RSBAC_MAINT = n
++
++menu 'Softmode and switching'
++ depends on RSBAC
++
++config RSBAC_SOFTMODE
++ default y
++ bool 'RSBAC soft mode'
++ ---help---
++ This option enables RSBAC softmode support. In softmode, all
++ decisions and logging are performed, but the result that is returned
++ to enforcement is always DO_NOT_CARE. This means that access control
++ is effectively off!
++
++ Single exception: even in softmode, all access to rsbac attribute dirs
++ is always NOT_GRANTED.
++
++ After boot, softmode will be off, unless kernel parameter
++ 'rsbac_softmode' has been given. It can be turned on via proc
++ interface with
++
++ echo debug softmode <value> > /proc/rsbac-info/debug,
++
++ where <value> is 1 (on) or 0 (off).
++ If policy switching is enabled, you can also use sys_rsbac_switch,
++ e.g. via switch_module command line tool.
++
++ Switching softmode on or off is access controlled with an ADF request
++ SWITCH_MODULE for module SOFTMODE. The RSBAC builtin modules only
++ allow softmode under the same conditions as switching themselves off.
++
++ WARNING: For security reasons, this option should only be used for
++ debugging of your RSBAC administration settings!
++
++ Additionally, you might get strange effects during the notification
++ call rsbac_adf_set_attr(), because the request should not have been
++ granted in the first place. Unexpected access decisions might occur,
++ because attributes might have misleading values!
++
++config RSBAC_SOFTMODE_SYSRQ
++ bool 'Toggle soft mode with SysRq-X'
++ depends on RSBAC_SOFTMODE
++ ---help---
++ If this setting and kernel SysRq are enabled, you can toggle softmode
++ with SysRq-X (char can be changed in rsbac/debug.h).
++ This makes debugging of your RSBAC administration settings much
++ easier.
++
++ WARNING: This is dangerous, because everyone with physical access to
++ your keyboard can effectively turn off access control!
++
++ Do not use in production systems!
++
++ If unsure, say N.
++
++config RSBAC_SOFTMODE_IND
++ bool 'Individual module softmode support'
++ default y
++ depends on RSBAC_SOFTMODE
++ ---help---
++ If on, you can toggle softmode individually for each module.
++ Softmode for a module can be switched via proc interface with
++
++ echo debug ind_softmode <module> <value> > /proc/rsbac-info/debug
++
++ where <module> is the module short name in capitals, e.g. RC, and
++ <value> is 1 (on) or 0 (off).
++ If policy switching is enabled, you can also use sys_rsbac_switch,
++ e.g. via switch_module command line tool.
++
++config RSBAC_SWITCH
++ default y
++ bool 'RSBAC policies switchable'
++ ---help---
++ If enabled, the configured policies can be switched on or off by
++ syscall sys_rsbac_switch().
++
++ Of course, switching modules off is performed under their own control.
++
++ Warning: Though switching off is access controlled itself, any way to
++ switch off access control is always dangerous!
++
++config RSBAC_SWITCH_ON
++ bool 'Allow to switch stateful modules back on'
++ depends on RSBAC_SWITCH
++ ---help---
++ Some modules must be active all the time to keep their state intact,
++ e.g. to have correct process roles or security levels. This means
++ that after turning such a module off, it is in an inconsistent state
++ and can block the whole system when turned back on. Most prominent
++ examples are RC and MAC. Some modules only lose part of their
++ ability to protect the system, e.g. JAIL does not jail new processes.
++
++ By default, modules that can block the system may never be turned on,
++ only off. Enable this switch to be able to turn them back on - you
++ have been warned!
++
++config RSBAC_SWITCH_BOOT_OFF
++ bool 'Allow to switch modules off with kernel parameter'
++ depends on RSBAC_SWITCH
++ ---help---
++ Enable this to allow switching modules off with the kernel
++ parameter rsbac_switch_off_xyz, with xyz = module name in lower
++ case, e.g. rc or auth. The module must be made switchable below.
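++
++ For example, to boot with the RC module switched off, append to
++ the kernel command line:
++
++ rsbac_switch_off_rc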
++
++config RSBAC_SWITCH_REG
++ default y
++ bool 'Switch REG modules'
++ depends on RSBAC_SWITCH
++ depends on RSBAC_REG
++ ---help---
++ Select to switch REG modules on and off at runtime.
++
++config RSBAC_SWITCH_AUTH
++ bool 'Switch AUTH policy'
++ depends on RSBAC_SWITCH
++ depends on RSBAC_AUTH
++ ---help---
++ Select to switch AUTH module on and off at runtime.
++
++config RSBAC_SWITCH_RC
++ bool 'Switch RC policy'
++ depends on RSBAC_SWITCH
++ depends on RSBAC_RC
++ ---help---
++ Select to switch RC module on and off at runtime.
++
++config RSBAC_SWITCH_ACL
++ bool 'Switch ACL policy'
++ depends on RSBAC_SWITCH
++ depends on RSBAC_ACL
++ ---help---
++ Select to switch ACL module on and off at runtime.
++
++config RSBAC_SWITCH_MAC
++ bool 'Switch MAC policy'
++ depends on RSBAC_SWITCH
++ depends on RSBAC_MAC
++ ---help---
++ Select to switch MAC module on and off at runtime.
++
++config RSBAC_SWITCH_PAX
++ bool 'Switch PAX policy'
++ depends on RSBAC_SWITCH
++ depends on RSBAC_PAX
++ ---help---
++ Select to switch PAX module on and off at runtime.
++
++config RSBAC_SWITCH_DAZ
++ bool 'Switch DAZ policy'
++ depends on RSBAC_SWITCH
++ depends on RSBAC_DAZ
++ ---help---
++ Select to switch DAZ module on and off at runtime.
++
++config RSBAC_SWITCH_CAP
++ bool 'Switch CAP policy'
++ depends on RSBAC_SWITCH
++ depends on RSBAC_CAP
++ ---help---
++ Select to switch CAP module on and off at runtime.
++
++config RSBAC_SWITCH_JAIL
++ bool 'Switch JAIL policy'
++ depends on RSBAC_SWITCH
++ depends on RSBAC_JAIL
++ ---help---
++ Select to switch JAIL module on and off at runtime.
++
++config RSBAC_SWITCH_RES
++ bool 'Switch RES policy'
++ depends on RSBAC_SWITCH
++ depends on RSBAC_RES
++ ---help---
++ Select to switch RES module on and off at runtime.
++
++config RSBAC_SWITCH_FF
++ bool 'Switch FF policy'
++ depends on RSBAC_SWITCH
++ depends on RSBAC_FF
++ ---help---
++ Select to switch FF module on and off at runtime.
++
++config RSBAC_SWITCH_PM
++ bool 'Switch PM policy'
++ depends on RSBAC_SWITCH
++ depends on RSBAC_PM
++ ---help---
++ Select to switch PM module on and off at runtime.
++endmenu
++
++menu 'Logging'
++ depends on RSBAC
++
++config RSBAC_IND_LOG
++ bool 'Individual file/dir/dev object logging'
++ default y
++ ---help---
++ Enable individual log levels for every request type for every file,
++ dir and device. Log levels are none, denied requests, full, request
++ based. Default value is request based for all request types.
++
++ If this option is off, only general log levels for requests are used
++ (same as individual logging for all objects set to request based).
++
++config RSBAC_IND_USER_LOG
++ bool 'Individual user logging'
++ default y
++ ---help---
++ When enabled, you can specify for every single user which request
++ type(s) will always be logged.
++
++config RSBAC_IND_PROG_LOG
++ bool 'Individual program logging'
++ default y
++ ---help---
++ When enabled, you can specify for every single program file which
++ request type(s) will always be logged.
++
++config RSBAC_LOG_PROGRAM_FILE
++ bool 'Log program file'
++ default y
++ ---help---
++ Enable this option to get the full program file path logged together
++ with the process name.
++
++config RSBAC_LOG_FULL_PATH
++ bool 'Log full path'
++ default y
++ ---help---
++ If this is turned on, logging messages for file and dir targets will
++ contain the full path. This makes the log significantly longer and
++ takes some extra CPU time, but also increases log usability.
++
++config RSBAC_MAX_PATH_LEN
++ int 'Maximum path length (256 - 2000)'
++ default 512
++ depends on RSBAC_LOG_FULL_PATH
++ ---help---
++ If the full path is longer than CONFIG_RSBAC_MAX_PATH_LEN, the
++ leading dirnames will be left out until it fits. The bigger this
++ value, the more memory will be allocated in the logging routine.
++
++config RSBAC_LOG_PSEUDO
++ bool 'Pseudonymous logging support'
++ default n
++ ---help---
++ Enable to get pseudo values for users honoured in all request logging
++ entries. If a pseudo value is not 0, it is logged instead of the uid.
++
++config RSBAC_LOG_PSEUDO_FS
++ bool 'Pseudonymize filesystem objects'
++ default n
++ depends on RSBAC_LOG_PSEUDO
++ ---help---
++ If enabled, the logging code replaces filesystem object names with
++ their owner's pseudo, if the pseudo value is not 0 and the parent
++ object has another owner.
++
++config RSBAC_SYSLOG_RATE
++ bool 'Syslog rate limit'
++ default y
++ ---help---
++ Limit the number of log message lines sent to syslog per second.
++
++config RSBAC_SYSLOG_RATE_DEF
++ int 'Default allowed message lines per second'
++ depends on RSBAC_SYSLOG_RATE
++ default 1000
++ ---help---
++ Number of allowed syslog message lines per second. This value can be
++ changed with kernel parameter rsbac_syslog_rate=<n> or at runtime via
++ the proc interface.
++
++ Default value is 1000.
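++
++ For example, to allow only 200 lines per second from boot, append
++ to the kernel command line:
++
++ rsbac_syslog_rate=200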
++
++config RSBAC_RMSG
++ bool 'RSBAC own logging facility'
++ default y
++ ---help---
++ Add logging of requests with an extra facility, which is basically
++ a clone of printk/sys_syslog. If proc is supported, a file rmsg
++ appears in rsbac-info and can be used like kmsg, e.g. with a
++ patched klogd, cat or tail -f.
++ This file is protected by RSBAC with object type SCD/rsbac_log.
++ A new syscall sys_rsbac_log, similar to sys_syslog, also gives
++ access to the log.
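++
++ For example, to follow the RSBAC log at runtime (path as implied
++ above):
++
++ tail -f /proc/rsbac-info/rmsg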
++
++config RSBAC_RMSG_MAXENTRIES
++ int 'Maximum number of messages in the log buffer'
++ depends on RSBAC_RMSG
++ default 200
++ ---help---
++ Number of allowed messages in the RSBAC log buffer. Each message
++ takes from 32 to 2048 bytes of memory.
++
++ Default value is 200.
++
++config RSBAC_RMSG_NOSYSLOG
++ bool 'Allow to disable logging to syslog'
++ default y
++ depends on RSBAC_RMSG
++ ---help---
++ When on, you can temporarily disable logging to syslog with kernel
++ parameter rsbac_nosyslog or via /proc/rsbac-info/debug. Useful for
++ initial configuration on system installation or if you need a clean
++ separation.
++
++ If unsure, say Y.
++
++config RSBAC_LOG_REMOTE
++ bool 'Log to remote UDP network socket'
++ depends on RSBAC_RMSG
++ ---help---
++ If enabled, copies of every message in the RSBAC log facility will be
++ sent to a remote system over network.
++
++if RSBAC_LOG_REMOTE=y
++
++config RSBAC_LOG_REMOTE_TCP
++ bool 'Use TCP for remote logging'
++ depends on RSBAC_RMSG
++ ---help---
++ If enabled, use more reliable TCP protocol to transfer logs.
++
++config RSBAC_LOG_REMOTE_MAXENTRIES
++ int 'Maximum number of messages in the remote log buffer'
++ depends on RSBAC_RMSG
++ default 200
++ ---help---
++ Number of allowed messages in the RSBAC remote log buffer. Each
++ message takes from 32 to 2048 bytes of memory.
++
++ Default value is 200.
++
++#config RSBAC_LOG_REMOTE_SYNC
++# bool 'Immediate remote logging'
++
++config RSBAC_LOG_INTERVAL
++ int 'Logging interval in timer ticks'
++ default 100
++# depends on RSBAC_LOG_REMOTE_SYNC=n
++
++config RSBAC_LOG_LOCAL_ADDR
++ string 'Local UDP address'
++ default "0.0.0.0"
++
++config RSBAC_LOG_LOCAL_PORT
++ int 'Local UDP port'
++ default 0
++
++config RSBAC_LOG_REMOTE_ADDR
++ string 'Remote UDP address'
++ default "0.0.0.0"
++
++config RSBAC_LOG_REMOTE_PORT
++ int 'Remote UDP port'
++ default 514
++
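++# Example .config fragment for remote logging (addresses and port
++# are placeholders, adjust to your log host):
++#
++# CONFIG_RSBAC_LOG_REMOTE=y
++# CONFIG_RSBAC_LOG_LOCAL_ADDR="0.0.0.0"
++# CONFIG_RSBAC_LOG_LOCAL_PORT=0
++# CONFIG_RSBAC_LOG_REMOTE_ADDR="192.0.2.10"
++# CONFIG_RSBAC_LOG_REMOTE_PORT=514
++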
++endif
++
++endmenu
++
++config RSBAC_SYM_REDIR
++ bool 'RSBAC symlink redirection'
++ depends on RSBAC
++ ---help---
++ This feature optionally changes the contents of a symlink, based on
++ the owner ID, the current MAC security level or the current RC role
++ ID of the process accessing it.
++
++ NOTE: For technical reasons, all numeric characters at the end of the
++ original symlink contents will be replaced, not appended to.
++ This can be used to e.g. get the uid itself as final name and
++ the parent dir (or nothing...), if redirection is off.
++
++if RSBAC_SYM_REDIR
++
++config RSBAC_SYM_REDIR_REMOTE_IP
++ bool 'Add remote IP address'
++ depends on RSBAC_NET_OBJ
++ ---help---
++ With this option enabled, every read from a symlink that has a
++ symlink_add_remote_ip value > 0 gets that many octets of the
++ remote IP address appended in dot notation, e.g. 192.168.0 for a
++ value of 3.
++
++ This option is e.g. useful, if you want to provide different user
++ shells, depending on the origin of a connection:
++ Set user shell to /bin/linkshell. Make this a symlink to
++ /bin/usershell-, provide /bin/usershell-192.168.0 for users from
++ that network and /bin/usershell-0.0.0 for local users.
++
++ As a side effect, users connecting from other networks have no
++ valid shell at all.
++
++config RSBAC_SYM_REDIR_UID
++ bool 'Add user ID number'
++ ---help---
++ With this option enabled, every read from a symlink, which has the
++ symlink_add_uid flag set, gets the caller uid added in decimal
++ notation.
++
++ This feature can e.g. be used to setup individual /tmp dirs for all
++ users, as root call:
++
++ cd /
++ mkdir tmpdirs && chmod 777 tmpdirs && chmod o+t tmpdirs
++ # stay compatible, if redirection is off, by reusing old /tmp
++ mv tmp tmpdirs
++ ln -s tmpdirs/tmp tmp
++ mkdir tmpdirs/tmp0 ; chmod 700 tmpdirs/tmp0
++ echo 'mkdir /tmpdirs/tmp$UID && chmod 700 /tmpdirs/tmp$UID' \
++ >>/etc/profile
++
++ As user with modify right for general attributes (e.g. user 400), set
++ symlink_add_uid to on for /tmp:
++ attr_set_file_dir SYMLINK /tmp symlink_add_uid 1
++
++ From now on, root accesses to /tmp show /tmpdirs/tmp0, user 400
++ accesses show /tmpdirs/tmp400, etc. It is of course advisable to
++ protect the individual dirs against root.
++
++config RSBAC_SYM_REDIR_MAC
++ bool 'Add MAC current security level'
++ depends on RSBAC_MAC
++ ---help---
++ With this option enabled, every read from a symlink, which has the
++ symlink_add_mac_level flag set, gets the calling process's current
++ MAC security level added in decimal notation.
++
++ It can be used to e.g. provide separate /tmp dirs for all MAC levels
++ and thus avoid unwanted flow of information.
++ See 'Add user ID number' help to get an idea of how to do this.
++
++config RSBAC_SYM_REDIR_MAC_CAT
++ bool 'Also add MAC current category vector'
++ depends on RSBAC_SYM_REDIR_MAC
++ ---help---
++ If enabled, the redirected symlink contents will not only contain the
++ process current MAC security level, but also its current category set
++ as the usual string of 0 and 1, separated by a colon.
++
++ WARNING: This will result in more possible values than your
++ filesystem can provide names and inodes for, so please
++ be careful.
++
++config RSBAC_SYM_REDIR_RC
++ bool 'Add RC role number'
++ depends on RSBAC_SYM_REDIR && RSBAC_RC
++ ---help---
++ With this option enabled, every read from a symlink, which has the
++ symlink_add_rc_role flag set, gets the calling process's current RC
++ role ID added in decimal notation.
++
++ It can be used to e.g. provide individual /tmp dirs for all roles.
++ See 'Add user ID number' help to get an idea of how to do this.
++endif
++
++config RSBAC_ALLOW_DAC_DISABLE
++ bool 'Allow disabling of Linux filesystem access control'
++ depends on RSBAC
++ ---help---
++ Turn this on to get access to Linux DAC disabling switches.
++
++if RSBAC_ALLOW_DAC_DISABLE
++
++config RSBAC_ALLOW_DAC_DISABLE_FULL
++ bool 'Allow full disabling (DANGEROUS!)'
++ ---help---
++ With this option you can allow turning off Linux discretionary access
++ control for all filesystem objects, except where the filesystem
++ provides its own access control function.
++
++ In most cases it should be sufficient to set the DAC_OVERRIDE Linux
++ capability with the DAC module for single users or programs instead.
++
++ To disable Linux access control, use the kernel param
++ rsbac_dac_disable, syscall sys_rsbac_switch (utility switch_module)
++ for switch target DAC_DISABLE or
++
++ echo debug dac_disable 1 >/proc/rsbac-info/debug.
++
++ For security reasons, Linux access control is still enabled by
++ default, even with this option active!
++
++ DANGER! Do not use, unless you really know what you are doing!
++
++config RSBAC_ALLOW_DAC_DISABLE_PART
++ bool 'Allow partial (dir tree based) disabling'
++ ---help---
++ If enabled, you can turn off Linux discretionary access control for
++ individual filesystem objects and directory trees.
++
++ This option is useful, if you want to rely on RSBAC access control
++ instead of standard Linux style, without giving full access in case
++ RSBAC is not running.
++
++ NOTE: You can use the linux2acl command line tool to get your old
++ Linux rights converted to ACL settings.
++endif
++
++menu 'Other RSBAC options'
++ depends on RSBAC
++
++config RSBAC_SECDEL
++ bool 'Support secure_delete'
++ ---help---
++ This option enables secure deletion and truncation of all files.
++ The decision whether to overwrite is dispatched to all modules;
++ if one says yes, the file is overwritten. Currently only FF and PM
++ support this.
++
++ FF returns yes, if file is marked with ff_flag secure_delete. This
++ flag can, as usual, be inherited from parent dir, if flag
++ add_inherited is set (default, if option 'Inherit as default' has
++ been chosen).
++
++ PM returns yes for all files marked as personal data and no
++ otherwise.
++
++ Secure deletion and truncation is currently done by overwriting once
++ with zeros, because this is enough against hackers and standard level
++ analysers. Against well-fed organisations who got hold of your disk
++ there is no protection anyway.
++
++ This mechanism is currently only supported for ext2, ext3, minix,
++ msdos and vfat, but could be extended to other file systems, if
++ needed.
++
++config RSBAC_RW
++ bool 'Intercept sys_read and sys_write'
++ default y
++ ---help---
++ If enabled, the syscalls sys_read() and sys_write() for reading from
++ and writing to opened files, fifos and devices are also intercepted.
++ This slows down the system a bit, but allows more control of
++ object accesses, so that they adapt to configuration changes.
++
++ Please note that the interception for sockets only takes place, if
++ net support and socket read/write interception are also enabled
++ (CONFIG_RSBAC_NET and CONFIG_RSBAC_NET_RW).
++
++config RSBAC_IPC_SEM
++ bool 'Intercept Semaphore IPC operations'
++ default y
++ ---help---
++ If on, System V IPC Semaphores are also protected. As there is no
++ direct data flow over semaphores, they can at most be misused for
++ denial of service attacks. Turn it on for special needs, keep it
++ off otherwise.
++
++config RSBAC_DAC_OWNER
++ bool 'Control DAC process owner (seteuid, setfsuid)'
++ ---help---
++ Usually, only set*uid calls, which affect the real user ID used for
++ RSBAC decisions, issue a decision request CHANGE_OWNER for processes.
++ With this option, changes to the effective (CHANGE_DAC_EFF_OWNER) and
++ the filesystem (CHANGE_DAC_FS_OWNER) owner are also controlled.
++
++ Please also see AUTH option 'AUTH support for effective and fs owner
++ control'.
++
++config RSBAC_DAC_GROUP
++ bool 'Control DAC process group (setegid, setfsgid)'
++ ---help---
++ Usually, set*gid calls issue only the decision request CHANGE_GROUP
++ for changes to the process group.
++ With this option, changes to the effective (CHANGE_DAC_EFF_GROUP) and
++ the filesystem (CHANGE_DAC_FS_GROUP) group are also controlled.
++
++ Please also see AUTH option 'AUTH support for effective and fs group
++ control'.
++
++
++config RSBAC_PROC_HIDE
++ bool 'Hide processes in /proc'
++ ---help---
++ If enabled, a process is also hidden in the /proc listing if the
++ reading process has no GET_STATUS_DATA right to it.
++ This option adds a significant number of requests when reading
++ /proc, so it is off by default.
++
++config RSBAC_FSOBJ_HIDE
++ bool 'Hide filesystem objects'
++ depends on EXPERIMENTAL
++ ---help---
++ If enabled, every filesystem object (file, dir, named pipe,
++ unix socket) will be completely hidden from every process that
++ has no SEARCH right to it.
++
++ WARNING! You need to remove all rights to that object you are
++ trying to hide to make it fully protected! Otherwise
++ overwriting may be possible by a process that knows this
++ object's name!
++
++config RSBAC_FREEZE
++ bool 'Support freezing of RSBAC configuration'
++ default y
++ ---help---
++ If enabled, the kernel parameter rsbac_freeze disables all
++ administrative system calls, which change settings.
++
++config RSBAC_FREEZE_UM
++ bool 'Also freeze User Management'
++ depends on RSBAC_FREEZE && RSBAC_UM
++ ---help---
++ If enabled, the RSBAC User Management gets frozen, too.
++
++config RSBAC_SYSLOG
++ bool 'RSBAC check sys_syslog'
++ ---help---
++ If enabled, the syscall sys_syslog() for the system log is also
++ checked, trading a little performance for a minor gain in
++ security.
++
++config RSBAC_IOCTL
++ bool 'Generic check in sys_ioctl'
++ default y
++ ---help---
++ Enable this switch to get all calls to sys_ioctl for DEV and NETOBJ
++ targets checked by RSBAC. In any case, some important sys_ioctl calls
++ will also be checked as GET_STATUS_DATA or MODIFY_SYSTEM_DATA
++ requests.
++
++ This interception produces some overhead on desktop systems, so
++ consider turning it off on such systems.
++
++config RSBAC_USER_CHOWN
++ bool 'Check CHANGE_OWNER on PROCESS against USER target'
++ default n
++ ---help---
++ With this option, each CHANGE_OWNER request on PROCESS targets
++ creates an additional CHANGE_OWNER request on the new owner as
++ USER target.
++
++ The additional check allows modules to control user changes
++ depending on the current state of the process attributes,
++ e.g. current role or owner.
++
++config RSBAC_DAT_VISIBLE
++ bool 'Make RSBAC data files visible'
++ ---help---
++ By default, the rsbac.dat directories containing the list data
++ backups are unreadable for any user. Unfortunately, some programs
++ must be able to read all directories, e.g. the quota tools.
++
++ This option enables read and status access to the rsbac.dat
++ directories, but the files themselves are kept unreadable.
++
++config RSBAC_NO_DECISION_ON_NETMOUNT
++ bool 'No decision on net mounts'
++ ---help---
++ If this option is turned on, no decisions are made and no attributes
++ are set for all accesses to files and dirs on network mounts. For
++ these targets, RSBAC behaves like a maintenance kernel. So even
++ Dazuko is turned off. Attribute saving or restoring is not done
++ anyway.
++
++ Use this option, if you cannot access your network files properly,
++ and keep it off otherwise.
++
++config RSBAC_ENFORCE_CLOSE
++ bool 'Enforce denied close access'
++ ---help---
++ By default, RSBAC only logs denied CLOSE access on filesystem
++ objects and still performs the close, because many broken
++ programs rely on close to always succeed. With this switch on,
++ RSBAC really returns -EPERM and keeps the object opened.
++
++config RSBAC_USER_MOD_IOPERM
++ bool 'X support (normal user MODIFY_PERM access to ST_ioports)'
++ ---help---
++ Normally, MODIFY_PERMISSIONS_DATA access to SCD target ST_ioports is
++ not granted for normal users, because this would allow direct
++ hardware access. Unfortunately, X servers depend on this. So if you
++ need X on your system, enable this, otherwise keep it off.
++
++ This switch enables the following changes:
++ - MAC, FC, SIM and AUTH hardwired policies are adjusted to allow
++ this access type for every user.
++ - RC and ACL default settings are adjusted accordingly, but
++ existing or later settings are not affected.
++
++config RSBAC_FAKE_ROOT_UID
++ bool 'Faked root uid'
++ ---help---
++ With this turned on, programs can optionally get uid 0 returned from
++ every getuid() and/or geteuid() call.
++
++ Some programs check whether they really run as root, although they
++ already have sufficient rights to do their jobs. This forces users
++ to keep them running under the dangerous root account.
++
++
++config RSBAC_XSTATS
++ bool 'RSBAC extra statistics'
++ default y
++ depends on RSBAC_PROC
++ ---help---
++ If enabled, extended RSBAC statistics are collected. Currently these
++ are mostly count matrices of adf_request and adf_set_attr calls by
++ target type and request type. These matrices give a detailed overview
++ of the request and thus the system call behaviour.
++
++ Also, there is an access count for calls to data structures by target
++ type.
++
++ The extra data can be read from another proc file called xstats.
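++
++ For example (file name as given above, directory as used by the
++ other RSBAC proc files):
++
++ cat /proc/rsbac-info/xstats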
++endmenu
++
++endif
++
++endif
+diff --git a/rsbac/Makefile b/rsbac/Makefile
+new file mode 100644
+index 0000000..abdae60
+--- /dev/null
++++ b/rsbac/Makefile
+@@ -0,0 +1,11 @@
++#
++# Main Makefile for the Rule Set Based Access Control subsystem.
++#
++# Author and (c) 1999-2012 Amon Ott <ao@rsbac.org>
++
++obj-y := help/ data_structures/ adf/
++obj-m := adf/
++
++clean: Makefile
++ rm -f `find . -name '*.o' -print`
++
+diff --git a/rsbac/adf/Makefile b/rsbac/adf/Makefile
+new file mode 100644
+index 0000000..ef1514e
+--- /dev/null
++++ b/rsbac/adf/Makefile
+@@ -0,0 +1,62 @@
++#
++# File: rsbac/adf/Makefile
++#
++# Makefile for the Linux RSBAC Access Control Decision Facility (ADF)
++#
++# Author and (c) 1999-2012 Amon Ott <ao@rsbac.org>
++#
++
++obj-y := adf_main.o
++
++ifeq ($(CONFIG_RSBAC_DEBUG),y)
++obj-y += adf_check.o
++endif
++
++# Adding policies
++subdir-$(CONFIG_RSBAC_MAC) += mac
++obj-$(CONFIG_RSBAC_MAC) += mac/
++
++subdir-$(CONFIG_RSBAC_PM) += pm
++obj-$(CONFIG_RSBAC_PM) += pm/
++
++ifneq ($(CONFIG_RSBAC_MAINT),y)
++subdir-$(CONFIG_RSBAC_DAZ) += daz
++obj-$(CONFIG_RSBAC_DAZ) += daz/
++endif
++
++ifneq ($(CONFIG_RSBAC_MAINT),y)
++subdir-$(CONFIG_RSBAC_FF) += ff
++obj-$(CONFIG_RSBAC_FF) += ff/
++endif
++
++subdir-$(CONFIG_RSBAC_RC) += rc
++obj-$(CONFIG_RSBAC_RC) += rc/
++
++subdir-$(CONFIG_RSBAC_AUTH) += auth
++obj-$(CONFIG_RSBAC_AUTH) += auth/
++
++subdir-$(CONFIG_RSBAC_ACL) += acl
++obj-$(CONFIG_RSBAC_ACL) += acl/
++
++ifneq ($(CONFIG_RSBAC_MAINT),y)
++subdir-$(CONFIG_RSBAC_CAP) += cap
++obj-$(CONFIG_RSBAC_CAP) += cap/
++endif
++
++subdir-$(CONFIG_RSBAC_JAIL) += jail
++obj-$(CONFIG_RSBAC_JAIL) += jail/
++
++subdir-$(CONFIG_RSBAC_PAX) += pax
++obj-$(CONFIG_RSBAC_PAX) += pax/
++
++ifneq ($(CONFIG_RSBAC_MAINT),y)
++subdir-$(CONFIG_RSBAC_RES) += res
++obj-$(CONFIG_RSBAC_RES) += res/
++endif
++
++subdir-$(CONFIG_RSBAC_REG) += reg
++obj-$(CONFIG_RSBAC_REG) += reg/
++ifeq ($(CONFIG_RSBAC_REG_SAMPLES),y)
++subdir-m += reg
++endif
++
+diff --git a/rsbac/adf/acl/Makefile b/rsbac/adf/acl/Makefile
+new file mode 100644
+index 0000000..c37b2e1
+--- /dev/null
++++ b/rsbac/adf/acl/Makefile
+@@ -0,0 +1,14 @@
++#
++# File: rsbac/adf/acl/Makefile
++#
++# Makefile for the Linux rsbac acl decision module.
++#
++# Author and (c) 1999-2012 Amon Ott <ao@rsbac.org>
++#
++
++obj-y := acl_syscalls.o
++# decisions only in non-maint mode
++ifneq ($(CONFIG_RSBAC_MAINT),y)
++obj-y += acl_main.o
++endif
++
+diff --git a/rsbac/adf/acl/acl_main.c b/rsbac/adf/acl/acl_main.c
+new file mode 100644
+index 0000000..7d9f3a7
+--- /dev/null
++++ b/rsbac/adf/acl/acl_main.c
+@@ -0,0 +1,511 @@
++/**************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - Access Control Lists (ACL) */
++/* File: rsbac/adf/acl/acl_main.c */
++/* */
++/* Author and (c) 1999-2009: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 16/Nov/2009 */
++/**************************************************** */
++
++#include <linux/string.h>
++#include <rsbac/aci.h>
++#include <rsbac/acl.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/adf_syshelpers.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/debug.h>
++#include <rsbac/lists.h>
++
++/************************************************* */
++/* Global Variables */
++/************************************************* */
++
++#if defined(CONFIG_RSBAC_ACL_LEARN)
++#ifdef CONFIG_RSBAC_ACL_LEARN_TA
++rsbac_list_ta_number_t acl_learn_ta = CONFIG_RSBAC_ACL_LEARN_TA;
++#else
++rsbac_list_ta_number_t acl_learn_ta = 0;
++#endif
++#endif
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++/* in acl_syscalls.c */
++rsbac_boolean_t rsbac_acl_check_super(enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ rsbac_uid_t user);
++
++rsbac_boolean_t rsbac_acl_check_right(enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ rsbac_uid_t user,
++ rsbac_pid_t caller_pid,
++ enum rsbac_adf_request_t request)
++ {
++ rsbac_boolean_t result = FALSE;
++ int err=0, tmperr;
++ int i;
++ rsbac_acl_group_id_t * group_p;
++ #if defined(CONFIG_RSBAC_RC)
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ #endif
++
++ /* Only check implemented targets */
++ switch(target)
++ {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ case T_DEV:
++ case T_IPC:
++ case T_SCD:
++ case T_USER:
++ case T_PROCESS:
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ case T_GROUP:
++#endif
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ case T_NETDEV:
++#endif
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ case T_NETTEMP_NT:
++ case T_NETTEMP:
++ case T_NETOBJ:
++#endif
++ break;
++ default:
++ return TRUE;
++ }
++ /* inherited own rights */
++ err = rsbac_acl_get_single_right(target,
++ tid,
++ ACLS_USER,
++ (rsbac_acl_subject_id_t) user,
++ request,
++ &result);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_check_right(): rsbac_acl_get_single_right() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ return FALSE;
++ }
++ if(result)
++ return TRUE;
++
++ /* add group and role rights */
++ /* group everyone */
++ err = rsbac_acl_get_single_right(target,
++ tid,
++ ACLS_GROUP,
++ RSBAC_ACL_GROUP_EVERYONE,
++ request,
++ &result);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_check_right(): rsbac_acl_get_single_right() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ return FALSE;
++ }
++ if(result)
++ return TRUE;
++
++ #if defined(CONFIG_RSBAC_RC)
++ /* use process role */
++ /* first get role */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_RC,
++ T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_check_right(): rsbac_get_attr() for process rc_role returned error!\n");
++ }
++ else
++ {
++ err = rsbac_acl_get_single_right(target,
++ tid,
++ ACLS_ROLE,
++ i_attr_val1.rc_role,
++ request,
++ &result);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ get_error_name(tmp,err);
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_check_right(): rsbac_acl_get_single_right() returned error %s!\n",
++ tmp);
++ rsbac_kfree(tmp);
++ }
++ return FALSE;
++ }
++ if(result)
++ return TRUE;
++ }
++ #endif
++
++ /* other groups */
++ /* first get user groups */
++ group_p = NULL;
++ err = rsbac_acl_get_user_groups(0, user, &group_p, NULL);
++ if(err<0)
++ {
++ char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_check_right(): rsbac_acl_get_user_groups() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ return err;
++ }
++ for(i=0; i<err; i++)
++ {
++ tmperr = rsbac_acl_get_single_right(target,
++ tid,
++ ACLS_GROUP,
++ group_p[i],
++ request,
++ &result);
++ if(tmperr)
++ {
++ char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_check_right(): rsbac_acl_get_single_right() returned error %s!\n",
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ if(group_p)
++ rsbac_kfree(group_p);
++ return FALSE;
++ }
++ if(result)
++ {
++ if(group_p)
++ rsbac_kfree(group_p);
++ return TRUE;
++ }
++ }
++ if(group_p)
++ rsbac_kfree(group_p);
++
++ /* SUPERVISOR? */
++#ifdef CONFIG_RSBAC_ACL_LEARN
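++ /* Learning mode: if SUPERVISOR is not granted either and the request
++ * concerns an FD target while fd learning is active, grant the right
++ * and record a matching USER ACL entry, optionally inside the
++ * configured learning transaction. */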
++ result = rsbac_acl_check_super(target, tid, user);
++ if( !result
++ && (request < R_NONE)
++ )
++ {
++ switch(target)
++ {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if(rsbac_acl_learn_fd)
++ {
++ char * tmp;
++ enum rsbac_acl_subject_type_t subj_type;
++ rsbac_acl_subject_id_t subj_id;
++ rsbac_acl_rights_vector_t rights;
++ rsbac_time_t ttl;
++
++#ifdef CONFIG_RSBAC_ACL_LEARN_TA
++ if (!rsbac_list_ta_exist(acl_learn_ta))
++ rsbac_list_ta_begin(CONFIG_RSBAC_LIST_TRANS_MAX_TTL,
++ &acl_learn_ta,
++ RSBAC_ALL_USERS,
++ RSBAC_ACL_LEARN_TA_NAME,
++ NULL);
++#endif
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if(tmp)
++ {
++ char * target_type_name;
++
++ target_type_name = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if(target_type_name)
++ {
++ char * target_id_name;
++
++ #ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ target_id_name
++ = rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++ #else
++ target_id_name = rsbac_kmalloc(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++ #endif
++ if(target_id_name)
++ {
++ get_request_name(tmp,request);
++ get_target_name(target_type_name, target, target_id_name, tid);
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_check_right(): auto_learn_fd: granting right %s for user %u to target_type %s, tid %s, transaction %u!\n",
++ tmp,
++ user,
++ target_type_name,
++ target_id_name,
++ acl_learn_ta);
++ rsbac_kfree(target_id_name);
++ }
++ rsbac_kfree(target_type_name);
++ }
++ }
++ subj_type = ACLS_USER;
++ subj_id = user;
++ rights = RSBAC_REQUEST_VECTOR(request);
++ ttl = 0;
++ err = rsbac_acl_add_to_acl_entry(acl_learn_ta, target, tid, subj_type, subj_id, rights, ttl);
++ if(tmp)
++ {
++ if(err)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_check_right(): rsbac_acl_add_to_acl_entry() returned error %s!\n",
++ get_error_name(tmp,err));
++ }
++ rsbac_kfree(tmp);
++ }
++ result = TRUE;
++ }
++ break;
++
++ default:
++ break;
++ }
++ }
++ return result;
++#else
++ return rsbac_acl_check_super(target, tid, user);
++#endif
++ }
++
++rsbac_boolean_t rsbac_acl_check_forward(enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ rsbac_uid_t user,
++ rsbac_acl_rights_vector_t rights)
++ {
++ rsbac_acl_rights_vector_t i_rights = 0;
++ rsbac_acl_rights_vector_t i_rvec = ((rsbac_acl_rights_vector_t) 1 << ACLR_FORWARD) | rights;
++ int err=0;
++
++
++ /* Only check implemented targets */
++ switch(target)
++ {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ case T_DEV:
++ case T_IPC:
++ case T_SCD:
++ case T_USER:
++ case T_PROCESS:
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ case T_GROUP:
++#endif
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ case T_NETDEV:
++#endif
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ case T_NETTEMP_NT:
++ case T_NETTEMP:
++ case T_NETOBJ:
++#endif
++ break;
++ default:
++ return TRUE;
++ }
++ /* get effective rights */
++ err = rsbac_acl_sys_get_rights(0, target, tid, ACLS_USER, (rsbac_acl_subject_id_t) user, &i_rights, TRUE);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_check_forward(): rsbac_acl_sys_get_rights() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ return FALSE;
++ }
++ if((i_rights & i_rvec) == i_rvec)
++ return TRUE;
++ else
++ return FALSE;
++ }
++
++/************************************************* */
++/* Externally visible functions */
++/************************************************* */
++
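++/* Central ACL decision function: READ/MODIFY_ATTRIBUTE and SWITCH_MODULE */
++/* get per-attribute resp. per-switch-target handling, all other requests */
++/* are mapped to an ACL right check on their target (T_NONE is treated as */
++/* the SCD "other" object). */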
++inline enum rsbac_adf_req_ret_t
++ rsbac_adf_request_acl (enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ switch (request)
++ {
++ case R_READ_ATTRIBUTE:
++ case R_MODIFY_ATTRIBUTE:
++ switch(attr)
++ { /* owner must be changed by a separate request to prevent inconsistency */
++ case A_owner:
++ if(request == R_READ_ATTRIBUTE)
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++
++ /* Only protect AUTH, if asked to by configuration */
++ #ifdef CONFIG_RSBAC_ACL_AUTH_PROT
++ case A_auth_may_setuid:
++ case A_auth_may_set_cap:
++ case A_auth_start_uid:
++ case A_auth_start_euid:
++ case A_auth_start_gid:
++ case A_auth_start_egid:
++ case A_auth_learn:
++ case A_auth_add_f_cap:
++ case A_auth_remove_f_cap:
++ tid.scd = AST_auth_administration;
++ if (rsbac_acl_check_right(T_SCD, tid, owner, caller_pid, request))
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++ #endif
++
++ #ifdef CONFIG_RSBAC_ACL_GEN_PROT
++ case A_pseudo:
++ case A_log_array_low:
++ case A_log_array_high:
++ case A_log_program_based:
++ case A_log_user_based:
++ case A_symlink_add_remote_ip:
++ case A_symlink_add_uid:
++ case A_symlink_add_rc_role:
++ case A_linux_dac_disable:
++ case A_fake_root_uid:
++ case A_audit_uid:
++ case A_auid_exempt:
++ case A_remote_ip:
++ case A_vset:
++ case A_program_file:
++ if (!rsbac_acl_check_right(target, tid, owner, caller_pid, request))
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++ #endif
++
++ #ifdef CONFIG_RSBAC_ACL_LEARN
++ case A_acl_learn:
++ /* check supervisor on target */
++ if(rsbac_acl_check_super(target,
++ tid,
++ owner))
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++ #endif
++
++ /* A_none: all attributes at once, e.g. when the target is removed */
++ case A_none:
++ if (!rsbac_acl_check_right(target, tid, owner, caller_pid, request))
++ return NOT_GRANTED;
++ #ifdef CONFIG_RSBAC_ACL_AUTH_PROT
++ tid.scd = AST_auth_administration;
++ if (!rsbac_acl_check_right(T_SCD, tid, owner, caller_pid, request))
++ return NOT_GRANTED;
++ #endif
++ return GRANTED;
++
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_SWITCH_MODULE:
++ switch(target)
++ {
++ case T_NONE:
++ if( (attr_val.switch_target != SW_ACL)
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ && (attr_val.switch_target != SW_SOFTMODE)
++ #endif
++ #ifdef CONFIG_RSBAC_FREEZE
++ && (attr_val.switch_target != SW_FREEZE)
++ #endif
++ #ifdef CONFIG_RSBAC_ACL_AUTH_PROT
++ && (attr_val.switch_target != SW_AUTH)
++ #endif
++ )
++ return DO_NOT_CARE;
++
++ tid.scd = ST_other;
++ if (rsbac_acl_check_right(T_SCD, tid, owner, caller_pid, request))
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++/*********************/
++ default:
++ if(target == T_NONE)
++ {
++ target = T_SCD;
++ tid.scd = ST_other;
++ }
++ if (rsbac_acl_check_right(target, tid, owner, caller_pid, request))
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++ }
++ } /* end of rsbac_adf_request_acl() */
++
++/* end of rsbac/adf/acl/main.c */
+diff --git a/rsbac/adf/acl/acl_syscalls.c b/rsbac/adf/acl/acl_syscalls.c
+new file mode 100644
+index 0000000..33bc9a1
+--- /dev/null
++++ b/rsbac/adf/acl/acl_syscalls.c
+@@ -0,0 +1,1738 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - ACL module */
++/* File: rsbac/adf/acl/acl_syscalls.c */
++/* */
++/* Author and (c) 1999-2012: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 07/May/2012 */
++/*************************************************** */
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/errno.h>
++#include <rsbac/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/error.h>
++#include <rsbac/acl.h>
++#include <rsbac/getname.h>
++#include <rsbac/acl_getname.h>
++#include <rsbac/helpers.h>
++#include <rsbac/debug.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/adf_main.h>
++#ifdef CONFIG_RSBAC_NET_OBJ
++#include <net/sock.h>
++#endif
++
++/************************************************* */
++/* Global Variables */
++/************************************************* */
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
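++/* rsbac_acl_check_super(): TRUE, if the user has the SUPERVISOR right on */
++/* the target - checked as direct user entry, group Everyone, the process */
++/* RC role (if RC is compiled in) and finally all ACL groups of the user. */
++/* Targets the ACL module does not cover are always allowed. */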
++rsbac_boolean_t rsbac_acl_check_super(enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ rsbac_uid_t user)
++ {
++ rsbac_boolean_t i_result = FALSE;
++ int err=0, tmperr;
++ int i;
++ rsbac_acl_group_id_t * group_p;
++ #if defined(CONFIG_RSBAC_RC)
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ #endif
++
++ /* Only check implemented targets */
++ switch(target)
++ {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_DEV:
++ case T_IPC:
++ case T_SCD:
++ case T_USER:
++ case T_PROCESS:
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ case T_GROUP:
++#endif
++ case T_NETDEV:
++ case T_NETTEMP_NT:
++ case T_NETTEMP:
++ case T_NETOBJ:
++ break;
++ default:
++ return TRUE;
++ }
++ /* own right */
++ err = rsbac_acl_get_single_right(target,
++ tid,
++ ACLS_USER,
++ (rsbac_acl_subject_id_t) user,
++ ACLR_SUPERVISOR,
++ &i_result);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_check_super(): rsbac_acl_get_single_right() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ return FALSE;
++ }
++ if(i_result)
++ return(TRUE);
++
++ /* try SUPERVISOR for group and role */
++ /* group everyone */
++ err = rsbac_acl_get_single_right(target,
++ tid,
++ ACLS_GROUP,
++ RSBAC_ACL_GROUP_EVERYONE,
++ ACLR_SUPERVISOR,
++ &i_result);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_check_super(): rsbac_acl_get_single_right() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ return FALSE;
++ }
++ if(i_result)
++ return(TRUE);
++
++ #if defined(CONFIG_RSBAC_RC)
++ /* use process role */
++ /* first get role */
++ i_tid.process = task_pid(current);
++ if (rsbac_get_attr(SW_RC,
++ T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_check_super(): rsbac_get_attr() for process rc_role returned error!\n");
++ }
++ else
++ {
++ err = rsbac_acl_get_single_right(target,
++ tid,
++ ACLS_ROLE,
++ i_attr_val1.rc_role,
++ ACLR_SUPERVISOR,
++ &i_result);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ get_error_name(tmp,err);
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_check_super(): rsbac_acl_get_single_right() returned error %s!\n",
++ tmp);
++ rsbac_kfree(tmp);
++ }
++ return FALSE;
++ }
++ if(i_result)
++ return(TRUE);
++ }
++ #endif
++
++ /* other groups */
++ /* first get user groups */
++ group_p = NULL;
++ err = rsbac_acl_get_user_groups(0, user, &group_p, NULL);
++ if(err<0)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_check_super(): rsbac_acl_get_user_groups() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ return FALSE; /* treat group lookup failure as no SUPERVISOR right */
++ }
++ for(i=0; i<err; i++)
++ {
++ tmperr = rsbac_acl_get_single_right(target,
++ tid,
++ ACLS_GROUP,
++ group_p[i],
++ ACLR_SUPERVISOR,
++ &i_result);
++ if(tmperr)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_check_super(): rsbac_acl_get_single_right() returned error %s!\n",
++ get_error_name(tmp,tmperr));
++ rsbac_kfree(tmp);
++ }
++ if(group_p)
++ rsbac_kfree(group_p);
++ return FALSE;
++ }
++ if(i_result)
++ {
++ if(group_p)
++ rsbac_kfree(group_p);
++ return(TRUE);
++ }
++ }
++ if(group_p)
++ rsbac_kfree(group_p);
++
++ /* give up */
++ return FALSE;
++ };
++
++
++#if !defined(CONFIG_RSBAC_MAINT)
++rsbac_boolean_t rsbac_acl_check_forward(enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ rsbac_uid_t user,
++ rsbac_acl_rights_vector_t rights);
++
++rsbac_boolean_t rsbac_acl_check_super(enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ rsbac_uid_t user);
++
++rsbac_boolean_t rsbac_acl_check_right(enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ rsbac_uid_t user,
++ rsbac_pid_t caller_pid,
++ enum rsbac_adf_request_t request);
++#endif
++
++/************************************************* */
++/* Externally visible functions */
++/************************************************* */
++
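++/* Set a complete ACL entry. Like the other syscall backends below, this */
++/* requires ACCESS_CONTROL on the target (or FORWARD, when the entry ttl */
++/* is kept), plus SUPERVISOR whenever the SUPERVISOR right is involved. */
++/* Denials are logged and only enforced outside softmode and maint mode. */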
++int rsbac_acl_sys_set_acl_entry(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id,
++ rsbac_acl_rights_vector_t rights,
++ rsbac_time_t ttl)
++ {
++ int err=0;
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ /* sanity check before using pointer */
++ if( (target == T_NETOBJ)
++ && tid.netobj.sock_p
++ && ( tid.netobj.remote_addr
++ || !tid.netobj.sock_p->file
++ || !tid.netobj.sock_p->file->f_dentry
++ || !tid.netobj.sock_p->file->f_dentry->d_inode
++ || (SOCKET_I(tid.netobj.sock_p->file->f_dentry->d_inode) != tid.netobj.sock_p)
++ )
++ )
++ return -RSBAC_EINVALIDTARGET;
++#endif
++
++/* check only in non-maint mode */
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++ if(rsbac_switch_acl)
++#endif
++ {
++ rsbac_uid_t user;
++
++ if(rsbac_get_owner(&user))
++ return -RSBAC_EREADFAILED;
++ /* first try access control right (SUPERVISOR try is included) */
++ if(!rsbac_acl_check_right(target, tid, user, task_pid(current), ACLR_ACCESS_CONTROL))
++ {
++ /* no access control -> try forward for these rights */
++ /* but only, if no ttl requested */
++ if( (ttl != RSBAC_LIST_TTL_KEEP)
++ || !rsbac_acl_check_forward(target, tid, user, rights)
++ )
++ {
++ char * rights_string = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ char * subject_type_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ char * target_type_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ #ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ char * target_id_name
++ = rsbac_kmalloc_unlocked(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++ #else
++ char * target_id_name = rsbac_kmalloc_unlocked(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++ #endif
++
++ u64tostracl(rights_string, rights);
++ get_acl_subject_type_name(subject_type_name, subj_type);
++ get_target_name(target_type_name, target, target_id_name, tid);
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_sys_set_acl_entry(): setting rights %s for %s %u to %s %s denied for user %u!\n",
++ rights_string,
++ subject_type_name,
++ subj_id,
++ target_type_name,
++ target_id_name,
++ user);
++ rsbac_kfree(rights_string);
++ rsbac_kfree(subject_type_name);
++ rsbac_kfree(target_type_name);
++ rsbac_kfree(target_id_name);
++
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_ACL]
++ #endif
++ )
++ #endif
++ return(-EPERM);
++ }
++ }
++ if(rights & RSBAC_ACL_SUPERVISOR_RIGHT_VECTOR)
++ {
++ /* you must have SUPERVISOR to set SUPERVISOR */
++ if(!rsbac_acl_check_super(target, tid, user))
++ {
++ char * subject_type_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ char * target_type_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ #ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ char * target_id_name
++ = rsbac_kmalloc_unlocked(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++ #else
++ char * target_id_name = rsbac_kmalloc_unlocked(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++ #endif
++
++ get_acl_subject_type_name(subject_type_name, subj_type);
++ get_target_name(target_type_name, target, target_id_name, tid);
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_sys_set_acl_entry(): setting SUPERVISOR for %s %u to %s %s denied for user %u!\n",
++ subject_type_name,
++ subj_id,
++ target_type_name,
++ target_id_name,
++ user);
++ rsbac_kfree(subject_type_name);
++ rsbac_kfree(target_type_name);
++ rsbac_kfree(target_id_name);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_ACL]
++ #endif
++ )
++ #endif
++ return(-EPERM);
++ }
++ }
++ }
++#endif /* !MAINT */
++
++ /* OK, check passed. Set ACL. */
++ err = rsbac_acl_set_acl_entry(ta_number, target, tid, subj_type, subj_id, rights, ttl);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_sys_set_acl_entry(): rsbac_acl_set_acl_entry() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ }
++ return err;
++ }
++
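++/* Remove a complete ACL entry: requires ACCESS_CONTROL, and additionally */
++/* SUPERVISOR if the entry to be removed carries the SUPERVISOR right. */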
++int rsbac_acl_sys_remove_acl_entry(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id)
++ {
++ int err=0;
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ /* sanity check before using pointer */
++ if( (target == T_NETOBJ)
++ && tid.netobj.sock_p
++ && ( tid.netobj.remote_addr
++ || !tid.netobj.sock_p->file
++ || !tid.netobj.sock_p->file->f_dentry
++ || !tid.netobj.sock_p->file->f_dentry->d_inode
++ || (SOCKET_I(tid.netobj.sock_p->file->f_dentry->d_inode) != tid.netobj.sock_p)
++ )
++ )
++ return -RSBAC_EINVALIDTARGET;
++#endif
++
++/* check only in non-maint mode */
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++ if(rsbac_switch_acl)
++#endif
++ {
++ rsbac_uid_t user;
++ rsbac_acl_rights_vector_t res_rights = 0;
++
++ if(rsbac_get_owner(&user))
++ return -RSBAC_EREADFAILED;
++ /* first try access control right (SUPERVISOR is included) */
++ if(!rsbac_acl_check_right(target, tid, user, task_pid(current), ACLR_ACCESS_CONTROL))
++ {
++ char * subject_type_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ char * target_type_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ #ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ char * target_id_name
++ = rsbac_kmalloc_unlocked(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++ #else
++ char * target_id_name = rsbac_kmalloc_unlocked(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++ #endif
++
++ get_acl_subject_type_name(subject_type_name, subj_type);
++ get_target_name(target_type_name, target, target_id_name, tid);
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_sys_remove_acl_entry(): removing ACL entry for %s %u at %s %s denied for user %u!\n",
++ subject_type_name,
++ subj_id,
++ target_type_name,
++ target_id_name,
++ user);
++ rsbac_kfree(subject_type_name);
++ rsbac_kfree(target_type_name);
++ rsbac_kfree(target_id_name);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_ACL]
++ #endif
++ )
++ #endif
++ return(-EPERM);
++ }
++
++ err = rsbac_acl_get_rights(0, target, tid, subj_type, subj_id, &res_rights, FALSE);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_sys_remove_acl_entry(): rsbac_acl_get_rights() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ return err;
++ }
++ if(res_rights & RSBAC_ACL_SUPERVISOR_RIGHT_VECTOR)
++ {
++ /* you must have SUPERVISOR to remove an entry with SUPERVISOR */
++ if(!rsbac_acl_check_super(target, tid, user))
++ {
++ char * subject_type_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ char * target_type_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ #ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ char * target_id_name
++ = rsbac_kmalloc_unlocked(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++ #else
++ char * target_id_name = rsbac_kmalloc_unlocked(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++ #endif
++
++ get_acl_subject_type_name(subject_type_name, subj_type);
++ get_target_name(target_type_name, target, target_id_name, tid);
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_sys_remove_acl_entry(): removing ACL entry with SUPERVISOR for %s %u at %s %s denied for user %u!\n",
++ subject_type_name,
++ subj_id,
++ target_type_name,
++ target_id_name,
++ user);
++ rsbac_kfree(subject_type_name);
++ rsbac_kfree(target_type_name);
++ rsbac_kfree(target_id_name);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_ACL]
++ #endif
++ )
++ #endif
++ return(-EPERM);
++ }
++ }
++ }
++#endif /* !MAINT */
++
++ /* OK, check passed. Remove ACL entry. */
++ err = rsbac_acl_remove_acl_entry(ta_number, target, tid, subj_type, subj_id);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_sys_remove_acl_entry(): rsbac_acl_remove_acl_entry() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ }
++ return err;
++ }
++
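++/* Remove the whole ACL of a target: only allowed with SUPERVISOR. */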
++int rsbac_acl_sys_remove_acl(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid)
++ {
++ int err=0;
++
++/* check only in non-maint mode */
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++ if(rsbac_switch_acl)
++#endif
++ {
++ rsbac_uid_t user;
++
++ if(rsbac_get_owner(&user))
++ return -RSBAC_EREADFAILED;
++ /* check SUPERVISOR */
++ if(!rsbac_acl_check_super(target, tid, user))
++ {
++ char * target_type_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ #ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ char * target_id_name
++ = rsbac_kmalloc_unlocked(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++ #else
++ char * target_id_name = rsbac_kmalloc_unlocked(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++ #endif
++
++ get_target_name(target_type_name, target, target_id_name, tid);
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_sys_remove_acl(): removing ACL from %s %s denied for user %u!\n",
++ target_type_name,
++ target_id_name,
++ user);
++ rsbac_kfree(target_type_name);
++ rsbac_kfree(target_id_name);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_ACL]
++ #endif
++ )
++ #endif
++ return(-EPERM);
++ }
++ }
++#endif /* !MAINT */
++
++ /* OK, check passed. Remove ACL. */
++ err = rsbac_acl_remove_acl(ta_number, target, tid);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_sys_remove_acl(): rsbac_acl_remove_acl() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ }
++ return err;
++ }
++
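++/* Add rights to an ACL entry: same checks as setting a complete entry */
++/* (ACCESS_CONTROL or FORWARD without ttl change, SUPERVISOR for the */
++/* SUPERVISOR right). */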
++int rsbac_acl_sys_add_to_acl_entry(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id,
++ rsbac_acl_rights_vector_t rights,
++ rsbac_time_t ttl)
++ {
++ int err=0;
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ /* sanity check before using pointer */
++ if( (target == T_NETOBJ)
++ && tid.netobj.sock_p
++ && ( tid.netobj.remote_addr
++ || !tid.netobj.sock_p->file
++ || !tid.netobj.sock_p->file->f_dentry
++ || !tid.netobj.sock_p->file->f_dentry->d_inode
++ || (SOCKET_I(tid.netobj.sock_p->file->f_dentry->d_inode) != tid.netobj.sock_p)
++ )
++ )
++ return -RSBAC_EINVALIDTARGET;
++#endif
++
++/* check only in non-maint mode */
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++ if(rsbac_switch_acl)
++#endif
++ {
++ rsbac_uid_t user;
++
++ if(rsbac_get_owner(&user))
++ return -RSBAC_EREADFAILED;
++ /* first try access control right (SUPERVISOR is included) */
++ if(!rsbac_acl_check_right(target, tid, user, task_pid(current), ACLR_ACCESS_CONTROL))
++ {
++ /* no access control -> try forward for these rights */
++ /* but only, if no ttl requested */
++ if( (ttl != RSBAC_LIST_TTL_KEEP)
++ || !rsbac_acl_check_forward(target, tid, user, rights)
++ )
++ {
++ char * rights_string = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ char * subject_type_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ char * target_type_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ #ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ char * target_id_name
++ = rsbac_kmalloc_unlocked(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++ #else
++ char * target_id_name = rsbac_kmalloc_unlocked(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++ #endif
++
++ u64tostracl(rights_string, rights);
++ get_acl_subject_type_name(subject_type_name, subj_type);
++ get_target_name(target_type_name, target, target_id_name, tid);
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_sys_add_to_acl_entry(): adding rights %s for %s %u to %s %s denied for user %u!\n",
++ rights_string,
++ subject_type_name,
++ subj_id,
++ target_type_name,
++ target_id_name,
++ user);
++ rsbac_kfree(rights_string);
++ rsbac_kfree(subject_type_name);
++ rsbac_kfree(target_type_name);
++ rsbac_kfree(target_id_name);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_ACL]
++ #endif
++ )
++ #endif
++ return(-EPERM);
++ }
++ }
++ if(rights & RSBAC_ACL_SUPERVISOR_RIGHT_VECTOR)
++ {
++ /* you must have SUPERVISOR to add SUPERVISOR */
++ if(!rsbac_acl_check_super(target, tid, user))
++ {
++ char * subject_type_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ char * target_type_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ #ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ char * target_id_name
++ = rsbac_kmalloc_unlocked(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++ #else
++ char * target_id_name = rsbac_kmalloc_unlocked(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++ #endif
++
++ get_acl_subject_type_name(subject_type_name, subj_type);
++ get_target_name(target_type_name, target, target_id_name, tid);
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_sys_add_to_acl_entry(): adding SUPERVISOR for %s %u to %s %s denied for user %u!\n",
++ subject_type_name,
++ subj_id,
++ target_type_name,
++ target_id_name,
++ user);
++ rsbac_kfree(subject_type_name);
++ rsbac_kfree(target_type_name);
++ rsbac_kfree(target_id_name);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_ACL]
++ #endif
++ )
++ #endif
++ return(-EPERM);
++ }
++ }
++ }
++#endif /* !MAINT */
++
++ /* OK, check passed. Add rights to ACL entry. */
++ err = rsbac_acl_add_to_acl_entry(ta_number, target, tid, subj_type, subj_id, rights, ttl);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_sys_add_to_acl_entry(): rsbac_acl_add_to_acl_entry() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ }
++ return err;
++ }
++
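++/* Remove single rights from an ACL entry: requires ACCESS_CONTROL, and */
++/* SUPERVISOR when the SUPERVISOR right itself is to be revoked. */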
++int rsbac_acl_sys_remove_from_acl_entry(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id,
++ rsbac_acl_rights_vector_t rights)
++ {
++ int err=0;
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ /* sanity check before using pointer */
++ if( (target == T_NETOBJ)
++ && tid.netobj.sock_p
++ && ( tid.netobj.remote_addr
++ || !tid.netobj.sock_p->file
++ || !tid.netobj.sock_p->file->f_dentry
++ || !tid.netobj.sock_p->file->f_dentry->d_inode
++ || (SOCKET_I(tid.netobj.sock_p->file->f_dentry->d_inode) != tid.netobj.sock_p)
++ )
++ )
++ return -RSBAC_EINVALIDTARGET;
++#endif
++
++/* check only in non-maint mode */
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++ if(rsbac_switch_acl)
++#endif
++ {
++ rsbac_uid_t user;
++
++ if(rsbac_get_owner(&user))
++ return -RSBAC_EREADFAILED;
++ /* first try access control right (SUPERVISOR is included) */
++ if(!rsbac_acl_check_right(target, tid, user, task_pid(current), ACLR_ACCESS_CONTROL))
++ {
++ char * rights_string = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ char * subject_type_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ char * target_type_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ #ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ char * target_id_name
++ = rsbac_kmalloc_unlocked(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++ #else
++ char * target_id_name = rsbac_kmalloc_unlocked(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++ #endif
++
++ u64tostracl(rights_string, rights);
++ get_acl_subject_type_name(subject_type_name, subj_type);
++ get_target_name(target_type_name, target, target_id_name, tid);
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_sys_remove_from_acl_entry(): removing rights %s for %s %u to %s %s denied for user %u!\n",
++ rights_string,
++ subject_type_name,
++ subj_id,
++ target_type_name,
++ target_id_name,
++ user);
++ rsbac_kfree(rights_string);
++ rsbac_kfree(subject_type_name);
++ rsbac_kfree(target_type_name);
++ rsbac_kfree(target_id_name);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_ACL]
++ #endif
++ )
++ #endif
++ return(-EPERM);
++ }
++ if(rights & RSBAC_ACL_SUPERVISOR_RIGHT_VECTOR)
++ {
++ /* you must have SUPERVISOR to revoke SUPERVISOR */
++ if(!rsbac_acl_check_super(target, tid, user))
++ {
++ char * subject_type_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ char * target_type_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ #ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ char * target_id_name
++ = rsbac_kmalloc_unlocked(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++ #else
++ char * target_id_name = rsbac_kmalloc_unlocked(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++ #endif
++
++ get_acl_subject_type_name(subject_type_name, subj_type);
++ get_target_name(target_type_name, target, target_id_name, tid);
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_sys_remove_from_acl_entry(): removing SUPERVISOR for %s %u to %s %s denied for user %u!\n",
++ subject_type_name,
++ subj_id,
++ target_type_name,
++ target_id_name,
++ user);
++ rsbac_kfree(subject_type_name);
++ rsbac_kfree(target_type_name);
++ rsbac_kfree(target_id_name);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_ACL]
++ #endif
++ )
++ #endif
++ return(-EPERM);
++ }
++ }
++ }
++#endif /* !MAINT */
++
++ /* OK, check passed. Remove rights from ACL entry. */
++ err = rsbac_acl_remove_from_acl_entry(ta_number, target, tid, subj_type, subj_id, rights);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_sys_remove_from_acl_entry(): rsbac_acl_remove_from_acl_entry() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ }
++ return err;
++ }
++
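++/* Set the inheritance mask of a target: requires ACCESS_CONTROL. The */
++/* SUPERVISOR bit can only be masked out with ACL_SUPER_FILTER and a */
++/* direct SUPERVISOR user entry, otherwise it is forced back into the */
++/* mask. */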
++int rsbac_acl_sys_set_mask(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ rsbac_acl_rights_vector_t mask)
++ {
++ int err=0;
++
++/* check only in non-maint mode */
++#if !defined(CONFIG_RSBAC_MAINT) || defined (CONFIG_RSBAC_ACL_SUPER_FILTER)
++ rsbac_uid_t user;
++
++ if(rsbac_get_owner(&user))
++ return -RSBAC_EREADFAILED;
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ /* sanity check before using pointer */
++ if( (target == T_NETOBJ)
++ && tid.netobj.sock_p
++ && ( tid.netobj.remote_addr
++ || !tid.netobj.sock_p->file
++ || !tid.netobj.sock_p->file->f_dentry
++ || !tid.netobj.sock_p->file->f_dentry->d_inode
++ || (SOCKET_I(tid.netobj.sock_p->file->f_dentry->d_inode) != tid.netobj.sock_p)
++ )
++ )
++ return -RSBAC_EINVALIDTARGET;
++#endif
++
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++ if(rsbac_switch_acl)
++#endif
++ {
++ /* first try access control right (SUPERVISOR is included) */
++ if(!rsbac_acl_check_right(target, tid, user, task_pid(current), ACLR_ACCESS_CONTROL))
++ {
++ char * rights_string = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ char * target_type_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ #ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ char * target_id_name
++ = rsbac_kmalloc_unlocked(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++ #else
++ char * target_id_name = rsbac_kmalloc_unlocked(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++ #endif
++
++ u64tostracl(rights_string, mask);
++ get_target_name(target_type_name, target, target_id_name, tid);
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_sys_set_mask(): setting mask %s for %s %s denied for user %u!\n",
++ rights_string,
++ target_type_name,
++ target_id_name,
++ user);
++ rsbac_kfree(rights_string);
++ rsbac_kfree(target_type_name);
++ rsbac_kfree(target_id_name);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_ACL]
++ #endif
++ )
++ #endif
++ return(-EPERM);
++ }
++ }
++#endif /* !MAINT */
++
++#ifdef CONFIG_RSBAC_ACL_SUPER_FILTER
++ if(!(mask & RSBAC_ACL_SUPERVISOR_RIGHT_VECTOR))
++ { /* attempt to mask out SUPERVISOR */
++ rsbac_acl_rights_vector_t res_rights = 0;
++
++ /* you must have direct SUPERVISOR as a USER to set a mask without SUPERVISOR */
++ /* get direct own rights (still uses default_fd_rights) */
++ err = rsbac_acl_get_rights(0, target, tid, ACLS_USER, user, &res_rights, FALSE);
++ if(err)
++ return -RSBAC_EREADFAILED;
++ if(!(res_rights & RSBAC_ACL_SUPERVISOR_RIGHT_VECTOR))
++ mask |= RSBAC_ACL_SUPERVISOR_RIGHT_VECTOR;
++ }
++#else
++ /* SUPERVISOR must never be masked out */
++ mask |= RSBAC_ACL_SUPERVISOR_RIGHT_VECTOR;
++#endif
++
++ /* OK, checks passed. Set mask. */
++ err = rsbac_acl_set_mask(ta_number, target, tid, mask);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_sys_set_mask(): rsbac_acl_set_mask() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ }
++ return err;
++ }
++
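++/* Remove all ACL data kept for a user id: requires the DELETE right on */
++/* the USER target. */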
++int rsbac_acl_sys_remove_user(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t uid)
++ {
++ int err=0;
++
++/* check only in non-maint mode */
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++ if(rsbac_switch_acl)
++#endif
++ {
++ rsbac_uid_t user;
++ union rsbac_target_id_t tid;
++
++ if(rsbac_get_owner(&user))
++ return -RSBAC_EREADFAILED;
++ tid.user = uid;
++ /* need the DELETE right on the user target (SUPERVISOR is included) */
++ if(!rsbac_acl_check_right(T_USER, tid, user, task_pid(current), R_DELETE))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_sys_remove_user(): removing all data for user %u denied for user %u!\n",
++ uid,
++ user);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_ACL]
++ #endif
++ )
++ #endif
++ return(-EPERM);
++ }
++ }
++#endif /* !MAINT */
++
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_sys_remove_user(): removing all data for user %u!\n",
++ uid);
++ /* OK, checks passed. Remove user data. */
++ err = rsbac_acl_remove_user(ta_number, uid);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_sys_remove_user(): rsbac_acl_remove_user() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ }
++ return err;
++ }
++
++/*********/
++
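++/* Read the inheritance mask of a target: no access check. */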
++int rsbac_acl_sys_get_mask(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ rsbac_acl_rights_vector_t * mask_p)
++ {
++ int err=0;
++
++/* no check */
++
++ /* OK, check passed. Get mask. */
++ err = rsbac_acl_get_mask(ta_number, target, tid, mask_p);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_sys_get_mask(): rsbac_acl_get_mask() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ }
++ return err;
++ }
++
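++/* Query rights without access checks. With effective set, inherited */
++/* rights are returned; for USER subjects these are combined with group */
++/* Everyone, all groups of the user, the RC role (if compiled in) and a */
++/* SUPERVISOR lookup. Otherwise only the direct entry is returned. */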
++int rsbac_acl_sys_get_rights(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id,
++ rsbac_acl_rights_vector_t * rights_p,
++ rsbac_boolean_t effective)
++ {
++ int err=0;
++ rsbac_acl_rights_vector_t res_rights;
++ #if defined(CONFIG_RSBAC_RC)
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ #endif
++
++ /* no check (Attention: rsbac_acl_check_forward depends on this to be allowed!) */
++
++ if( (subj_type == ACLS_USER)
++ && (subj_id == RSBAC_NO_USER)
++ )
++ rsbac_get_owner((rsbac_uid_t *) &subj_id);
++ /* OK, check passed. Call ACL. */
++ if(effective)
++ {
++ /* inherited own rights */
++ res_rights = 0;
++ err = rsbac_acl_get_rights(ta_number, target, tid, subj_type, subj_id, &res_rights, TRUE);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_sys_get_rights(): rsbac_acl_get_rights() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ return err;
++ }
++ *rights_p = res_rights;
++ /* add group and role rights, if normal user */
++ if(subj_type == ACLS_USER)
++ {
++ rsbac_acl_group_id_t * group_p;
++ int i;
++ int tmperr;
++
++ /* group everyone */
++ res_rights = 0;
++ err = rsbac_acl_get_rights(ta_number, target, tid,
++ ACLS_GROUP, RSBAC_ACL_GROUP_EVERYONE,
++ &res_rights, TRUE);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_sys_get_rights(): rsbac_acl_get_rights() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ return err;
++ }
++ *rights_p |= res_rights;
++
++ /* other groups */
++ /* first get user groups */
++ group_p = NULL;
++ err = rsbac_acl_get_user_groups(ta_number, subj_id, &group_p, NULL);
++ if(err<0)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_sys_get_rights(): rsbac_acl_get_user_groups() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ return err;
++ }
++ for(i=0; i<err; i++)
++ {
++ res_rights = 0;
++ tmperr = rsbac_acl_get_rights(ta_number, target, tid, ACLS_GROUP, group_p[i],
++ &res_rights, TRUE);
++ if(tmperr)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_sys_get_rights(): rsbac_acl_get_rights() returned error %s!\n",
++ get_error_name(tmp,tmperr));
++ rsbac_kfree(tmp);
++ }
++ if(group_p)
++ rsbac_kfree(group_p);
++ return tmperr;
++ }
++ *rights_p |= res_rights;
++ }
++ err = 0;
++ if(group_p)
++ rsbac_kfree(group_p);
++
++ #if defined(CONFIG_RSBAC_RC)
++ /* use user role */
++ /* first get role */
++ i_tid.user = subj_id;
++ if (rsbac_get_attr(SW_RC,
++ T_USER,
++ i_tid,
++ A_rc_def_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_sys_get_rights(): rsbac_get_attr() for process rc_role returned error!\n");
++ }
++ else
++ {
++ res_rights = 0;
++ err = rsbac_acl_get_rights(ta_number, target, tid,
++ ACLS_ROLE, i_attr_val1.rc_role,
++ &res_rights, TRUE);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ get_error_name(tmp,err);
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_sys_get_rights(): rsbac_acl_get_rights() returned error %s!\n",
++ tmp);
++ rsbac_kfree(tmp);
++ }
++ return err;
++ }
++ *rights_p |= res_rights;
++ }
++ #endif
++
++ /* check for SUPERVISOR right, if not yet there */
++ if( !(*rights_p & RSBAC_ACL_SUPERVISOR_RIGHT_VECTOR)
++ && rsbac_acl_check_super(target, tid, subj_id)
++ )
++ *rights_p |= RSBAC_ACL_SUPERVISOR_RIGHT_VECTOR;
++ }
++ else /* not ACLS_USER */
++ {
++ if(!(*rights_p & RSBAC_ACL_SUPERVISOR_RIGHT_VECTOR))
++ {
++ rsbac_boolean_t i_result = FALSE;
++
++ /* check for SUPERVISOR right */
++ /* own right */
++ err = rsbac_acl_get_single_right(target,
++ tid,
++ subj_type,
++ subj_id,
++ ACLR_SUPERVISOR,
++ &i_result);
++ if(err)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_sys_get_rights(): rsbac_acl_get_right() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ }
++ else
++ if(i_result)
++ *rights_p |= RSBAC_ACL_SUPERVISOR_RIGHT_VECTOR;
++ }
++ }
++ }
++ else /* not effective = direct */
++ {
++ /* direct own rights (still uses default_fd_rights) */
++ res_rights = 0;
++ err = rsbac_acl_get_rights(ta_number, target, tid, subj_type, subj_id, &res_rights, FALSE);
++ if(!err)
++ *rights_p = res_rights;
++ }
++ return err;
++ }
++
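++/* List all ACL entries of a target: no access check, a missing ACL is */
++/* reported as an empty list. */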
++int rsbac_acl_sys_get_tlist(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ struct rsbac_acl_entry_t ** entry_pp,
++ rsbac_time_t ** ttl_pp)
++ {
++ int err=0;
++
++ /* no check */
++
++ /* OK, check passed. Call ACL. */
++ err = rsbac_acl_get_tlist(ta_number, target, tid, entry_pp, ttl_pp);
++ if(err == -RSBAC_ENOTFOUND)
++ err = 0;
++ else
++ if(err<0)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_acl_sys_get_tlist(): rsbac_acl_get_tlist() returned error %s!\n",
++ get_error_name(tmp,err));
++ rsbac_kfree(tmp);
++ }
++ }
++ return err;
++ }
++
++/*********** Groups ***********/
++
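++/* Group management multiplexer: add, change, remove and list groups and */
++/* maintain memberships. Outside maint mode, changes are restricted to */
++/* the group owner; global groups are visible to everyone. */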
++int rsbac_acl_sys_group(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_acl_group_syscall_type_t call,
++ union rsbac_acl_group_syscall_arg_t arg)
++ {
++ int err = -RSBAC_EINVALIDREQUEST;
++ char * k_name;
++ rsbac_acl_group_id_t k_group;
++ struct rsbac_acl_group_entry_t entry;
++ rsbac_uid_t caller;
++
++ if(call >= ACLGS_none)
++ return -RSBAC_EINVALIDREQUEST;
++ if(rsbac_get_owner(&caller))
++ return -RSBAC_EREADFAILED;
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_aef_acl)
++ {
++ char * tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_acl_sys_group(): %s called\n",
++ get_acl_group_syscall_name(tmp,call));
++ rsbac_kfree(tmp);
++ }
++ }
++#endif
++
++ switch(call)
++ {
++ case ACLGS_add_group:
++ if(arg.add_group.type >= ACLG_NONE)
++ {
++ err = -RSBAC_EINVALIDVALUE;
++ break;
++ }
++ k_name = rsbac_getname(arg.add_group.name);
++ if(!k_name)
++ {
++ err = -RSBAC_EINVALIDVALUE;
++ break;
++ }
++ err = rsbac_get_user(&k_group, arg.add_group.group_id_p, sizeof(k_group));
++ if(err)
++ break;
++ err = rsbac_acl_add_group(ta_number,
++ caller,
++ arg.add_group.type,
++ k_name,
++ &k_group);
++ rsbac_putname(k_name);
++ if(!err)
++ err = rsbac_put_user(&k_group, arg.add_group.group_id_p, sizeof(k_group));
++ break;
++
++ case ACLGS_change_group:
++ if(arg.change_group.type >= ACLG_NONE)
++ {
++ err = -RSBAC_EINVALIDVALUE;
++ break;
++ }
++ err = rsbac_acl_get_group_entry(ta_number, arg.change_group.id, &entry);
++ if(err)
++ break;
++ /* check owner only, if non-maint */
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++ if(rsbac_switch_acl)
++#endif
++ {
++ if(entry.owner != caller)
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_group(): changing group %u denied for user %u - not owner!\n",
++ entry.id,
++ caller);
++ err = -EPERM;
++ break;
++ }
++ }
++#endif /* !MAINT */
++ {
++ char * k_name;
++
++ k_name = rsbac_getname(arg.change_group.name);
++ if(k_name)
++ {
++ err = rsbac_acl_change_group(ta_number,
++ arg.change_group.id,
++ arg.change_group.owner,
++ arg.change_group.type,
++ k_name);
++ rsbac_putname(k_name);
++ }
++ else
++ err = -RSBAC_EINVALIDVALUE;
++ }
++ break;
++
++ case ACLGS_remove_group:
++ err = rsbac_acl_get_group_entry(ta_number, arg.remove_group.id, &entry);
++ if(err)
++ break;
++ /* check owner only, if non-maint */
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++ if(rsbac_switch_acl)
++#endif
++ {
++ if(entry.owner != caller)
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_group(): removing group %u denied for user %u - not owner!\n",
++ entry.id,
++ caller);
++ err = -EPERM;
++ break;
++ }
++ }
++#endif /* !MAINT */
++ err = rsbac_acl_remove_group(ta_number, arg.remove_group.id);
++ break;
++
++ case ACLGS_get_group_entry:
++ if(!arg.get_group_entry.entry_p)
++ {
++ err = -RSBAC_EINVALIDPOINTER;
++ break;
++ }
++ if(!arg.get_group_entry.id)
++ { /* Everyone -> fill by hand */
++ entry.id=0;
++ entry.owner=RSBAC_NO_USER;
++ entry.type=ACLG_GLOBAL;
++ strcpy(entry.name, "Everyone");
++ err=0;
++ }
++ else
++ {
++ err = rsbac_acl_get_group_entry(ta_number,
++ arg.get_group_entry.id,
++ &entry);
++ }
++ if(!err)
++ {
++ if( (entry.owner != caller)
++ &&(entry.type != ACLG_GLOBAL)
++ )
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_group(): getting group entry %u denied for user %u - neither owner nor global!\n",
++ entry.id,
++ caller);
++ err = -EPERM;
++ }
++ else
++ err = rsbac_put_user(&entry, arg.get_group_entry.entry_p, sizeof(entry));
++ }
++ break;
++
++ case ACLGS_list_groups:
++ if(arg.list_groups.maxnum <= 0)
++ {
++ err = -RSBAC_EINVALIDVALUE;
++ break;
++ }
++ if(!arg.list_groups.group_entry_array)
++ {
++ err = -RSBAC_EINVALIDPOINTER;
++ break;
++ }
++ {
++ struct rsbac_acl_group_entry_t * entry_p;
++ int tmperr=0;
++
++ if(arg.list_groups.include_global)
++ {
++ struct rsbac_acl_group_entry_t entry_0;
++
++ entry_0.id=0;
++ entry_0.owner=RSBAC_NO_USER;
++ entry_0.type=ACLG_GLOBAL;
++ strcpy(entry_0.name, "Everyone");
++ tmperr = rsbac_put_user(&entry_0,
++ arg.list_groups.group_entry_array,
++ sizeof(entry_0));
++ if(tmperr)
++ {
++ err = tmperr;
++ break;
++ }
++ else
++ err = 1;
++ arg.list_groups.maxnum--;
++ arg.list_groups.group_entry_array++;
++ }
++ else
++ err = 0;
++
++ if(arg.list_groups.maxnum)
++ {
++ long count;
++
++ count = rsbac_acl_list_groups(ta_number,
++ caller,
++ arg.list_groups.include_global,
++ &entry_p);
++ if(count>0)
++ {
++ if(count > arg.list_groups.maxnum)
++ count = arg.list_groups.maxnum;
++ err+=count;
++ tmperr = rsbac_put_user(entry_p,
++ arg.list_groups.group_entry_array,
++ count * sizeof(*entry_p));
++ if(tmperr)
++ err=tmperr;
++ rsbac_kfree(entry_p);
++ }
++ else
++ if(count < 0)
++ err=count;
++ }
++ }
++ break;
++
++ case ACLGS_add_member:
++ /* check owner only, if non-maint */
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++ if(rsbac_switch_acl)
++#endif
++ {
++ err = rsbac_acl_get_group_entry(ta_number, arg.add_member.group, &entry);
++ if(err)
++ break;
++ if(entry.owner != caller)
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_group(): adding group member to group %u denied for user %u - not owner!\n",
++ entry.id,
++ caller);
++ err = -EPERM;
++ break;
++ }
++ }
++#endif /* !MAINT */
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(arg.add_member.user) == RSBAC_UM_VIRTUAL_KEEP)
++ arg.add_member.user = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(arg.add_member.user));
++ else
++ if (RSBAC_UID_SET(arg.add_member.user) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ arg.add_member.user = RSBAC_UID_NUM(arg.add_member.user);
++#endif
++ err = rsbac_acl_add_group_member(ta_number,
++ arg.add_member.group,
++ arg.add_member.user,
++ arg.add_member.ttl);
++ break;
++
++ case ACLGS_remove_member:
++ /* check owner only, if non-maint */
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++ if(rsbac_switch_acl)
++#endif
++ {
++ err = rsbac_acl_get_group_entry(ta_number, arg.remove_member.group, &entry);
++ if(err)
++ break;
++ if(entry.owner != caller)
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_group(): removing group member from group %u denied for user %u - not owner!\n",
++ entry.id,
++ caller);
++ err = -EPERM;
++ break;
++ }
++ }
++#endif /* !MAINT */
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(arg.remove_member.user) == RSBAC_UM_VIRTUAL_KEEP)
++ arg.remove_member.user = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(arg.remove_member.user));
++ else
++ if (RSBAC_UID_SET(arg.remove_member.user) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ arg.remove_member.user = RSBAC_UID_NUM(arg.remove_member.user);
++#endif
++ err = rsbac_acl_remove_group_member(ta_number, arg.remove_member.group, arg.remove_member.user);
++ break;
++
++ case ACLGS_get_user_groups:
++ {
++ rsbac_acl_group_id_t * group_p = NULL;
++ rsbac_time_t * ttl_p = NULL;
++
++ if(arg.get_user_groups.maxnum <= 0)
++ {
++ err = -RSBAC_EINVALIDVALUE;
++ break;
++ }
++ if(!arg.get_user_groups.group_array)
++ {
++ err = -RSBAC_EINVALIDPOINTER;
++ break;
++ }
++ if(arg.get_user_groups.user == RSBAC_NO_USER)
++ arg.get_user_groups.user = caller;
++#if !defined(CONFIG_RSBAC_MAINT)
++ else
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++ if(rsbac_switch_acl)
++#endif
++ {
++ if(arg.get_user_groups.user != caller)
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_group(): getting user groups for user %u denied for user %u!\n",
++ arg.get_user_groups.user,
++ caller);
++ err = -EPERM;
++ break;
++ }
++ }
++#endif /* !MAINT */
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(arg.get_user_groups.user) == RSBAC_UM_VIRTUAL_KEEP)
++ arg.get_user_groups.user = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(arg.get_user_groups.user));
++ else
++ if (RSBAC_UID_SET(arg.get_user_groups.user) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ arg.get_user_groups.user = RSBAC_UID_NUM(arg.get_user_groups.user);
++#endif
++ err = rsbac_acl_get_user_groups(ta_number, arg.get_user_groups.user, &group_p, &ttl_p);
++ if(err>0)
++ {
++ int tmperr;
++
++ err = rsbac_min(err, arg.get_user_groups.maxnum);
++ tmperr = rsbac_put_user(group_p,
++ arg.get_user_groups.group_array,
++ err * sizeof(*group_p));
++ if(tmperr)
++ err=tmperr;
++ if(arg.get_user_groups.ttl_array)
++ {
++ tmperr = rsbac_put_user(ttl_p,
++ arg.get_user_groups.ttl_array,
++ err * sizeof(*ttl_p));
++ if(tmperr)
++ err=tmperr;
++ }
++ }
++ if(group_p)
++ rsbac_kfree(group_p);
++ if(ttl_p)
++ rsbac_kfree(ttl_p);
++ break;
++ }
++
++ case ACLGS_get_group_members:
++ if( (arg.get_group_members.maxnum <= 0)
++ || !arg.get_group_members.group
++ )
++ {
++ err = -RSBAC_EINVALIDVALUE;
++ break;
++ }
++ if(arg.get_group_members.maxnum > RSBAC_ACL_MAX_MAXNUM)
++ arg.get_group_members.maxnum = RSBAC_ACL_MAX_MAXNUM;
++ if(!arg.get_group_members.user_array)
++ {
++ err = -RSBAC_EINVALIDPOINTER;
++ break;
++ }
++ err = rsbac_acl_get_group_entry(ta_number,
++ arg.get_group_members.group,
++ &entry);
++ if(err)
++ break;
++ if( (entry.owner != caller)
++ &&(entry.type != ACLG_GLOBAL)
++ )
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_acl_group(): getting group members of group %u denied for user %u - neither owner nor global!\n",
++ entry.id,
++ caller);
++ err = -EPERM;
++ break;
++ }
++ {
++ rsbac_uid_t * user_array;
++ rsbac_time_t * ttl_array;
++
++ user_array = rsbac_kmalloc_unlocked(sizeof(*user_array) * arg.get_group_members.maxnum);
++ if(!user_array)
++ return -RSBAC_ENOMEM;
++ ttl_array = rsbac_kmalloc_unlocked(sizeof(*ttl_array) * arg.get_group_members.maxnum);
++ if(!ttl_array)
++ {
++ rsbac_kfree(user_array);
++ return -RSBAC_ENOMEM;
++ }
++
++ err = rsbac_acl_get_group_members(ta_number,
++ arg.get_group_members.group,
++ user_array,
++ ttl_array,
++ arg.get_group_members.maxnum);
++ if(err>0)
++ {
++ int tmperr;
++
++ tmperr = rsbac_put_user(user_array,
++ arg.get_group_members.user_array,
++ err * sizeof(*user_array));
++ if(tmperr)
++ err=tmperr;
++ if(arg.get_group_members.ttl_array)
++ {
++ tmperr = rsbac_put_user(ttl_array,
++ arg.get_group_members.ttl_array,
++ err * sizeof(*ttl_array));
++ if(tmperr)
++ err=tmperr;
++ }
++ }
++ rsbac_kfree(user_array);
++ rsbac_kfree(ttl_array);
++ }
++ break;
++
++ default:
++ break;
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( ( rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ || rsbac_ind_softmode[SW_ACL]
++ #endif
++ )
++ && (err == -EPERM)
++ )
++ return 0;
++ else
++ #endif
++ return err;
++ }
++/* end of rsbac/adf/acl/acl_syscalls.c */
+diff --git a/rsbac/adf/adf_check.c b/rsbac/adf/adf_check.c
+new file mode 100644
+index 0000000..84fd7d8
+--- /dev/null
++++ b/rsbac/adf/adf_check.c
+@@ -0,0 +1,1026 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - check for well defined requests */
++/* File: rsbac/adf/adf_check.c */
++/* */
++/* Author and (c) 1999-2010: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 19/May/2010 */
++/*************************************************** */
++
++#include <linux/string.h>
++#include <rsbac/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++
++/************************************************* */
++/* Global Variables */
++/************************************************* */
++
++/************************************************* */
++/* Externally visible functions */
++/************************************************* */
++
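++/* Validity check table: returns DO_NOT_CARE for every request/target */
++/* combination defined for the ADF interface and UNDEFINED otherwise. */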
++enum rsbac_adf_req_ret_t
++rsbac_adf_request_check(enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t *tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *attr_val_p,
++ rsbac_uid_t owner)
++{
++ switch (request) {
++ case R_SEARCH:
++ switch (target) {
++ case T_DIR:
++ case T_FILE:
++ case T_SYMLINK:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_DEV:
++ case T_NETOBJ:
++#if defined(CONFIG_RSBAC_UM)
++ case T_USER:
++ case T_GROUP:
++#endif
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_CLOSE: /* notification only, for cleaning up tables of opened objects */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_DEV:
++ case T_IPC:
++ case T_NETOBJ:
++ return DO_NOT_CARE;
++ default:
++ return UNDEFINED;
++ }
++
++ case R_GET_STATUS_DATA:
++ switch (target) {
++ case T_PROCESS:
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ case T_DEV:
++ case T_IPC:
++ case T_SCD:
++ case T_NETDEV:
++ case T_NETOBJ:
++#if defined(CONFIG_RSBAC_UM)
++ case T_USER:
++ case T_GROUP:
++#endif
++ return DO_NOT_CARE;
++ default:
++ return UNDEFINED;
++ }
++
++ case R_READ:
++ switch (target) {
++ case T_DIR:
++#ifdef CONFIG_RSBAC_RW
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_DEV:
++ case T_IPC:
++#endif
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ case T_NETTEMP:
++#endif
++#if defined(CONFIG_RSBAC_NET_OBJ_RW)
++ case T_NETOBJ:
++#endif
++#if defined(CONFIG_RSBAC_UM)
++ case T_USER:
++ case T_GROUP:
++#endif
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_GET_PERMISSIONS_DATA:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ case T_IPC:
++ case T_SCD:
++ case T_DEV:
++ case T_NETOBJ:
++#if defined(CONFIG_RSBAC_UM)
++ case T_USER:
++ case T_GROUP:
++#endif
++ return DO_NOT_CARE;
++ default:
++ return UNDEFINED;
++ };
++
++ case R_MAP_EXEC:
++ switch (target) {
++ case T_FILE:
++ case T_NONE:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_SEND:
++ switch (target) {
++ case T_DEV:
++ case T_UNIXSOCK:
++ case T_IPC:
++ case T_PROCESS:
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ case T_NETOBJ:
++#endif
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_RECEIVE:
++ switch (target) {
++ case T_UNIXSOCK:
++ case T_IPC:
++ case T_PROCESS:
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ case T_NETOBJ:
++#endif
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_LISTEN:
++ case R_ACCEPT:
++ case R_CONNECT:
++ case R_NET_SHUTDOWN:
++ switch (target) {
++ case T_UNIXSOCK:
++ case T_IPC:
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ case T_NETOBJ:
++#endif
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_EXECUTE:
++ switch (target) {
++ case T_FILE:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_READ_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_FIFO:
++ case T_IPC:
++ case T_DEV:
++ case T_UNIXSOCK:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_WRITE:
++ switch (target) {
++ case T_DIR:
++ case T_UNIXSOCK:
++ case T_SCD:
++ case T_IPC:
++#ifdef CONFIG_RSBAC_RW
++ case T_FILE:
++ case T_FIFO:
++ case T_DEV:
++#endif
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ case T_NETTEMP:
++#endif
++#if defined(CONFIG_RSBAC_NET_OBJ_RW)
++ case T_NETOBJ:
++#endif
++#if defined(CONFIG_RSBAC_UM)
++ case T_USER:
++ case T_GROUP:
++#endif
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_APPEND_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_FIFO:
++ case T_DEV:
++ case T_UNIXSOCK:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_READ_WRITE_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_FIFO:
++ case T_IPC:
++ case T_DEV:
++ case T_UNIXSOCK:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_WRITE_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_FIFO:
++ case T_DEV:
++ case T_UNIXSOCK:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_IOCTL:
++ switch (target) {
++ case T_UNIXSOCK:
++ case T_IPC:
++ case T_DEV:
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ case T_NETOBJ:
++#endif
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_ADD_TO_KERNEL:
++ switch (target) {
++ case T_FILE:
++ case T_DEV:
++ case T_NONE:
++ return DO_NOT_CARE;
++ default:
++ return UNDEFINED;
++ }
++
++ case R_ALTER:
++ /* only for IPC */
++ if (target == T_IPC)
++ return DO_NOT_CARE;
++ else
++ /* all other targets are undefined */
++ return UNDEFINED;
++ break;
++
++ case R_CHANGE_GROUP:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ case T_IPC:
++ case T_PROCESS:
++ case T_NONE:
++#if defined(CONFIG_RSBAC_UM)
++ case T_USER:
++#endif
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++#ifdef CONFIG_RSBAC_DAC_GROUP
++ case R_CHANGE_DAC_EFF_GROUP:
++ case R_CHANGE_DAC_FS_GROUP:
++ switch (target) {
++ case T_PROCESS:
++ /* there must be a new group specified */
++ if (attr == A_group)
++ return DO_NOT_CARE;
++ /* fall through */
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++#endif
++
++ case R_CHANGE_OWNER:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ case T_IPC:
++ return DO_NOT_CARE;
++ case T_PROCESS:
++ /* there must be a new owner specified */
++ if (attr == A_owner)
++ return DO_NOT_CARE;
++ else
++ return UNDEFINED;
++ /* all other cases are undefined */
++#ifdef CONFIG_RSBAC_USER_CHOWN
++ case T_USER:
++ /* there must be a new owner specified */
++ if (attr == A_process)
++ return DO_NOT_CARE;
++ else
++ return UNDEFINED;
++ /* all other cases are undefined */
++#endif
++ default:
++ return UNDEFINED;
++ }
++
++#ifdef CONFIG_RSBAC_DAC_OWNER
++ case R_CHANGE_DAC_EFF_OWNER:
++ case R_CHANGE_DAC_FS_OWNER:
++ switch (target) {
++ case T_PROCESS:
++ /* there must be a new owner specified */
++ if (attr == A_owner)
++ return DO_NOT_CARE;
++ /* fall through */
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++#endif
++
++ case R_CHDIR:
++ switch (target) {
++ case T_DIR:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_CLONE:
++ if (target == T_PROCESS)
++ return DO_NOT_CARE;
++ else
++ return UNDEFINED;
++
++ case R_CREATE:
++ switch (target) {
++ /* Creating dir or (pseudo) file IN target dir! */
++ case T_DIR:
++ case T_IPC:
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ case T_NETTEMP:
++ case T_NETOBJ:
++#endif
++#if defined(CONFIG_RSBAC_UM)
++ case T_USER:
++ case T_GROUP:
++#endif
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_DELETE:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ case T_IPC:
++#if defined(CONFIG_RSBAC_UM)
++ case T_USER:
++ case T_GROUP:
++#endif
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ case T_NETTEMP:
++ case T_NETOBJ:
++#endif
++ return DO_NOT_CARE;
++ default:
++ return UNDEFINED;
++ }
++
++ case R_LINK_HARD:
++ switch (target) {
++ case T_FILE:
++ case T_FIFO:
++ case T_SYMLINK:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_MODIFY_ACCESS_DATA:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_AUTHENTICATE:
++ switch (target) {
++ case T_USER:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_MODIFY_ATTRIBUTE:
++ return DO_NOT_CARE;
++
++ case R_MODIFY_PERMISSIONS_DATA:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ case T_IPC:
++ case T_SCD:
++ case T_DEV:
++ case T_NETOBJ:
++#if defined(CONFIG_RSBAC_UM)
++ case T_USER:
++ case T_GROUP:
++#endif
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE
++ case T_NONE:
++#endif
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_MODIFY_SYSTEM_DATA:
++ switch (target) {
++ case T_UNIXSOCK:
++ case T_IPC:
++ case T_SCD:
++ case T_DEV:
++ case T_NETDEV:
++ case T_PROCESS:
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ case T_NETOBJ:
++#endif
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_MOUNT:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_DEV:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_READ_ATTRIBUTE:
++ return DO_NOT_CARE;
++
++ case R_REMOVE_FROM_KERNEL:
++ switch (target) {
++ case T_FILE:
++ case T_DEV:
++ case T_NONE:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_RENAME:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++#if defined(CONFIG_RSBAC_UM)
++ case T_USER:
++ case T_GROUP:
++#endif
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_SEND_SIGNAL:
++ switch (target) {
++ case T_PROCESS:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_SHUTDOWN:
++ switch (target) {
++ case T_NONE:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++
++ case R_SWITCH_LOG:
++ switch (target) {
++ case T_NONE:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_SWITCH_MODULE:
++ switch (target) {
++ case T_NONE:
++ /* there must be a switch target specified */
++ if (attr == A_switch_target)
++ return DO_NOT_CARE;
++ /* fall through */
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ /* notify only, handled by adf-dispatcher */
++ case R_TERMINATE:
++ if (target == T_PROCESS)
++ return DO_NOT_CARE;
++ else
++ return UNDEFINED;
++
++ case R_TRACE:
++ switch (target) {
++ case T_PROCESS:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_TRUNCATE:
++ switch (target) {
++ case T_FILE:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_UMOUNT:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_DEV:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++
++ case R_BIND:
++ switch (target) {
++ case T_IPC:
++ return DO_NOT_CARE;
++#if defined(CONFIG_RSBAC_NET_DEV)
++ case T_NETDEV:
++ return DO_NOT_CARE;
++#endif
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ case T_NETOBJ:
++ return DO_NOT_CARE;
++#endif
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++ case R_LOCK:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ case T_IPC:
++ return DO_NOT_CARE;
++ /* all other cases are undefined */
++ default:
++ return UNDEFINED;
++ }
++
++/*********************/
++ default:
++ return UNDEFINED;
++ }
++
++ return UNDEFINED;
++} /* end of rsbac_adf_request_check() */
++
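++/*
++ * Editor's illustration (not part of the original RSBAC patch): the
++ * request check above only validates request/target combinations for
++ * debugging, so a sketch of a call from the dispatcher could look like
++ * the following; caller_pid, tid, attr_val and owner are assumed to be
++ * the values already available there.
++ *
++ *	enum rsbac_adf_req_ret_t check;
++ *
++ *	check = rsbac_adf_request_check(R_TRUNCATE, caller_pid, T_FILE,
++ *					&tid, A_none, &attr_val, owner);
++ *	// DO_NOT_CARE: TRUNCATE on a FILE is a defined combination
++ *
++ *	check = rsbac_adf_request_check(R_TRUNCATE, caller_pid, T_DIR,
++ *					&tid, A_none, &attr_val, owner);
++ *	// UNDEFINED: TRUNCATE on a DIR indicates a dispatcher bug
++ */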
++
++/*****************************************************************************/
++/* If the request returned granted and the operation is performed, */
++/* the following function can be called by the AEF to get all aci set */
++/* correctly. For write accesses that are performed fully within the kernel, */
++/* this is usually skipped to avoid extra calls, including R_CLOSE for */
++/* clean-up. Because of this, the write boundary is not adjusted - there */
++/* is no user-level writing anyway... */
++/* The second instance of target specification is the new target, if one has */
++/* been created, otherwise its values are ignored. */
++/* On success, 0 is returned, and an error from rsbac/error.h otherwise. */
++
++int rsbac_adf_set_attr_check(enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_target_t new_target,
++ union rsbac_target_id_t new_tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++{
++ switch (request) {
++ case R_CLOSE: /* only notifying for clean-up of opened-tables */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_DEV:
++ case T_IPC:
++ case T_NETOBJ:
++ return 0;
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ };
++
++ case R_APPEND_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_DEV:
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ case R_CHANGE_OWNER:
++ switch (target) {
++ /* Changing process owner affects access decisions, */
++ /* so attributes have to be adjusted. */
++ case T_PROCESS:
++ /* there must be a new owner specified */
++ if (attr != A_owner)
++ return -RSBAC_EINVALIDATTR;
++ /* fall through */
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ case T_IPC:
++ case T_NONE:
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++#ifdef CONFIG_RSBAC_DAC_OWNER
++ case R_CHANGE_DAC_EFF_OWNER:
++ case R_CHANGE_DAC_FS_OWNER:
++ switch (target) {
++ /* Changing process owner affects access decisions, */
++ /* so attributes have to be adjusted. */
++ case T_PROCESS:
++ /* there must be a new owner specified */
++ if (attr != A_owner)
++ return -RSBAC_EINVALIDATTR;
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++#endif
++
++ case R_CHDIR:
++ switch (target) {
++ case T_DIR:
++ return 0;
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ };
++
++ case R_CLONE:
++ if (target == T_PROCESS)
++ return 0;
++ else
++ return -RSBAC_EINVALIDTARGET;
++
++ case R_CREATE:
++ switch (target) {
++ /* Creating dir or (pseudo) file IN target dir! */
++ case T_DIR:
++ case T_IPC:
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ case T_NETOBJ:
++#endif
++#if defined(CONFIG_RSBAC_UM)
++ case T_USER:
++ case T_GROUP:
++#endif
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ /* removal of targets is done in main adf dispatcher! */
++ case R_DELETE:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ case T_IPC:
++#if defined(CONFIG_RSBAC_UM)
++ case T_USER:
++ case T_GROUP:
++#endif
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ case R_EXECUTE:
++ switch (target) {
++ case T_FILE:
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ case R_SEND:
++ case R_RECEIVE:
++ switch (target) {
++ case T_UNIXSOCK:
++ case T_IPC:
++ case T_PROCESS:
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ case T_NETOBJ:
++#endif
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ case R_BIND:
++ case R_LISTEN:
++ case R_ACCEPT:
++ case R_CONNECT:
++ case R_NET_SHUTDOWN:
++ switch (target) {
++ case T_UNIXSOCK:
++ case T_IPC:
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ case T_NETOBJ:
++#endif
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ case R_MODIFY_SYSTEM_DATA:
++ switch (target) {
++ case T_SCD:
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ case R_MOUNT:
++ switch (target) {
++ case T_DIR:
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ case R_READ:
++ switch (target) {
++ case T_DIR:
++#ifdef CONFIG_RSBAC_RW
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_DEV:
++ case T_IPC:
++#endif
++#if defined(CONFIG_RSBAC_NET_OBJ_RW) || defined(CONFIG_RSBAC_MS_SOCK)
++ case T_NETOBJ:
++#endif
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ case R_READ_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_IPC:
++ case T_DEV:
++ case T_UNIXSOCK:
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ case R_READ_WRITE_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_FIFO:
++ case T_IPC:
++ case T_DEV:
++ case T_UNIXSOCK:
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ case R_RENAME:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ case R_SEARCH:
++ switch (target) {
++ case T_DIR:
++ case T_FILE:
++ case T_SYMLINK:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_DEV:
++ case T_NETOBJ:
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ case R_SHUTDOWN:
++ switch (target) {
++ case T_NETOBJ:
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++#endif
++
++ case R_TRACE:
++ switch (target) {
++ case T_PROCESS:
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ case R_TRUNCATE:
++ switch (target) {
++ case T_FILE:
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++#ifdef CONFIG_RSBAC_RW
++ case R_WRITE:
++ switch (target) {
++ case T_FILE:
++ case T_FIFO:
++ case T_DEV:
++ case T_UNIXSOCK:
++ case T_IPC:
++ case T_NETOBJ:
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++#endif
++
++ case R_WRITE_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_FIFO:
++ case T_DEV:
++ case T_UNIXSOCK:
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ case R_MAP_EXEC:
++ switch (target) {
++ case T_FILE:
++ case T_NONE:
++ return 0;
++ /* all other cases are undefined */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ return -RSBAC_EINVALIDTARGET;
++}
+diff --git a/rsbac/adf/adf_main.c b/rsbac/adf/adf_main.c
+new file mode 100644
+index 0000000..49e9e23
+--- /dev/null
++++ b/rsbac/adf/adf_main.c
+@@ -0,0 +1,3394 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - Main file main.c */
++/* */
++/* Author and (c) 1999-2011: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 12/Jul/2011 */
++/*************************************************** */
++
++#include <linux/string.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/mount.h>
++#include <rsbac/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/adf.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++#include <rsbac/cap_getname.h>
++#include <rsbac/jail_getname.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/network.h>
++#if defined(CONFIG_RSBAC_UM_EXCL)
++#include <rsbac/um.h>
++#endif
++
++#ifdef CONFIG_RSBAC_NO_DECISION_ON_NETMOUNT
++#include <linux/magic.h>
++#endif
++
++#ifdef CONFIG_RSBAC_SECDEL
++#include <linux/types.h>
++#include <linux/dcache.h>
++#include <asm/uaccess.h>
++#include <linux/buffer_head.h>
++extern void wait_on_retry_sync_kiocb(struct kiocb *iocb);
++#endif /* SECDEL */
++
++/************************************************* */
++/* Global Variables */
++/************************************************* */
++
++__u64 rsbac_adf_request_count[T_NONE+1] = {0,0,0,0,0,0,0,0};
++__u64 rsbac_adf_set_attr_count[T_NONE+1] = {0,0,0,0,0,0,0,0};
++#ifdef CONFIG_RSBAC_XSTATS
++__u64 rsbac_adf_request_xcount[T_NONE+1][R_NONE];
++__u64 rsbac_adf_set_attr_xcount[T_NONE+1][R_NONE];
++#endif
++
++/******* MAC ********/
++#ifdef CONFIG_RSBAC_SWITCH_MAC
++rsbac_boolean_t rsbac_switch_mac = TRUE;
++#endif /* MAC */
++
++/******* PM ********/
++#ifdef CONFIG_RSBAC_SWITCH_PM
++rsbac_boolean_t rsbac_switch_pm = TRUE;
++#endif /* PM */
++
++/******* DAZ ********/
++#ifdef CONFIG_RSBAC_SWITCH_DAZ
++rsbac_boolean_t rsbac_switch_daz = TRUE;
++#endif /* DAZ */
++
++/******* FF ********/
++#ifdef CONFIG_RSBAC_SWITCH_FF
++rsbac_boolean_t rsbac_switch_ff = TRUE;
++#endif /* FF */
++
++/******* RC ********/
++#ifdef CONFIG_RSBAC_SWITCH_RC
++rsbac_boolean_t rsbac_switch_rc = TRUE;
++#endif /* RC */
++
++/****** AUTH *******/
++#ifdef CONFIG_RSBAC_SWITCH_AUTH
++rsbac_boolean_t rsbac_switch_auth = TRUE;
++#endif /* AUTH */
++
++/****** ACL *******/
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++rsbac_boolean_t rsbac_switch_acl = TRUE;
++#endif /* ACL */
++
++/****** CAP *******/
++#ifdef CONFIG_RSBAC_SWITCH_CAP
++rsbac_boolean_t rsbac_switch_cap = TRUE;
++#endif /* CAP */
++
++/****** JAIL *******/
++#ifdef CONFIG_RSBAC_SWITCH_JAIL
++rsbac_boolean_t rsbac_switch_jail = TRUE;
++#endif /* JAIL */
++
++/****** PAX ********/
++#ifdef CONFIG_RSBAC_SWITCH_PAX
++rsbac_boolean_t rsbac_switch_pax = TRUE;
++#endif /* PAX */
++
++/****** RES *******/
++#ifdef CONFIG_RSBAC_SWITCH_RES
++rsbac_boolean_t rsbac_switch_res = TRUE;
++#endif /* RES */
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++/************************************************* */
++/* Externally visible functions */
++/************************************************* */
++
++/* Init function, calls inits for all sub-modules */
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++void rsbac_init_adf(void)
++#else
++void __init rsbac_init_adf(void)
++#endif
++ {
++ #if defined(CONFIG_RSBAC_REG)
++ rsbac_reg_init();
++ #endif
++ }
++
++enum rsbac_adf_req_ret_t
++ adf_and_plus(enum rsbac_adf_req_ret_t res1,
++ enum rsbac_adf_req_ret_t res2)
++ {
++ switch (res1)
++ {
++ case DO_NOT_CARE: return (res2);
++ case GRANTED: if (res2 == DO_NOT_CARE)
++ return (GRANTED);
++ else
++ return (res2);
++ case NOT_GRANTED: if (res2 == UNDEFINED)
++ return (UNDEFINED);
++ else
++ return (NOT_GRANTED);
++ default: return (UNDEFINED);
++ }
++ }
++
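++/*
++ * Editor's illustration (not part of the original RSBAC patch):
++ * adf_and_plus() merges the per-module results so that any NOT_GRANTED
++ * overrides GRANTED, GRANTED overrides DO_NOT_CARE, and UNDEFINED
++ * poisons every other value, e.g.:
++ *
++ *	adf_and_plus(DO_NOT_CARE, GRANTED)     == GRANTED
++ *	adf_and_plus(GRANTED,     NOT_GRANTED) == NOT_GRANTED
++ *	adf_and_plus(NOT_GRANTED, GRANTED)     == NOT_GRANTED
++ *	adf_and_plus(NOT_GRANTED, UNDEFINED)   == UNDEFINED
++ */
++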
++/*
++ * rsbac_adf_request_int()
++ * This function is the main decision function, called through the
++ * rsbac_adf_request wrapper from the AEF.
++ */
++
++EXPORT_SYMBOL(rsbac_adf_request_int);
++enum rsbac_adf_req_ret_t
++ rsbac_adf_request_int(enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t * tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t * attr_val_p,
++ enum rsbac_switch_target_t ignore_module)
++ {
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val;
++ rsbac_uid_t owner=0;
++ int tmperr=0;
++ rsbac_request_vector_t request_vector;
++ enum rsbac_adf_req_ret_t result = DO_NOT_CARE;
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ enum rsbac_adf_req_ret_t ret_result = DO_NOT_CARE;
++#endif
++#ifndef CONFIG_RSBAC_MAINT
++ rsbac_enum_t mod_result[SW_NONE + 1] = {
++ DO_NOT_CARE,
++ DO_NOT_CARE,
++ DO_NOT_CARE,
++ DO_NOT_CARE,
++ DO_NOT_CARE,
++ DO_NOT_CARE,
++ DO_NOT_CARE,
++ DO_NOT_CARE,
++ DO_NOT_CARE,
++ DO_NOT_CARE,
++ DO_NOT_CARE,
++ DO_NOT_CARE,
++ DO_NOT_CARE,
++ DO_NOT_CARE,
++ DO_NOT_CARE,
++ DO_NOT_CARE,
++ DO_NOT_CARE,
++ DO_NOT_CARE
++ };
++#endif
++ rsbac_boolean_t do_log = FALSE;
++ rsbac_boolean_t log_on_request = TRUE;
++/* only if individual logging is enabled */
++#if defined(CONFIG_RSBAC_IND_LOG) || defined(CONFIG_RSBAC_IND_NETDEV_LOG) || defined(CONFIG_RSBAC_IND_NETOBJ_LOG)
++ union rsbac_attribute_value_t i_attr_val2;
++ enum rsbac_log_level_t log_level;
++#endif
++ struct vfsmount * mnt_p;
++#ifdef CONFIG_RSBAC_SOFTMODE
++ rsbac_boolean_t rsbac_internal = FALSE;
++#endif
++
++/* No decision possible before init (called at boot time) -> don't care */
++ if (!rsbac_is_initialized())
++ return DO_NOT_CARE;
++
++/* Always granted for kernel (pid 0) and logging daemon */
++ if ( !pid_nr(caller_pid)
++ #if defined(CONFIG_RSBAC_LOG_REMOTE)
++ || (pid_nr(caller_pid) == pid_nr(rsbaclogd_pid))
++ #endif
++ )
++ return GRANTED;
++
++/* Checking base values */
++ if( request >= R_NONE
++ || target > T_NONE
++ || attr > A_none)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_int(): called with invalid request, target or attribute\n");
++ return NOT_GRANTED;
++ }
++ request_vector = RSBAC_REQUEST_VECTOR(request);
++
++ if (in_interrupt())
++ {
++ char * request_name = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(request_name)
++ {
++ get_request_name(request_name, request);
++ printk(KERN_WARNING "rsbac_adf_request_int(): called from interrupt: request %s, pid %u(%s), attr_val %u!\n",
++ request_name, pid_nr(caller_pid), current->comm, attr_val_p->dummy);
++ rsbac_kfree(request_name);
++ }
++ else
++ {
++ printk(KERN_WARNING "rsbac_adf_request_int(): called from interrupt: request %u, pid %u(%s)!\n",
++ request, pid_nr(caller_pid), current->comm);
++ }
++ dump_stack();
++ return DO_NOT_CARE;
++ }
++
++/* Getting basic information about this request */
++
++ /* only useful for real process, not idle or init */
++ if (pid_nr(caller_pid) > 1)
++ {
++ tmperr = rsbac_get_owner(&owner);
++ if(tmperr)
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_adf_request_int(): caller_pid %i, RSBAC not initialized, returning DO_NOT_CARE\n",
++ pid_nr(caller_pid));
++ return DO_NOT_CARE; /* Startup-Sequence (see above) */
++ }
++ }
++ else /* caller_pid = 1 -> init, always owned by root */
++ {
++ owner = 0;
++ }
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if ((attr == A_owner) && (RSBAC_UID_SET(attr_val_p->owner) > RSBAC_UM_VIRTUAL_MAX))
++ attr_val_p->owner = RSBAC_GEN_UID(RSBAC_UID_SET(owner), attr_val_p->owner);
++ else
++ if ((attr == A_group) && (RSBAC_GID_SET(attr_val_p->group) > RSBAC_UM_VIRTUAL_MAX))
++ attr_val_p->group = RSBAC_GEN_GID(RSBAC_UID_SET(owner), attr_val_p->group);
++#else
++ if (attr == A_owner)
++ attr_val_p->owner = RSBAC_UID_NUM(attr_val_p->owner);
++ else
++ if (attr == A_group)
++ attr_val_p->group = RSBAC_GID_NUM(attr_val_p->group);
++#endif
++
++/******************************************************/
++/* General work for all modules - before module calls */
++ /* test target on rsbac_internal */
++ switch(target)
++ {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++#ifdef CONFIG_RSBAC_NO_DECISION_ON_NETMOUNT
++ if ( ((mnt_p = rsbac_get_vfsmount(tid_p->file.device)))
++ && ( (mnt_p->mnt_sb->s_magic == NFS_SUPER_MAGIC)
++ || (mnt_p->mnt_sb->s_magic == CODA_SUPER_MAGIC)
++ || (mnt_p->mnt_sb->s_magic == NCP_SUPER_MAGIC)
++ || (mnt_p->mnt_sb->s_magic == SMB_SUPER_MAGIC)
++ )
++ )
++ {
++ result = DO_NOT_CARE;
++ goto log;
++ }
++#endif
++ /* No decision on pseudo pipefs */
++ if( (target == T_FIFO)
++ && ((mnt_p = rsbac_get_vfsmount(tid_p->file.device)))
++ && (mnt_p->mnt_sb->s_magic == PIPEFS_MAGIC)
++ )
++ return DO_NOT_CARE;
++
++ switch(request)
++ {
++ case R_GET_STATUS_DATA:
++ case R_GET_PERMISSIONS_DATA:
++ case R_READ_ATTRIBUTE:
++#ifdef CONFIG_RSBAC_DAT_VISIBLE
++ case R_SEARCH:
++ case R_READ:
++ case R_CLOSE:
++ case R_CHDIR:
++#endif
++ break;
++
++ default:
++ if ((tmperr = rsbac_get_attr(SW_GEN,
++ target,
++ *tid_p,
++ A_internal,
++ &i_attr_val,
++ TRUE) ))
++ {
++ if(tmperr == -RSBAC_EINVALIDDEV)
++ {
++// rsbac_ds_get_error_num("rsbac_adf_request()", A_internal, tmperr);
++ return DO_NOT_CARE; /* last calls on shutdown */
++ }
++ else
++ {
++ rsbac_ds_get_error_num("rsbac_adf_request()", A_internal, tmperr);
++ return NOT_GRANTED; /* something weird happened */
++ }
++ }
++ /* no access to rsbac_internal objects is granted in any case */
++ if (i_attr_val.internal)
++ {
++ rsbac_printk(KERN_WARNING
++				"rsbac_adf_request(): attempt to access an object declared RSBAC-internal!\n");
++ result = NOT_GRANTED;
++ #ifndef CONFIG_RSBAC_MAINT
++ mod_result[SW_NONE] = NOT_GRANTED;
++ #endif
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ ret_result = NOT_GRANTED;
++ #endif
++ rsbac_internal = TRUE;
++ #endif
++ }
++ }
++
++#if defined(CONFIG_RSBAC_UM_VIRTUAL_ISOLATE)
++ if (attr == A_vset && (RSBAC_UID_SET(owner))) {
++ result = adf_and_plus(result, NOT_GRANTED);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ ret_result = adf_and_plus(ret_result, NOT_GRANTED);
++#endif
++ }
++#endif
++
++ break;
++
++#if defined(CONFIG_RSBAC_UM_EXCL) || defined(CONFIG_RSBAC_UM_VIRTUAL_ISOLATE)
++ case T_PROCESS:
++#if defined(CONFIG_RSBAC_UM_EXCL)
++ switch(request)
++ {
++ case R_CHANGE_OWNER:
++#ifdef CONFIG_RSBAC_DAC_OWNER
++ case R_CHANGE_DAC_EFF_OWNER:
++ case R_CHANGE_DAC_FS_OWNER:
++#endif
++ if( (attr == A_owner)
++ && !rsbac_um_no_excl
++ && !rsbac_um_user_exists(0, attr_val_p->owner)
++ )
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_adf_request(): uid %u not known to RSBAC User Management!\n",
++ attr_val_p->owner);
++ result = adf_and_plus(result, NOT_GRANTED);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ ret_result = adf_and_plus(ret_result, NOT_GRANTED);
++#endif
++ }
++ break;
++
++ case R_CHANGE_GROUP:
++#ifdef CONFIG_RSBAC_DAC_OWNER
++ case R_CHANGE_DAC_EFF_GROUP:
++ case R_CHANGE_DAC_FS_GROUP:
++#endif
++ if( (attr == A_group)
++ && !rsbac_um_no_excl
++ && !rsbac_um_group_exists(0, attr_val_p->group)
++ )
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_adf_request(): gid %u not known to RSBAC User Management!\n",
++ attr_val_p->group);
++ result = adf_and_plus(result, NOT_GRANTED);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ ret_result = adf_and_plus(ret_result, NOT_GRANTED);
++#endif
++ }
++ break;
++
++ default:
++ break;
++ }
++#endif
++#if defined(CONFIG_RSBAC_UM_VIRTUAL_ISOLATE)
++ if (attr == A_vset && (RSBAC_UID_SET(owner))) {
++ result = adf_and_plus(result, NOT_GRANTED);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ ret_result = adf_and_plus(ret_result, NOT_GRANTED);
++#endif
++ }
++#endif
++ break;
++#endif /* UM_EXCL || UM_VIRTUAL_ISOLATE */
++
++#if defined(CONFIG_RSBAC_UM_VIRTUAL_ISOLATE)
++ case T_USER:
++ if( RSBAC_UID_SET(owner)
++ && (RSBAC_UID_SET(owner) != RSBAC_UID_SET(tid_p->user))
++ ) {
++ if (RSBAC_UID_SET(tid_p->user) == RSBAC_UM_VIRTUAL_ALL)
++ tid_p->user = RSBAC_GEN_UID(RSBAC_UID_SET(owner), tid_p->user);
++ else {
++ result = adf_and_plus(result, NOT_GRANTED);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ ret_result = adf_and_plus(ret_result, NOT_GRANTED);
++#endif
++ }
++ }
++ break;
++ case T_GROUP:
++ if( RSBAC_UID_SET(owner)
++ && (RSBAC_UID_SET(owner) != RSBAC_GID_SET(tid_p->group))
++ ) {
++		  	if (RSBAC_GID_SET(tid_p->group) == RSBAC_UM_VIRTUAL_ALL)
++		  		tid_p->group = RSBAC_GEN_GID(RSBAC_UID_SET(owner), tid_p->group);
++ else {
++ result = adf_and_plus(result, NOT_GRANTED);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ ret_result = adf_and_plus(ret_result, NOT_GRANTED);
++#endif
++ }
++ }
++ break;
++#endif
++
++#ifdef CONFIG_RSBAC_NET_OBJ
++#if defined(CONFIG_RSBAC_IND_NETOBJ_LOG) || defined(CONFIG_RSBAC_MAC) || defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_RC)
++ case T_NETOBJ:
++ if(rsbac_net_remote_request(request)) {
++ tid_p->netobj.local_temp = 0;
++ rsbac_ta_net_lookup_templates(0,
++ &tid_p->
++ netobj, NULL,
++ &tid_p->netobj.remote_temp);
++ } else {
++ tid_p->netobj.remote_temp = 0;
++ rsbac_ta_net_lookup_templates(0,
++ &tid_p->
++ netobj,
++ &tid_p->netobj.local_temp,
++ NULL);
++ }
++#endif
++#endif
++
++ default:
++ break;
++ }
++
++/**********************************************************/
++/* calling all decision modules, building a common result */
++
++#ifdef CONFIG_RSBAC_DEBUG
++/* first, check for valid request/target combination */
++/* (undefined should only happen in _check and means a real bug!) */
++ result = adf_and_plus(result,rsbac_adf_request_check(request,
++ caller_pid,
++ target,
++ tid_p,
++ attr,
++ attr_val_p,
++ owner) );
++#endif
++
++#if !defined(CONFIG_RSBAC_MAINT)
++/******* MAC ********/
++#if defined(CONFIG_RSBAC_MAC)
++#ifdef CONFIG_RSBAC_SWITCH_MAC
++if (rsbac_switch_mac)
++#endif
++ /* no need to call module, if to be ignored */
++ if(ignore_module != SW_MAC && (request_vector & RSBAC_MAC_REQUEST_VECTOR)) {
++ mod_result[SW_MAC] = rsbac_adf_request_mac(request,
++ caller_pid,
++ target,
++ *tid_p,
++ attr,
++ *attr_val_p,
++ owner);
++ result = adf_and_plus(result, mod_result[SW_MAC]);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(!rsbac_ind_softmode[SW_MAC])
++ ret_result = adf_and_plus(ret_result, mod_result[SW_MAC]);
++#endif
++ }
++#endif /* MAC */
++
++/******* PM ********/
++#if defined(CONFIG_RSBAC_PM)
++#ifdef CONFIG_RSBAC_SWITCH_PM
++if (rsbac_switch_pm)
++#endif
++ /* no need to call module, if to be ignored */
++ if(ignore_module != SW_PM && (request_vector & RSBAC_PM_REQUEST_VECTOR))
++ {
++ mod_result[SW_PM] = rsbac_adf_request_pm (request,
++ caller_pid,
++ target,
++ *tid_p,
++ attr,
++ *attr_val_p,
++ owner);
++ result = adf_and_plus(result, mod_result[SW_PM]);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(!rsbac_ind_softmode[SW_PM])
++ ret_result = adf_and_plus(ret_result, mod_result[SW_PM]);
++#endif
++ }
++#endif /* PM */
++
++/******* DAZ ********/
++#if defined(CONFIG_RSBAC_DAZ)
++#ifdef CONFIG_RSBAC_SWITCH_DAZ
++if (rsbac_switch_daz)
++#endif
++ /* no need to call module, if to be ignored */
++ if(ignore_module != SW_DAZ && (request_vector & RSBAC_DAZ_REQUEST_VECTOR))
++ {
++ mod_result[SW_DAZ] = rsbac_adf_request_daz (request,
++ caller_pid,
++ target,
++ *tid_p,
++ attr,
++ *attr_val_p,
++ owner);
++ result = adf_and_plus(result, mod_result[SW_DAZ]);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(!rsbac_ind_softmode[SW_DAZ])
++ ret_result = adf_and_plus(ret_result, mod_result[SW_DAZ]);
++#endif
++ }
++#endif /* DAZ */
++
++/******* FF ********/
++#if defined(CONFIG_RSBAC_FF)
++#ifdef CONFIG_RSBAC_SWITCH_FF
++if (rsbac_switch_ff)
++#endif
++ /* no need to call module, if to be ignored */
++ if(ignore_module != SW_FF && (request_vector & RSBAC_FF_REQUEST_VECTOR))
++ {
++ mod_result[SW_FF] = rsbac_adf_request_ff (request,
++ caller_pid,
++ target,
++ *tid_p,
++ attr,
++ *attr_val_p,
++ owner);
++ result = adf_and_plus(result, mod_result[SW_FF]);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(!rsbac_ind_softmode[SW_FF])
++ ret_result = adf_and_plus(ret_result, mod_result[SW_FF]);
++#endif
++ }
++#endif /* FF */
++
++/******* RC ********/
++#if defined(CONFIG_RSBAC_RC)
++#ifdef CONFIG_RSBAC_SWITCH_RC
++if (rsbac_switch_rc)
++#endif
++ /* no need to call module, if to be ignored */
++ if(ignore_module != SW_RC)
++ {
++ mod_result[SW_RC] = rsbac_adf_request_rc (request,
++ caller_pid,
++ target,
++ *tid_p,
++ attr,
++ *attr_val_p,
++ owner);
++ result = adf_and_plus(result, mod_result[SW_RC]);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(!rsbac_ind_softmode[SW_RC])
++ ret_result = adf_and_plus(ret_result, mod_result[SW_RC]);
++#endif
++ }
++#endif /* RC */
++
++/****** AUTH *******/
++#if defined(CONFIG_RSBAC_AUTH)
++#ifdef CONFIG_RSBAC_SWITCH_AUTH
++if (rsbac_switch_auth)
++#endif
++ /* no need to call module, if to be ignored */
++ if(ignore_module != SW_AUTH && (request_vector & RSBAC_AUTH_REQUEST_VECTOR))
++ {
++ mod_result[SW_AUTH]= rsbac_adf_request_auth(request,
++ caller_pid,
++ target,
++ *tid_p,
++ attr,
++ *attr_val_p,
++ owner);
++ result = adf_and_plus(result, mod_result[SW_AUTH]);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(!rsbac_ind_softmode[SW_AUTH])
++ ret_result = adf_and_plus(ret_result, mod_result[SW_AUTH]);
++#endif
++ }
++#endif /* AUTH */
++
++/****** ACL *******/
++#if defined(CONFIG_RSBAC_ACL)
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++if (rsbac_switch_acl)
++#endif
++ /* no need to call module, if to be ignored */
++ if(ignore_module != SW_ACL)
++ {
++ mod_result[SW_ACL] = rsbac_adf_request_acl(request,
++ caller_pid,
++ target,
++ *tid_p,
++ attr,
++ *attr_val_p,
++ owner);
++ result = adf_and_plus(result, mod_result[SW_ACL]);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(!rsbac_ind_softmode[SW_ACL])
++ ret_result = adf_and_plus(ret_result, mod_result[SW_ACL]);
++#endif
++ }
++#endif /* ACL */
++
++/****** CAP *******/
++#if defined(CONFIG_RSBAC_CAP)
++#ifdef CONFIG_RSBAC_SWITCH_CAP
++if (rsbac_switch_cap)
++#endif
++ /* no need to call module, if to be ignored */
++ if(ignore_module != SW_CAP && (request_vector & RSBAC_CAP_REQUEST_VECTOR))
++ {
++ mod_result[SW_CAP] = rsbac_adf_request_cap(request,
++ caller_pid,
++ target,
++ *tid_p,
++ attr,
++ *attr_val_p,
++ owner);
++ result = adf_and_plus(result, mod_result[SW_CAP]);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(!rsbac_ind_softmode[SW_CAP])
++ ret_result = adf_and_plus(ret_result, mod_result[SW_CAP]);
++#endif
++ }
++#endif /* CAP */
++
++/****** JAIL *******/
++#if defined(CONFIG_RSBAC_JAIL)
++#ifdef CONFIG_RSBAC_SWITCH_JAIL
++if (rsbac_switch_jail)
++#endif
++ /* no need to call module, if to be ignored */
++ if(ignore_module != SW_JAIL && (request_vector & RSBAC_JAIL_REQUEST_VECTOR))
++ {
++ mod_result[SW_JAIL]= rsbac_adf_request_jail(request,
++ caller_pid,
++ target,
++ *tid_p,
++ attr,
++ *attr_val_p,
++ owner);
++ result = adf_and_plus(result, mod_result[SW_JAIL]);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(!rsbac_ind_softmode[SW_JAIL])
++ ret_result = adf_and_plus(ret_result, mod_result[SW_JAIL]);
++#endif
++ }
++#endif /* JAIL */
++
++/******* PAX ********/
++#if defined(CONFIG_RSBAC_PAX)
++#ifdef CONFIG_RSBAC_SWITCH_PAX
++if (rsbac_switch_pax)
++#endif
++ /* no need to call module, if to be ignored */
++ if(ignore_module != SW_PAX && (request_vector & RSBAC_PAX_REQUEST_VECTOR))
++ {
++ mod_result[SW_PAX] = rsbac_adf_request_pax (request,
++ caller_pid,
++ target,
++ *tid_p,
++ attr,
++ *attr_val_p,
++ owner);
++ result = adf_and_plus(result, mod_result[SW_PAX]);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(!rsbac_ind_softmode[SW_PAX])
++ ret_result = adf_and_plus(ret_result, mod_result[SW_PAX]);
++#endif
++ }
++#endif /* PAX */
++
++/****** RES *******/
++#if defined(CONFIG_RSBAC_RES)
++#ifdef CONFIG_RSBAC_SWITCH_RES
++if (rsbac_switch_res)
++#endif
++ /* no need to call module, if to be ignored */
++ if(ignore_module != SW_RES && (request_vector & RSBAC_RES_REQUEST_VECTOR))
++ {
++ mod_result[SW_RES] = rsbac_adf_request_res(request,
++ caller_pid,
++ target,
++ *tid_p,
++ attr,
++ *attr_val_p,
++ owner);
++ result = adf_and_plus(result, mod_result[SW_RES]);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(!rsbac_ind_softmode[SW_RES])
++ ret_result = adf_and_plus(ret_result, mod_result[SW_RES]);
++#endif
++ }
++#endif /* RES */
++
++/****** REG *******/
++#if defined(CONFIG_RSBAC_REG)
++if(ignore_module != SW_REG)
++ {
++ mod_result[SW_REG]= rsbac_adf_request_reg (request,
++ caller_pid,
++ target,
++ *tid_p,
++ attr,
++ *attr_val_p,
++ owner);
++ result = adf_and_plus(result, mod_result[SW_REG]);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(!rsbac_ind_softmode[SW_REG])
++ ret_result = adf_and_plus(ret_result, mod_result[SW_REG]);
++#endif
++ }
++#endif /* REG */
++
++#endif /* !MAINT */
++
++/****************************/
++
++#if defined(CONFIG_RSBAC_DEBUG) && defined(CONFIG_RSBAC_NET)
++ if( rsbac_debug_adf_net
++ && ( (target == T_NETDEV)
++ || (target == T_NETTEMP)
++ || (target == T_NETOBJ)
++ )
++ )
++ do_log = TRUE;
++#endif
++
++/* log based on process owner */
++#ifdef CONFIG_RSBAC_IND_USER_LOG
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_GEN,
++ T_USER,
++ i_tid,
++ A_log_user_based,
++ &i_attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request()", A_log_user_based);
++ }
++ else
++ {
++ if(((rsbac_request_vector_t) 1 << request) & i_attr_val.log_user_based)
++ do_log = TRUE;
++ }
++#endif /* CONFIG_RSBAC_IND_USER_LOG */
++
++/* log based on program */
++#ifdef CONFIG_RSBAC_IND_PROG_LOG
++ if(!do_log)
++ {
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_GEN,
++ T_PROCESS,
++ i_tid,
++ A_log_program_based,
++ &i_attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request()", A_log_program_based);
++ }
++ else
++ {
++ if(((rsbac_request_vector_t) 1 << request) & i_attr_val.log_program_based)
++ do_log = TRUE;
++ }
++ }
++#endif /* CONFIG_RSBAC_IND_PROG_LOG */
++
++/*****************************************************/
++/* General work for all modules - after module calls */
++/* Note: the process' individual logging attributes are needed above */
++ switch(request)
++ {
++ case R_TERMINATE:
++ if (target == T_PROCESS)
++ rsbac_remove_target(T_PROCESS,*tid_p);
++ break;
++
++#ifdef CONFIG_RSBAC_USER_CHOWN
++ case R_CHANGE_OWNER:
++ if (target == T_PROCESS) {
++ i_tid.user = attr_val_p->owner;
++ i_attr_val.process = tid_p->process;
++ result = adf_and_plus(result,
++ rsbac_adf_request_int(request,
++ caller_pid,
++ T_USER,
++ &i_tid,
++ A_process,
++ &i_attr_val,
++ ignore_module));
++ }
++ break;
++#endif
++
++ default:
++ break;
++ }
++
++/* Log the request on info level if requested by the file/dir/dev attributes */
++/* log_array_low/high, or, if those defer to request based logging, if */
++/* logging is enabled for this request type. */
++/* loglevel 2: log everything */
++/* loglevel 1: log, if denied */
++/* loglevel 0: log nothing */
++
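++/*
++ * Editor's illustration (not part of the original RSBAC patch): the per
++ * object log level is stored as two bits per request, spread over the
++ * u64 attributes log_array_low and log_array_high, and recombined as
++ *
++ *	log_level = ((log_array_low  >> request) & 1)
++ *		  | (((log_array_high >> request) & 1) << 1);
++ *
++ * e.g. for a hypothetical request number 5: low bit 1 / high bit 0
++ * gives log_level 1 (log only denied requests), low 0 / high 1 gives 2
++ * (log everything), low 0 / high 0 gives 0 (log nothing), and both bits
++ * set apparently selects LL_request, i.e. fall back to the request
++ * based defaults in rsbac_log_levels[][].
++ */
++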
++#ifdef CONFIG_RSBAC_IND_LOG /* only if individual logging is enabled */
++ /* if file/dir/dev, depend log on log_arrays */
++ /* (but not for file.device = 0) */
++ /* log_on_request is TRUE */
++ if( !do_log
++ && ( ( ( (target == T_FILE)
++ || (target == T_DIR)
++ || (target == T_FIFO)
++ || (target == T_SYMLINK)
++ || (target == T_UNIXSOCK)
++ )
++ && RSBAC_MAJOR(tid_p->file.device)
++ && RSBAC_MINOR(tid_p->file.device)
++ )
++ || (target == T_DEV)
++ )
++ )
++ {
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ *tid_p,
++ A_log_array_low,
++ &i_attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request()", A_log_array_low);
++ }
++ else
++ {
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ *tid_p,
++ A_log_array_high,
++ &i_attr_val2,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request()", A_log_array_high);
++ }
++ else
++ { /* ll = low-bit for request | (high-bit for request as bit 1) */
++ /* WARNING: we deal with u64 here, only logical operations and */
++ /* shifts work correctly! */
++ log_level = ((i_attr_val.log_array_low >> request) & 1)
++ | ( ((i_attr_val2.log_array_high >> request) & 1) << 1);
++ if ( log_level == LL_full
++ || ( log_level == LL_denied
++ && (result == NOT_GRANTED
++ || result == UNDEFINED)) )
++ {
++ do_log = TRUE;
++ }
++ if(log_level != LL_request)
++ log_on_request = FALSE;
++ }
++ }
++ }
++#endif /* CONFIG_RSBAC_IND_LOG */
++
++#ifdef CONFIG_RSBAC_IND_NETDEV_LOG /* only if individual logging for netdev is enabled */
++ /* if netdev, depend log on log_arrays */
++ /* log_on_request is TRUE */
++ if( !do_log
++ && (target == T_NETDEV)
++ )
++ {
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ *tid_p,
++ A_log_array_low,
++ &i_attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request()", A_log_array_low);
++ }
++ else
++ {
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ *tid_p,
++ A_log_array_high,
++ &i_attr_val2,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request()", A_log_array_high);
++ }
++ else
++ { /* ll = low-bit for request | (high-bit for request as bit 1) */
++ /* WARNING: we deal with u64 here, only logical operations and */
++ /* shifts work correctly! */
++ log_level = ((i_attr_val.log_array_low >> request) & 1)
++ | ( ((i_attr_val2.log_array_high >> request) & 1) << 1);
++ if ( log_level == LL_full
++ || ( log_level == LL_denied
++ && (result == NOT_GRANTED
++ || result == UNDEFINED)) )
++ {
++ do_log = TRUE;
++ }
++ if(log_level != LL_request)
++ log_on_request = FALSE;
++ }
++ }
++ }
++#endif /* CONFIG_RSBAC_IND_NETDEV_LOG */
++
++#ifdef CONFIG_RSBAC_IND_NETOBJ_LOG /* only if individual logging for net objects is enabled */
++ /* if nettemp, netobj, depend log on log_arrays */
++ /* (but not for file.device = 0) */
++ /* log_on_request is TRUE */
++ if( !do_log
++ && ( (target == T_NETTEMP)
++ || (target == T_NETOBJ)
++ )
++ )
++ {
++ enum rsbac_attribute_t i_attr1, i_attr2;
++
++ if(target == T_NETOBJ)
++ {
++ if(rsbac_net_remote_request(request))
++ {
++ i_attr1 = A_remote_log_array_low;
++ i_attr2 = A_remote_log_array_high;
++ }
++ else
++ {
++ i_attr1 = A_local_log_array_low;
++ i_attr2 = A_local_log_array_high;
++ }
++ }
++ else
++ {
++ i_attr1 = A_log_array_low;
++ i_attr2 = A_log_array_high;
++ }
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ *tid_p,
++ i_attr1,
++ &i_attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request()", i_attr1);
++ }
++ else
++ {
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ *tid_p,
++ i_attr2,
++ &i_attr_val2,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request()", i_attr2);
++ }
++ else
++ { /* ll = low-bit for request | (high-bit for request as bit 1) */
++ /* WARNING: we deal with u64 here, only logical operations and */
++ /* shifts work correctly! */
++ log_level = ((i_attr_val.log_array_low >> request) & 1)
++ | ( ((i_attr_val2.log_array_high >> request) & 1) << 1);
++ if ( log_level == LL_full
++ || ( log_level == LL_denied
++ && (result == NOT_GRANTED
++ || result == UNDEFINED)) )
++ {
++ do_log = TRUE;
++ }
++ if(log_level != LL_request)
++ log_on_request = FALSE;
++ }
++ }
++ }
++#endif /* CONFIG_RSBAC_IND_NETOBJ_LOG */
++
++#ifdef CONFIG_RSBAC_NO_DECISION_ON_NETMOUNT
++log:
++#endif
++ /* if enabled, try request based log level */
++ if ( !do_log
++ && log_on_request
++ && ( rsbac_log_levels[request][target] == LL_full
++ || ( rsbac_log_levels[request][target] == LL_denied
++ && (result == NOT_GRANTED
++ || result == UNDEFINED)) ) )
++ do_log = TRUE;
++
++ if(do_log)
++ {
++ char * request_name;
++ char * res_name;
++ char * res_mods;
++ char * target_type_name;
++ char * target_id_name;
++ char * attr_name;
++ char * attr_val_name;
++#ifdef CONFIG_RSBAC_NET_OBJ
++ char * remote_ip_name;
++#else
++ char remote_ip_name[1];
++#endif
++ char * audit_uid_name;
++ char command[17];
++ rsbac_pid_t parent_pid = 0;
++ rsbac_uid_t audit_uid;
++#ifdef CONFIG_RSBAC_LOG_PSEUDO
++ rsbac_pseudo_t pseudo = 0;
++#endif
++ char * program_path;
++
++ /* parent pid */
++ if(current->parent)
++ parent_pid = task_pid(current->parent);
++
++ /* rsbac_kmalloc all memory */
++ request_name = rsbac_kmalloc(32);
++ res_name = rsbac_kmalloc(32);
++ res_mods = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ target_type_name = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ #ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ target_id_name
++ = rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++ #else
++ target_id_name = rsbac_kmalloc(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++ #endif
++ #ifdef CONFIG_RSBAC_LOG_PROGRAM_FILE
++ #ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ program_path
++ = rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++ #else
++ program_path = rsbac_kmalloc(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++ #endif
++ #else
++ program_path = rsbac_kmalloc(2);
++ #endif
++ attr_name = rsbac_kmalloc(32);
++ attr_val_name = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++#ifdef CONFIG_RSBAC_NET_OBJ
++ remote_ip_name = rsbac_kmalloc(32);
++#endif
++ audit_uid_name = rsbac_kmalloc(32);
++
++ request_name[0] = (char) 0;
++ target_type_name[0] = (char) 0;
++ target_id_name[0] = (char) 0;
++ program_path[0] = (char) 0;
++ attr_name[0] = (char) 0;
++ attr_val_name[0] = (char) 0;
++ remote_ip_name[0] = (char) 0;
++ audit_uid_name[0] = (char) 0;
++ res_name[0] = (char) 0;
++ res_mods[0] = (char) 0;
++ command[0] = (char) 0;
++ get_request_name(request_name, request);
++ #if !defined(CONFIG_RSBAC_MAINT)
++/*
++ if(result == mod_result[SW_NONE])
++ {
++ strcat(res_mods, " SW_GEN");
++ }
++*/
++ #if defined(CONFIG_RSBAC_MAC)
++ if(result == mod_result[SW_MAC])
++ {
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(rsbac_ind_softmode[SW_MAC])
++ strcat(res_mods, " MAC(Softmode)");
++ else
++ #endif
++ strcat(res_mods, " MAC");
++ }
++ #endif
++ #if defined(CONFIG_RSBAC_PM)
++ if(result == mod_result[SW_PM])
++ {
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(rsbac_ind_softmode[SW_PM])
++ strcat(res_mods, " PM(Softmode)");
++ else
++ #endif
++ strcat(res_mods, " PM");
++ }
++ #endif
++ #if defined(CONFIG_RSBAC_DAZ)
++ if(result == mod_result[SW_DAZ])
++ {
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(rsbac_ind_softmode[SW_DAZ])
++ strcat(res_mods, " DAZ(Softmode)");
++ else
++ #endif
++ strcat(res_mods, " DAZ");
++ }
++ #endif
++ #ifdef CONFIG_RSBAC_FF
++ if(result == mod_result[SW_FF])
++ {
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(rsbac_ind_softmode[SW_FF])
++ strcat(res_mods, " FF(Softmode)");
++ else
++ #endif
++ strcat(res_mods, " FF");
++ }
++ #endif
++ #ifdef CONFIG_RSBAC_RC
++ if(result == mod_result[SW_RC])
++ {
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(rsbac_ind_softmode[SW_RC])
++ strcat(res_mods, " RC(Softmode)");
++ else
++ #endif
++ strcat(res_mods, " RC");
++ }
++ #endif
++ #ifdef CONFIG_RSBAC_AUTH
++ if(result == mod_result[SW_AUTH])
++ {
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(rsbac_ind_softmode[SW_AUTH])
++ strcat(res_mods, " AUTH(Softmode)");
++ else
++ #endif
++ strcat(res_mods, " AUTH");
++ }
++ #endif
++ #ifdef CONFIG_RSBAC_ACL
++ if(result == mod_result[SW_ACL])
++ {
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(rsbac_ind_softmode[SW_ACL])
++ strcat(res_mods, " ACL(Softmode)");
++ else
++ #endif
++ strcat(res_mods, " ACL");
++ }
++ #endif
++ #ifdef CONFIG_RSBAC_CAP
++ if(result == mod_result[SW_CAP])
++ {
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(rsbac_ind_softmode[SW_CAP])
++ strcat(res_mods, " CAP(Softmode)");
++ else
++ #endif
++ strcat(res_mods, " CAP");
++ }
++ #endif
++ #ifdef CONFIG_RSBAC_JAIL
++ if(result == mod_result[SW_JAIL])
++ {
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(rsbac_ind_softmode[SW_JAIL])
++ strcat(res_mods, " JAIL(Softmode)");
++ else
++ #endif
++ strcat(res_mods, " JAIL");
++ }
++ #endif
++ #ifdef CONFIG_RSBAC_RES
++ if(result == mod_result[SW_RES])
++ {
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(rsbac_ind_softmode[SW_RES])
++ strcat(res_mods, " RES(Softmode)");
++ else
++ #endif
++ strcat(res_mods, " RES");
++ }
++ #endif
++ #ifdef CONFIG_RSBAC_REG
++ if(result == mod_result[SW_REG])
++ {
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(rsbac_ind_softmode[SW_REG])
++ strcat(res_mods, " REG(Softmode)");
++ else
++ #endif
++ strcat(res_mods, " REG");
++ }
++ #endif
++ #endif /* !MAINT */
++ if(!res_mods[0])
++ strcat(res_mods, " ADF");
++
++ /* Get process audit_uid */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_GEN,T_PROCESS,i_tid,A_audit_uid,&i_attr_val,FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request()", A_audit_uid);
++ return NOT_GRANTED; /* something weird happened */
++ }
++ audit_uid = i_attr_val.audit_uid;
++ if(audit_uid == RSBAC_NO_USER)
++ audit_uid = owner;
++ else {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(audit_uid))
++ sprintf(audit_uid_name, "audit uid %u/%u, ",
++ RSBAC_UID_SET(audit_uid),
++ RSBAC_UID_NUM(audit_uid));
++ else
++#endif
++ sprintf(audit_uid_name, "audit uid %u, ", RSBAC_UID_NUM(audit_uid));
++ }
++#ifdef CONFIG_RSBAC_LOG_PSEUDO
++ /* Get owner's logging pseudo */
++ i_tid.user = audit_uid;
++ if (rsbac_get_attr(SW_GEN,T_USER,i_tid,A_pseudo,&i_attr_val,FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request()", A_pseudo);
++ return NOT_GRANTED; /* something weird happened */
++ }
++ /* if no pseudo is registered, the returned attribute value is 0 (see later) */
++ pseudo = i_attr_val.pseudo;
++#endif
++
++#ifdef CONFIG_RSBAC_NET_OBJ
++ /* Get process remote_ip */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_GEN,T_PROCESS,i_tid,A_remote_ip,&i_attr_val,FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request()", A_remote_ip);
++ return NOT_GRANTED; /* something weird happened */
++ }
++ if(i_attr_val.remote_ip)
++ sprintf(remote_ip_name, "remote ip %u.%u.%u.%u, ", NIPQUAD(i_attr_val.remote_ip));
++#endif
++
++ #ifdef CONFIG_RSBAC_LOG_PROGRAM_FILE
++ {
++ struct mm_struct * mm;
++ struct vm_area_struct * vma;
++ struct dentry * dentry_p = NULL;
++
++ mm = current->mm;
++ if(mm)
++ {
++ atomic_inc(&mm->mm_users);
++ if(!down_read_trylock(&mm->mmap_sem))
++ goto down_failed;
++ vma = mm->mmap;
++ while (vma)
++ {
++ if( (vma->vm_flags & VM_EXECUTABLE)
++ && vma->vm_file)
++ {
++ dentry_p = dget(vma->vm_file->f_dentry);
++ break;
++ }
++ vma = vma->vm_next;
++ }
++ up_read(&mm->mmap_sem);
++ if(dentry_p)
++ {
++ char * p = program_path;
++
++ p += sprintf(program_path, ", prog_file ");
++ #ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ rsbac_get_full_path(dentry_p, p, CONFIG_RSBAC_MAX_PATH_LEN);
++ #else
++ int namelen = rsbac_min(dentry_p->d_name.len, RSBAC_MAXNAMELEN);
++
++ strncpy(p, dentry_p->d_name.name, namelen);
++ p[namelen]=0;
++ #endif
++ dput(dentry_p);
++ }
++down_failed:
++ mmput_nosleep(mm);
++ }
++ }
++ #endif
++ get_target_name(target_type_name, target, target_id_name, *tid_p);
++ get_attribute_name(attr_name, attr);
++ get_attribute_value_name(attr_val_name, attr, attr_val_p);
++ get_result_name(res_name, result);
++ if ((current) && (current->comm))
++ {
++ strncpy(command,current->comm,16);
++ command[16] = (char) 0;
++ }
++
++#ifdef CONFIG_RSBAC_LOG_PSEUDO
++ /* if pseudo is set, its value is != 0, else -> use id */
++ if (pseudo)
++ {
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if(rsbac_softmode)
++ rsbac_printk(KERN_INFO "rsbac_adf_request(): request %s, pid %u, ppid %u, prog_name %s%s, pseudo %u, %starget_type %s, tid %s, attr %s, value %s, result %s (Softmode) by%s\n",
++			request_name, pid_nr(caller_pid), pid_nr(parent_pid), command, program_path, pseudo, remote_ip_name, target_type_name, target_id_name, attr_name, attr_val_name, res_name, res_mods);
++ else
++ #endif
++ rsbac_printk(KERN_INFO "rsbac_adf_request(): request %s, pid %u, ppid %u, prog_name %s%s, pseudo %u, %starget_type %s, tid %s, attr %s, value %s, result %s by%s\n",
++			request_name, pid_nr(caller_pid), pid_nr(parent_pid), command, program_path, pseudo, remote_ip_name, target_type_name, target_id_name, attr_name, attr_val_name, res_name, res_mods);
++ }
++ else
++#endif
++ {
++ char * owner_name;
++
++ owner_name = rsbac_kmalloc(32);
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(owner))
++ sprintf(owner_name, "%u/%u",
++ RSBAC_UID_SET(owner),
++ RSBAC_UID_NUM(owner));
++ else
++#endif
++ sprintf(owner_name, "%u", RSBAC_UID_NUM(owner));
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if(rsbac_softmode)
++ rsbac_printk(KERN_INFO "rsbac_adf_request(): request %s, pid %u, ppid %u, prog_name %s%s, uid %s, %s%starget_type %s, tid %s, attr %s, value %s, result %s (Softmode) by%s\n",
++ request_name, pid_nr(caller_pid), pid_nr(parent_pid), command, program_path, owner_name, audit_uid_name, remote_ip_name, target_type_name, target_id_name, attr_name, attr_val_name, res_name, res_mods);
++ else
++ #endif
++ rsbac_printk(KERN_INFO "rsbac_adf_request(): request %s, pid %u, ppid %u, prog_name %s%s, uid %s, %s%starget_type %s, tid %s, attr %s, value %s, result %s by%s\n",
++ request_name, pid_nr(caller_pid), pid_nr(parent_pid), command, program_path, owner_name, audit_uid_name, remote_ip_name, target_type_name, target_id_name, attr_name, attr_val_name, res_name, res_mods);
++ rsbac_kfree(owner_name);
++ }
++ /* rsbac_kfree all helper mem */
++ rsbac_kfree(request_name);
++ rsbac_kfree(res_name);
++ rsbac_kfree(res_mods);
++ rsbac_kfree(target_type_name);
++ rsbac_kfree(target_id_name);
++ rsbac_kfree(program_path);
++ rsbac_kfree(attr_name);
++ rsbac_kfree(attr_val_name);
++#ifdef CONFIG_RSBAC_NET_OBJ
++ rsbac_kfree(remote_ip_name);
++#endif
++ rsbac_kfree(audit_uid_name);
++ }
++
++/* UNDEFINED must never be returned -> change result */
++ if(result == UNDEFINED)
++ result = NOT_GRANTED;
++
++/* count */
++ rsbac_adf_request_count[target]++;
++#ifdef CONFIG_RSBAC_XSTATS
++ rsbac_adf_request_xcount[target][request]++;
++#endif
++
++/* return result */
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if(rsbac_softmode && !rsbac_internal)
++ return DO_NOT_CARE;
++ else
++ #endif
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ return ret_result;
++ #else
++ return result; /* change for debugging! */
++ #endif
++ } /* end of rsbac_adf_request_int() */
++
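++/*
++ * Editor's illustration (not part of the original RSBAC patch): from
++ * the AEF side the two entry points are meant to be used as a pair -
++ * first ask for a decision, then, only if the operation was actually
++ * performed, notify the ADF so the attributes stay consistent.  The
++ * surrounding variables below are hypothetical and the argument list of
++ * the rsbac_adf_request wrapper is only sketched.
++ *
++ *	if (rsbac_adf_request(R_TRUNCATE, task_pid(current),
++ *			      T_FILE, tid, A_none, attr_val) == NOT_GRANTED)
++ *		return -EPERM;
++ *	// ... perform the truncate ...
++ *	rsbac_adf_set_attr(R_TRUNCATE, task_pid(current), T_FILE, tid,
++ *			   T_NONE, new_tid, A_none, attr_val);
++ */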
++
++/*****************************************************************************/
++/* If the request returned granted and the operation is performed, */
++/* the following function is called by the AEF to get all aci set correctly. */
++/* The second instance of target specification is the new target, if one has */
++/* been created, otherwise its values are ignored. */
++/* It returns 0 on success and an error from error.h otherwise. */
++
++EXPORT_SYMBOL(rsbac_adf_set_attr);
++int rsbac_adf_set_attr(
++ enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_target_t new_target,
++ union rsbac_target_id_t new_tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val)
++ {
++ union rsbac_target_id_t i_tid;
++ rsbac_uid_t owner;
++ int error = 0;
++ rsbac_request_vector_t request_vector;
++ rsbac_boolean_t do_log = FALSE;
++ rsbac_boolean_t log_on_request = TRUE;
++ union rsbac_attribute_value_t i_attr_val;
++#ifdef CONFIG_RSBAC_IND_LOG
++ union rsbac_attribute_value_t i_attr_val2;
++ enum rsbac_log_level_t log_level;
++#endif
++#ifdef CONFIG_RSBAC_NO_DECISION_ON_NETMOUNT
++ struct vfsmount * mnt_p;
++#endif
++
++/* No attribute setting possible before init (called at boot time) */
++
++ if (!rsbac_is_initialized())
++ return 0;
++
++/* kernel (pid 0) is ignored */
++ if ( !pid_nr(caller_pid)
++ #if defined(CONFIG_RSBAC_LOG_REMOTE)
++ || (caller_pid == rsbaclogd_pid)
++ #endif
++ )
++ return 0;
++/* Checking base values */
++ if( request >= R_NONE
++ || target > T_NONE
++ || new_target > T_NONE
++ || attr > A_none)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr(): called with invalid request, target or attribute\n");
++ return(-RSBAC_EINVALIDVALUE);
++ }
++
++ if (in_interrupt())
++ {
++ char * request_name = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(request_name)
++ {
++ get_request_name(request_name, request);
++ printk(KERN_WARNING "rsbac_adf_set_attr(): called from interrupt: request %s, pid %u(%s), attr_val %u!\n",
++ request_name, pid_nr(caller_pid), current->comm, attr_val.dummy);
++ rsbac_kfree(request_name);
++ }
++ else
++ {
++ printk(KERN_WARNING "rsbac_adf_set_attr(): called from interrupt: request %u, pid %u(%s)!\n",
++ request, pid_nr(caller_pid), current->comm);
++ }
++ dump_stack();
++ return -RSBAC_EFROMINTERRUPT;
++ }
++
++ request_vector = RSBAC_REQUEST_VECTOR(request);
++
++/* Getting basic information about this adf_set_attr-call */
++
++ owner = RSBAC_NO_USER;
++ /* only useful for real process, not idle or init */
++ if (pid_nr(caller_pid) > 1)
++ {
++ error = rsbac_get_owner(&owner);
++ if(error)
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_adf_set_attr(): caller_pid %i, RSBAC not initialized, returning 0",
++ pid_nr(caller_pid));
++ return 0; /* Startup-Sequence (see above) */
++ }
++ }
++ else /* caller_pid = 1 -> init -> owner = root */
++ owner = 0;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if ((attr == A_owner) && (RSBAC_UID_SET(attr_val.owner) > RSBAC_UM_VIRTUAL_MAX))
++ attr_val.owner = RSBAC_GEN_UID(RSBAC_UID_SET(owner), attr_val.owner);
++ else
++ if ((attr == A_group) && (RSBAC_GID_SET(attr_val.group) > RSBAC_UM_VIRTUAL_MAX))
++ attr_val.group = RSBAC_GEN_GID(RSBAC_UID_SET(owner), attr_val.group);
++#else
++ if (attr == A_owner)
++ attr_val.owner = RSBAC_UID_NUM(attr_val.owner);
++ else
++ if (attr == A_group)
++ attr_val.group = RSBAC_GID_NUM(attr_val.group);
++#endif
++
++/*************************************************/
++/* General work for all modules - before modules */
++#if defined(CONFIG_RSBAC_NO_DECISION_ON_NETMOUNT) || defined(CONFIG_RSBAC_FD_CACHE)
++ switch (target) {
++ case T_DIR:
++#if defined(CONFIG_RSBAC_NO_DECISION_ON_NETMOUNT)
++ if ((mnt_p = rsbac_get_vfsmount(tid.file.device))
++ && ( (mnt_p->mnt_sb->s_magic == NFS_SUPER_MAGIC)
++ || (mnt_p->mnt_sb->s_magic == CODA_SUPER_MAGIC)
++ || (mnt_p->mnt_sb->s_magic == NCP_SUPER_MAGIC)
++ || (mnt_p->mnt_sb->s_magic == SMB_SUPER_MAGIC)
++ )
++ ) {
++ error = 0;
++ goto log;
++ }
++#endif
++ /* Ensure that there are no leftover attributes */
++ if (request == R_CREATE) {
++ rsbac_remove_target(new_target, new_tid);
++#if defined(CONFIG_RSBAC_FD_CACHE)
++ rsbac_fd_cache_invalidate(&new_tid.file);
++#endif
++ }
++#if defined(CONFIG_RSBAC_FD_CACHE)
++ else
++ if (request == R_RENAME)
++ rsbac_fd_cache_invalidate_all();
++#endif
++ break;
++
++ case T_FILE:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++#if defined(CONFIG_RSBAC_NO_DECISION_ON_NETMOUNT)
++ if ((mnt_p = rsbac_get_vfsmount(tid.file.device))
++ && ( (mnt_p->mnt_sb->s_magic == NFS_SUPER_MAGIC)
++ || (mnt_p->mnt_sb->s_magic == CODA_SUPER_MAGIC)
++ || (mnt_p->mnt_sb->s_magic == NCP_SUPER_MAGIC)
++ || (mnt_p->mnt_sb->s_magic == SMB_SUPER_MAGIC)
++ )
++ ) {
++ error = 0;
++ goto log;
++ }
++#endif
++#if defined(CONFIG_RSBAC_FD_CACHE)
++ if (request == R_RENAME)
++ rsbac_fd_cache_invalidate(&tid.file);
++#endif
++ break;
++
++ case T_PROCESS:
++ if ( (request == R_CLONE)
++ && !new_tid.process
++ ) {
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr(): tid for new process in CLONE is NULL!\n");
++ return -RSBAC_EINVALIDTARGET;
++ }
++ break;
++
++ default:
++ break;
++ }
++#endif
++
++/**********************************************************/
++/* calling all decision modules, building a common result */
++
++
++#ifdef CONFIG_RSBAC_DEBUG
++/* first, check for valid request/target combination */
++error |= rsbac_adf_set_attr_check(request,
++ caller_pid,
++ target,
++ tid,
++ new_target,
++ new_tid,
++ attr,
++ attr_val,
++ owner);
++if(error)
++ goto general_work;
++#endif
++
++#if !defined(CONFIG_RSBAC_MAINT)
++/******* MAC ********/
++#if defined(CONFIG_RSBAC_MAC)
++#ifdef CONFIG_RSBAC_SWITCH_MAC
++if (rsbac_switch_mac)
++#endif
++ if(request_vector & RSBAC_MAC_SET_ATTR_VECTOR)
++ error |= rsbac_adf_set_attr_mac(request,
++ caller_pid,
++ target,
++ tid,
++ new_target,
++ new_tid,
++ attr,
++ attr_val,
++ owner);
++#endif /* MAC */
++
++/******* PM ********/
++#ifdef CONFIG_RSBAC_PM
++#ifdef CONFIG_RSBAC_SWITCH_PM
++if (rsbac_switch_pm)
++#endif
++ if(request_vector & RSBAC_PM_SET_ATTR_VECTOR)
++ error |= rsbac_adf_set_attr_pm (request,
++ caller_pid,
++ target,
++ tid,
++ new_target,
++ new_tid,
++ attr,
++ attr_val,
++ owner);
++#endif /* PM */
++
++/******* DAZ ********/
++#ifdef CONFIG_RSBAC_DAZ
++#ifdef CONFIG_RSBAC_SWITCH_DAZ
++if (rsbac_switch_daz)
++#endif
++ if(request_vector & RSBAC_DAZ_SET_ATTR_VECTOR)
++ error |= rsbac_adf_set_attr_daz (request,
++ caller_pid,
++ target,
++ tid,
++ new_target,
++ new_tid,
++ attr,
++ attr_val,
++ owner);
++#endif /* DAZ */
++
++/******* RC ********/
++#ifdef CONFIG_RSBAC_RC
++#ifdef CONFIG_RSBAC_SWITCH_RC
++if (rsbac_switch_rc)
++#endif
++ error |= rsbac_adf_set_attr_rc (request,
++ caller_pid,
++ target,
++ tid,
++ new_target,
++ new_tid,
++ attr,
++ attr_val,
++ owner);
++#endif /* RC */
++
++/****** AUTH *******/
++#ifdef CONFIG_RSBAC_AUTH
++#ifdef CONFIG_RSBAC_SWITCH_AUTH
++if (rsbac_switch_auth)
++#endif
++ if(request_vector & RSBAC_AUTH_SET_ATTR_VECTOR)
++ error |= rsbac_adf_set_attr_auth(request,
++ caller_pid,
++ target,
++ tid,
++ new_target,
++ new_tid,
++ attr,
++ attr_val,
++ owner);
++#endif /* AUTH */
++
++/****** CAP *******/
++#ifdef CONFIG_RSBAC_CAP
++#ifdef CONFIG_RSBAC_SWITCH_CAP
++if (rsbac_switch_cap)
++#endif
++ if(request_vector & RSBAC_CAP_SET_ATTR_VECTOR)
++ error |= rsbac_adf_set_attr_cap (request,
++ caller_pid,
++ target,
++ tid,
++ new_target,
++ new_tid,
++ attr,
++ attr_val,
++ owner);
++#endif /* CAP */
++
++/****** JAIL *******/
++#ifdef CONFIG_RSBAC_JAIL
++#ifdef CONFIG_RSBAC_SWITCH_JAIL
++if (rsbac_switch_jail)
++#endif
++ if(request_vector & RSBAC_JAIL_SET_ATTR_VECTOR)
++ error |= rsbac_adf_set_attr_jail(request,
++ caller_pid,
++ target,
++ tid,
++ new_target,
++ new_tid,
++ attr,
++ attr_val,
++ owner);
++#endif /* JAIL */
++
++/****** RES *******/
++#ifdef CONFIG_RSBAC_RES
++#ifdef CONFIG_RSBAC_SWITCH_RES
++if (rsbac_switch_res)
++#endif
++ if(request_vector & RSBAC_RES_SET_ATTR_VECTOR)
++ error |= rsbac_adf_set_attr_res (request,
++ caller_pid,
++ target,
++ tid,
++ new_target,
++ new_tid,
++ attr,
++ attr_val,
++ owner);
++#endif /* RES */
++
++/****** REG *******/
++#ifdef CONFIG_RSBAC_REG
++ error |= rsbac_adf_set_attr_reg (request,
++ caller_pid,
++ target,
++ tid,
++ new_target,
++ new_tid,
++ attr,
++ attr_val,
++ owner);
++#endif /* REG */
++#endif /* !MAINT */
++
++/* General work for all modules (after set_attr call) */
++#ifdef CONFIG_RSBAC_DEBUG
++general_work:
++#endif
++ switch(request)
++ {
++ /* remove deleted item from rsbac data */
++ case R_DELETE :
++ switch (target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ case T_SYMLINK:
++ /* Only remove the file/fifo/symlink target on deletion of its last link */
++ if ( (attr == A_nlink)
++ && (attr_val.nlink > 1)
++ )
++ break;
++ /* fall through */
++ case T_DIR:
++ rsbac_remove_target(target,tid);
++ break;
++ case T_IPC:
++ /* shm removal is delayed; the target is removed directly when the segment is destroyed */
++ if(tid.ipc.type != I_shm)
++ rsbac_remove_target(target,tid);
++ break;
++ default:
++ break;
++ }
++ break;
++
++ case R_CLONE:
++ switch (target)
++ {
++ case T_PROCESS:
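++ /* CLONE: copy the inheritable GEN process attributes (program based log, fake_root_uid, remote_ip, kernel_thread, audit_uid, auid_exempt, vset, program_file - depending on configuration) from the parent process to the new child */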
++ #if defined(CONFIG_RSBAC_IND_PROG_LOG)
++ /* get program based log from old process */
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ tid,
++ A_log_program_based,
++ &i_attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr()", A_log_program_based);
++ }
++ else
++ { /* only set, if not default value 0 */
++ if(i_attr_val.log_program_based)
++ {
++ /* set program based log for new process */
++ if (rsbac_set_attr(SW_GEN, new_target,
++ new_tid,
++ A_log_program_based,
++ i_attr_val))
++ {
++ rsbac_ds_set_error("rsbac_adf_set_attr()", A_log_program_based);
++ }
++ }
++ }
++ #endif
++ #if defined(CONFIG_RSBAC_FAKE_ROOT_UID)
++ /* get fake_root_uid from old process */
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ tid,
++ A_fake_root_uid,
++ &i_attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr()", A_fake_root_uid);
++ }
++ else
++ { /* only set, if not default value 0 */
++ if(i_attr_val.fake_root_uid)
++ {
++ /* set fake_root_uid for new process */
++ if (rsbac_set_attr(SW_GEN, new_target,
++ new_tid,
++ A_fake_root_uid,
++ i_attr_val))
++ {
++ rsbac_ds_set_error("rsbac_adf_set_attr()", A_fake_root_uid);
++ }
++ }
++ }
++ #endif
++ #if defined(CONFIG_RSBAC_NET)
++ /* get remote_ip from old process */
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ tid,
++ A_remote_ip,
++ &i_attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr()", A_remote_ip);
++ }
++ else
++ { /* only set, if not default value 0 */
++ if(i_attr_val.remote_ip)
++ {
++ /* set remote_ip for new process */
++ if (rsbac_set_attr(SW_GEN, new_target,
++ new_tid,
++ A_remote_ip,
++ i_attr_val))
++ {
++ rsbac_ds_set_error("rsbac_adf_set_attr()", A_remote_ip);
++ }
++ }
++ }
++ #endif
++ /* get kernel_thread from old process */
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ tid,
++ A_kernel_thread,
++ &i_attr_val, FALSE)) {
++ rsbac_ds_get_error("rsbac_adf_set_attr()",
++ A_kernel_thread);
++ } else {
++ if (i_attr_val.kernel_thread) {
++ if (rsbac_set_attr(SW_GEN, new_target,
++ new_tid,
++ A_kernel_thread,
++ i_attr_val)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr()",
++ A_kernel_thread);
++ }
++ }
++ }
++
++ /* get audit_uid from old process */
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ tid,
++ A_audit_uid,
++ &i_attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr()", A_audit_uid);
++ }
++ else
++ { /* only set, if not default value NO_USER */
++ if(i_attr_val.audit_uid != RSBAC_NO_USER)
++ {
++ /* set audit uid for new process */
++ if (rsbac_set_attr(SW_GEN,
++ new_target,
++ new_tid,
++ A_audit_uid,
++ i_attr_val))
++ {
++ rsbac_ds_set_error("rsbac_adf_set_attr()", A_audit_uid);
++ }
++ }
++ }
++ /* get auid_exempt from old process */
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ tid,
++ A_auid_exempt,
++ &i_attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr()", A_auid_exempt);
++ }
++ else
++ { /* only set, if not default value NO_USER */
++ if(i_attr_val.auid_exempt != RSBAC_NO_USER)
++ {
++ /* set auid_exempt for new process */
++ if (rsbac_set_attr(SW_GEN,
++ new_target,
++ new_tid,
++ A_auid_exempt,
++ i_attr_val))
++ {
++ rsbac_ds_set_error("rsbac_adf_set_attr()", A_auid_exempt);
++ }
++ }
++ }
++ #ifdef CONFIG_RSBAC_UM_VIRTUAL
++ /* set vset of new process */
++ i_attr_val.vset = RSBAC_UID_SET(owner);
++ if(i_attr_val.vset)
++ {
++ /* set vset for new process */
++ if (rsbac_set_attr(SW_GEN, new_target,
++ new_tid,
++ A_vset,
++ i_attr_val))
++ {
++ rsbac_ds_set_error("rsbac_adf_set_attr()", A_vset);
++ }
++ }
++ #endif
++#if defined(CONFIG_RSBAC_AUTH_LEARN) || defined(CONFIG_RSBAC_CAP_LEARN)
++ /* copy program_file */
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ tid,
++ A_program_file,
++ &i_attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr()", A_program_file);
++ }
++ else
++ { /* set program_file for new process */
++ if (rsbac_set_attr(SW_GEN, new_target,
++ new_tid,
++ A_program_file,
++ i_attr_val))
++ {
++ rsbac_ds_set_error("rsbac_adf_set_attr()", A_program_file);
++ }
++ }
++#endif
++ break;
++
++ default:
++ break;
++ }
++ break;
++
++ case R_CLOSE:
++ switch (target) {
++#if 0
++ case T_IPC:
++ if( (tid.ipc.type == I_anonunix)
++ && ( (attr != A_nlink)
++ || (attr_val.nlink <= 1)
++ )
++ )
++ rsbac_remove_target(target, tid);
++ break;
++#endif
++#ifdef CONFIG_RSBAC_NET_OBJ
++ case T_NETOBJ:
++ rsbac_remove_target(target, tid);
++ break;
++#endif
++ default:
++ break;
++ }
++ break;
++
++#if 0
++ case R_CREATE:
++ switch (target) {
++ case T_IPC:
++ if((tid.ipc.type != I_sem) && !tid.ipc.id.id_nr)
++ error |= -RSBAC_EINVALIDVALUE;
++ break;
++ default:
++ break;
++ }
++ break;
++#endif
++
++#ifdef CONFIG_RSBAC_NET_OBJ
++ case R_ACCEPT:
++ switch (target)
++ {
++ case T_NETOBJ:
++ /* store remote IP */
++ if( tid.netobj.sock_p
++ && tid.netobj.sock_p->ops
++ && tid.netobj.sock_p->sk
++ && (tid.netobj.sock_p->ops->family == AF_INET)
++ )
++ {
++ i_tid.process = caller_pid;
++ i_attr_val.remote_ip = inet_sk(tid.netobj.sock_p->sk)->inet_daddr;
++ /* set remote_ip for the accepting process */
++ if (rsbac_set_attr(SW_GEN,
++ T_PROCESS,
++ i_tid,
++ A_remote_ip,
++ i_attr_val))
++ {
++ rsbac_ds_set_error("rsbac_adf_set_attr()", A_remote_ip);
++ }
++ }
++ break;
++
++ default:
++ break;
++ }
++ break;
++#endif /* CONFIG_RSBAC_NET_OBJ */
++
++ case R_EXECUTE :
++ switch (target)
++ {
++ case T_FILE:
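++ /* EXECUTE: propagate per-program attributes (program based log, fake_root_uid, vset, program_file, auid_exempt - depending on configuration) from the executed file to the calling process */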
++ #if defined(CONFIG_RSBAC_IND_PROG_LOG)
++ /* get program based log from file */
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ tid,
++ A_log_program_based,
++ &i_attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr()", A_log_program_based);
++ }
++ else
++ {
++ /* set program based log for process */
++ i_tid.process = caller_pid;
++ if (rsbac_set_attr(SW_GEN, T_PROCESS,
++ i_tid,
++ A_log_program_based,
++ i_attr_val))
++ {
++ rsbac_ds_set_error("rsbac_adf_set_attr()", A_log_program_based);
++ }
++ }
++ #endif
++ #if defined(CONFIG_RSBAC_FAKE_ROOT_UID)
++ /* get fake_root_uid from file */
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ tid,
++ A_fake_root_uid,
++ &i_attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr()", A_fake_root_uid);
++ }
++ else
++ {
++ /* set fake_root_uid for process */
++ if(i_attr_val.fake_root_uid)
++ {
++ i_tid.process = caller_pid;
++ if (rsbac_set_attr(SW_GEN, T_PROCESS,
++ i_tid,
++ A_fake_root_uid,
++ i_attr_val))
++ {
++ rsbac_ds_set_error("rsbac_adf_set_attr()", A_fake_root_uid);
++ }
++ }
++ }
++ #endif
++ #ifdef CONFIG_RSBAC_UM_VIRTUAL
++ /* get vset from file */
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ tid,
++ A_vset,
++ &i_attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr()", A_vset);
++ }
++ else
++ {
++ /* set vset for process */
++ if(i_attr_val.vset != RSBAC_UM_VIRTUAL_KEEP)
++ {
++ i_tid.process = caller_pid;
++ if (rsbac_set_attr(SW_GEN, T_PROCESS,
++ i_tid,
++ A_vset,
++ i_attr_val))
++ {
++ rsbac_ds_set_error("rsbac_adf_set_attr()", A_fake_root_uid);
++ }
++ }
++ }
++ #endif
++#if defined(CONFIG_RSBAC_AUTH_LEARN) || defined(CONFIG_RSBAC_CAP_LEARN)
++ /* remember executed file */
++ i_tid.process = caller_pid;
++ i_attr_val.program_file = tid.file;
++ if (rsbac_set_attr(SW_GEN,
++ T_PROCESS,
++ i_tid,
++ A_program_file,
++ i_attr_val))
++ {
++ rsbac_pr_set_error(A_program_file);
++ }
++#endif
++ /* get auid_exempt from file */
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ tid,
++ A_auid_exempt,
++ &i_attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr()", A_auid_exempt);
++ }
++ else
++ {
++ if(i_attr_val.auid_exempt != RSBAC_NO_USER)
++ {
++ /* set auid_exempt for process */
++ i_tid.process = caller_pid;
++ if (rsbac_set_attr(SW_GEN, T_PROCESS,
++ i_tid,
++ A_auid_exempt,
++ i_attr_val))
++ {
++ rsbac_ds_set_error("rsbac_adf_set_attr()", A_auid_exempt);
++ }
++ }
++ }
++ break;
++
++ default:
++ break;
++ }
++ break;
++
++ default:
++ break;
++ }
++
++#if defined(CONFIG_RSBAC_DEBUG) && defined(CONFIG_RSBAC_NET)
++ if( rsbac_debug_adf_net
++ && ( (target == T_NETDEV)
++ || (target == T_NETTEMP)
++ || (target == T_NETOBJ)
++ )
++ )
++ do_log = TRUE;
++#endif
++
++/* log based on process owner */
++#ifdef CONFIG_RSBAC_IND_USER_LOG
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_GEN,
++ T_USER,
++ i_tid,
++ A_log_user_based,
++ &i_attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr()", A_log_user_based);
++ }
++ else
++ {
++ if(((rsbac_request_vector_t) 1 << request) & i_attr_val.log_user_based)
++ do_log = TRUE;
++ }
++#endif /* CONFIG_RSBAC_IND_USER_LOG */
++
++/* log based on program */
++#ifdef CONFIG_RSBAC_IND_PROG_LOG
++ if(!do_log)
++ {
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_GEN,
++ T_PROCESS,
++ i_tid,
++ A_log_program_based,
++ &i_attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr()", A_log_program_based);
++ }
++ else
++ {
++ if(((rsbac_request_vector_t) 1 << request) & i_attr_val.log_program_based)
++ do_log = TRUE;
++ }
++ }
++#endif /* CONFIG_RSBAC_IND_PROG_LOG */
++
++
++/* Log the request at info level, if requested by the file/dir/dev attributes */
++/* log_array_low/high, or, if those select the request based level, if logging */
++/* is enabled for this request type. */
++/* loglevel 2: log everything */
++/* loglevel 1: log only if denied */
++/* loglevel 0: log nothing */
++
++#ifdef CONFIG_RSBAC_IND_LOG /* only if individual logging is enabled */
++ /* if file/dir/dev, depend log on log_arrays */
++ /* (but not for file.device = 0) */
++ /* log_on_request is TRUE */
++ if(!do_log)
++ {
++ if( ( ( (target == T_FILE)
++ || (target == T_DIR)
++ || (target == T_FIFO)
++ || (target == T_SYMLINK)
++ )
++ && RSBAC_MAJOR(tid.file.device)
++ && RSBAC_MINOR(tid.file.device)
++ )
++ || (target == T_DEV)
++ )
++ {
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ tid,
++ A_log_array_low,
++ &i_attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr()", A_log_array_low);
++ }
++ else
++ {
++ if (rsbac_get_attr(SW_GEN,
++ target,
++ tid,
++ A_log_array_high,
++ &i_attr_val2,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr()", A_log_array_high);
++ }
++ else
++ { /* ll = low-bit for request | (high-bit for request as bit 1) */
++ log_level = ((i_attr_val.log_array_low >> request) & 1)
++ | ( ((i_attr_val2.log_array_high >> request) & 1) << 1);
++ if ( log_level == LL_full
++ || ( log_level == LL_denied
++ && error) )
++ {
++ do_log = TRUE;
++ }
++ if(log_level != LL_request)
++ log_on_request = FALSE;
++ }
++ }
++ }
++ }
++#endif /* CONFIG_RSBAC_IND_LOG */
++
++#ifdef CONFIG_RSBAC_NO_DECISION_ON_NETMOUNT
++log:
++#endif
++ /* if enabled, try request based log level */
++ if (log_on_request
++ && ( rsbac_log_levels[request][target] == LL_full
++ || ( rsbac_log_levels[request][target] == LL_denied
++ && error) ) )
++ do_log = TRUE;
++
++ if(do_log)
++ {
++ char * request_name;
++ char * target_type_name;
++ char * new_target_type_name;
++ char * target_id_name;
++ char * new_target_id_name;
++ char * attr_name;
++ rsbac_uid_t audit_uid;
++ char * audit_uid_name;
++#ifdef CONFIG_RSBAC_LOG_PSEUDO
++ rsbac_pseudo_t pseudo = 0;
++#endif
++
++ /* Get process audit_uid */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_GEN,T_PROCESS,i_tid,A_audit_uid,&i_attr_val,FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr()", A_audit_uid);
++ return -RSBAC_EREADFAILED; /* something weird happened */
++ }
++ audit_uid_name = rsbac_kmalloc(32);
++ audit_uid = i_attr_val.audit_uid;
++ if(audit_uid == RSBAC_NO_USER) {
++ audit_uid_name[0] = 0;
++ audit_uid = owner;
++ } else {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(audit_uid))
++ sprintf(audit_uid_name, "audit uid %u/%u, ",
++ RSBAC_UID_SET(audit_uid),
++ RSBAC_UID_NUM(audit_uid));
++ else
++#endif
++ sprintf(audit_uid_name, "audit uid %u, ", RSBAC_UID_NUM(audit_uid));
++ }
++#ifdef CONFIG_RSBAC_LOG_PSEUDO
++ /* Get the audit user's logging pseudo */
++ i_tid.user = audit_uid;
++ if (rsbac_get_attr(SW_GEN,T_USER,i_tid,A_pseudo,&i_attr_val,FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr()", A_pseudo);
++ return -RSBAC_EREADFAILED; /* something weird happened */
++ }
++ /* if pseudo is not registered, return attribute value is 0 (see later) */
++ pseudo = i_attr_val.pseudo;
++#endif
++
++ /* rsbac_kmalloc all memory */
++ request_name = rsbac_kmalloc(32);
++ target_type_name = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ new_target_type_name = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ #ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ target_id_name
++ = rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ new_target_id_name
++ = rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++ #else
++ target_id_name = rsbac_kmalloc(2 * RSBAC_MAXNAMELEN);
++ new_target_id_name = rsbac_kmalloc(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++ #endif
++ attr_name = rsbac_kmalloc(32);
++
++ /* Getting basic information about this request */
++ request_name[0] = (char) 0;
++ target_type_name[0] = (char) 0;
++ target_id_name[0] = (char) 0;
++ new_target_type_name[0] = (char) 0;
++ new_target_id_name[0] = (char) 0;
++ attr_name[0] = (char) 0;
++ get_request_name(request_name, request);
++ get_target_name(target_type_name, target, target_id_name, tid);
++ get_target_name(new_target_type_name, new_target,
++ new_target_id_name, new_tid);
++ get_attribute_name(attr_name, attr);
++
++#ifdef CONFIG_RSBAC_LOG_PSEUDO
++ if(pseudo)
++ rsbac_printk(KERN_INFO
++ "rsbac_adf_set_attr(): request %s, pid %u, pseudo %u, target_type %s, tid %s, new_target_type %s, new_tid %s, attr %s, value %u, error %i\n",
++ request_name, pid_nr(caller_pid), pseudo, target_type_name, target_id_name,
++ new_target_type_name, new_target_id_name, attr_name, attr_val.dummy, error);
++ else
++#endif
++ {
++ char * owner_name = rsbac_kmalloc(32);
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(owner))
++ sprintf(owner_name, "%u/%u",
++ RSBAC_UID_SET(owner),
++ RSBAC_UID_NUM(owner));
++ else
++#endif
++ sprintf(owner_name, "%u", RSBAC_UID_NUM(owner));
++
++ rsbac_printk(KERN_INFO
++ "rsbac_adf_set_attr(): request %s, pid %u, uid %s, %starget_type %s, tid %s, new_target_type %s, new_tid %s, attr %s, value %u, error %i\n",
++ request_name, pid_nr(caller_pid), owner_name, audit_uid_name, target_type_name, target_id_name,
++ new_target_type_name, new_target_id_name, attr_name, attr_val.dummy, error);
++ rsbac_kfree(owner_name);
++ rsbac_kfree(audit_uid_name);
++ }
++ /* rsbac_kfree all helper mem */
++ rsbac_kfree(request_name);
++ rsbac_kfree(target_type_name);
++ rsbac_kfree(new_target_type_name);
++ rsbac_kfree(target_id_name);
++ rsbac_kfree(new_target_id_name);
++ rsbac_kfree(attr_name);
++ }
++
++/* count */
++ rsbac_adf_set_attr_count[target]++;
++#ifdef CONFIG_RSBAC_XSTATS
++ rsbac_adf_set_attr_xcount[target][request]++;
++#endif
++
++ return(error);
++ } /* end of rsbac_adf_set_attr() */
++
++
++/****************
++ *
++ * Secure Delete
++ *
++ ****************/
++
++#ifdef CONFIG_RSBAC_SECDEL
++
++/* open_by_dentry */
++/* This is done by hand (copy from rsbac_read_open), because system calls */
++/* are currently blocked by rsbac */
++
++static int open_by_dentry(struct dentry * file_dentry_p, struct file * file_p)
++ {
++ int tmperr;
++
++ if ( !(S_ISREG(file_dentry_p->d_inode->i_mode)) )
++ { /* this is not a file! -> error! */
++ rsbac_printk(KERN_WARNING
++ "open_by_dentry(): expected file is not a file!\n");
++ return -RSBAC_EWRITEFAILED;
++ }
++ /* Now we fill the file structure, and */
++ /* if there is an open func, use it, otherwise ignore */
++ if ((tmperr = init_private_file(file_p, file_dentry_p, O_WRONLY | O_SYNC)))
++ {
++ rsbac_printk(KERN_WARNING
++ "open_by_dentry(): could not open file!\n");
++ return -RSBAC_EWRITEFAILED;
++ }
++ /* Without a write function we get into troubles -> error */
++ if ((!file_p->f_op) || (!file_p->f_op->write))
++ {
++ rsbac_printk(KERN_WARNING
++ "open_by_dentry(): file write function missing!\n");
++ return -RSBAC_EWRITEFAILED;
++ }
++ return 0;
++ }
++
++/*
++ **********************
++ * Secure File Truncation
++ */
++static int do_rsbac_sec_trunc(struct dentry * dentry_p,
++ loff_t new_len,
++ loff_t old_len,
++ u_int may_sync)
++ {
++#if defined(CONFIG_RSBAC_MAINT)
++ return 0;
++#else
++ int err = 0;
++ rsbac_boolean_t need_overwrite = FALSE;
++
++ if (!rsbac_is_initialized())
++ return 0;
++ /* security checks */
++ if( !dentry_p
++ || !dentry_p->d_inode)
++ return -RSBAC_EINVALIDPOINTER;
++ if(!S_ISREG(dentry_p->d_inode->i_mode))
++ return -RSBAC_EINVALIDTARGET;
++ if(dentry_p->d_sb->s_magic == PIPEFS_MAGIC)
++ return 0;
++ if(new_len >= old_len)
++ return 0;
++
++ if (in_interrupt())
++ {
++ printk(KERN_WARNING "do_rsbac_sec_trunc(): called from interrupt: pid %u(%s)!\n",
++ current->pid, current->comm);
++ dump_stack();
++ return -RSBAC_EFROMINTERRUPT;
++ }
++
++ if(dentry_p->d_inode && !rsbac_writable(dentry_p->d_inode->i_sb))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_write)
++ {
++ rsbac_printk(KERN_DEBUG
++ "do_rsbac_sec_trunc(): ignoring file %lu on network device %02u:%02u!\n",
++ dentry_p->d_inode->i_ino,
++ MAJOR(dentry_p->d_inode->i_sb->s_dev),
++ MINOR(dentry_p->d_inode->i_sb->s_dev));
++ }
++#endif
++ return 0;
++ }
++
++ /******* PM ********/
++ #ifdef CONFIG_RSBAC_PM
++ #ifdef CONFIG_RSBAC_SWITCH_PM
++ if (rsbac_switch_pm)
++ #endif
++ /* no need to call module, if already need_overwrite */
++ if(!need_overwrite)
++ need_overwrite = rsbac_need_overwrite_pm(dentry_p);
++ #endif /* PM */
++
++ /******* FF ********/
++ #ifdef CONFIG_RSBAC_FF
++ #ifdef CONFIG_RSBAC_SWITCH_FF
++ if (rsbac_switch_ff)
++ #endif
++ /* no need to call module, if already need_overwrite */
++ if(!need_overwrite)
++ need_overwrite = rsbac_need_overwrite_ff(dentry_p);
++ #endif /* FF */
++
++ /******* RC ********/
++ #ifdef CONFIG_RSBAC_RC
++ #ifdef CONFIG_RSBAC_SWITCH_RC
++ if (rsbac_switch_rc)
++ #endif
++ /* no need to call module, if already need_overwrite */
++ if(!need_overwrite)
++ need_overwrite = rsbac_need_overwrite_rc(dentry_p);
++ #endif /* RC */
++
++ /****** RES *******/
++ #ifdef CONFIG_RSBAC_RES
++ #ifdef CONFIG_RSBAC_SWITCH_RES
++ if (rsbac_switch_res)
++ #endif
++ /* no need to call module, if already need_overwrite */
++ if(!need_overwrite)
++ need_overwrite = rsbac_need_overwrite_res(dentry_p);
++ #endif /* RES */
++
++ /****** REG *******/
++ #ifdef CONFIG_RSBAC_REG
++ if(!need_overwrite)
++ need_overwrite = rsbac_need_overwrite_reg(dentry_p);
++ #endif /* REG */
++
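++ /* If any module requires it, overwrite the range between new_len and old_len with zeroes, RSBAC_SEC_DEL_CHUNK_SIZE bytes at a time */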
++ if(need_overwrite)
++ {
++ char * buffer;
++ struct file file;
++ int tmperr = 0;
++ mm_segment_t oldfs;
++
++ buffer = rsbac_kmalloc(RSBAC_SEC_DEL_CHUNK_SIZE);
++ if(!buffer)
++ return -RSBAC_ENOMEM;
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_write)
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_sec_trunc(): zeroing of file %lu on device %02u:%02u from byte %lu to %lu!\n",
++ dentry_p->d_inode->i_ino,
++ MAJOR(dentry_p->d_inode->i_sb->s_dev),
++ MINOR(dentry_p->d_inode->i_sb->s_dev),
++ (u_long) new_len,
++ (u_long) old_len-1);
++ }
++#endif
++ /* open */
++ err = open_by_dentry(dentry_p, &file);
++ if(err)
++ {
++ rsbac_kfree(buffer);
++ return err;
++ }
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_write)
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_sec_trunc(): file %lu on device %02u:%02u is open, seeking to %lu!\n",
++ dentry_p->d_inode->i_ino,
++ MAJOR(dentry_p->d_inode->i_sb->s_dev),
++ MINOR(dentry_p->d_inode->i_sb->s_dev),
++ (u_long) new_len);
++ }
++#endif
++
++ /* OK, now we can start writing */
++
++ /* Set current user space to kernel space, because write() reads
++ * from user space
++ */
++ oldfs = get_fs();
++ set_fs(KERNEL_DS);
++
++ { /* taken from fs/read_write.c */
++ file.f_pos = new_len;
++ file.f_version = 0;
++ }
++ memset(buffer,0,RSBAC_SEC_DEL_CHUNK_SIZE);
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_write)
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_sec_trunc(): file %lu on device %02u:%02u is positioned, starting to write!\n",
++ dentry_p->d_inode->i_ino,
++ MAJOR(dentry_p->d_inode->i_sb->s_dev),
++ MINOR(dentry_p->d_inode->i_sb->s_dev));
++ }
++#endif
++ while (new_len < old_len)
++ {
++ struct iovec iov = { .iov_base = buffer,
++ .iov_len = rsbac_min(RSBAC_SEC_DEL_CHUNK_SIZE, old_len-new_len) };
++ struct kiocb kiocb;
++
++ init_sync_kiocb(&kiocb, &file);
++ kiocb.ki_pos = file.f_pos;
++ kiocb.ki_left = iov.iov_len;
++
++ for (;;) {
++ tmperr = blkdev_aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
++ if (tmperr != -EIOCBRETRY)
++ break;
++ wait_on_retry_sync_kiocb(&kiocb);
++ }
++
++ if (-EIOCBQUEUED == tmperr)
++ tmperr = wait_on_sync_kiocb(&kiocb);
++ file.f_pos = kiocb.ki_pos;
++
++ if (tmperr < 0) {
++ err = tmperr;
++ break;
++ }
++ new_len += tmperr;
++ }
++ set_fs(oldfs);
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_write)
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_sec_trunc(): syncing file %lu on device %02u:%02u!\n",
++ dentry_p->d_inode->i_ino,
++ MAJOR(dentry_p->d_inode->i_sb->s_dev),
++ MINOR(dentry_p->d_inode->i_sb->s_dev));
++ }
++#endif
++
++ if(may_sync && (dentry_p->d_inode->i_size > 0))
++ err = generic_file_fsync(&file, 0, dentry_p->d_inode->i_size - 1, 1);
++
++ rsbac_kfree(buffer);
++ }
++ /* Ready. */
++ return err;
++
++#endif /* else of MAINT */
++ }
++
++EXPORT_SYMBOL(rsbac_sec_trunc);
++int rsbac_sec_trunc(struct dentry * dentry_p,
++ loff_t new_len, loff_t old_len)
++ {
++ return do_rsbac_sec_trunc(dentry_p, new_len, old_len, TRUE);
++ }
++
++EXPORT_SYMBOL(rsbac_sec_del);
++int rsbac_sec_del(struct dentry * dentry_p, u_int may_sync)
++ {
++ return do_rsbac_sec_trunc(dentry_p,
++ 0,
++ dentry_p->d_inode->i_size,
++ may_sync);
++ }
++
++#else /* no SECDEL */
++EXPORT_SYMBOL(rsbac_sec_trunc);
++int rsbac_sec_trunc(struct dentry * dentry_p,
++ loff_t new_len, loff_t old_len)
++ {
++ return 0;
++ }
++EXPORT_SYMBOL(rsbac_sec_del);
++int rsbac_sec_del(struct dentry * dentry_p, u_int may_sync)
++ {
++ return 0;
++ }
++#endif /* SECDEL */
++
++#ifdef CONFIG_RSBAC_SYM_REDIR
++EXPORT_SYMBOL(rsbac_symlink_redirect);
++
++/* This function changes the symlink content by adding a suffix, if
++ * requested. It returns NULL, if unchanged, or a pointer to a
++ * kmalloc'd new char * otherwise, which has to be kfree'd after use.
++ */
++char * rsbac_symlink_redirect(
++ struct inode * inode_p,
++ const char * name,
++ u_int maxlen)
++ {
++#if defined(CONFIG_RSBAC_SYM_REDIR_REMOTE_IP) || defined(CONFIG_RSBAC_SYM_REDIR_MAC) || defined(CONFIG_RSBAC_SYM_REDIR_RC) || defined(CONFIG_RSBAC_SYM_REDIR_UID)
++ union rsbac_target_id_t * i_tid_p;
++ int err;
++ union rsbac_attribute_value_t i_attr_val;
++#endif
++
++ if(!name)
++ return NULL;
++ if(!inode_p)
++ return NULL;
++ if (!rsbac_is_initialized())
++ return NULL;
++
++ if(!S_ISLNK(inode_p->i_mode))
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_symlink_redirect(): called for non-symlink inode %u on dev %02u:%02u!\n",
++ inode_p->i_ino,
++ RSBAC_MAJOR(inode_p->i_sb->s_dev), RSBAC_MINOR(inode_p->i_sb->s_dev) );
++ return NULL;
++ }
++
++ if (in_interrupt())
++ {
++ printk(KERN_WARNING "rsbac_symlink_redirect(): called from interrupt: pid %u(%s)!\n",
++ current->pid, current->comm);
++ dump_stack();
++ return NULL;
++ }
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_symlink_redirect(): called for symlink inode %u on dev %02u:%02u!\n",
++ inode_p->i_ino,
++ RSBAC_MAJOR(inode_p->i_sb->s_dev), RSBAC_MINOR(inode_p->i_sb->s_dev) );
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_SYM_REDIR_REMOTE_IP) || defined(CONFIG_RSBAC_SYM_REDIR_MAC) || defined(CONFIG_RSBAC_SYM_REDIR_RC) || defined(CONFIG_RSBAC_SYM_REDIR_UID)
++ i_tid_p = kmalloc(sizeof(*i_tid_p), GFP_KERNEL);
++ if(!i_tid_p)
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_symlink_redirect(): not enough memory for symlink redir remote ip inode %u on dev %02u:%02u!\n",
++ inode_p->i_ino,
++ RSBAC_MAJOR(inode_p->i_sb->s_dev), RSBAC_MINOR(inode_p->i_sb->s_dev) );
++ return NULL;
++ }
++ i_tid_p->symlink.device = inode_p->i_sb->s_dev;
++ i_tid_p->symlink.inode = inode_p->i_ino;
++ i_tid_p->symlink.dentry_p = NULL;
++#endif
++
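++ /* Try the redirection attributes in order: remote IP, UID, MAC level, RC role (each only if configured); the first one set on the symlink determines the appended suffix */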
++#ifdef CONFIG_RSBAC_SYM_REDIR_REMOTE_IP
++ if ((err = rsbac_get_attr(SW_GEN,
++ T_SYMLINK,
++ *i_tid_p,
++ A_symlink_add_remote_ip,
++ &i_attr_val,
++ FALSE) ))
++ {
++ rsbac_ds_get_error_num("rsbac_symlink_redirect()", A_symlink_add_remote_ip, err);
++ kfree(i_tid_p);
++ return NULL; /* something weird happened */
++ }
++ if(i_attr_val.symlink_add_remote_ip)
++ {
++ u_int len;
++ rsbac_enum_t add_remote_ip;
++ __u32 addr;
++ char * new_name;
++
++ add_remote_ip = i_attr_val.symlink_add_remote_ip;
++ i_tid_p->process = task_pid(current);
++ err = rsbac_get_attr(SW_GEN,
++ T_PROCESS,
++ *i_tid_p,
++ A_remote_ip,
++ &i_attr_val,
++ FALSE);
++ kfree(i_tid_p);
++ if (err)
++ {
++ rsbac_ds_get_error_num("rsbac_symlink_redirect()", A_remote_ip, err);
++ return NULL; /* something weird happened */
++ }
++ addr = i_attr_val.remote_ip;
++ len = strlen(name);
++#if 0
++ while( len
++ && (name[len-1] >= '0')
++ && (name[len-1] <= '9')
++ )
++ len--;
++
++#endif
++ if(len > (maxlen - 20))
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_symlink_redirect(): not enough space for symlink inode %u on dev %02u:%02u!\n",
++ inode_p->i_ino,
++ RSBAC_MAJOR(inode_p->i_sb->s_dev), RSBAC_MINOR(inode_p->i_sb->s_dev) );
++ return NULL;
++ }
++ new_name = kmalloc(len + 20, GFP_KERNEL);
++ if(!new_name)
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_symlink_redirect(): not enough memory for symlink redir remote ip inode %u on dev %02u:%02u!\n",
++ inode_p->i_ino,
++ RSBAC_MAJOR(inode_p->i_sb->s_dev), RSBAC_MINOR(inode_p->i_sb->s_dev) );
++ return NULL;
++ }
++ strcpy(new_name, name);
++ switch(add_remote_ip)
++ {
++ case 1:
++ sprintf(new_name+len, "%u",
++ ((unsigned char *)&addr)[0]);
++ break;
++ case 2:
++ sprintf(new_name+len, "%u.%u",
++ ((unsigned char *)&addr)[0],
++ ((unsigned char *)&addr)[1]);
++ break;
++ case 3:
++ sprintf(new_name+len, "%u.%u.%u",
++ ((unsigned char *)&addr)[0],
++ ((unsigned char *)&addr)[1],
++ ((unsigned char *)&addr)[2]);
++ break;
++ default:
++ sprintf(new_name+len, "%u.%u.%u.%u",
++ ((unsigned char *)&addr)[0],
++ ((unsigned char *)&addr)[1],
++ ((unsigned char *)&addr)[2],
++ ((unsigned char *)&addr)[3]);
++ }
++ return new_name;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_SYM_REDIR_UID
++ if ((err = rsbac_get_attr(SW_GEN,
++ T_SYMLINK,
++ *i_tid_p,
++ A_symlink_add_uid,
++ &i_attr_val,
++ FALSE) ))
++ {
++ rsbac_ds_get_error_num("rsbac_symlink_redirect()", A_symlink_add_uid, err);
++ kfree(i_tid_p);
++ return NULL; /* something weird happened */
++ }
++ if(i_attr_val.symlink_add_uid)
++ {
++ rsbac_uid_t user;
++
++ kfree(i_tid_p);
++ if(!rsbac_get_owner(&user))
++ {
++ u_int len;
++ u_int room = 20;
++ char * new_name;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(user))
++ room = 40;
++#endif
++ len = strlen(name);
++ while( len
++ && ( ( (name[len-1] >= '0')
++ && (name[len-1] <= '9')
++ )
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ || (name[len-1] == '-')
++#endif
++ )
++ )
++ len--;
++ if(len > (maxlen - room))
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_symlink_redirect(): not enough space for symlink inode %u on dev %02u:%02u!\n",
++ inode_p->i_ino,
++ RSBAC_MAJOR(inode_p->i_sb->s_dev), RSBAC_MINOR(inode_p->i_sb->s_dev) );
++ return NULL;
++ }
++ new_name = kmalloc(len + room, GFP_KERNEL);
++ if(!new_name)
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_symlink_redirect(): not enough memory for symlink redir uid inode %u on dev %02u:%02u!\n",
++ inode_p->i_ino,
++ RSBAC_MAJOR(inode_p->i_sb->s_dev), RSBAC_MINOR(inode_p->i_sb->s_dev) );
++ return NULL;
++ }
++ strcpy(new_name, name);
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(user))
++ sprintf(new_name+len, "%u-%u",
++ RSBAC_UID_SET(user), RSBAC_UID_NUM(user));
++ else
++#endif
++ ulongtostr(new_name+len, RSBAC_UID_NUM(user));
++ return new_name;
++ }
++ else
++ return NULL;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_SYM_REDIR_MAC
++ if ((err = rsbac_get_attr(SW_GEN,
++ T_SYMLINK,
++ *i_tid_p,
++ A_symlink_add_mac_level,
++ &i_attr_val,
++ FALSE) ))
++ {
++ rsbac_ds_get_error_num("rsbac_symlink_redirect()", A_symlink_add_mac_level, err);
++ kfree(i_tid_p);
++ return NULL; /* something weird happened */
++ }
++ if(i_attr_val.symlink_add_mac_level)
++ {
++ u_int len;
++ char * new_name;
++
++ i_tid_p->process = task_pid(current);
++ if ((err = rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ *i_tid_p,
++ A_current_sec_level,
++ &i_attr_val,
++ FALSE) ))
++ {
++ rsbac_ds_get_error_num("rsbac_symlink_redirect()", A_current_sec_level, err);
++ kfree(i_tid_p);
++ return NULL; /* something weird happened */
++ }
++
++ len = strlen(name);
++ while( len
++ && ( ( (name[len-1] >= '0')
++ && (name[len-1] <= '9')
++ )
++#ifdef CONFIG_RSBAC_SYM_REDIR_MAC_CAT
++ || (name[len-1] == ':')
++#endif
++ )
++ )
++ len--;
++#ifdef CONFIG_RSBAC_SYM_REDIR_MAC_CAT
++ if(len > (maxlen - 85))
++#else
++ if(len > (maxlen - 20))
++#endif
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_symlink_redirect(): not enough space for symlink inode %u on dev %02u:%02u!\n",
++ inode_p->i_ino,
++ RSBAC_MAJOR(inode_p->i_sb->s_dev), RSBAC_MINOR(inode_p->i_sb->s_dev) );
++ kfree(i_tid_p);
++ return NULL;
++ }
++
++#ifdef CONFIG_RSBAC_SYM_REDIR_MAC_CAT
++ new_name = kmalloc(len + 85, GFP_KERNEL);
++#else
++ new_name = kmalloc(len + 20, GFP_KERNEL);
++#endif
++ if(!new_name)
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_symlink_redirect(): not enough memory for symlink redir MAC level inode %u on dev %02u:%02u!\n",
++ inode_p->i_ino,
++ RSBAC_MAJOR(inode_p->i_sb->s_dev), RSBAC_MINOR(inode_p->i_sb->s_dev) );
++ kfree(i_tid_p);
++ return NULL;
++ }
++ strcpy(new_name, name);
++#ifdef CONFIG_RSBAC_SYM_REDIR_MAC_CAT
++ len+=sprintf(new_name+len, "%u:", i_attr_val.current_sec_level);
++ if ((err = rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ *i_tid_p,
++ A_mac_curr_categories,
++ &i_attr_val,
++ FALSE) ))
++ {
++ rsbac_ds_get_error_num("rsbac_symlink_redirect()", A_mac_curr_categories, err);
++ kfree(i_tid_p);
++ kfree(new_name);
++ return NULL; /* something weird happened */
++ }
++ kfree(i_tid_p);
++ u64tostrmac(new_name+len, i_attr_val.mac_categories);
++#else
++ len+=sprintf(new_name+len, "%u", i_attr_val.current_sec_level);
++#endif
++ return new_name;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_SYM_REDIR_RC
++ if ((err = rsbac_get_attr(SW_GEN,
++ T_SYMLINK,
++ *i_tid_p,
++ A_symlink_add_rc_role,
++ &i_attr_val,
++ FALSE) ))
++ {
++ rsbac_ds_get_error_num("rsbac_symlink_redirect()", A_symlink_add_rc_role, err);
++ kfree(i_tid_p);
++ return NULL; /* something weird happened */
++ }
++ if(i_attr_val.symlink_add_rc_role)
++ {
++ u_int len;
++ char * new_name;
++
++ i_tid_p->process = task_pid(current);
++ err = rsbac_get_attr(SW_RC,
++ T_PROCESS,
++ *i_tid_p,
++ A_rc_role,
++ &i_attr_val,
++ FALSE);
++ kfree(i_tid_p);
++ if (err)
++ {
++ rsbac_ds_get_error_num("rsbac_symlink_redirect()", A_rc_role, err);
++ return NULL; /* something weird happened */
++ }
++
++ len = strlen(name);
++ while( len
++ && (name[len-1] >= '0')
++ && (name[len-1] <= '9')
++ )
++ len--;
++ if(len > (maxlen - 20))
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_symlink_redirect(): not enough space for symlink inode %u on dev %02u:%02u!\n",
++ inode_p->i_ino,
++ RSBAC_MAJOR(inode_p->i_sb->s_dev), RSBAC_MINOR(inode_p->i_sb->s_dev) );
++ return NULL;
++ }
++
++ new_name = kmalloc(len + 20, GFP_KERNEL);
++ if(!new_name)
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_symlink_redirect(): not enough memory for symlink redir RC role inode %u on dev %02u:%02u!\n",
++ inode_p->i_ino,
++ RSBAC_MAJOR(inode_p->i_sb->s_dev), RSBAC_MINOR(inode_p->i_sb->s_dev) );
++ return NULL;
++ }
++ strcpy(new_name, name);
++ ulongtostr(new_name+len, i_attr_val.rc_role);
++ return new_name;
++ }
++#endif
++
++ kfree(i_tid_p);
++ return NULL;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_PART
++int rsbac_dac_part_disabled(struct dentry * dentry_p)
++ {
++ int err;
++ enum rsbac_target_t i_target;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val;
++
++ if( !dentry_p
++ || !dentry_p->d_inode
++ || !dentry_p->d_inode->i_sb->s_dev
++ || !rsbac_is_initialized()
++ || !current->pid
++ || (current->pid == 1))
++ return FALSE;
++
++ if(S_ISREG(dentry_p->d_inode->i_mode))
++ i_target = T_FILE;
++ else
++ if(S_ISDIR(dentry_p->d_inode->i_mode))
++ i_target = T_DIR;
++ else
++ if(S_ISFIFO(dentry_p->d_inode->i_mode))
++ i_target = T_FIFO;
++ else
++ if(S_ISLNK(dentry_p->d_inode->i_mode))
++ i_target = T_SYMLINK;
++ else
++ return FALSE;
++
++ if (in_interrupt())
++ {
++ printk(KERN_WARNING "rsbac_dac_part_disabled(): called from interrupt: pid %u(%s)!\n",
++ current->pid, current->comm);
++ dump_stack();
++ return FALSE;
++ }
++
++ i_tid.file.device = dentry_p->d_sb->s_dev;
++ i_tid.file.inode = dentry_p->d_inode->i_ino;
++ i_tid.file.dentry_p = dentry_p;
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_dac_part_disable(): called for dentry_p->d_inode %u on dev %02u:%02u, dentry_p %p!\n",
++ i_tid.file.inode,
++ RSBAC_MAJOR(i_tid.file.device), RSBAC_MINOR(i_tid.file.device),
++ i_tid.file.dentry_p );
++ }
++#endif
++
++ if ((err = rsbac_get_attr(SW_GEN,
++ i_target,
++ i_tid,
++ A_linux_dac_disable,
++ &i_attr_val,
++ TRUE) ))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_dac_part_disable(): rsbac_get_attr() for linux_dac_disable returned error %i!\n",
++ err);
++ return FALSE; /* something weird happened */
++ }
++ if(i_attr_val.linux_dac_disable == LDD_true)
++ return TRUE;
++ else
++ return FALSE;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_FAKE_ROOT_UID
++rsbac_uid_t rsbac_fake_uid(void)
++ {
++ int err;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val;
++
++ if(!current_uid())
++ return 0;
++ if (!rsbac_is_initialized())
++ return current_uid();
++ if (in_interrupt())
++ {
++ printk(KERN_WARNING "rsbac_fake_uid(): called from interrupt: pid %u(%s)!\n",
++ current->pid, current->comm);
++ dump_stack();
++ return current_uid();
++ }
++
++ i_tid.process = task_pid(current);
++ if ((err = rsbac_get_attr(SW_GEN,
++ T_PROCESS,
++ i_tid,
++ A_fake_root_uid,
++ &i_attr_val,
++ FALSE) ))
++ {
++ rsbac_ds_get_error("rsbac_fake_uid()", A_fake_root_uid);
++ return current_uid();
++ }
++ switch(i_attr_val.fake_root_uid)
++ {
++ case FR_both:
++ case FR_uid_only:
++ return 0;
++ default:
++ return current_uid();
++ }
++ }
++
++rsbac_uid_t rsbac_fake_euid(void)
++ {
++ int err;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val;
++
++ if(!current_euid())
++ return 0;
++ if (!rsbac_is_initialized())
++ return current_euid();
++
++ if (in_interrupt())
++ {
++ printk(KERN_WARNING "rsbac_fake_euid(): called from interrupt: pid %u(%s)!\n",
++ current->pid, current->comm);
++ dump_stack();
++ return current_euid();
++ }
++
++ i_tid.process = task_pid(current);
++ if ((err = rsbac_get_attr(SW_GEN,
++ T_PROCESS,
++ i_tid,
++ A_fake_root_uid,
++ &i_attr_val,
++ FALSE) ))
++ {
++ rsbac_ds_get_error("rsbac_fake_euid()", A_fake_root_uid);
++ return current_euid();
++ }
++ switch(i_attr_val.fake_root_uid)
++ {
++ case FR_both:
++ case FR_euid_only:
++ return 0;
++ default:
++ return current_euid();
++ }
++ }
++
++int rsbac_uid_faked(void)
++ {
++ int err;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val;
++
++ if (!rsbac_is_initialized())
++ return 0;
++
++ if (in_interrupt())
++ {
++ printk(KERN_WARNING "rsbac_uid_faked(): called from interrupt: pid %u(%s)!\n",
++ current->pid, current->comm);
++ dump_stack();
++ return 0;
++ }
++
++ i_tid.process = task_pid(current);
++ if ((err = rsbac_get_attr(SW_GEN,
++ T_PROCESS,
++ i_tid,
++ A_fake_root_uid,
++ &i_attr_val,
++ FALSE) ))
++ {
++ rsbac_ds_get_error("rsbac_uid_faked()", A_fake_root_uid);
++ return 0; /* something weird happened */
++ }
++ switch(i_attr_val.fake_root_uid)
++ {
++ case FR_both:
++ case FR_uid_only:
++ return 1;
++ default:
++ return 0;
++ }
++ }
++
++#endif
++
++int rsbac_set_audit_uid(rsbac_uid_t uid)
++ {
++ union rsbac_target_id_t tid;
++ union rsbac_attribute_value_t attr_val;
++
++ if(!uid || (uid == current_uid()))
++ return 0;
++
++ if (in_interrupt())
++ {
++ printk(KERN_WARNING "rsbac_set_audit_uid(): called from interrupt: pid %u(%s)!\n",
++ current->pid, current->comm);
++ dump_stack();
++ return -RSBAC_EFROMINTERRUPT;
++ }
++
++ tid.process = task_pid(current);
++ if (rsbac_get_attr(SW_GEN,
++ T_PROCESS,
++ tid,
++ A_audit_uid,
++ &attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_set_audit_uid()", A_audit_uid);
++ return -RSBAC_EREADFAILED;
++ }
++ if(attr_val.audit_uid != RSBAC_NO_USER)
++ return 0;
++
++ if (rsbac_get_attr(SW_GEN,
++ T_PROCESS,
++ tid,
++ A_auid_exempt,
++ &attr_val,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_set_audit_uid()", A_auid_exempt);
++ return -RSBAC_EREADFAILED;
++ }
++ if(attr_val.auid_exempt == uid)
++ return 0;
++
++ attr_val.audit_uid = uid;
++ if (rsbac_set_attr(SW_GEN,
++ T_PROCESS,
++ tid,
++ A_audit_uid,
++ attr_val))
++ {
++ rsbac_ds_set_error("rsbac_set_audit_uid()", A_audit_uid);
++ return -RSBAC_EWRITEFAILED;
++ }
++ return 0;
++ }
++
++#if defined(CONFIG_RSBAC_CAP_LOG_MISSING) || defined(CONFIG_RSBAC_JAIL_LOG_MISSING)
++EXPORT_SYMBOL(rsbac_log_missing_cap);
++
++void rsbac_log_missing_cap(int cap)
++ {
++ #if defined(CONFIG_RSBAC_CAP_LOG_MISSING) || defined(CONFIG_RSBAC_CAP_LEARN)
++ #if defined(CONFIG_RSBAC_CAP_LOG_MISSING) && defined(CONFIG_RSBAC_CAP_LEARN)
++ if(rsbac_cap_log_missing || rsbac_cap_learn)
++ #elif defined(CONFIG_RSBAC_CAP_LEARN)
++ if(rsbac_cap_learn)
++ #else
++ if(rsbac_cap_log_missing)
++ #endif
++ rsbac_cap_log_missing_cap(cap);
++ #endif
++ #if defined(CONFIG_RSBAC_JAIL_LOG_MISSING)
++ if(rsbac_jail_log_missing)
++ rsbac_jail_log_missing_cap(cap);
++ #endif
++ }
++#endif
++
++/* end of rsbac/adf/main.c */
+diff --git a/rsbac/adf/auth/Makefile b/rsbac/adf/auth/Makefile
+new file mode 100644
+index 0000000..91737a7
+--- /dev/null
++++ b/rsbac/adf/auth/Makefile
+@@ -0,0 +1,14 @@
++#
++# File: rsbac/adf/auth/Makefile
++#
++# Makefile for the Linux rsbac auth decision module.
++#
++# Author and (c) 1999-2012 Amon Ott <ao@rsbac.org>
++#
++
++obj-y := auth_syscalls.o
++# decisions only in non-maint mode
++ifneq ($(CONFIG_RSBAC_MAINT),y)
++obj-y += auth_main.o
++endif
++
+diff --git a/rsbac/adf/auth/auth_main.c b/rsbac/adf/auth/auth_main.c
+new file mode 100644
+index 0000000..00cfeeb
+--- /dev/null
++++ b/rsbac/adf/auth/auth_main.c
+@@ -0,0 +1,1189 @@
++/**************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - Authorization module */
++/* File: rsbac/adf/auth/main.c */
++/* */
++/* Author and (c) 1999-2011: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 12/Jul/2011 */
++/**************************************************** */
++
++#include <linux/string.h>
++#include <rsbac/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/auth.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++#include <rsbac/debug.h>
++
++/************************************************* */
++/* Global Variables */
++/************************************************* */
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
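++/* Helper: if 'from' is a member of the process capset of the given type, add 'to' to the capset and remove 'from' */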
++static int rsbac_replace_auth_cap(rsbac_pid_t caller_pid,
++ enum rsbac_auth_cap_type_t cap_type,
++ rsbac_uid_t from,
++ rsbac_uid_t to)
++ {
++ if(rsbac_auth_p_capset_member(caller_pid, cap_type, from))
++ {
++ struct rsbac_auth_cap_range_t cap_range;
++
++ /* remove it and set cap for 'to' */
++ cap_range.first = to;
++ cap_range.last = to;
++ if (rsbac_auth_add_to_p_capset(0, caller_pid, cap_type, cap_range, 0))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_auth(): rsbac_auth_add_to_p_capset() returned error!\n");
++ return -RSBAC_EWRITEFAILED;
++ }
++ cap_range.first = from;
++ cap_range.last = from;
++ if (rsbac_auth_remove_from_p_capset(0, caller_pid, cap_type, cap_range))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_auth(): rsbac_auth_remove_from_p_capset() returned error!\n");
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ return 0; /* success */
++ }
++
++/************************************************* */
++/* Externally visible functions */
++/************************************************* */
++
++inline enum rsbac_adf_req_ret_t
++ rsbac_adf_request_auth (enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ enum rsbac_adf_req_ret_t result = DO_NOT_CARE;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_target_id_t i_tid;
++
++ switch (request)
++ {
++#if defined(CONFIG_RSBAC_AUTH_UM_PROT) || defined(CONFIG_RSBAC_AUTH_GROUP)
++ case R_CHANGE_GROUP:
++ switch(target)
++ {
++#if defined(CONFIG_RSBAC_AUTH_UM_PROT)
++ case T_USER:
++ /* Security Officer? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_AUTH,
++ T_USER,
++ i_tid,
++ A_auth_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_pr_get_error(A_auth_role);
++ return NOT_GRANTED;
++ }
++ /* if sec_officer, then grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++#endif /* AUTH_UM_PROT */
++
++#if defined(CONFIG_RSBAC_AUTH_GROUP)
++ case T_PROCESS:
++ if(attr != A_group)
++ return NOT_GRANTED;
++#if defined(CONFIG_RSBAC_AUTH_ALLOW_SAME)
++ if(attr_val.group == RSBAC_GEN_GID(RSBAC_UID_SET(owner),current_gid()))
++ return DO_NOT_CARE;
++#endif
++ /* check auth_may_setuid of process */
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ tid,
++ A_auth_may_setuid,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_auth_may_setuid);
++ return NOT_GRANTED;
++ }
++ /* if auth_may_setuid is full or and_gid, then grant */
++ if( (i_attr_val1.auth_may_setuid == AMS_full)
++ || (i_attr_val1.auth_may_setuid == AMS_last_auth_and_gid)
++ )
++ return GRANTED;
++
++ /* check, if the target uid is in capset, grant, if yes, deny, if not. */
++ if(rsbac_auth_p_capset_member(caller_pid, ACT_group_real, attr_val.group))
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++#endif /* AUTH_GROUP */
++
++ /* We do not care about */
++ /* all other cases */
++ default:
++ return DO_NOT_CARE;
++ }
++#endif /* AUTH_UM_PROT || AUTH_GROUP */
++
++#if defined(CONFIG_RSBAC_AUTH_UM_PROT)
++ case R_CREATE:
++ case R_DELETE:
++ case R_GET_PERMISSIONS_DATA:
++ case R_RENAME:
++ case R_WRITE:
++ switch(target)
++ {
++ case T_USER:
++ case T_GROUP:
++ /* Security Officer? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_AUTH,
++ T_USER,
++ i_tid,
++ A_auth_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_pr_get_error(A_auth_role);
++ return NOT_GRANTED;
++ }
++ /* if sec_officer, then grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++ /* We do not care about */
++ /* all other cases */
++ default: return DO_NOT_CARE;
++ }
++#endif
++
++ case R_CHANGE_OWNER:
++ switch(target)
++ {
++ case T_PROCESS:
++ if(attr != A_owner)
++ return NOT_GRANTED;
++#if defined(CONFIG_RSBAC_AUTH_ALLOW_SAME)
++ if(attr_val.owner == owner)
++ return DO_NOT_CARE;
++#endif
++ /* check auth_may_setuid of process */
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ tid,
++ A_auth_may_setuid,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_auth_may_setuid);
++ return NOT_GRANTED;
++ }
++ switch(i_attr_val1.auth_may_setuid)
++ {
++ case AMS_off:
++ break;
++ case AMS_full:
++ return GRANTED;
++ case AMS_last_auth_only:
++ case AMS_last_auth_and_gid:
++ if(attr_val.owner == RSBAC_NO_USER)
++ return NOT_GRANTED;
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ tid,
++ A_auth_last_auth,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_auth_last_auth);
++ return NOT_GRANTED;
++ }
++ if(i_attr_val1.auth_last_auth == attr_val.owner)
++ return GRANTED;
++ break;
++
++ default:
++ rsbac_printk(KERN_INFO
++ "rsbac_adf_request_auth(): auth_may_setuid of process %u an invalid value %u!\n",
++ tid.process, i_attr_val1.auth_may_setuid);
++ return NOT_GRANTED;
++ }
++ /* check, if the target uid is in capset, grant, if yes, deny, if not. */
++ if(rsbac_auth_p_capset_member(caller_pid, ACT_real, attr_val.owner))
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++
++ /* all other cases are not checked */
++ default:
++ return DO_NOT_CARE;
++ }
++
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ case R_CHANGE_DAC_EFF_OWNER:
++ switch(target)
++ {
++ case T_PROCESS:
++ if(attr != A_owner)
++ return NOT_GRANTED;
++ if(attr_val.owner == owner)
++ return DO_NOT_CARE;
++#if defined(CONFIG_RSBAC_AUTH_ALLOW_SAME)
++ if(attr_val.owner == current_euid())
++ return DO_NOT_CARE;
++#endif
++ /* check auth_may_setuid of process */
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ tid,
++ A_auth_may_setuid,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_auth_may_setuid);
++ return NOT_GRANTED;
++ }
++ switch(i_attr_val1.auth_may_setuid)
++ {
++ case AMS_off:
++ break;
++ case AMS_full:
++ return GRANTED;
++ case AMS_last_auth_only:
++ case AMS_last_auth_and_gid:
++ if(attr_val.owner == RSBAC_NO_USER)
++ return NOT_GRANTED;
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ tid,
++ A_auth_last_auth,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_auth_last_auth);
++ return NOT_GRANTED;
++ }
++ if(i_attr_val1.auth_last_auth == attr_val.owner)
++ return GRANTED;
++ break;
++
++ default:
++ rsbac_printk(KERN_INFO
++ "rsbac_adf_request_auth(): auth_may_setuid of process %u has invalid value %u!\n",
++ tid.process, i_attr_val1.auth_may_setuid);
++ return NOT_GRANTED;
++ }
++ /* check, if the target uid is in capset, grant, if yes, deny, if not. */
++ if(rsbac_auth_p_capset_member(caller_pid, ACT_eff, attr_val.owner))
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++
++ /* all other cases are not checked */
++ default:
++ return DO_NOT_CARE;
++ }
++ case R_CHANGE_DAC_FS_OWNER:
++ switch(target)
++ {
++ case T_PROCESS:
++ if(attr != A_owner)
++ return NOT_GRANTED;
++ if(attr_val.owner == owner)
++ return DO_NOT_CARE;
++#if defined(CONFIG_RSBAC_AUTH_ALLOW_SAME)
++ if(attr_val.owner == current_fsuid())
++ return DO_NOT_CARE;
++#endif
++ /* check auth_may_setuid of process */
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ tid,
++ A_auth_may_setuid,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_auth_may_setuid);
++ return NOT_GRANTED;
++ }
++ switch(i_attr_val1.auth_may_setuid)
++ {
++ case AMS_off:
++ break;
++ case AMS_full:
++ return GRANTED;
++ case AMS_last_auth_only:
++ case AMS_last_auth_and_gid:
++ if(attr_val.owner == RSBAC_NO_USER)
++ return NOT_GRANTED;
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ tid,
++ A_auth_last_auth,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_auth_last_auth);
++ return NOT_GRANTED;
++ }
++ if(i_attr_val1.auth_last_auth == attr_val.owner)
++ return GRANTED;
++ break;
++
++ default:
++ rsbac_printk(KERN_INFO
++ "rsbac_adf_request_auth(): auth_may_setuid of process %u has an invalid value!\n",
++ tid.process);
++ return NOT_GRANTED;
++ }
++ /* check, if the target uid is in capset, grant, if yes, deny, if not. */
++ if(rsbac_auth_p_capset_member(caller_pid, ACT_fs, attr_val.owner))
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++
++ /* all other cases are not checked */
++ default:
++ return DO_NOT_CARE;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ case R_CHANGE_DAC_EFF_GROUP:
++ switch(target)
++ {
++ case T_PROCESS:
++ if(attr != A_group)
++ return NOT_GRANTED;
++ if(attr_val.group == RSBAC_GEN_GID(RSBAC_UID_SET(owner),current_gid()))
++ return DO_NOT_CARE;
++#if defined(CONFIG_RSBAC_AUTH_ALLOW_SAME)
++ if(attr_val.group == RSBAC_GEN_GID(RSBAC_UID_SET(owner),current_egid()))
++ return DO_NOT_CARE;
++#endif
++ /* check auth_may_setuid of process */
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ tid,
++ A_auth_may_setuid,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_auth_may_setuid);
++ return NOT_GRANTED;
++ }
++ /* if auth_may_setuid is full or and_gid, then grant */
++ if( (i_attr_val1.auth_may_setuid == AMS_full)
++ || (i_attr_val1.auth_may_setuid == AMS_last_auth_and_gid)
++ )
++ return GRANTED;
++
++ /* check, if the target uid is in capset, grant, if yes, deny, if not. */
++ if(rsbac_auth_p_capset_member(caller_pid, ACT_group_eff, attr_val.group))
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++
++ /* all other cases are not checked */
++ default:
++ return DO_NOT_CARE;
++ }
++ case R_CHANGE_DAC_FS_GROUP:
++ switch(target)
++ {
++ case T_PROCESS:
++ if(attr != A_group)
++ return NOT_GRANTED;
++ if(attr_val.group == RSBAC_GEN_GID(RSBAC_UID_SET(owner),current_gid()))
++ return DO_NOT_CARE;
++#if defined(CONFIG_RSBAC_AUTH_ALLOW_SAME)
++ if(attr_val.group == RSBAC_GEN_GID(RSBAC_UID_SET(owner),current_fsgid()))
++ return DO_NOT_CARE;
++#endif
++ /* check auth_may_setuid of process */
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ tid,
++ A_auth_may_setuid,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_auth_may_setuid);
++ return NOT_GRANTED;
++ }
++ /* if auth_may_setuid is full or and_gid, then grant */
++ if( (i_attr_val1.auth_may_setuid == AMS_full)
++ || (i_attr_val1.auth_may_setuid == AMS_last_auth_and_gid)
++ )
++ return GRANTED;
++
++ /* check, if the target uid is in capset, grant, if yes, deny, if not. */
++ if(rsbac_auth_p_capset_member(caller_pid, ACT_group_fs, attr_val.group))
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++
++ /* all other cases are not checked */
++ default:
++ return DO_NOT_CARE;
++ }
++#endif
++#endif /* AUTH_GROUP */
++
++ case R_MODIFY_ATTRIBUTE:
++ switch(attr)
++ {
++ /* Only protect itself, if asked to by configuration */
++ #ifdef CONFIG_RSBAC_AUTH_AUTH_PROT
++ case A_system_role:
++ case A_auth_role:
++ case A_auth_may_setuid:
++ case A_auth_may_set_cap:
++ case A_auth_start_uid:
++ case A_auth_start_euid:
++ case A_auth_start_gid:
++ case A_auth_start_egid:
++ case A_auth_learn:
++ case A_program_file:
++ case A_auth_add_f_cap:
++ case A_auth_remove_f_cap:
++ /* All attributes (remove target!) */
++ case A_none:
++ /* Security Officer? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_AUTH,
++ T_USER,
++ i_tid,
++ A_auth_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_pr_get_error(A_auth_role);
++ return NOT_GRANTED;
++ }
++ /* if sec_officer, then grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++ #endif
++
++ case A_auth_last_auth:
++ if(target != T_PROCESS)
++ return DO_NOT_CARE;
++ /* check auth_may_set_cap of calling process */
++ i_tid.process = task_pid(current);
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ i_tid,
++ A_auth_may_set_cap,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_auth_may_set_cap);
++ return -RSBAC_EREADFAILED;
++ }
++ /* if auth_may_set_cap is not set, then reject */
++ if (!i_attr_val1.auth_may_set_cap)
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_adf_request_auth(): changing auth_last_auth of process %u to %u denied for process %u!\n",
++ tid.process,
++ attr_val.auth_last_auth,
++ task_pid(current));
++ return NOT_GRANTED;
++ }
++
++ default:
++ return DO_NOT_CARE;
++ }
++
++/* Only protect itself, if asked to by configuration */
++#ifdef CONFIG_RSBAC_AUTH_AUTH_PROT
++ case R_GET_STATUS_DATA:
++ switch(target)
++ {
++ case T_SCD:
++ /* target rsbac_log? only for secoff */
++ if (tid.scd != ST_rsbac_log)
++ return GRANTED;
++ /* Secoff or Auditor? */
++ i_tid.user = owner;
++ if ((rsbac_get_attr(SW_AUTH,
++ T_USER,
++ i_tid,
++ A_auth_role,
++ &i_attr_val1,
++ TRUE)))
++ {
++ rsbac_pr_get_error(A_auth_role);
++ return NOT_GRANTED;
++ }
++ /* grant only for secoff */
++ if ( (i_attr_val1.system_role == SR_security_officer)
++ || (i_attr_val1.system_role == SR_auditor)
++ )
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++
++ default:
++ return DO_NOT_CARE;
++ };
++
++ case R_MODIFY_PERMISSIONS_DATA:
++ switch(target)
++ {
++ case T_SCD:
++ #ifdef CONFIG_RSBAC_USER_MOD_IOPERM
++ if(tid.scd == ST_ioports)
++ return GRANTED;
++ #endif
++ /* fall through */
++ #if defined(CONFIG_RSBAC_AUTH_UM_PROT)
++ case T_USER:
++ case T_GROUP:
++ #endif
++ /* Security Officer? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_AUTH,
++ T_USER,
++ i_tid,
++ A_auth_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_pr_get_error(A_auth_role);
++ return NOT_GRANTED;
++ }
++ /* if sec_officer, then grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return GRANTED;
++ /* For booting: if administrator and ioports, then grant */
++ if (
++ #if defined(CONFIG_RSBAC_AUTH_UM_PROT)
++ (target == T_SCD) &&
++ #endif
++ (i_attr_val1.system_role == SR_administrator)
++ && (tid.scd == ST_ioports) )
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE
++ /* switching Linux DAC */
++ case T_NONE:
++ /* Security Officer? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_AUTH,
++ T_USER,
++ i_tid,
++ A_auth_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_pr_get_error(A_auth_role);
++ return NOT_GRANTED;
++ }
++ /* if sec_officer, then grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++#endif
++
++ /* all other cases are not checked */
++ default: return DO_NOT_CARE;
++ }
++
++ case R_MODIFY_SYSTEM_DATA:
++ switch(target)
++ {
++ case T_SCD:
++ /* target not rsbac_log? no problem -> grant */
++ switch(tid.scd)
++ {
++ case ST_rsbac_log:
++ case ST_rsbac_remote_log:
++ break;
++ case ST_kmem:
++ return NOT_GRANTED;
++ default:
++ return GRANTED;
++ }
++ /* Get role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_AUTH,
++ T_USER,
++ i_tid,
++ A_auth_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_pr_get_error(A_auth_role);
++ return NOT_GRANTED;
++ }
++ /* grant only for secoff and auditor */
++ if ( (i_attr_val1.system_role == SR_security_officer)
++ || (i_attr_val1.system_role == SR_auditor)
++ )
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++
++ /* all other cases are not checked */
++ default: return DO_NOT_CARE;
++ }
++
++ case R_SWITCH_LOG:
++ switch(target)
++ {
++ case T_NONE:
++ /* test owner's auth_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_AUTH,
++ T_USER,
++ i_tid,
++ A_auth_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_pr_get_error(A_auth_role);
++ return NOT_GRANTED;
++ }
++ /* security officer? -> grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++
++ /* all other cases are not checked */
++ default: return DO_NOT_CARE;
++ }
++
++ case R_SWITCH_MODULE:
++ switch(target)
++ {
++ case T_NONE:
++ /* we need the switch_target */
++ if(attr != A_switch_target)
++ return NOT_GRANTED;
++#ifndef CONFIG_RSBAC_AUTH_OTHER_PROT
++ /* do not care for other modules */
++ if( (attr_val.switch_target != SW_AUTH)
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ && (attr_val.switch_target != SW_SOFTMODE)
++ #endif
++ #ifdef CONFIG_RSBAC_FREEZE
++ && (attr_val.switch_target != SW_FREEZE)
++ #endif
++ )
++ return DO_NOT_CARE;
++#endif
++ /* test owner's auth_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_AUTH,
++ T_USER,
++ i_tid,
++ A_auth_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_pr_get_error(A_auth_role);
++ return NOT_GRANTED;
++ }
++ /* security officer? -> grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++
++ /* all other cases are not checked */
++ default: return DO_NOT_CARE;
++ }
++#endif
++
++/*********************/
++ default: return DO_NOT_CARE;
++ }
++
++ return result;
++ } /* end of rsbac_adf_request_auth() */
++
++
++/*****************************************************************************/
++/* If the request returned granted and the operation is performed, */
++/* the following function can be called by the AEF to get all aci set */
++/* correctly. For write accesses that are performed fully within the kernel, */
++/* this is usually not done to prevent extra calls, including R_CLOSE for */
++/* cleaning up. */
++/* The second instance of target specification is the new target, if one has */
++/* been created, otherwise its values are ignored. */
++/* On success, 0 is returned, and an error from rsbac/error.h otherwise. */
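++/* Editor's illustrative sketch (not part of the original RSBAC patch): the */
++/* request/set_attr call protocol described above, seen from a hypothetical */
++/* AEF-side caller. It only assumes the two functions defined in this file  */
++/* and the standard RSBAC types; it is disabled with #if 0 and never built. */
++#if 0
++static int example_aef_execute(rsbac_pid_t pid,
++                               union rsbac_target_id_t file_tid,
++                               rsbac_uid_t owner)
++  {
++    union rsbac_attribute_value_t attr_val;
++    enum rsbac_adf_req_ret_t ret;
++
++    memset(&attr_val, 0, sizeof(attr_val));
++    /* 1. Ask the decision function before performing the operation. */
++    ret = rsbac_adf_request_auth(R_EXECUTE, pid, T_FILE, file_tid,
++                                 A_none, attr_val, owner);
++    if (ret == NOT_GRANTED)
++      return -EPERM;
++    /* 2. ... the AEF performs the actual execve() work here ... */
++    /* 3. On success, let the module update the process ACI (auth_may_setuid, */
++    /*    capability sets, learning state, ...). No new target was created,   */
++    /*    so the second target pair is passed as T_NONE and ignored.          */
++    return rsbac_adf_set_attr_auth(R_EXECUTE, pid, T_FILE, file_tid,
++                                   T_NONE, file_tid, A_none, attr_val, owner);
++  }
++#endif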
++
++inline int rsbac_adf_set_attr_auth(
++ enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_target_t new_target,
++ union rsbac_target_id_t new_tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ int error;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_attribute_value_t i_attr_val2;
++ #if defined(CONFIG_RSBAC_AUTH_LEARN)
++ union rsbac_attribute_value_t i_attr_val3;
++ union rsbac_attribute_value_t i_attr_val4;
++ #endif
++
++ switch (request)
++ {
++ case R_CLONE:
++ if (target == T_PROCESS)
++ {
++ /* Get auth_may_setuid from first process */
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ tid,
++ A_auth_may_setuid,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_auth_may_setuid);
++ return -RSBAC_EREADFAILED;
++ }
++ /* Get auth_may_set_cap from first process */
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ tid,
++ A_auth_may_set_cap,
++ &i_attr_val2,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_auth_may_set_cap);
++ return -RSBAC_EREADFAILED;
++ }
++ #if defined(CONFIG_RSBAC_AUTH_LEARN)
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ tid,
++ A_auth_start_uid,
++ &i_attr_val3,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_auth_start_uid);
++ return -RSBAC_EREADFAILED;
++ }
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ tid,
++ A_auth_learn,
++ &i_attr_val4,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_auth_learn);
++ return -RSBAC_EREADFAILED;
++ }
++ #endif
++ /* Set auth_may_setuid for new process */
++ if (rsbac_set_attr(SW_AUTH,
++ T_PROCESS,
++ new_tid,
++ A_auth_may_setuid,
++ i_attr_val1))
++ {
++ rsbac_pr_set_error(A_auth_may_setuid);
++ return -RSBAC_EWRITEFAILED;
++ }
++ /* Set auth_may_set_cap for new process */
++ if (rsbac_set_attr(SW_AUTH,
++ T_PROCESS,
++ new_tid,
++ A_auth_may_set_cap,
++ i_attr_val2))
++ {
++ rsbac_pr_set_error(A_auth_may_set_cap);
++ return -RSBAC_EWRITEFAILED;
++ }
++ #if defined(CONFIG_RSBAC_AUTH_LEARN)
++ if (rsbac_set_attr(SW_AUTH,
++ T_PROCESS,
++ new_tid,
++ A_auth_start_uid,
++ i_attr_val3))
++ {
++ rsbac_pr_set_error(A_auth_start_uid);
++ return -RSBAC_EWRITEFAILED;
++ }
++ if (rsbac_set_attr(SW_AUTH,
++ T_PROCESS,
++ new_tid,
++ A_auth_learn,
++ i_attr_val4))
++ {
++ rsbac_pr_set_error(A_auth_learn);
++ return -RSBAC_EWRITEFAILED;
++ }
++ #ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ tid,
++ A_auth_start_euid,
++ &i_attr_val4,
++ FALSE))
++ {
++            rsbac_pr_get_error(A_auth_start_euid);
++ return -RSBAC_EREADFAILED;
++ }
++ if (rsbac_set_attr(SW_AUTH,
++ T_PROCESS,
++ new_tid,
++ A_auth_start_euid,
++ i_attr_val4))
++ {
++            rsbac_pr_set_error(A_auth_start_euid);
++ return -RSBAC_EWRITEFAILED;
++ }
++ #endif
++ #ifdef CONFIG_RSBAC_AUTH_GROUP
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ tid,
++ A_auth_start_gid,
++ &i_attr_val4,
++ FALSE))
++ {
++            rsbac_pr_get_error(A_auth_start_gid);
++ return -RSBAC_EREADFAILED;
++ }
++ if (rsbac_set_attr(SW_AUTH,
++ T_PROCESS,
++ new_tid,
++ A_auth_start_gid,
++ i_attr_val4))
++ {
++            rsbac_pr_set_error(A_auth_start_gid);
++ return -RSBAC_EWRITEFAILED;
++ }
++ #ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ tid,
++ A_auth_start_egid,
++ &i_attr_val4,
++ FALSE))
++ {
++            rsbac_pr_get_error(A_auth_start_egid);
++ return -RSBAC_EREADFAILED;
++ }
++ if (rsbac_set_attr(SW_AUTH,
++ T_PROCESS,
++ new_tid,
++ A_auth_start_egid,
++ i_attr_val4))
++ {
++            rsbac_pr_set_error(A_auth_start_egid);
++ return -RSBAC_EWRITEFAILED;
++ }
++ #endif
++ #endif
++ #endif
++ /* copy auth_last_auth */
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ tid,
++ A_auth_last_auth,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_auth_last_auth);
++ return -RSBAC_EREADFAILED;
++ }
++ if (rsbac_set_attr(SW_AUTH,
++ T_PROCESS,
++ new_tid,
++ A_auth_last_auth,
++ i_attr_val1))
++ {
++ rsbac_pr_set_error(A_auth_last_auth);
++ return -RSBAC_EWRITEFAILED;
++ }
++ /* copy capability list */
++ if(rsbac_auth_copy_pp_capset(tid.process,new_tid.process))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_auth(): rsbac_auth_copy_pp_capset() returned error!\n");
++ return -RSBAC_EWRITEFAILED;
++ }
++ return 0;
++ }
++ else
++ return 0;
++
++ case R_EXECUTE:
++ switch(target)
++ {
++ case T_FILE:
++ /* reset auth_may_setuid and auth_may_set_cap for process */
++ i_tid.process = caller_pid;
++ /* First, set auth_may_setuid to program file's auth_may_setuid */
++ if (rsbac_get_attr(SW_AUTH,
++ T_FILE,
++ tid,
++ A_auth_may_setuid,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_pr_get_error(A_auth_may_setuid);
++ return -RSBAC_EREADFAILED;
++ }
++ if (rsbac_set_attr(SW_AUTH,
++ T_PROCESS,
++ i_tid,
++ A_auth_may_setuid,
++ i_attr_val1))
++ {
++ rsbac_pr_set_error(A_auth_may_setuid);
++ return -RSBAC_EWRITEFAILED;
++ }
++ /* Next, set auth_may_set_cap to program file's auth_may_set_cap */
++ if (rsbac_get_attr(SW_AUTH,
++ T_FILE,
++ tid,
++ A_auth_may_set_cap,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_auth_may_set_cap);
++ return -RSBAC_EREADFAILED;
++ }
++ if (rsbac_set_attr(SW_AUTH,
++ T_PROCESS,
++ i_tid,
++ A_auth_may_set_cap,
++ i_attr_val1))
++ {
++ rsbac_pr_set_error(A_auth_may_set_cap);
++ return -RSBAC_EWRITEFAILED;
++ }
++ /* reset auth_last_auth for process */
++ i_attr_val1.auth_last_auth = RSBAC_NO_USER;
++ if (rsbac_set_attr(SW_AUTH,
++ T_PROCESS,
++ i_tid,
++ A_auth_last_auth,
++ i_attr_val1))
++ {
++ rsbac_pr_set_error(A_auth_last_auth);
++ }
++
++ /* copy file capability list from file to process */
++ if (rsbac_auth_copy_fp_capset(tid.file, caller_pid))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_auth(): rsbac_auth_copy_fp_capset() returned error!\n");
++ return -RSBAC_EWRITEFAILED;
++ }
++ /* replace RSBAC_AUTH_OWNER_F_CAP by current owner */
++ error = rsbac_replace_auth_cap(caller_pid,
++ ACT_real,
++ RSBAC_AUTH_OWNER_F_CAP,
++ owner);
++ if(error)
++ return error;
++ #ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ error = rsbac_replace_auth_cap(caller_pid,
++ ACT_eff,
++ RSBAC_AUTH_OWNER_F_CAP,
++ owner);
++ if(error)
++ return error;
++ error = rsbac_replace_auth_cap(caller_pid,
++ ACT_eff,
++ RSBAC_AUTH_DAC_OWNER_F_CAP,
++ current_euid());
++ if(error)
++ return error;
++ error = rsbac_replace_auth_cap(caller_pid,
++ ACT_fs,
++ RSBAC_AUTH_OWNER_F_CAP,
++ owner);
++ if(error)
++ return error;
++ error = rsbac_replace_auth_cap(caller_pid,
++ ACT_fs,
++ RSBAC_AUTH_DAC_OWNER_F_CAP,
++ current_fsuid());
++ if(error)
++ return error;
++ #endif
++ #ifdef CONFIG_RSBAC_AUTH_GROUP
++ error = rsbac_replace_auth_cap(caller_pid,
++ ACT_group_real,
++ RSBAC_AUTH_GROUP_F_CAP,
++ current_gid());
++ if(error)
++ return error;
++ #ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ error = rsbac_replace_auth_cap(caller_pid,
++ ACT_group_eff,
++ RSBAC_AUTH_GROUP_F_CAP,
++ current_gid());
++ if(error)
++ return error;
++ error = rsbac_replace_auth_cap(caller_pid,
++ ACT_group_eff,
++ RSBAC_AUTH_DAC_GROUP_F_CAP,
++ current_egid());
++ if(error)
++ return error;
++ error = rsbac_replace_auth_cap(caller_pid,
++ ACT_group_fs,
++ RSBAC_AUTH_GROUP_F_CAP,
++ current_gid());
++ if(error)
++ return error;
++ error = rsbac_replace_auth_cap(caller_pid,
++ ACT_group_fs,
++ RSBAC_AUTH_DAC_GROUP_F_CAP,
++ current_fsgid());
++ if(error)
++ return error;
++ #endif
++ #endif
++
++ #if defined(CONFIG_RSBAC_AUTH_LEARN)
++ /* Set auth_learn to program file's auth_learn */
++ if (rsbac_get_attr(SW_AUTH,
++ T_FILE,
++ tid,
++ A_auth_learn,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_pr_get_error(A_auth_learn);
++ return -RSBAC_EREADFAILED;
++ }
++ if (rsbac_set_attr(SW_AUTH,
++ T_PROCESS,
++ i_tid,
++ A_auth_learn,
++ i_attr_val1))
++ {
++ rsbac_pr_set_error(A_auth_learn);
++ return -RSBAC_EWRITEFAILED;
++ }
++ /* remember caller */
++ i_attr_val1.auth_start_uid = owner;
++ if (rsbac_set_attr(SW_AUTH,
++ T_PROCESS,
++ i_tid,
++ A_auth_start_uid,
++ i_attr_val1))
++ {
++ rsbac_pr_set_error(A_auth_start_uid);
++ return -RSBAC_EWRITEFAILED;
++ }
++ #ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ i_attr_val1.auth_start_euid = current_euid();
++ if (rsbac_set_attr(SW_AUTH,
++ T_PROCESS,
++ i_tid,
++ A_auth_start_euid,
++ i_attr_val1))
++ {
++ rsbac_pr_set_error(A_auth_start_euid);
++ return -RSBAC_EWRITEFAILED;
++ }
++ #endif
++ #ifdef CONFIG_RSBAC_AUTH_GROUP
++ i_attr_val1.auth_start_gid = current_gid();
++ if (rsbac_set_attr(SW_AUTH,
++ T_PROCESS,
++ i_tid,
++ A_auth_start_gid,
++ i_attr_val1))
++ {
++ rsbac_pr_set_error(A_auth_start_gid);
++ return -RSBAC_EWRITEFAILED;
++ }
++ #ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ i_attr_val1.auth_start_egid = current_egid();
++ if (rsbac_set_attr(SW_AUTH,
++ T_PROCESS,
++ i_tid,
++ A_auth_start_egid,
++ i_attr_val1))
++ {
++ rsbac_pr_set_error(A_auth_start_egid);
++ return -RSBAC_EWRITEFAILED;
++ }
++ #endif
++ #endif
++ #endif
++ return 0;
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++
++/* Only protect itself, if asked to by configuration */
++#ifdef CONFIG_RSBAC_AUTH_AUTH_PROT
++ /* remove all file capabilities on all changing requests to files */
++ case R_APPEND_OPEN:
++ case R_CHANGE_GROUP:
++ case R_DELETE:
++ case R_LINK_HARD:
++ case R_MODIFY_ACCESS_DATA:
++ case R_READ_WRITE_OPEN:
++ case R_RENAME:
++ case R_TRUNCATE:
++ case R_WRITE_OPEN:
++ switch(target)
++ {
++ case T_FILE:
++ /* remove cap set */
++ if(rsbac_auth_remove_f_capsets(tid.file))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_auth(): rsbac_auth_remove_f_capsets() returned error!\n");
++ return -RSBAC_EWRITEFAILED;
++ }
++ return 0;
++
++ /* all other cases are not handled */
++ default: return 0;
++ }
++#endif
++
++/*********************/
++ default: return 0;
++ }
++
++ return 0;
++ } /* end of rsbac_adf_set_attr_auth() */
++
++/* end of rsbac/adf/auth/main.c */
+diff --git a/rsbac/adf/auth/auth_syscalls.c b/rsbac/adf/auth/auth_syscalls.c
+new file mode 100644
+index 0000000..892a285
+--- /dev/null
++++ b/rsbac/adf/auth/auth_syscalls.c
+@@ -0,0 +1,161 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - Authentication module             */
++/* File: rsbac/adf/auth/syscalls.c */
++/* */
++/* Author and (c) 1999-2008: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 26/Feb/2008 */
++/*************************************************** */
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/errno.h>
++#include <rsbac/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/error.h>
++#include <rsbac/auth.h>
++#include <rsbac/debug.h>
++#include <rsbac/helpers.h>
++#include <rsbac/adf_main.h>
++
++/************************************************* */
++/* Global Variables */
++/************************************************* */
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++/************************************************* */
++/* Externally visible functions */
++/************************************************* */
++
++int rsbac_auth_add_p_cap(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range,
++ rsbac_time_t ttl)
++ {
++/* check only in non-maint mode */
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_AUTH
++ if(rsbac_switch_auth)
++#endif
++ {
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ /* check auth_may_set_cap of calling process */
++ i_tid.process = task_pid(current);
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ i_tid,
++ A_auth_may_set_cap,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_auth_may_set_cap);
++ return -RSBAC_EREADFAILED;
++ }
++ /* if auth_may_set_cap is not set, then reject */
++ if (!i_attr_val1.auth_may_set_cap)
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_auth_add_p_cap(): adding AUTH cap %u:%u to process %u denied for process %u!\n",
++ cap_range.first,
++ cap_range.last,
++                    pid_nr(pid),
++                    current->pid);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_AUTH]
++ #endif
++ )
++ #endif
++ return(-EPERM);
++ }
++ }
++#endif
++
++ /* OK, check passed. Add the capability. */
++ return rsbac_auth_add_to_p_capset(ta_number, pid, cap_type, cap_range, ttl);
++ }
++
++int rsbac_auth_remove_p_cap(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range)
++ {
++/* check only in non-maint mode */
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_AUTH
++ if(rsbac_switch_auth)
++#endif
++ {
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ /* check auth_may_set_cap of calling process */
++ i_tid.process = task_pid(current);
++ if (rsbac_get_attr(SW_AUTH,
++ T_PROCESS,
++ i_tid,
++ A_auth_may_set_cap,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_auth_may_set_cap);
++ return -RSBAC_EREADFAILED;
++ }
++ /* if auth_may_set_cap is not set, then reject */
++ if (!i_attr_val1.auth_may_set_cap)
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_auth_remove_p_cap(): removing AUTH cap %u:%u from process %u denied for process %u!\n",
++ cap_range.first,
++ cap_range.last,
++                    pid_nr(pid),
++                    current->pid);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_AUTH]
++ #endif
++ )
++ #endif
++ return(-EPERM);
++ }
++ }
++#endif
++
++ /* OK, check passed. Try to remove the capability. */
++ return rsbac_auth_remove_from_p_capset(ta_number, pid, cap_type, cap_range);
++ }
++
++int rsbac_auth_add_f_cap(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_auth_file_t file,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range,
++ rsbac_time_t ttl)
++ {
++ /* check has been done in help/syscalls.c: sys_rsbac_auth_add_f_cap */
++ return rsbac_auth_add_to_f_capset(ta_number, file, cap_type, cap_range, ttl);
++ }
++
++int rsbac_auth_remove_f_cap(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_auth_file_t file,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range)
++ {
++ /* check has been done in help/syscalls.c: sys_rsbac_auth_remove_f_cap */
++ return rsbac_auth_remove_from_f_capset(ta_number, file, cap_type, cap_range);
++ }
++
++/* end of rsbac/adf/auth/syscalls.c */
+diff --git a/rsbac/adf/cap/Makefile b/rsbac/adf/cap/Makefile
+new file mode 100644
+index 0000000..3411764
+--- /dev/null
++++ b/rsbac/adf/cap/Makefile
+@@ -0,0 +1,10 @@
++#
++# File: rsbac/adf/cap/Makefile
++#
++# Makefile for the Linux rsbac cap decision module.
++#
++# Author and (c) 1999-2012 Amon Ott <ao@rsbac.org>
++#
++
++obj-y := cap_main.o
++
+diff --git a/rsbac/adf/cap/cap_main.c b/rsbac/adf/cap/cap_main.c
+new file mode 100644
+index 0000000..1e09ef8
+--- /dev/null
++++ b/rsbac/adf/cap/cap_main.c
+@@ -0,0 +1,864 @@
++/**************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - Linux Capabilities (CAP) */
++/* File: rsbac/adf/cap/main.c */
++/* */
++/* Author and (c) 1999-2011: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 12/Jul/2011 */
++/**************************************************** */
++
++#include <linux/string.h>
++#include <rsbac/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++#include <rsbac/debug.h>
++
++/************************************************* */
++/* Global Variables */
++/************************************************* */
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++/************************************************* */
++/* Externally visible functions */
++/************************************************* */
++
++inline enum rsbac_adf_req_ret_t
++ rsbac_adf_request_cap (enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ switch (request)
++ {
++ case R_MODIFY_ATTRIBUTE:
++ switch(attr)
++ {
++ case A_system_role:
++ case A_cap_role:
++ case A_min_caps:
++ case A_max_caps:
++ case A_max_caps_user:
++ case A_max_caps_program:
++ case A_cap_process_hiding:
++ case A_cap_learn:
++ #ifdef CONFIG_RSBAC_CAP_AUTH_PROT
++ case A_auth_may_setuid:
++ case A_auth_may_set_cap:
++ case A_auth_start_uid:
++ case A_auth_start_euid:
++ case A_auth_start_gid:
++ case A_auth_start_egid:
++ case A_auth_learn:
++ case A_auth_add_f_cap:
++ case A_auth_remove_f_cap:
++ #endif
++ /* All attributes (remove target!) */
++ case A_none:
++ /* Security Officer? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_CAP,
++ T_USER,
++ i_tid,
++ A_cap_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_cap()", A_cap_role);
++ return(NOT_GRANTED);
++ }
++ /* if sec_officer, then grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ case R_READ_ATTRIBUTE:
++ switch(attr)
++ {
++ case A_system_role:
++ case A_cap_role:
++ case A_min_caps:
++ case A_max_caps:
++ case A_max_caps_user:
++ case A_max_caps_program:
++ case A_cap_process_hiding:
++ /* All attributes (remove target!) */
++ case A_none:
++ /* Security Officer or Admin? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_CAP,
++ T_USER,
++ i_tid,
++ A_cap_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_cap()", A_cap_role);
++ return(NOT_GRANTED);
++ }
++ /* if sec_officer, then grant */
++ if( (i_attr_val1.system_role == SR_security_officer)
++ || (i_attr_val1.system_role == SR_administrator)
++ )
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ case R_SWITCH_LOG:
++ switch(target)
++ {
++ case T_NONE:
++ /* test owner's cap_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_CAP,
++ T_USER,
++ i_tid,
++ A_cap_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_cap()", A_cap_role);
++ return(NOT_GRANTED);
++ }
++ /* security officer? -> grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ /* all other cases are unknown */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_SWITCH_MODULE:
++ switch(target)
++ {
++ case T_NONE:
++ /* we need the switch_target */
++ if(attr != A_switch_target)
++ return NOT_GRANTED;
++ /* do not care for other modules */
++ if( (attr_val.switch_target != SW_CAP)
++ #ifdef CONFIG_RSBAC_CAP_AUTH_PROT
++ && (attr_val.switch_target != SW_AUTH)
++ #endif
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ && (attr_val.switch_target != SW_SOFTMODE)
++ #endif
++ #ifdef CONFIG_RSBAC_FREEZE
++ && (attr_val.switch_target != SW_FREEZE)
++ #endif
++ )
++ return(DO_NOT_CARE);
++ /* test owner's cap_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_CAP,
++ T_USER,
++ i_tid,
++ A_cap_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_cap()", A_cap_role);
++ return(NOT_GRANTED);
++ }
++ /* security officer? -> grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++#ifdef CONFIG_RSBAC_CAP_PROC_HIDE
++ case R_CHANGE_GROUP:
++ case R_GET_STATUS_DATA:
++ case R_MODIFY_SYSTEM_DATA:
++ case R_SEND_SIGNAL:
++ case R_TRACE:
++ switch(target)
++ {
++ case T_PROCESS:
++ if(caller_pid == tid.process)
++ return GRANTED;
++ if (rsbac_get_attr(SW_CAP,
++ target,
++ tid,
++ A_cap_process_hiding,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_cap()", A_cap_process_hiding);
++ return(NOT_GRANTED); /* something weird happened */
++ }
++ switch(i_attr_val1.cap_process_hiding)
++ {
++ case PH_full:
++ /* Security Officer or Admin? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_CAP,
++ T_USER,
++ i_tid,
++ A_cap_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_cap()", A_cap_role);
++ return(NOT_GRANTED);
++ }
++ /* if sec_officer, then grant */
++ if(i_attr_val1.system_role == SR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++ case PH_from_other_users:
++ {
++ struct task_struct * task_p;
++ enum rsbac_adf_req_ret_t result;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (rsbac_get_attr(SW_GEN,
++ T_PROCESS,
++ tid,
++ A_vset,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_cap()", A_vset);
++ return(NOT_GRANTED);
++ }
++ if (i_attr_val1.vset == RSBAC_UID_SET(owner))
++#endif
++ {
++ read_lock(&tasklist_lock);
++ task_p = pid_task(tid.process, PIDTYPE_PID);
++ if( task_p
++ && (task_uid(task_p) != RSBAC_UID_NUM(owner))
++ )
++ result = NOT_GRANTED;
++ else
++ result = GRANTED;
++ read_unlock(&tasklist_lock);
++ if(result == GRANTED)
++ return GRANTED;
++ }
++ /* Security Officer or Admin? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_CAP,
++ T_USER,
++ i_tid,
++ A_cap_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_cap()", A_cap_role);
++ return(NOT_GRANTED);
++ }
++ /* if sec_officer or admin, then grant */
++ if( (i_attr_val1.system_role == SR_security_officer)
++ || (i_attr_val1.system_role == SR_administrator)
++ )
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++ }
++ default:
++ return DO_NOT_CARE;
++ }
++
++ default:
++ return DO_NOT_CARE;
++ }
++#endif
++
++/*********************/
++ default: return DO_NOT_CARE;
++ }
++
++ return DO_NOT_CARE;
++ } /* end of rsbac_adf_request_cap() */
++
++
++/*****************************************************************************/
++/* If the request returned granted and the operation is performed, */
++/* the following function can be called by the AEF to get all aci set */
++/* correctly. For write accesses that are performed fully within the kernel, */
++/* this is usually not done to prevent extra calls, including R_CLOSE for */
++/* cleaning up. */
++/* The second instance of target specification is the new target, if one has */
++/* been created, otherwise its values are ignored. */
++/* On success, 0 is returned, and an error from rsbac/error.h otherwise. */
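++/* Editor's illustrative sketch (not part of the original RSBAC patch): the  */
++/* function below repeatedly clamps process credentials between two RSBAC    */
++/* attributes, max_caps (an upper bound, applied with bitwise AND) and       */
++/* min_caps (a guaranteed minimum, applied with bitwise OR). The hypothetical*/
++/* helper shows that arithmetic in isolation on the two 32-bit capability    */
++/* words; it is disabled with #if 0 and never built.                         */
++#if 0
++static void example_apply_cap_bounds(__u32 caps[2],
++                                     const __u32 max_caps[2],
++                                     const __u32 min_caps[2])
++  {
++    int i;
++
++    for (i = 0; i < 2; i++)
++      {
++        caps[i] &= max_caps[i];   /* drop all bits above the upper bound */
++        caps[i] |= min_caps[i];   /* force the guaranteed minimum back in */
++      }
++    /* rsbac_adf_set_attr_cap() applies this to cap_permitted, cap_effective */
++    /* and cap_inheritable (and, for minimums on EXECUTE, also cap_bset) via */
++    /* prepare_creds()/commit_creds().                                       */
++  }
++#endif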
++
++inline int rsbac_adf_set_attr_cap(
++ enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_target_t new_target,
++ union rsbac_target_id_t new_tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ switch (request)
++ {
++ case R_CHANGE_OWNER:
++ switch(target)
++ {
++ case T_PROCESS:
++ if(attr != A_owner)
++ return(-RSBAC_EINVALIDATTR);
++ i_tid.user = attr_val.owner;
++ if (rsbac_get_attr(SW_CAP,
++ T_USER,
++ i_tid,
++ A_cap_ld_env,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_cap()",
++                                       A_cap_ld_env);
++ } else {
++ if (i_attr_val1.cap_ld_env == LD_keep) {
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_CAP,
++ T_PROCESS,
++ i_tid,
++ A_cap_ld_env,
++ &i_attr_val1, FALSE)) {
++                                  rsbac_ds_get_error
++ ("rsbac_adf_set_attr_cap()",
++ A_cap_ld_env);
++ } else {
++ if (rsbac_set_attr(SW_CAP,
++ T_PROCESS,
++ tid,
++ A_cap_ld_env,
++ i_attr_val1)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_cap()",
++ A_cap_ld_env);
++ }
++ }
++ }
++ }
++ /* Adjust Linux caps */
++ i_tid.user = attr_val.owner;
++ if (rsbac_get_attr(SW_CAP,
++ T_USER,
++ i_tid,
++ A_max_caps,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr_cap()", A_max_caps);
++ }
++ else
++ {
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ || rsbac_ind_softmode[SW_CAP]
++ #endif
++ )
++ { /* Warn */
++ if((i_attr_val1.max_caps.cap[0] != RSBAC_CAP_DEFAULT_MAX) || (i_attr_val1.max_caps.cap[1] != RSBAC_CAP_DEFAULT_MAX))
++ {
++ rsbac_printk(KERN_NOTICE
++ "rsbac_adf_set_attr_cap(): running in softmode, max_caps of user %u not applied to process %u(%.15s)!\n",
++ owner,
++ pid_nr(caller_pid),
++ current->comm);
++ }
++ }
++ else
++ #endif
++ {
++ /* set caps for process */
++ struct cred *override_cred;
++ override_cred = prepare_creds();
++ if (!override_cred)
++ return -ENOMEM;
++ override_cred->cap_permitted.cap[0] &= i_attr_val1.max_caps.cap[0];
++ override_cred->cap_effective.cap[0] &= i_attr_val1.max_caps.cap[0];
++ override_cred->cap_inheritable.cap[0] &= i_attr_val1.max_caps.cap[0];
++ override_cred->cap_permitted.cap[1] &= i_attr_val1.max_caps.cap[1];
++ override_cred->cap_effective.cap[1] &= i_attr_val1.max_caps.cap[1];
++ override_cred->cap_inheritable.cap[1] &= i_attr_val1.max_caps.cap[1];
++ commit_creds(override_cred);
++
++#ifdef CONFIG_RSBAC_CAP_LOG_MISSING
++ /* set max_caps_user for process */
++ if (rsbac_set_attr(SW_CAP,
++ target,
++ tid,
++ A_max_caps_user,
++ i_attr_val1))
++ {
++ rsbac_ds_set_error("rsbac_adf_set_attr_cap()", A_max_caps_user);
++ }
++#endif
++ }
++ }
++ if (rsbac_get_attr(SW_CAP,
++ T_USER,
++ i_tid,
++ A_min_caps,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr_cap()", A_min_caps);
++ }
++ else
++ {
++ /* set caps for process */
++ {
++ struct cred *override_cred;
++ override_cred = prepare_creds();
++ if (!override_cred)
++ return -ENOMEM;
++
++ override_cred->cap_permitted.cap[0] |= i_attr_val1.min_caps.cap[0];
++ override_cred->cap_effective.cap[0] |= i_attr_val1.min_caps.cap[0];
++ override_cred->cap_inheritable.cap[0] |= i_attr_val1.min_caps.cap[0];
++ override_cred->cap_permitted.cap[1] |= i_attr_val1.min_caps.cap[1];
++ override_cred->cap_effective.cap[1] |= i_attr_val1.min_caps.cap[1];
++ override_cred->cap_inheritable.cap[1] |= i_attr_val1.min_caps.cap[1];
++ commit_creds(override_cred);
++ }
++ }
++ return 0;
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++ break;
++
++#if defined (CONFIG_RSBAC_CAP_PROC_HIDE) || defined(CONFIG_RSBAC_CAP_LOG_MISSING)
++ case R_CLONE:
++ switch(target)
++ {
++ case T_PROCESS:
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_CAP,
++ target,
++ i_tid,
++ A_cap_ld_env,
++ &i_attr_val1, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_cap()",
++ A_cap_ld_env);
++ } else {
++ if (rsbac_set_attr(SW_CAP,
++ new_target,
++ new_tid,
++ A_cap_ld_env,
++ i_attr_val1)) {
++                                 rsbac_ds_set_error
++ ("rsbac_adf_set_attr_cap()",
++ A_cap_ld_env);
++ }
++ }
++#ifdef CONFIG_RSBAC_CAP_PROC_HIDE
++ /* get process hiding from old process */
++ if (rsbac_get_attr(SW_CAP,
++ target,
++ tid,
++ A_cap_process_hiding,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr_cap()", A_cap_process_hiding);
++ }
++ else
++          { /* only set, if not default value 0 */
++ if(i_attr_val1.cap_process_hiding)
++ {
++ /* set program based log for new process */
++ if (rsbac_set_attr(SW_CAP,
++ new_target,
++ new_tid,
++ A_cap_process_hiding,
++ i_attr_val1))
++ {
++ rsbac_ds_set_error("rsbac_adf_set_attr_cap()", A_cap_process_hiding);
++ }
++ }
++ }
++#endif
++#ifdef CONFIG_RSBAC_CAP_LOG_MISSING
++ /* get max_caps_user from old process */
++ if (rsbac_get_attr(SW_CAP,
++ target,
++ tid,
++ A_max_caps_user,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr_cap():CLONE", A_max_caps_user);
++ }
++ else
++          { /* only set, if not default value */
++ if((i_attr_val1.max_caps_user.cap[0] != RSBAC_CAP_DEFAULT_MAX) || (i_attr_val1.max_caps_user.cap[1] != RSBAC_CAP_DEFAULT_MAX))
++ {
++ if (rsbac_set_attr(SW_CAP,
++ new_target,
++ new_tid,
++ A_max_caps_user,
++ i_attr_val1))
++ {
++ rsbac_ds_set_error("rsbac_adf_set_attr_cap():CLONE", A_max_caps_user);
++ }
++ }
++ }
++ /* get max_caps_program from old process */
++ if (rsbac_get_attr(SW_CAP,
++ target,
++ tid,
++ A_max_caps_program,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr_cap():CLONE", A_max_caps_program);
++ }
++ else
++          { /* only set, if not default value */
++ if((i_attr_val1.max_caps_program.cap[0] != RSBAC_CAP_DEFAULT_MAX) || (i_attr_val1.max_caps_program.cap[1] != RSBAC_CAP_DEFAULT_MAX))
++ {
++ if (rsbac_set_attr(SW_CAP,
++ new_target,
++ new_tid,
++ A_max_caps_program,
++ i_attr_val1))
++ {
++ rsbac_ds_set_error("rsbac_adf_set_attr_cap():CLONE", A_max_caps_program);
++ }
++ }
++ }
++#endif
++ return 0;
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++#endif /* PROC_HIDE || LOG_MISSING */
++
++ case R_EXECUTE:
++ switch(target)
++ {
++ case T_FILE:
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_CAP,
++ T_USER,
++ i_tid,
++ A_cap_ld_env,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error("rsbac_adf_set_attr_cap()",A_cap_ld_env);
++ } else {
++ if (i_attr_val1.cap_ld_env == LD_keep) {
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_CAP,
++ T_PROCESS,
++ i_tid,
++ A_cap_ld_env,
++ &i_attr_val1, FALSE)) {
++ rsbac_ds_get_error("rsbac_adf_set_attr_cap()",
++ A_cap_ld_env);
++ }
++ i_tid.process = caller_pid;
++ if (rsbac_set_attr(SW_CAP,
++ T_PROCESS,
++ i_tid,
++ A_cap_ld_env,
++ i_attr_val1)) {
++                                 rsbac_ds_set_error("rsbac_adf_set_attr_cap()",
++ A_cap_ld_env);
++ }
++ }
++ }
++ /* Adjust Linux caps - first user, then program based */
++ /* User must be redone, because caps are cleared by Linux kernel */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_CAP,
++ T_USER,
++ i_tid,
++ A_max_caps,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr_cap()", A_max_caps);
++ }
++ else
++ {
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ || rsbac_ind_softmode[SW_CAP]
++ #endif
++ )
++ { /* Warn */
++ if((i_attr_val1.max_caps.cap[0] != RSBAC_CAP_DEFAULT_MAX) || (i_attr_val1.max_caps.cap[1] != RSBAC_CAP_DEFAULT_MAX))
++ {
++ rsbac_printk(KERN_NOTICE
++ "rsbac_adf_set_attr_cap(): running in softmode, max_caps of user %u not applied to process %u(%.15s)!\n",
++ owner,
++ pid_nr(caller_pid),
++ current->comm);
++ }
++ }
++ else
++ #endif
++ {
++ struct cred *override_cred;
++ override_cred = prepare_creds();
++ if (!override_cred)
++ return -ENOMEM;
++ override_cred->cap_permitted.cap[0] &= i_attr_val1.max_caps.cap[0];
++ override_cred->cap_effective.cap[0] &= i_attr_val1.max_caps.cap[0];
++ override_cred->cap_inheritable.cap[0] &= i_attr_val1.max_caps.cap[0];
++ override_cred->cap_permitted.cap[1] &= i_attr_val1.max_caps.cap[1];
++ override_cred->cap_effective.cap[1] &= i_attr_val1.max_caps.cap[1];
++ override_cred->cap_inheritable.cap[1] &= i_attr_val1.max_caps.cap[1];
++ commit_creds(override_cred);
++ }
++ }
++ if (rsbac_get_attr(SW_CAP,
++ T_USER,
++ i_tid,
++ A_min_caps,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr_cap()", A_min_caps);
++ }
++ else
++ {
++ /* set caps for process */
++ struct cred *override_cred;
++ override_cred = prepare_creds();
++ if (!override_cred)
++ return -ENOMEM;
++ override_cred->cap_permitted.cap[0] |= i_attr_val1.min_caps.cap[0];
++ override_cred->cap_effective.cap[0] |= i_attr_val1.min_caps.cap[0];
++ override_cred->cap_inheritable.cap[0] |= i_attr_val1.min_caps.cap[0];
++ override_cred->cap_bset.cap[0] |= i_attr_val1.min_caps.cap[0];
++ override_cred->cap_permitted.cap[1] |= i_attr_val1.min_caps.cap[1];
++ override_cred->cap_effective.cap[1] |= i_attr_val1.min_caps.cap[1];
++ override_cred->cap_inheritable.cap[1] |= i_attr_val1.min_caps.cap[1];
++ override_cred->cap_bset.cap[1] |= i_attr_val1.min_caps.cap[1];
++ commit_creds(override_cred);
++ }
++ if (rsbac_get_attr(SW_CAP,
++ target,
++ tid,
++ A_max_caps,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr_cap()", A_max_caps);
++ }
++ else
++ {
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ || rsbac_ind_softmode[SW_CAP]
++ #endif
++ )
++ { /* Warn */
++ if((i_attr_val1.max_caps.cap[0] != RSBAC_CAP_DEFAULT_MAX) || (i_attr_val1.max_caps.cap[1] != RSBAC_CAP_DEFAULT_MAX))
++ {
++ rsbac_printk(KERN_NOTICE
++ "rsbac_adf_set_attr_cap(): running in softmode, max_caps of program not applied to process %u(%.15s)!\n",
++ pid_nr(caller_pid),
++ current->comm);
++ }
++ }
++ else
++ #endif
++ {
++ struct cred *override_cred;
++ override_cred = prepare_creds();
++ if (!override_cred)
++ return -ENOMEM;
++ override_cred->cap_permitted.cap[0] &= i_attr_val1.max_caps.cap[0];
++ override_cred->cap_effective.cap[0] &= i_attr_val1.max_caps.cap[0];
++ override_cred->cap_inheritable.cap[0] &= i_attr_val1.max_caps.cap[0];
++ override_cred->cap_permitted.cap[1] &= i_attr_val1.max_caps.cap[1];
++ override_cred->cap_effective.cap[1] &= i_attr_val1.max_caps.cap[1];
++ override_cred->cap_inheritable.cap[1] &= i_attr_val1.max_caps.cap[1];
++ commit_creds(override_cred);
++
++#ifdef CONFIG_RSBAC_CAP_LOG_MISSING
++ i_tid.process = caller_pid;
++ /* set max_caps_program for process */
++ if (rsbac_set_attr(SW_CAP,
++ T_PROCESS,
++ i_tid,
++ A_max_caps_program,
++ i_attr_val1))
++ {
++ rsbac_ds_set_error("rsbac_adf_set_attr_cap():EXECUTE", A_max_caps_program);
++ }
++#endif
++ }
++ }
++ if (rsbac_get_attr(SW_CAP,
++ target,
++ tid,
++ A_min_caps,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr_cap()", A_min_caps);
++ }
++ else
++ {
++ /* set caps for process */
++ struct cred *override_cred;
++ override_cred = prepare_creds();
++ if (!override_cred)
++ return -ENOMEM;
++ override_cred->cap_permitted.cap[0] |= i_attr_val1.min_caps.cap[0];
++ override_cred->cap_effective.cap[0] |= i_attr_val1.min_caps.cap[0];
++ override_cred->cap_inheritable.cap[0] |= i_attr_val1.min_caps.cap[0];
++ override_cred->cap_bset.cap[0] |= i_attr_val1.min_caps.cap[0];
++ override_cred->cap_permitted.cap[1] |= i_attr_val1.min_caps.cap[1];
++ override_cred->cap_effective.cap[1] |= i_attr_val1.min_caps.cap[1];
++ override_cred->cap_inheritable.cap[1] |= i_attr_val1.min_caps.cap[1];
++ override_cred->cap_bset.cap[1] |= i_attr_val1.min_caps.cap[1];
++ commit_creds(override_cred);
++ }
++ return 0;
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++ break;
++
++ case R_MODIFY_SYSTEM_DATA:
++ switch(target)
++ {
++ case T_SCD:
++ if (tid.scd != ST_capability)
++ return 0;
++
++ /* Adjust Linux caps - user only */
++ /* User must be redone, because caps have been changed by sys_capset() */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_CAP,
++ T_USER,
++ i_tid,
++ A_max_caps,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr_cap()", A_max_caps);
++ }
++ else
++ {
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ || rsbac_ind_softmode[SW_CAP]
++ #endif
++ )
++ { /* Warn */
++ if((i_attr_val1.max_caps.cap[0] != RSBAC_CAP_DEFAULT_MAX) || (i_attr_val1.max_caps.cap[1] != RSBAC_CAP_DEFAULT_MAX))
++ {
++ rsbac_printk(KERN_NOTICE
++ "rsbac_adf_set_attr_cap(): running in softmode, max_caps of user %u not applied to process %u(%.15s)!\n",
++ owner,
++ pid_nr(caller_pid),
++ current->comm);
++ }
++ }
++ else
++ #endif
++ {
++ /* set caps for process */
++ struct cred *override_cred;
++ override_cred = prepare_creds();
++ if (!override_cred)
++ return -ENOMEM;
++ override_cred->cap_permitted.cap[0] &= i_attr_val1.max_caps.cap[0];
++ override_cred->cap_effective.cap[0] &= i_attr_val1.max_caps.cap[0];
++ override_cred->cap_inheritable.cap[0] &= i_attr_val1.max_caps.cap[0];
++ override_cred->cap_permitted.cap[1] &= i_attr_val1.max_caps.cap[1];
++ override_cred->cap_effective.cap[1] &= i_attr_val1.max_caps.cap[1];
++ override_cred->cap_inheritable.cap[1] &= i_attr_val1.max_caps.cap[1];
++ commit_creds(override_cred);
++ }
++ }
++ if (rsbac_get_attr(SW_CAP,
++ T_USER,
++ i_tid,
++ A_min_caps,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr_cap()", A_min_caps);
++ }
++ else
++ {
++ /* set caps for process */
++ struct cred *override_cred;
++ override_cred = prepare_creds();
++ if (!override_cred)
++ return -ENOMEM;
++ override_cred->cap_permitted.cap[0] |= i_attr_val1.min_caps.cap[0];
++ override_cred->cap_effective.cap[0] |= i_attr_val1.min_caps.cap[0];
++ override_cred->cap_inheritable.cap[0] |= i_attr_val1.min_caps.cap[0];
++ override_cred->cap_bset.cap[0] |= i_attr_val1.min_caps.cap[0];
++ override_cred->cap_permitted.cap[1] |= i_attr_val1.min_caps.cap[1];
++ override_cred->cap_effective.cap[1] |= i_attr_val1.min_caps.cap[1];
++ override_cred->cap_inheritable.cap[1] |= i_attr_val1.min_caps.cap[1];
++ override_cred->cap_bset.cap[1] |= i_attr_val1.min_caps.cap[1];
++ commit_creds(override_cred);
++ }
++ return 0;
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++ break;
++
++/*********************/
++ default: return 0;
++ }
++
++ return 0;
++ } /* end of rsbac_adf_set_attr_cap() */
++
++/* end of rsbac/adf/cap/main.c */
+diff --git a/rsbac/adf/daz/Makefile b/rsbac/adf/daz/Makefile
+new file mode 100644
+index 0000000..823c83d
+--- /dev/null
++++ b/rsbac/adf/daz/Makefile
+@@ -0,0 +1,9 @@
++#
++# File: rsbac/adf/daz/Makefile
++#
++# Makefile for the Linux rsbac DAZ decision module.
++#
++# Author and (c) 1999-2012 Amon Ott <ao@rsbac.org>
++#
++
++obj-y := daz_main.o dazuko_xp.o
+diff --git a/rsbac/adf/daz/daz_main.c b/rsbac/adf/daz/daz_main.c
+new file mode 100644
+index 0000000..94aa810
+--- /dev/null
++++ b/rsbac/adf/daz/daz_main.c
+@@ -0,0 +1,1165 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - Dazuko Malware Scan */
++/* File: rsbac/adf/daz/daz_main.c */
++/* */
++/* Author and (c) 1999-2011: Amon Ott <ao@rsbac.org> */
++/* */
++/* Copyright (c) 2004 H+BEDV Datentechnik GmbH */
++/* Written by John Ogness <jogness@antivir.de> */
++/* */
++/* Last modified: 12/Jul/2011 */
++/*************************************************** */
++
++/* Dazuko RSBAC.
++ Allow RSBAC Linux file access control for 3rd-party applications.
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License
++ as published by the Free Software Foundation; either version 2
++ of the License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++#include "dazuko_rsbac.h"
++#include "dazuko_xp.h"
++#include "dazukoio.h"
++
++#include <linux/init.h>
++#include <linux/unistd.h>
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/random.h>
++
++#include <linux/string.h>
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/version.h>
++#include <linux/syscalls.h>
++#include <asm/uaccess.h>
++#include <rsbac/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/adf.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/debug.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++#include <rsbac/net_getname.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/proc_fs.h>
++
++/************************************************* */
++/* Global Variables */
++/************************************************* */
++
++#include <linux/device.h>
++
++#define DAZ_MAX_FILENAME PATH_MAX
++
++ssize_t linux_dazuko_device_read(struct file *file, char *buffer, size_t length, loff_t *pos);
++ssize_t linux_dazuko_device_write(struct file *file, const char *buffer, size_t length, loff_t *pos);
++long linux_dazuko_device_ioctl(struct file *file, unsigned int cmd, unsigned long param);
++int linux_dazuko_device_open(struct inode *inode, struct file *file);
++int linux_dazuko_device_release(struct inode *inode, struct file *file);
++
++extern struct xp_atomic active;
++
++static int dev_major = -1;
++
++static struct file_operations fops = {
++	.read = linux_dazuko_device_read,
++	.write = linux_dazuko_device_write,
++	.unlocked_ioctl = linux_dazuko_device_ioctl,
++	.open = linux_dazuko_device_open,
++	.release = linux_dazuko_device_release,
++};
++
++static struct class *dazuko_class = NULL;
++
++static struct kmem_cache * dazuko_file_slab = NULL;
++static struct kmem_cache * xp_file_slab = NULL;
++static struct kmem_cache * xp_daemon_slab = NULL;
++static struct kmem_cache * dazuko_filename_slab = NULL;
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++static int daz_reset_scanned(struct rsbac_fs_file_t file)
++{
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_target_id_t i_tid;
++
++ /* reset scanned status for file */
++ rsbac_pr_debug(adf_daz, "pid %u (%.15s), resetting scanned status!\n",
++ current->pid, current->comm);
++ i_tid.file=file;
++ i_attr_val1.daz_scanned = DAZ_unscanned;
++ if(rsbac_set_attr(SW_DAZ,
++ T_FILE,
++ i_tid,
++ A_daz_scanned,
++ i_attr_val1))
++ {
++ rsbac_printk(KERN_WARNING "daz_reset_scanned(): rsbac_set_attr() for daz_scanned on device %02u:%02u inode %u returned error!\n",
++ MAJOR(file.device), MINOR(file.device), file.inode);
++ return -RSBAC_EWRITEFAILED;
++ }
++ if (rsbac_get_attr(SW_DAZ,
++ T_FILE,
++ i_tid,
++ A_daz_scanner,
++ &i_attr_val1,
++ TRUE)) {
++ rsbac_printk(KERN_WARNING
++ "daz_reset_scanned(): rsbac_get_attr() for daz_scanner returned error!\n");
++ return -RSBAC_EREADFAILED;
++ }
++ if (i_attr_val1.daz_scanner) {
++ /* reset scanner flag for file */
++ i_attr_val1.daz_scanner = FALSE;
++ if(rsbac_set_attr(SW_DAZ,
++ T_FILE,
++ i_tid,
++ A_daz_scanner,
++ i_attr_val1))
++ {
++ rsbac_printk(KERN_WARNING "daz_reset_scanned(): rsbac_set_attr() for daz_scanner on device %02u:%02u inode %u returned error!\n",
++ MAJOR(file.device), MINOR(file.device), file.inode);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ return 0;
++}
++#else
++static inline int daz_reset_scanned(struct rsbac_fs_file_t file)
++{
++ return 0;
++}
++#endif
++
++
++/* mutex */
++
++inline int xp_init_mutex(struct xp_mutex *mutex)
++{
++#ifdef init_MUTEX
++ init_MUTEX(&(mutex->mutex));
++#else
++ sema_init(&(mutex->mutex), 1);
++#endif
++
++ return 0;
++}
++
++inline int xp_down(struct xp_mutex *mutex)
++{
++ down(&(mutex->mutex));
++ return 0;
++}
++
++inline int xp_up(struct xp_mutex *mutex)
++{
++ up(&(mutex->mutex));
++ return 0;
++}
++
++inline int xp_destroy_mutex(struct xp_mutex *mutex)
++{
++ return 0;
++}
++
++
++/* read-write lock */
++
++inline int xp_init_rwlock(struct xp_rwlock *rwlock)
++{
++ rwlock_init(&(rwlock->rwlock));
++ return 0;
++}
++
++inline int xp_write_lock(struct xp_rwlock *rwlock)
++{
++ write_lock(&(rwlock->rwlock));
++ return 0;
++}
++
++inline int xp_write_unlock(struct xp_rwlock *rwlock)
++{
++ write_unlock(&(rwlock->rwlock));
++ return 0;
++}
++
++inline int xp_read_lock(struct xp_rwlock *rlock)
++{
++ read_lock(&(rlock->rwlock));
++ return 0;
++}
++
++inline int xp_read_unlock(struct xp_rwlock *rlock)
++{
++ read_unlock(&(rlock->rwlock));
++ return 0;
++}
++
++inline int xp_destroy_rwlock(struct xp_rwlock *rwlock)
++{
++ return 0;
++}
++
++
++/* wait-notify queue */
++
++inline int xp_init_queue(struct xp_queue *queue)
++{
++ init_waitqueue_head(&(queue->queue));
++ return 0;
++}
++
++inline int xp_wait_until_condition(struct xp_queue *queue, int (*cfunction)(void *), void *cparam, int allow_interrupt)
++{
++ /* wait until cfunction(cparam) != 0 (condition is true) */
++
++ if (allow_interrupt)
++ {
++ return wait_event_interruptible(queue->queue, cfunction(cparam) != 0);
++ }
++ else
++ {
++ wait_event(queue->queue, cfunction(cparam) != 0);
++ }
++
++ return 0;
++}
++
++inline int xp_notify(struct xp_queue *queue)
++{
++ wake_up(&(queue->queue));
++ return 0;
++}
++
++inline int xp_destroy_queue(struct xp_queue *queue)
++{
++ return 0;
++}
++
++
++/* memory */
++
++inline int xp_copyin(const void *user_src, void *kernel_dest, size_t size)
++{
++ return copy_from_user(kernel_dest, user_src, size);
++}
++
++inline int xp_copyout(const void *kernel_src, void *user_dest, size_t size)
++{
++ return copy_to_user(user_dest, kernel_src, size);
++}
++
++inline int xp_verify_user_writable(const void *user_ptr, size_t size)
++{
++ return 0;
++}
++
++inline int xp_verify_user_readable(const void *user_ptr, size_t size)
++{
++ return 0;
++}
++
++
++/* path attribute */
++
++inline int xp_is_absolute_path(const char *path)
++{
++ return (path[0] == '/');
++}
++
++
++/* atomic */
++
++inline int xp_atomic_set(struct xp_atomic *atomic, int value)
++{
++ atomic_set(&(atomic->atomic), value);
++ return 0;
++}
++
++inline int xp_atomic_inc(struct xp_atomic *atomic)
++{
++ atomic_inc(&(atomic->atomic));
++ return 0;
++}
++
++inline int xp_atomic_dec(struct xp_atomic *atomic)
++{
++ atomic_dec(&(atomic->atomic));
++ return 0;
++}
++
++inline int xp_atomic_read(struct xp_atomic *atomic)
++{
++ return atomic_read(&(atomic->atomic));
++}
++
++
++/* file descriptor */
++
++inline int xp_copy_file(struct xp_file *dest, struct xp_file *src)
++{
++ return 0;
++}
++
++inline int xp_compare_file(struct xp_file *file1, struct xp_file *file2)
++{
++ return 0;
++}
++
++inline int xp_fill_file_struct(struct dazuko_file_struct *dfs)
++{
++ /* make sure we have access to everything */
++ if (dfs == NULL)
++ return -1;
++
++ if (dfs->extra_data == NULL)
++ return -1;
++
++ if (dfs->extra_data->dentry == NULL)
++ return -1;
++
++ if (dfs->extra_data->dentry->d_inode == NULL)
++ return -1;
++
++ /* ok, we have everything we need */
++
++ dfs->extra_data->full_filename = rsbac_smalloc_unlocked(dazuko_filename_slab);
++ if (dfs->extra_data->full_filename == NULL)
++ return -1;
++ rsbac_lookup_full_path(dfs->extra_data->dentry, dfs->extra_data->full_filename, DAZ_MAX_FILENAME, 0);
++
++ rsbac_pr_debug(adf_daz, "pid %u (%.15s), file is %s!\n",
++ current->pid, current->comm,
++ dfs->extra_data->full_filename);
++
++ /* find the actual value of the length */
++ dfs->extra_data->full_filename_length = strlen(dfs->extra_data->full_filename);
++
++ /* reference copy of full path */
++ dfs->filename = dfs->extra_data->full_filename;
++ dfs->filename_length = dfs->extra_data->full_filename_length;
++
++ dfs->file_p.size = dfs->extra_data->dentry->d_inode->i_size;
++ dfs->file_p.set_size = 1;
++ dfs->file_p.uid = dfs->extra_data->dentry->d_inode->i_uid;
++ dfs->file_p.set_uid = 1;
++ dfs->file_p.gid = dfs->extra_data->dentry->d_inode->i_gid;
++ dfs->file_p.set_gid = 1;
++ dfs->file_p.mode = dfs->extra_data->dentry->d_inode->i_mode;
++ dfs->file_p.set_mode = 1;
++ dfs->file_p.device_type = dfs->extra_data->dentry->d_inode->i_rdev;
++ dfs->file_p.set_device_type = 1;
++
++ return 0;
++}
++
++static int dazuko_file_struct_cleanup(struct dazuko_file_struct **dfs)
++{
++ if (dfs == NULL)
++ return 0;
++
++ if (*dfs == NULL)
++ return 0;
++
++ if ((*dfs)->extra_data != NULL)
++ {
++ if ((*dfs)->extra_data->full_filename)
++ rsbac_sfree(dazuko_filename_slab, (*dfs)->extra_data->full_filename);
++
++ rsbac_sfree(xp_file_slab, (*dfs)->extra_data);
++ }
++
++ rsbac_sfree(dazuko_file_slab, *dfs);
++
++ *dfs = NULL;
++
++ return 0;
++}
++
++
++/* daemon id */
++
++int xp_id_compare(struct xp_daemon_id *id1, struct xp_daemon_id *id2)
++{
++ if (id1 == NULL || id2 == NULL)
++ return -1;
++
++	/* if files are available and they match,
++	 * then we say that the ids match */
++ if (id1->file != NULL && id1->file == id2->file)
++ return 0;
++
++ if (id1->pid == id2->pid)
++ return 0;
++
++ return 1;
++}
++
++int xp_id_free(struct xp_daemon_id *id)
++{
++ rsbac_sfree(xp_daemon_slab, id);
++ return 0;
++}
++
++struct xp_daemon_id* xp_id_copy(struct xp_daemon_id *id)
++{
++ struct xp_daemon_id *ptr;
++
++ if (id == NULL)
++ return NULL;
++
++ ptr = rsbac_smalloc(xp_daemon_slab);
++
++ if (ptr != NULL)
++ {
++ ptr->pid = id->pid;
++ ptr->file = id->file;
++ }
++ return ptr;
++}
++
++
++/* system hook */
++
++inline int xp_sys_hook()
++{
++ int wanted_major = CONFIG_RSBAC_DAZ_DEV_MAJOR;
++
++ /* Called from insmod when inserting the module. */
++ /* register the dazuko device */
++ if((wanted_major > 0) && (wanted_major <= 254)) {
++ dev_major = register_chrdev(wanted_major, DEVICE_NAME, &fops);
++ if (dev_major < 0) {
++ rsbac_printk(KERN_WARNING "dazuko: unable to register major chrdev %u, err=%d\n",
++ wanted_major, dev_major);
++ return dev_major;
++ }
++ dev_major = wanted_major;
++ dazuko_class = class_create(THIS_MODULE, "dazuko");
++ device_create(dazuko_class, NULL,
++ MKDEV(wanted_major, 0),
++ NULL, "dazuko");
++ } else {
++ dev_major = register_chrdev(0, DEVICE_NAME, &fops);
++ if (dev_major < 0) {
++ rsbac_printk(KERN_WARNING "dazuko: unable to register any major chrdev, err=%d\n",
++ dev_major);
++ return dev_major;
++ }
++ dazuko_class = class_create(THIS_MODULE, "dazuko");
++ device_create(dazuko_class, NULL,
++ MKDEV(dev_major, 0),
++ NULL, "dazuko");
++ }
++ return 0;
++}
++
++inline int xp_sys_unhook()
++{
++ /* Called by rmmod when removing the module. */
++ unregister_chrdev(dev_major, DEVICE_NAME);
++	device_destroy(dazuko_class, MKDEV(dev_major, 0));
++ class_destroy(dazuko_class);
++
++ return 0;
++}
++
++
++/* ioctl's */
++
++int linux_dazuko_device_open(struct inode *inode, struct file *file)
++{
++ DPRINT(("dazuko: linux_dazuko_device_open() [%d]\n", current->pid));
++
++ return 0;
++}
++
++ssize_t linux_dazuko_device_read(struct file *file, char *buffer, size_t length, loff_t *pos)
++{
++	/* Reading from the dazuko device simply
++	 * returns the device's major number, to help
++	 * the daemon find it (an illustrative userspace
++	 * sketch follows this function). */
++
++ char tmp[20];
++ size_t dev_major_len;
++
++ DPRINT(("dazuko: linux_dazuko_device_read() [%d]\n", current->pid));
++
++ /* only one read is allowed */
++ if (*pos != 0)
++ return 0;
++
++ if (dev_major < 0)
++ return -ENODEV;
++
++ /* print dev_major to a string
++ * and get length (with terminator) */
++ dazuko_bzero(tmp, sizeof(tmp));
++
++ dev_major_len = dazuko_snprintf(tmp, sizeof(tmp), "%d", dev_major) + 1;
++
++ if (tmp[sizeof(tmp)-1] != 0)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: failing device_read, device number overflow for daemon %d (dev_major=%d)\n", current->pid, dev_major);
++ return -EFAULT;
++ }
++
++ if (length < dev_major_len)
++ return -EINVAL;
++
++ /* copy dev_major string to userspace */
++ if (xp_copyout(tmp, buffer, dev_major_len) != 0)
++ return -EFAULT;
++
++ *pos = dev_major_len;
++
++ return dev_major_len;
++}
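++
++/* Editor's illustrative sketch (not part of the original RSBAC patch): a
++ * hypothetical userspace counterpart to the read handler above. A scanner
++ * daemon can read the device once to learn the major number the kernel
++ * chose, returned as a decimal string. The /dev/dazuko path is an
++ * assumption based on the device_create() call in this file.
++ *
++ *   #include <fcntl.h>
++ *   #include <stdio.h>
++ *   #include <stdlib.h>
++ *   #include <unistd.h>
++ *
++ *   int main(void)
++ *   {
++ *       char buf[20] = "";
++ *       int fd = open("/dev/dazuko", O_RDONLY);
++ *
++ *       if (fd < 0)
++ *           return 1;
++ *       if (read(fd, buf, sizeof(buf) - 1) <= 0) {
++ *           close(fd);
++ *           return 1;
++ *       }
++ *       printf("dazuko major: %d\n", atoi(buf));
++ *       close(fd);
++ *       return 0;
++ *   }
++ */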
++
++ssize_t linux_dazuko_device_write(struct file *file, const char *buffer, size_t length, loff_t *pos)
++{
++ struct dazuko_request *u_request;
++ struct xp_daemon_id xp_id;
++ char tmpbuffer[32];
++ char *value;
++ unsigned int size;
++
++ size = length;
++ if (length >= sizeof(tmpbuffer))
++ size = sizeof(tmpbuffer) -1;
++
++ /* copy request pointer string to kernelspace */
++ if (xp_copyin(buffer, tmpbuffer, size) != 0)
++ return -EFAULT;
++
++ tmpbuffer[size] = 0;
++
++ if (dazuko_get_value("\nRA=", buffer, &value) != 0)
++ {
++ rsbac_printk(KERN_WARNING "dazuko: error: linux_dazuko_device_write.RA missing\n");
++ return -EFAULT;
++ }
++
++ u_request = (struct dazuko_request *)simple_strtoul(value, NULL, 10);
++
++ rsbac_kfree(value);
++
++ xp_id.pid = current->pid;
++ xp_id.file = file;
++
++ if (dazuko_handle_user_request(u_request, &xp_id) == 0)
++ return length;
++ else
++ return -EINTR;
++}
++
++long linux_dazuko_device_ioctl(struct file *file, unsigned int cmd, unsigned long param)
++{
++ /* A daemon uses this function to interact with
++ * the kernel. A daemon can set scanning parameters,
++ * give scanning response, and get filenames to scan. */
++
++ struct xp_daemon_id xp_id;
++ int error = 0;
++
++ if (param == 0)
++ {
++ rsbac_printk(KERN_WARNING "dazuko: error: linux_dazuko_device_ioctl(..., 0)\n");
++ return -EFAULT;
++ }
++
++ xp_id.pid = current->pid;
++ xp_id.file = file;
++
++ error = dazuko_handle_user_request_compat12((void *)param, _IOC_NR(cmd), &xp_id);
++
++ if (error != 0)
++ {
++ /* general error occurred */
++
++ return -EPERM;
++ }
++
++ return error;
++}
++
++int linux_dazuko_device_release(struct inode *inode, struct file *file)
++{
++ struct xp_daemon_id xp_id;
++
++ DPRINT(("dazuko: dazuko_device_release() [%d]\n", current->pid));
++
++ xp_id.pid = current->pid;
++ xp_id.file = file;
++
++ return dazuko_unregister_daemon(&xp_id);
++}
++
++
++/************************************************* */
++/* Externally visible functions */
++/************************************************* */
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++int rsbac_init_daz(void)
++#else
++int __init rsbac_init_daz(void)
++#endif
++{
++ if (rsbac_is_initialized())
++ {
++ rsbac_printk(KERN_WARNING "rsbac_init_daz(): RSBAC already initialized\n");
++ return -RSBAC_EREINIT;
++ }
++
++ /* init data structures */
++ rsbac_printk(KERN_INFO "rsbac_init_daz(): Initializing RSBAC: DAZuko subsystem\n");
++
++ dazuko_file_slab = rsbac_slab_create("rsbac_daz_file",
++ sizeof(struct dazuko_file_struct));
++ xp_file_slab = rsbac_slab_create("rsbac_daz_xp_file",
++ sizeof(struct xp_file_struct));
++ xp_daemon_slab = rsbac_slab_create("rsbac_daz_xp_daemon",
++ sizeof(struct xp_daemon_id));
++ dazuko_filename_slab = rsbac_slab_create("rsbac_daz_filename",
++ DAZ_MAX_FILENAME);
++
++ return dazuko_init();
++}
++
++static int daz_ignored(union rsbac_target_id_t tid)
++{
++ union rsbac_attribute_value_t i_attr_val1;
++
++ if (rsbac_get_attr(SW_DAZ,
++ T_FILE,
++ tid,
++ A_daz_do_scan,
++ &i_attr_val1,
++ TRUE)) {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_daz(): rsbac_get_attr() for daz_do_scan returned error!\n");
++ return FALSE;
++ }
++ if(i_attr_val1.daz_do_scan == DAZ_never)
++ return TRUE;
++ return FALSE;
++}
++
++static enum rsbac_adf_req_ret_t daz_check_secoff(rsbac_uid_t owner, enum rsbac_attribute_t attr)
++{
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ switch(attr) {
++ case A_daz_scanned:
++ case A_daz_scanner:
++ case A_system_role:
++ case A_daz_role:
++ case A_daz_do_scan:
++ /* All attributes (remove target!) */
++ case A_none:
++ /* Security Officer? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_DAZ,
++ T_USER,
++ i_tid,
++ A_daz_role,
++ &i_attr_val1,
++ TRUE)) {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_daz(): rsbac_get_attr() returned error!\n");
++ return NOT_GRANTED;
++ }
++ /* if sec_officer, then grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++
++ default:
++ return DO_NOT_CARE;
++ }
++}
++
++inline enum rsbac_adf_req_ret_t
++rsbac_adf_request_daz (enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++{
++ struct dazuko_file_struct *dfs = NULL;
++ struct xp_daemon_id xp_id;
++ int error = 0;
++ int check_error = 0;
++ struct event_properties event_p;
++ int event;
++ int daemon_allowed;
++
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ /* get daz_do_scan for target */
++ switch(target) {
++ case T_FILE:
++ switch(request) {
++ case R_DELETE:
++ if(daz_ignored(tid))
++ return DO_NOT_CARE;
++ event = DAZUKO_ON_UNLINK;
++ daemon_allowed = 1;
++ break;
++ case R_CLOSE:
++ if(daz_ignored(tid))
++ return DO_NOT_CARE;
++ event = DAZUKO_ON_CLOSE;
++ daemon_allowed = 1;
++ break;
++ case R_EXECUTE:
++ if(daz_ignored(tid))
++ return DO_NOT_CARE;
++ event = DAZUKO_ON_EXEC;
++ daemon_allowed = 0;
++ break;
++ case R_READ_WRITE_OPEN:
++ case R_READ_OPEN:
++ if(daz_ignored(tid))
++ return DO_NOT_CARE;
++ event = DAZUKO_ON_OPEN;
++ daemon_allowed = 1;
++ break;
++ case R_READ_ATTRIBUTE:
++ case R_MODIFY_ATTRIBUTE:
++ return daz_check_secoff(owner, attr);
++ default:
++ return DO_NOT_CARE;
++ }
++ break;
++ case T_DIR:
++ switch(request) {
++ case R_DELETE:
++ if(daz_ignored(tid))
++ return DO_NOT_CARE;
++ event = DAZUKO_ON_RMDIR;
++ daemon_allowed = 1;
++ break;
++ case R_READ_ATTRIBUTE:
++ case R_MODIFY_ATTRIBUTE:
++ return daz_check_secoff(owner, attr);
++ default:
++ return DO_NOT_CARE;
++ }
++ break;
++ case T_DEV:
++ switch(request) {
++ case R_READ_WRITE_OPEN:
++ case R_READ_OPEN:
++ case R_APPEND_OPEN:
++ case R_WRITE_OPEN:
++ if( (tid.dev.type == D_char)
++ && (tid.dev.major == CONFIG_RSBAC_DAZ_DEV_MAJOR)
++ ) {
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_DAZ,
++ T_PROCESS,
++ i_tid,
++ A_daz_scanner,
++ &i_attr_val1,
++ FALSE)) {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_daz(): rsbac_get_attr() returned error!\n");
++ return NOT_GRANTED;
++ }
++ /* if scanner, then grant */
++ if (i_attr_val1.daz_scanner)
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++ }
++ else
++ return DO_NOT_CARE;
++ default:
++ return DO_NOT_CARE;
++ }
++ break;
++ case T_PROCESS:
++ switch(request) {
++ case R_READ_ATTRIBUTE:
++ case R_MODIFY_ATTRIBUTE:
++ return daz_check_secoff(owner, attr);
++ default:
++ return DO_NOT_CARE;
++ }
++ break;
++ case T_USER:
++ switch(request) {
++ case R_READ_ATTRIBUTE:
++ case R_MODIFY_ATTRIBUTE:
++ return daz_check_secoff(owner, attr);
++ default:
++ return DO_NOT_CARE;
++ }
++ break;
++ case T_NONE:
++ switch(request) {
++ case R_SWITCH_MODULE:
++ /* we need the switch_target */
++ if(attr != A_switch_target)
++ return NOT_GRANTED;
++ /* do not care for other modules */
++ if( (attr_val.switch_target != SW_DAZ)
++#ifdef CONFIG_RSBAC_SOFTMODE
++ && (attr_val.switch_target != SW_SOFTMODE)
++#endif
++#ifdef CONFIG_RSBAC_FREEZE
++ && (attr_val.switch_target != SW_FREEZE)
++#endif
++ )
++ return DO_NOT_CARE;
++ return daz_check_secoff(owner, attr);
++ default:
++ return DO_NOT_CARE;
++ }
++ break;
++ default:
++ return DO_NOT_CARE;
++ }
++
++/* From here we can only have FILE or DIR targets */
++
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++ if (rsbac_get_attr(SW_DAZ,
++ target,
++ tid,
++ A_daz_scanned,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_daz(): rsbac_get_attr() returned error!\n");
++ return -RSBAC_EREADFAILED;
++ }
++ if(i_attr_val1.daz_scanned == DAZ_clean)
++ return GRANTED;
++#endif
++
++ rsbac_pr_debug(adf_daz, "pid %u (%.15s), scanning required!\n",
++ current->pid, current->comm);
++ xp_id.pid = current->pid;
++ xp_id.file = NULL;
++
++ check_error = dazuko_sys_check(event, daemon_allowed, &xp_id);
++
++ if (!check_error)
++ {
++ dazuko_bzero(&event_p, sizeof(event_p));
++ /*
++ event_p.flags = flags;
++ event_p.set_flags = 1;
++ event_p.mode = mode;
++ event_p.set_mode = 1;
++ */
++ event_p.pid = current->pid;
++ event_p.set_pid = 1;
++ event_p.uid = current_uid();
++ event_p.set_uid = 1;
++
++ dfs = rsbac_smalloc_clear_unlocked(dazuko_file_slab);
++ if (dfs != NULL)
++ {
++ dfs->extra_data = rsbac_smalloc_clear_unlocked(xp_file_slab);
++ if (dfs->extra_data != NULL)
++ {
++ dfs->extra_data->dentry = tid.file.dentry_p;
++
++ error = dazuko_sys_pre(event, dfs, NULL, &event_p);
++
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++ if(error != 2) {
++ if(error == 0)
++ i_attr_val1.daz_scanned = DAZ_clean;
++ else
++ i_attr_val1.daz_scanned = DAZ_infected;
++
++ if (rsbac_set_attr(SW_DAZ,
++ target,
++ tid,
++ A_daz_scanned,
++ i_attr_val1))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_daz(): rsbac_set_attr() returned error!\n");
++ dazuko_file_struct_cleanup(&dfs);
++ return NOT_GRANTED;
++ }
++ }
++#endif
++ rsbac_pr_debug(adf_daz, "pid %u (%.15s), dazuko_sys_pre() result is %i\n",
++ current->pid, current->comm, error);
++ }
++ else
++ {
++ rsbac_sfree(dazuko_file_slab, dfs);
++ dfs = NULL;
++ }
++
++ dazuko_file_struct_cleanup(&dfs);
++ }
++ if(error == 2)
++ return DO_NOT_CARE;
++ if(error == 0) {
++ rsbac_pr_debug(adf_daz, "pid %u (%.15s), file clean!\n",
++ current->pid, current->comm);
++ return GRANTED;
++ } else {
++ rsbac_pr_debug(adf_daz, "pid %u (%.15s), file infected!\n",
++ current->pid, current->comm);
++ return NOT_GRANTED;
++ }
++ }
++ rsbac_pr_debug(adf_daz, "pid %u (%.15s), dazuko_sys_check() result is %i\n",
++ current->pid, current->comm, check_error);
++ return DO_NOT_CARE;
++} /* end of rsbac_adf_request_daz() */
++
++
++/*****************************************************************************/
++/* If the request returned granted and the operation is performed, */
++/* the following function can be called by the AEF to get all aci set */
++/* correctly. For write accesses that are performed fully within the kernel, */
++/* this is usually not done to prevent extra calls, including R_CLOSE for */
++/* cleaning up. Because of this, the write boundary is not adjusted - there */
++/* is no user-level writing anyway... */
++/* The second instance of target specification is the new target, if one has */
++/* been created, otherwise its values are ignored. */
++/* On success, 0 is returned, and an error from rsbac/error.h otherwise. */
++
++inline int rsbac_adf_set_attr_daz(
++ enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_target_t new_target,
++ union rsbac_target_id_t new_tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++{
++ struct dazuko_file_struct *dfs = NULL;
++ struct xp_daemon_id xp_id;
++ int check_error = 0;
++ struct event_properties event_p;
++ int event;
++ int daemon_allowed;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_attribute_value_t i_attr_val2;
++
++ switch(target) {
++ case T_FILE:
++ switch(request) {
++ case R_EXECUTE:
++ /* get daz_scanner for file */
++ if (rsbac_get_attr(SW_DAZ,
++ T_FILE,
++ tid,
++ A_daz_scanner,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_daz(): rsbac_get_attr() returned error!\n");
++ return -RSBAC_EREADFAILED;
++ }
++ /* get for process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_DAZ,
++ T_PROCESS,
++ i_tid,
++ A_daz_scanner,
++ &i_attr_val2,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_daz(): rsbac_get_attr() returned error!\n");
++ return -RSBAC_EREADFAILED;
++ }
++ /* and set for process, if different */
++ if(i_attr_val1.daz_scanner != i_attr_val2.daz_scanner)
++ if (rsbac_set_attr(SW_DAZ,
++ T_PROCESS,
++ i_tid,
++ A_daz_scanner,
++ i_attr_val1))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_daz(): rsbac_set_attr() returned error!\n");
++ return -RSBAC_EWRITEFAILED;
++ }
++ if(daz_ignored(tid))
++ return 0;
++ event = DAZUKO_ON_EXEC;
++ daemon_allowed = 0;
++ break;
++ case R_WRITE:
++ if(daz_ignored(tid))
++ return 0;
++ daz_reset_scanned(tid.file);
++ return 0;
++ case R_CLOSE:
++ if(daz_ignored(tid))
++ return 0;
++ event = DAZUKO_ON_CLOSE;
++ daemon_allowed = 1;
++ if( (attr == A_f_mode)
++ && (attr_val.f_mode & FMODE_WRITE)
++ )
++ daz_reset_scanned(tid.file);
++ break;
++ case R_READ_OPEN:
++ if(daz_ignored(tid))
++ return 0;
++ event = DAZUKO_ON_OPEN;
++ daemon_allowed = 1;
++ break;
++ case R_APPEND_OPEN:
++ case R_READ_WRITE_OPEN:
++ case R_WRITE_OPEN:
++ if(daz_ignored(tid))
++ return 0;
++ daz_reset_scanned(tid.file);
++ event = DAZUKO_ON_OPEN;
++ daemon_allowed = 1;
++ break;
++ case R_DELETE:
++ if(daz_ignored(tid))
++ return 0;
++ daz_reset_scanned(tid.file);
++ event = DAZUKO_ON_UNLINK;
++ daemon_allowed = 1;
++ break;
++ default:
++ return 0;
++ }
++ break;
++ case T_DIR:
++ switch(request) {
++ case R_DELETE:
++ if(daz_ignored(tid))
++ return 0;
++ event = DAZUKO_ON_RMDIR;
++ daemon_allowed = 1;
++ break;
++ default:
++ return 0;
++			}
++			break;
++ case T_PROCESS:
++ switch(request) {
++ case R_CLONE:
++ /* Get daz_scanner from first process */
++ if (rsbac_get_attr(SW_DAZ,
++ T_PROCESS,
++ tid,
++ A_daz_scanner,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_daz(): rsbac_get_attr() returned error!\n");
++ return -RSBAC_EREADFAILED;
++ }
++ /* Set daz_scanner for new process, if set for first */
++ if ( i_attr_val1.daz_scanner
++ && (rsbac_set_attr(SW_DAZ,
++ T_PROCESS,
++ new_tid,
++ A_daz_scanner,
++ i_attr_val1)) )
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_daz(): rsbac_set_attr() returned error!\n");
++ return -RSBAC_EWRITEFAILED;
++ }
++ return 0;
++ default:
++ return 0;
++ }
++ default:
++ return 0;
++ }
++
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++ /* get daz_scanned for file */
++ if (rsbac_get_attr(SW_DAZ,
++ target,
++ tid,
++ A_daz_scanned,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_daz(): rsbac_get_attr() returned error!\n");
++ return -RSBAC_EREADFAILED;
++ }
++ if(i_attr_val1.daz_scanned == DAZ_clean)
++ return 0;
++#endif
++
++ xp_id.pid = current->pid;
++ xp_id.file = NULL;
++
++ check_error = dazuko_sys_check(event, daemon_allowed, &xp_id);
++
++ if (!check_error)
++ {
++ dazuko_bzero(&event_p, sizeof(event_p));
++ /*
++ event_p.flags = flags;
++ event_p.set_flags = 1;
++ event_p.mode = mode;
++ event_p.set_mode = 1;
++ */
++ event_p.pid = current->pid;
++ event_p.set_pid = 1;
++ event_p.uid = current_uid();
++ event_p.set_uid = 1;
++
++ dfs = rsbac_smalloc_clear_unlocked(dazuko_file_slab);
++ if (dfs != NULL)
++ {
++ dfs->extra_data = rsbac_smalloc_clear_unlocked(xp_file_slab);
++ if (dfs->extra_data != NULL)
++ {
++ dfs->extra_data->dentry = tid.file.dentry_p;
++
++ dazuko_sys_post(event, dfs, NULL, &event_p);
++ dazuko_file_struct_cleanup(&dfs);
++ }
++ else
++ {
++ rsbac_sfree(dazuko_file_slab, dfs);
++ dfs = NULL;
++ }
++ }
++ }
++
++ return 0;
++} /* end of rsbac_adf_set_attr_daz() */
+diff --git a/rsbac/adf/daz/dazuko_call.h b/rsbac/adf/daz/dazuko_call.h
+new file mode 100644
+index 0000000..2ba8e1a
+--- /dev/null
++++ b/rsbac/adf/daz/dazuko_call.h
+@@ -0,0 +1,470 @@
++/* Dazuko. Check parameters of XP calls before making real calls.
++ Written by John Ogness <jogness@antivir.de>
++
++ Copyright (c) 2003, 2004 H+BEDV Datentechnik GmbH
++ All rights reserved.
++
++ Redistribution and use in source and binary forms, with or without
++ modification, are permitted provided that the following conditions
++ are met:
++
++ 1. Redistributions of source code must retain the above copyright notice,
++ this list of conditions and the following disclaimer.
++
++ 2. Redistributions in binary form must reproduce the above copyright notice,
++ this list of conditions and the following disclaimer in the documentation
++ and/or other materials provided with the distribution.
++
++ 3. Neither the name of Dazuko nor the names of its contributors may be used
++ to endorse or promote products derived from this software without specific
++ prior written permission.
++
++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
++ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ POSSIBILITY OF SUCH DAMAGE.
++*/
++
++#ifndef DAZUKO_CALL_H
++#define DAZUKO_CALL_H
++
++#include "dazuko_platform.h"
++
++#include "dazuko_xp.h"
++
++#include <rsbac/helpers.h>
++#include <rsbac/debug.h>
++
++struct xp_mutex;
++struct xp_rwlock;
++struct xp_queue;
++struct xp_atomic;
++struct xp_file;
++struct dazuko_file_struct;
++struct xp_daemon_id;
++
++#define call_xp_sys_hook xp_sys_hook
++#define call_xp_sys_unhook xp_sys_unhook
++#define call_xp_print xp_print
++
++
++/* mutex */
++
++static inline int call_xp_init_mutex(struct xp_mutex *mutex)
++{
++ if (mutex == NULL)
++ {
++ rsbac_printk(KERN_WARNING "dazuko: xp_init_mutex(NULL)\n");
++ return -1;
++ }
++
++ return xp_init_mutex(mutex);
++}
++
++static inline int call_xp_down(struct xp_mutex *mutex)
++{
++ if (mutex == NULL)
++ {
++ rsbac_printk(KERN_WARNING "dazuko: warning: xp_down(NULL)\n");
++ return -1;
++ }
++
++ return xp_down(mutex);
++}
++
++static inline int call_xp_up(struct xp_mutex *mutex)
++{
++ if (mutex == NULL)
++ {
++ rsbac_printk(KERN_WARNING "dazuko: warning: xp_up(NULL)\n");
++ return -1;
++ }
++
++ return xp_up(mutex);
++}
++
++static inline int call_xp_destroy_mutex(struct xp_mutex *mutex)
++{
++ if (mutex == NULL)
++ {
++ rsbac_printk(KERN_WARNING "dazuko: warning: xp_destroy_mutex(NULL)\n");
++ return -1;
++ }
++
++ return xp_destroy_mutex(mutex);
++}
++
++
++/* read-write lock */
++
++static inline int call_xp_init_rwlock(struct xp_rwlock *rwlock)
++{
++ if (rwlock == NULL)
++ {
++ rsbac_printk(KERN_WARNING "dazuko: warning: xp_init_rwlock(NULL)\n");
++ return -1;
++ }
++
++ return xp_init_rwlock(rwlock);
++}
++
++static inline int call_xp_write_lock(struct xp_rwlock *rwlock)
++{
++ if (rwlock == NULL)
++ {
++ rsbac_printk(KERN_WARNING "dazuko: warning: xp_write_lock(NULL)\n");
++ return -1;
++ }
++
++ return xp_write_lock(rwlock);
++}
++
++static inline int call_xp_write_unlock(struct xp_rwlock *rwlock)
++{
++ if (rwlock == NULL)
++ {
++ rsbac_printk(KERN_WARNING "dazuko: warning: xp_write_unlock(NULL)\n");
++ return -1;
++ }
++
++ return xp_write_unlock(rwlock);
++}
++
++static inline int call_xp_read_lock(struct xp_rwlock *rlock)
++{
++ if (rlock == NULL)
++ {
++ rsbac_printk(KERN_WARNING "dazuko: warning: xp_read_lock(NULL)\n");
++ return -1;
++ }
++
++ return xp_read_lock(rlock);
++}
++
++static inline int call_xp_read_unlock(struct xp_rwlock *rlock)
++{
++ if (rlock == NULL)
++ {
++ rsbac_printk(KERN_WARNING "dazuko: warning: xp_read_unlock(NULL)\n");
++ return -1;
++ }
++
++ return xp_read_unlock(rlock);
++}
++
++static inline int call_xp_destroy_rwlock(struct xp_rwlock *rwlock)
++{
++ if (rwlock == NULL)
++ {
++ rsbac_printk(KERN_WARNING "dazuko: warning: xp_destroy_rwlock(NULL)\n");
++ return -1;
++ }
++
++ return xp_destroy_rwlock(rwlock);
++}
++
++
++/* wait-notify queue */
++
++static inline int call_xp_init_queue(struct xp_queue *queue)
++{
++ if (queue == NULL)
++ {
++ rsbac_printk(KERN_WARNING "dazuko: warning: xp_init_queue(NULL)\n");
++ return -1;
++ }
++
++ return xp_init_queue(queue);
++}
++
++static inline int call_xp_wait_until_condition(struct xp_queue *queue, int (*cfunction)(void *), void *cparam, int allow_interrupt)
++{
++ if (queue == NULL)
++ {
++ rsbac_printk(KERN_WARNING "dazuko: warning: xp_wait_until_condition(queue=NULL)\n");
++ return -1;
++ }
++
++ if (cfunction == NULL)
++ {
++ rsbac_printk(KERN_WARNING "dazuko: warning: xp_wait_until_condition(cfunction=NULL)\n");
++ return -1;
++ }
++
++ return xp_wait_until_condition(queue, cfunction, cparam, allow_interrupt);
++}
++
++static inline int call_xp_notify(struct xp_queue *queue)
++{
++ if (queue == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_notify(NULL)\n");
++ return -1;
++ }
++
++ return xp_notify(queue);
++}
++
++static inline int call_xp_destroy_queue(struct xp_queue *queue)
++{
++ if (queue == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_destroy_queue(NULL)\n");
++ return -1;
++ }
++
++ return xp_destroy_queue(queue);
++}
++
++
++/* memory */
++
++static inline int call_xp_copyin(const void *user_src, void *kernel_dest, size_t size)
++{
++ if (user_src == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_copyin(user_src=NULL)\n");
++ return -1;
++ }
++
++ if (kernel_dest == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_copyin(kernel_dest=NULL)\n");
++ return -1;
++ }
++
++ if (size < 1)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_copyin(size=%zu)\n", size);
++ return 0;
++ }
++
++ return xp_copyin(user_src, kernel_dest, size);
++}
++
++static inline int call_xp_copyout(const void *kernel_src, void *user_dest, size_t size)
++{
++ if (kernel_src == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_copyout(kernel_src=NULL)\n");
++ return -1;
++ }
++
++ if (user_dest == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_copyout(user_dest=NULL)\n");
++ return -1;
++ }
++
++ if (size < 1)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_copyout(size=%zu)\n", size);
++ return 0;
++ }
++
++ return xp_copyout(kernel_src, user_dest, size);
++}
++
++static inline int call_xp_verify_user_writable(const void *user_ptr, size_t size)
++{
++ if (user_ptr == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_verify_user_writable(user_ptr=NULL)\n");
++ return -1;
++ }
++
++ if (size < 1)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_verify_user_writable(size=%zu)\n", size);
++ return -1;
++ }
++
++ return xp_verify_user_writable(user_ptr, size);
++}
++
++static inline int call_xp_verify_user_readable(const void *user_ptr, size_t size)
++{
++ if (user_ptr == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_verify_user_readable(user_ptr=NULL)\n");
++ return -1;
++ }
++
++ if (size < 1)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_verify_user_readable(size=%zu)\n", size);
++ return -1;
++ }
++
++ return xp_verify_user_readable(user_ptr, size);
++}
++
++
++/* path attribute */
++
++static inline int call_xp_is_absolute_path(const char *path)
++{
++ if (path == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_is_absolute_path(NULL)\n");
++ return 0;
++ }
++
++ return xp_is_absolute_path(path);
++}
++
++
++/* atomic */
++
++static inline int call_xp_atomic_set(struct xp_atomic *atomic, int value)
++{
++ if (atomic == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_atomic_set(atomic=NULL)\n");
++ return -1;
++ }
++
++ return xp_atomic_set(atomic, value);
++}
++
++static inline int call_xp_atomic_inc(struct xp_atomic *atomic)
++{
++ if (atomic == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_atomic_inc(NULL)\n");
++ return -1;
++ }
++
++ return xp_atomic_inc(atomic);
++}
++
++static inline int call_xp_atomic_dec(struct xp_atomic *atomic)
++{
++ if (atomic == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_atomic_dec(NULL)\n");
++ return -1;
++ }
++
++ return xp_atomic_dec(atomic);
++}
++
++static inline int call_xp_atomic_read(struct xp_atomic *atomic)
++{
++ if (atomic == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_atomic_read(NULL)\n");
++ return -1;
++ }
++
++ return xp_atomic_read(atomic);
++}
++
++
++/* file descriptor */
++
++static inline int call_xp_copy_file(struct xp_file *dest, struct xp_file *src)
++{
++ if (dest == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_copy_file(dest=NULL)\n");
++ return -1;
++ }
++
++ if (src == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_copy_file(src=NULL)\n");
++ return -1;
++ }
++
++ return xp_copy_file(dest, src);
++}
++
++static inline int call_xp_compare_file(struct xp_file *file1, struct xp_file *file2)
++{
++ if (file1 == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_compare_file(file1=NULL)\n");
++ return -1;
++ }
++
++ if (file2 == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_compare_file(file2=NULL)\n");
++ return -1;
++ }
++
++ return xp_compare_file(file1, file2);
++}
++
++
++/* file structure */
++
++static inline int call_xp_fill_file_struct(struct dazuko_file_struct *dfs)
++{
++ if (dfs == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_fill_file_struct(NULL)\n");
++ return -1;
++ }
++
++ return xp_fill_file_struct(dfs);
++}
++
++
++/* daemon id */
++
++static inline int call_xp_id_compare(struct xp_daemon_id *id1, struct xp_daemon_id *id2)
++{
++ if (id1 == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_id_compare(id1=NULL)\n");
++ return -1;
++ }
++
++ if (id2 == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_id_compare(id2=NULL)\n");
++ return -1;
++ }
++
++ return xp_id_compare(id1, id2);
++}
++
++static inline int call_xp_id_free(struct xp_daemon_id *id)
++{
++ if (id == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_id_free(NULL)\n");
++ return 0;
++ }
++
++ return xp_id_free(id);
++}
++
++static inline struct xp_daemon_id* call_xp_id_copy(struct xp_daemon_id *id)
++{
++ struct xp_daemon_id *ptr;
++
++ if (id == NULL)
++ {
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_id_copy(NULL)\n");
++ return NULL;
++ }
++
++ ptr = xp_id_copy(id);
++
++ if (ptr == NULL)
++		rsbac_printk(KERN_WARNING "dazuko: warning: xp_id_copy() -> NULL\n");
++
++ return ptr;
++}
++
++#endif
+diff --git a/rsbac/adf/daz/dazuko_linux26.h b/rsbac/adf/daz/dazuko_linux26.h
+new file mode 100644
+index 0000000..fecce8d
+--- /dev/null
++++ b/rsbac/adf/daz/dazuko_linux26.h
+@@ -0,0 +1,82 @@
++/* Dazuko Linux. Allow Linux 2.6 file access control for 3rd-party applications.
++ Copyright (c) 2003, 2004 H+BEDV Datentechnik GmbH
++ Written by John Ogness <jogness@antivir.de>
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License
++ as published by the Free Software Foundation; either version 2
++ of the License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++*/
++
++#ifndef DAZUKO_LINUX26_H
++#define DAZUKO_LINUX26_H
++
++#include <linux/module.h>
++#include <linux/semaphore.h>
++
++#define DEVICE_NAME "dazuko"
++
++#define XP_ERROR_PERMISSION -EPERM;
++#define XP_ERROR_INTERRUPT -EINTR;
++#define XP_ERROR_BUSY -EBUSY;
++#define XP_ERROR_FAULT -EFAULT;
++#define XP_ERROR_INVALID -EINVAL;
++#define XP_ERROR_NODEVICE -ENODEV;
++
++
++struct xp_daemon_id
++{
++ int pid;
++ struct file *file;
++};
++
++struct xp_file
++{
++ char file;
++};
++
++struct xp_mutex
++{
++ struct semaphore mutex;
++};
++
++struct xp_atomic
++{
++ atomic_t atomic;
++};
++
++struct xp_file_struct
++{
++ int full_filename_length;
++ char *full_filename;
++ int free_full_filename;
++ struct dentry *dentry;
++ int dput_dentry;
++ char *buffer;
++ int free_page_buffer;
++ struct nameidata *nd;
++ struct vfsmount *vfsmount;
++ int mntput_vfsmount;
++ struct inode *inode;
++};
++
++struct xp_queue
++{
++ wait_queue_head_t queue;
++};
++
++struct xp_rwlock
++{
++ rwlock_t rwlock;
++};
++
++#endif
+diff --git a/rsbac/adf/daz/dazuko_platform.h b/rsbac/adf/daz/dazuko_platform.h
+new file mode 100644
+index 0000000..b8ac821
+--- /dev/null
++++ b/rsbac/adf/daz/dazuko_platform.h
+@@ -0,0 +1,2 @@
++#include <linux/version.h>
++#include "dazuko_linux26.h"
+diff --git a/rsbac/adf/daz/dazuko_rsbac.h b/rsbac/adf/daz/dazuko_rsbac.h
+new file mode 100644
+index 0000000..3519be9
+--- /dev/null
++++ b/rsbac/adf/daz/dazuko_rsbac.h
+@@ -0,0 +1,100 @@
++/* Dazuko RSBAC. Allow RSBAC Linux file access control for 3rd-party applications.
++ Copyright (c) 2004 H+BEDV Datentechnik GmbH
++ Written by John Ogness <jogness@antivir.de>
++
++ Copyright (c) 2004-2010 Amon Ott <ao@rsbac.org>
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License
++ as published by the Free Software Foundation; either version 2
++ of the License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++*/
++
++#ifndef DAZUKO_RSBAC_H
++#define DAZUKO_RSBAC_H
++
++#ifdef CONFIG_MODVERSIONS
++#define MODVERSIONS
++#include <config/modversions.h>
++#endif
++
++#include <linux/kernel.h>
++#include <linux/version.h>
++
++#ifdef MODULE
++#include <linux/module.h>
++#endif
++
++#ifndef KERNEL_VERSION
++#define KERNEL_VERSION(a,b,c) ((a)*65536+(b)*256+(c))
++#endif
++
++#include <linux/slab.h>
++#include <asm/atomic.h>
++
++#ifdef CONFIG_SMP
++#ifndef __SMP__
++#define __SMP__
++#endif
++#endif
++
++#include <linux/semaphore.h>
++
++
++#define DEVICE_NAME "dazuko"
++
++#define XP_ERROR_PERMISSION -EPERM;
++#define XP_ERROR_INTERRUPT -EINTR;
++#define XP_ERROR_BUSY -EBUSY;
++#define XP_ERROR_FAULT -EFAULT;
++#define XP_ERROR_INVALID -EINVAL;
++
++
++struct xp_daemon_id
++{
++ int pid;
++ struct file *file;
++};
++
++struct xp_file
++{
++ char c;
++};
++
++struct xp_mutex
++{
++ struct semaphore mutex;
++};
++
++struct xp_atomic
++{
++ atomic_t atomic;
++};
++
++struct xp_file_struct
++{
++ int full_filename_length; /* length of filename */
++ char *full_filename; /* kernelspace filename with full path */
++ struct dentry *dentry; /* used to get inode */
++};
++
++struct xp_queue
++{
++ wait_queue_head_t queue;
++};
++
++struct xp_rwlock
++{
++ rwlock_t rwlock;
++};
++
++#endif
+diff --git a/rsbac/adf/daz/dazuko_xp.c b/rsbac/adf/daz/dazuko_xp.c
+new file mode 100644
+index 0000000..cc9265b
+--- /dev/null
++++ b/rsbac/adf/daz/dazuko_xp.c
+@@ -0,0 +1,2903 @@
++/* DazukoXP. Allow cross platform file access control for 3rd-party applications.
++ Written by John Ogness <jogness@antivir.de>
++
++ Copyright (c) 2002, 2003, 2004 H+BEDV Datentechnik GmbH
++ Copyright (c) 2004-2011 Amon Ott <ao@rsbac.org>
++
++ All rights reserved.
++
++ Redistribution and use in source and binary forms, with or without
++ modification, are permitted provided that the following conditions
++ are met:
++
++ 1. Redistributions of source code must retain the above copyright notice,
++ this list of conditions and the following disclaimer.
++
++ 2. Redistributions in binary form must reproduce the above copyright notice,
++ this list of conditions and the following disclaimer in the documentation
++ and/or other materials provided with the distribution.
++
++ 3. Neither the name of Dazuko nor the names of its contributors may be used
++ to endorse or promote products derived from this software without specific
++ prior written permission.
++
++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
++ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ POSSIBILITY OF SUCH DAMAGE.
++*/
++
++#include <rsbac/types.h>
++#include <rsbac/debug.h>
++
++#include "dazuko_platform.h"
++
++#include "dazuko_xp.h"
++#include "dazukoio.h"
++
++#include "dazuko_call.h"
++
++#define NUM_SLOT_LISTS 5
++#define NUM_SLOTS 25
++
++#define SCAN_ON_OPEN (access_mask & DAZUKO_ON_OPEN)
++#define SCAN_ON_CLOSE (access_mask & DAZUKO_ON_CLOSE)
++#define SCAN_ON_EXEC (access_mask & DAZUKO_ON_EXEC)
++#define SCAN_ON_CLOSE_MODIFIED (access_mask & DAZUKO_ON_CLOSE_MODIFIED)
++
++struct dazuko_path
++{
++ /* A node in a linked list of paths. Used
++ * for the include and exclude lists. */
++
++ struct dazuko_path *next;
++ int len;
++ char path[1]; /* this MUST be at the end of the struct */
++};
++
++struct hash
++{
++ /* A node in a linked list of filenames.
++ * Used for the list of files to be
++ * scanned on close. */
++
++ struct hash *next;
++ struct xp_file file;
++ int dirty;
++ int namelen;
++ char name[1]; /* this MUST be at the end of the struct */
++};
++
++struct daemon_id
++{
++ int unique;
++ struct xp_daemon_id *xp_id;
++};
++
++struct slot
++{
++ /* A representation of a daemon. It holds
++ * all information about the daemon, the
++ * file that is scanned, and the state of
++ * the scanning process. */
++
++ int id;
++ struct daemon_id did; /* identifier for our daemon */
++ int write_mode;
++ int state;
++ int response;
++ int event;
++ int filenamelength; /* not including terminator */
++ char *filename;
++ struct event_properties event_p;
++ struct file_properties file_p;
++ struct xp_mutex mutex;
++};
++
++struct slot_list
++{
++ struct xp_atomic use_count;
++ struct slot slots[NUM_SLOTS];
++ char reg_name[1]; /* this MUST be at the end of the struct */
++};
++
++struct slot_list_container
++{
++ struct slot_list *slot_list;
++ struct xp_mutex mutex;
++};
++
++struct one_slot_state_not_condition_param
++{
++ struct slot *slot;
++ int state;
++};
++
++struct two_slot_state_not_condition_param
++{
++ struct slot *slot1;
++ int state1;
++ struct slot *slot2;
++ int state2;
++};
++
++struct get_ready_slot_condition_param
++{
++ struct slot *slot;
++ struct slot_list *slotlist;
++};
++
++static int unique_count = 1;
++static char access_mask = 7;
++static struct slot_list_container slot_lists[NUM_SLOT_LISTS];
++static struct dazuko_path *incl_paths = NULL;
++static struct dazuko_path *excl_paths = NULL;
++static struct hash *hash = NULL;
++static struct xp_rwlock lock_hash;
++static struct xp_rwlock lock_lists;
++static struct xp_atomic active;
++static struct xp_mutex mutex_unique_count;
++
++static struct xp_queue wait_kernel_waiting_for_free_slot;
++static struct xp_queue wait_daemon_waiting_for_work;
++static struct xp_queue wait_kernel_waiting_while_daemon_works;
++static struct xp_queue wait_daemon_waiting_for_free;
++
++#ifdef CONFIG_RSBAC_DAZ_SELECT
++static struct kmem_cache * dazuko_file_listnode_slab = NULL;
++#endif
++static struct kmem_cache * dazuko_request_slab = NULL;
++static struct kmem_cache * access_compat12_slab = NULL;
++
++int dazuko_vsnprintf(char *str, size_t size, const char *format, va_list ap)
++{
++ char *target;
++ const char *end;
++ int overflow = 0;
++ char number_buffer[32]; /* 32 should be enough to hold any number, right? */
++ const char *s;
++
++ if (str == NULL || size < 1 || format == NULL)
++ return -1;
++
++ target = str;
++ end = (target + size) - 1;
++
++#define DAZUKO_VSNPRINTF_PRINTSTRING \
++ for ( ; *s ; s++) \
++ { \
++ if (target == end) \
++ { \
++ overflow = 1; \
++ goto dazuko_vsnprintf_out; \
++ } \
++ *target = *s; \
++ target++; \
++ }
++
++ for ( ; *format ; format++)
++ {
++ if (target == end)
++ {
++ overflow = 1;
++ goto dazuko_vsnprintf_out;
++ }
++
++ if (*format == '%')
++ {
++ format++;
++
++ switch (*format)
++ {
++ case 's': /* %s */
++ s = va_arg(ap, char *);
++ if (s == NULL)
++ s = "(null)";
++ DAZUKO_VSNPRINTF_PRINTSTRING
++ break;
++
++ case 'd': /* %d */
++ sprintf(number_buffer, "%d", va_arg(ap, int));
++ s = number_buffer;
++ DAZUKO_VSNPRINTF_PRINTSTRING
++ break;
++
++ case 'c': /* %c */
++ *target = va_arg(ap, int);
++ target++;
++ break;
++
++ case 'l': /* %lu */
++ format++;
++ if (*format != 'u')
++ {
++ /* print error message */
++ goto dazuko_vsnprintf_out;
++ }
++ sprintf(number_buffer, "%lu", va_arg(ap, unsigned long));
++ s = number_buffer;
++ DAZUKO_VSNPRINTF_PRINTSTRING
++ break;
++
++ case '0': /* %02x */
++ format++;
++ if (*format != '2')
++ {
++ /* print error message */
++ goto dazuko_vsnprintf_out;
++ }
++ format++;
++ if (*format != 'x')
++ {
++ /* print error message */
++ goto dazuko_vsnprintf_out;
++ }
++ sprintf(number_buffer, "%02x", va_arg(ap, int));
++ s = number_buffer;
++ DAZUKO_VSNPRINTF_PRINTSTRING
++ break;
++
++ default:
++ /* print error message */
++ goto dazuko_vsnprintf_out;
++ }
++ }
++ else
++ {
++ *target = *format;
++ target++;
++ }
++ }
++
++dazuko_vsnprintf_out:
++
++ *target = 0;
++
++ /* We are returning what we've written. If there was an
++ * overflow, the returned value will match "size" rather
++ * than being less than "size"
++ */
++
++ return ((target - str) + overflow);
++}
++
++int dazuko_snprintf(char *str, size_t size, const char *format, ...)
++{
++ va_list ap;
++ int ret;
++
++ va_start(ap, format);
++ ret = dazuko_vsnprintf(str, size, format, ap);
++ va_end(ap);
++
++ return ret;
++}
++
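++/* Usage sketch for the helpers above, with made-up key names and variables.
++ * dazuko_snprintf() only knows the conversions implemented in
++ * dazuko_vsnprintf(), i.e. %s, %d, %c, %lu and %02x; any other specifier
++ * aborts the formatting. On overflow the return value equals "size" and the
++ * buffer is still 0-terminated:
++ *
++ *	char buf[64];
++ *	int n;
++ *
++ *	n = dazuko_snprintf(buf, sizeof(buf), "\nEV=%d\nUI=%lu",
++ *	                    event, (unsigned long)uid);
++ *
++ * n == sizeof(buf) then signals that the output was truncated.
++ */
++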
++inline void dazuko_bzero(void *p, int len)
++{
++ /* "zero out" len bytes starting with p */
++
++ char *ptr = (char *)p;
++
++ while (len--)
++ *ptr++ = 0;
++}
++
++static inline int dazuko_get_new_unique(void)
++{
++ int unique;
++
++/* DOWN */
++ call_xp_down(&mutex_unique_count);
++
++ unique = unique_count;
++ unique_count++;
++
++ call_xp_up(&mutex_unique_count);
++/* UP */
++
++ return unique;
++}
++
++static inline int dazuko_slot_state(struct slot *s)
++{
++ int state;
++
++/* DOWN */
++ if (call_xp_down(&(s->mutex)) != 0)
++ return XP_ERROR_INTERRUPT;
++
++ state = s->state;
++
++ call_xp_up(&(s->mutex));
++/* UP */
++
++ return state;
++}
++
++static int one_slot_state_not_condition(void *param)
++{
++ return (dazuko_slot_state(((struct one_slot_state_not_condition_param *)param)->slot)
++ != ((struct one_slot_state_not_condition_param *)param)->state);
++}
++
++static int two_slot_state_not_condition(void *param)
++{
++ return (dazuko_slot_state(((struct two_slot_state_not_condition_param *)param)->slot1)
++ != ((struct two_slot_state_not_condition_param *)param)->state1
++ && dazuko_slot_state(((struct two_slot_state_not_condition_param *)param)->slot2)
++ != ((struct two_slot_state_not_condition_param *)param)->state2);
++}
++
++static inline int __dazuko_change_slot_state(struct slot *s, int from_state, int to_state)
++{
++ /* Make a predicted state transition. We fail if it
++ * is an unpredicted change. We can ALWAYS go to the
++ * to_state if it is the same as from_state. Not SMP safe! */
++
++ if (to_state != from_state)
++ {
++ /* make sure this is a predicted transition and there
++ * is a daemon on this slot (unique != 0)*/
++ if (s->state != from_state || s->did.unique == 0)
++ return 0;
++ }
++
++ s->state = to_state;
++
++ /* handle appropriate wake_up's for basic
++ * state changes */
++
++ if (to_state == DAZUKO_READY)
++ {
++ call_xp_notify(&wait_kernel_waiting_for_free_slot);
++ }
++ else if (to_state == DAZUKO_FREE)
++ {
++ call_xp_notify(&wait_kernel_waiting_while_daemon_works);
++ call_xp_notify(&wait_daemon_waiting_for_free);
++ }
++
++ return 1;
++}
++
++static int dazuko_change_slot_state(struct slot *s, int from_state, int to_state, int release)
++{
++ /* SMP safe version of __dazuko_change_slot_state().
++ * This should only be used if we haven't
++	 * already acquired slot.mutex. Use this function
++ * with CAUTION, since the mutex may or may not
++ * be released depending on the return value AND
++ * on the value of the "release" argument. */
++
++ int success;
++
++ /* if we are interrupted, report the state as unpredicted */
++/* DOWN */
++ if (call_xp_down(&(s->mutex)) != 0)
++ return 0;
++
++ success = __dazuko_change_slot_state(s, from_state, to_state);
++
++ /* the mutex is released if the state change was
++	 * unpredicted or if the caller wants it released */
++ if (!success || release)
++ call_xp_up(&(s->mutex));
++/* UP */
++ return success;
++}
++
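++/* Slot life cycle, as far as it can be inferred from the helpers in this
++ * file (the kernel-side transitions are handled elsewhere and are an
++ * assumption here):
++ *
++ *	DAZUKO_FREE    -> DAZUKO_READY    daemon asks for work (dazuko_get_an_access)
++ *	DAZUKO_READY   -> DAZUKO_WAITING  kernel assigns a file to the slot
++ *	DAZUKO_WAITING -> DAZUKO_WORKING  daemon picks the file up
++ *	DAZUKO_WORKING -> DAZUKO_DONE     daemon returns its verdict (dazuko_return_access)
++ *	DAZUKO_DONE    -> DAZUKO_FREE     kernel consumes the verdict
++ *
++ * DAZUKO_BROKEN is entered when a daemon is interrupted mid-transition, so
++ * that waiting kernel threads can be woken up and carry on.
++ */
++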
++static struct slot * _dazuko_find_slot(struct daemon_id *did, int release, struct slot_list *sl)
++{
++ /* Find the first slot with the same given
++ * pid number. SMP safe. Use this function
++ * with CAUTION, since the mutex may or may not
++ * be released depending on the return value AND
++ * on the value of the "release" argument. */
++
++ int i;
++ struct slot *s = NULL;
++
++ if (sl == NULL)
++ {
++ rsbac_printk(KERN_WARNING "dazuko: invalid slot_list given (bug!)\n");
++ return NULL;
++ }
++
++ for (i=0 ; i<NUM_SLOTS ; i++)
++ {
++ s = &(sl->slots[i]);
++/* DOWN */
++ /* if we are interrupted, we say that no
++ * slot was found */
++ if (call_xp_down(&(s->mutex)) != 0)
++ return NULL;
++
++ if (did == NULL)
++ {
++ /* we are looking for an empty slot */
++ if (s->did.unique == 0 && s->did.xp_id == NULL)
++ {
++ /* we release the mutex only if the
++				 * caller wanted us to */
++ if (release)
++ call_xp_up(&(s->mutex));
++/* UP */
++ return s;
++ }
++ }
++ else if (s->did.unique == 0 && s->did.xp_id == NULL)
++ {
++			/* this slot is empty, so it can't match */
++
++ /* do nothing */
++ }
++ /* xp_id's must match! */
++ else if (call_xp_id_compare(s->did.xp_id, did->xp_id) == 0)
++ {
++ /* unique's must also match (unless unique is negative,
++ * in which case we will trust xp_id) */
++ if (did->unique < 0 || (s->did.unique == did->unique))
++ {
++ /* we release the mutex only if the
++				 * caller wanted us to */
++ if (release)
++ call_xp_up(&(s->mutex));
++/* UP */
++ return s;
++ }
++ }
++
++ call_xp_up(&(s->mutex));
++/* UP */
++ }
++
++ return NULL;
++}
++
++static struct slot * dazuko_find_slot_and_slotlist(struct daemon_id *did, int release, struct slot_list *slist, struct slot_list **sl_result)
++{
++ struct slot *s;
++ int i;
++ struct slot_list *sl;
++
++ if (slist == NULL)
++ {
++ for (i=0 ; i<NUM_SLOT_LISTS ; i++)
++ {
++/* DOWN */
++ /* if we are interrupted, we say that no
++ * slot was found */
++ if (call_xp_down(&(slot_lists[i].mutex)) != 0)
++ return NULL;
++
++ sl = slot_lists[i].slot_list;
++
++ call_xp_up(&(slot_lists[i].mutex));
++/* UP */
++
++ if (sl != NULL)
++ {
++ s = _dazuko_find_slot(did, release, sl);
++ if (s != NULL)
++ {
++ /* set the current slot_list */
++ if (sl_result != NULL)
++ *sl_result = sl;
++
++ return s;
++ }
++ }
++ }
++ }
++ else
++ {
++ return _dazuko_find_slot(did, release, slist);
++ }
++
++ return NULL;
++}
++
++static inline struct slot * dazuko_find_slot(struct daemon_id *did, int release, struct slot_list *slist)
++{
++ return dazuko_find_slot_and_slotlist(did, release, slist, NULL);
++}
++
++static int dazuko_insert_path_fs(struct dazuko_path **list, char *fs_path, int fs_len)
++{
++ /* Create a new struct dazuko_path structure and insert it
++ * into the linked list given (list argument).
++ * The fs_len argument is to help speed things
++ * up so we don't have to calculate the length
++ * of fs_path. */
++
++ struct dazuko_path *newitem;
++ struct dazuko_path *tmp;
++
++ if (fs_path == NULL || fs_len < 1)
++ return XP_ERROR_INVALID;
++
++ /* we want only absolute paths */
++ if (!call_xp_is_absolute_path(fs_path))
++ return XP_ERROR_INVALID;
++
++ /* create a new struct dazuko_path structure making room for path also */
++ newitem = rsbac_kmalloc(sizeof(struct dazuko_path) + fs_len + 1);
++ if (newitem == NULL)
++ return XP_ERROR_FAULT;
++
++ /* fs_path is already in kernelspace */
++ memcpy(newitem->path, fs_path, fs_len);
++
++ newitem->path[fs_len] = 0;
++
++ while (newitem->path[fs_len-1] == 0)
++ {
++ fs_len--;
++ if (fs_len == 0)
++ break;
++ }
++
++ if (fs_len < 1)
++ {
++ rsbac_kfree(newitem);
++ return XP_ERROR_INVALID;
++ }
++
++ newitem->len = fs_len;
++
++ /* check if this path already exists in the list */
++ for (tmp=*list ; tmp ; tmp=tmp->next)
++ {
++ if (newitem->len == tmp->len)
++ {
++ if (memcmp(newitem->path, tmp->path, tmp->len) == 0)
++ {
++ /* we already have this path */
++
++ rsbac_kfree(newitem);
++
++ return 0;
++ }
++ }
++ }
++
++ DPRINT(("dazuko: adding %s %s\n", (list == &incl_paths) ? "incl" : "excl", newitem->path));
++
++ /* add struct dazuko_path to head of linked list */
++/* LOCK */
++ call_xp_write_lock(&lock_lists);
++ newitem->next = *list;
++ *list = newitem;
++ call_xp_write_unlock(&lock_lists);
++/* UNLOCK */
++
++ return 0;
++}
++
++static void dazuko_remove_all_hash(void)
++{
++ /* Empty the hash linked list. */
++
++ struct hash *tmp;
++
++/* LOCK */
++ call_xp_write_lock(&lock_hash);
++ while (hash)
++ {
++ tmp = hash;
++ hash = hash->next;
++
++ DPRINT(("dazuko: removing hash %s\n", tmp->name));
++
++ rsbac_kfree(tmp);
++ }
++ call_xp_write_unlock(&lock_hash);
++/* UNLOCK */
++}
++
++static void dazuko_remove_all_paths(void)
++{
++ /* Empty both include and exclude struct dazuko_path
++ * linked lists. */
++
++ struct dazuko_path *tmp;
++
++/* LOCK */
++ call_xp_write_lock(&lock_lists);
++
++ /* empty include paths list */
++ while (incl_paths)
++ {
++ tmp = incl_paths;
++ incl_paths = incl_paths->next;
++
++ DPRINT(("dazuko: removing incl %s\n", tmp->path));
++
++ rsbac_kfree(tmp);
++ }
++
++ /* empty exclude paths list */
++ while (excl_paths)
++ {
++ tmp = excl_paths;
++ excl_paths = excl_paths->next;
++
++ DPRINT(("dazuko: removing excl %s\n", tmp->path));
++
++ rsbac_kfree(tmp);
++ }
++
++ call_xp_write_unlock(&lock_lists);
++/* UNLOCK */
++}
++
++static int _dazuko_unregister_daemon(struct daemon_id *did)
++{
++ /* We unregister the daemon by finding the
++	 * slot with the same slot->pid as the
++ * current process id, the daemon. */
++
++ struct slot *s;
++ struct slot_list *sl;
++
++ DPRINT(("dazuko: dazuko_unregister_daemon() [%d]\n", did->unique));
++
++ /* find our slot and hold the mutex
++ * if we find it */
++/* DOWN? */
++ s = dazuko_find_slot_and_slotlist(did, 0, NULL, &sl);
++
++ if (s == NULL)
++ {
++ /* this daemon was not registered */
++ return 0;
++ }
++
++/* DOWN */
++
++ /* clearing the unique and pid makes the slot available */
++ s->did.unique = 0;
++ call_xp_id_free(s->did.xp_id);
++ s->did.xp_id = NULL;
++
++ /* reset slot state */
++ __dazuko_change_slot_state(s, DAZUKO_FREE, DAZUKO_FREE);
++
++ call_xp_atomic_dec(&(sl->use_count));
++
++ call_xp_up(&(s->mutex));
++/* UP */
++
++ /* active should always be positive here, but
++ * let's check just to be sure. ;) */
++ if (call_xp_atomic_read(&active) > 0)
++ {
++ /* active and the kernel usage counter
++ * should always reflect how many daemons
++ * are active */
++
++ call_xp_atomic_dec(&active);
++ }
++ else
++ {
++ rsbac_printk(KERN_WARNING "dazuko: active count error (possible bug)\n");
++ }
++
++ /* Wake up any kernel processes that are
++ * waiting for an available slot. Remove
++ * all the include and exclude paths
++ * if there are no more daemons */
++
++ if (call_xp_atomic_read(&active) == 0)
++ {
++ /* clear out include and exclude paths */
++ /* are we sure we want to do this? */
++ dazuko_remove_all_paths();
++
++ /* clear out hash nodes */
++ dazuko_remove_all_hash();
++ }
++
++ call_xp_notify(&wait_kernel_waiting_for_free_slot);
++ call_xp_notify(&wait_kernel_waiting_while_daemon_works);
++
++ return 0;
++}
++
++int dazuko_unregister_daemon(struct xp_daemon_id *xp_id)
++{
++ struct daemon_id did;
++ int ret;
++
++ if (xp_id == NULL)
++ return 0;
++
++ did.unique = -1;
++ did.xp_id = call_xp_id_copy(xp_id);
++
++ ret = _dazuko_unregister_daemon(&did);
++
++ call_xp_id_free(did.xp_id);
++
++ return ret;
++}
++
++static inline int dazuko_state_error(struct slot *s, int current_state)
++{
++ if (dazuko_change_slot_state(s, current_state, DAZUKO_BROKEN, 1))
++ {
++ call_xp_notify(&wait_kernel_waiting_for_free_slot);
++ call_xp_notify(&wait_kernel_waiting_while_daemon_works);
++ }
++
++ return 0;
++}
++
++static int dazuko_register_daemon(struct daemon_id *did, const char *reg_name, int string_length, int write_mode)
++{
++ const char *p1;
++ char *p2;
++ struct slot *s;
++ struct slot_list *sl;
++ int i;
++
++	if (did == NULL || reg_name == NULL)
++		return XP_ERROR_PERMISSION;
++
++	rsbac_pr_debug(adf_daz, "Registering daemon %s [%d]\n", reg_name, did->unique);
++
++ s = dazuko_find_slot(did, 1, NULL);
++
++ if (s != NULL)
++ {
++ /* We are already registered! */
++
++ rsbac_printk(KERN_INFO "dazuko: daemon %d already assigned to slot[%d]\n", did->unique, s->id);
++
++ return XP_ERROR_PERMISSION;
++ }
++
++ /* Find the slot_list with the matching name. */
++
++ for (i=0 ; i<NUM_SLOT_LISTS ; i++)
++ {
++/* DOWN */
++ /* if we are interrupted, we say that it
++ * was interrupted */
++ if (call_xp_down(&(slot_lists[i].mutex)) != 0)
++ return XP_ERROR_INTERRUPT;
++
++ sl = slot_lists[i].slot_list;
++
++ call_xp_up(&(slot_lists[i].mutex));
++/* UP */
++
++ if (sl != NULL)
++ {
++ p1 = reg_name;
++ p2 = sl->reg_name;
++
++ while (*p1 == *p2)
++ {
++ if (*p1 == 0)
++ break;
++
++ p1++;
++ p2++;
++ }
++
++ if (*p1 == *p2)
++ break;
++ }
++ }
++
++ if (i == NUM_SLOT_LISTS)
++ {
++ /* There is no slot_list with this name. We
++ * need to make one. */
++
++ sl = rsbac_kmalloc_clear_unlocked(sizeof(struct slot_list) + string_length + 1);
++ if (sl == NULL)
++ return XP_ERROR_FAULT;
++
++ call_xp_atomic_set(&(sl->use_count), 0);
++
++ p1 = reg_name;
++ p2 = sl->reg_name;
++
++ while (*p1)
++ {
++ *p2 = *p1;
++
++ p1++;
++ p2++;
++ }
++ *p2 = 0;
++
++ /* give each slot a unique id */
++ for (i=0 ; i<NUM_SLOTS ; i++)
++ {
++ sl->slots[i].id = i;
++ call_xp_init_mutex(&(sl->slots[i].mutex));
++ }
++
++ /* we need to find an empty slot */
++ for (i=0 ; i<NUM_SLOT_LISTS ; i++)
++ {
++/* DOWN */
++ /* if we are interrupted, we need to cleanup
++ * and return error */
++ if (call_xp_down(&(slot_lists[i].mutex)) != 0)
++ {
++ rsbac_kfree(sl);
++ return XP_ERROR_INTERRUPT;
++ }
++
++ if (slot_lists[i].slot_list == NULL)
++ {
++ slot_lists[i].slot_list = sl;
++
++ call_xp_up(&(slot_lists[i].mutex));
++/* UP */
++ break;
++ }
++
++ call_xp_up(&(slot_lists[i].mutex));
++/* UP */
++ }
++
++ if (i == NUM_SLOT_LISTS)
++ {
++ /* no empty slot :( */
++ rsbac_kfree(sl);
++ return XP_ERROR_BUSY;
++ }
++ }
++
++ /* find an available slot and hold the mutex
++ * if we find one */
++/* DOWN? */
++ s = dazuko_find_slot(NULL, 0, sl);
++
++ if (s == NULL)
++ return XP_ERROR_BUSY;
++
++/* DOWN */
++
++ /* We have found a slot, so increment the active
++ * variable and the kernel module use counter.
++ * The module counter will always reflect the
++ * number of daemons. */
++
++ call_xp_atomic_inc(&active);
++
++ /* get new unique id for this process */
++ did->unique = dazuko_get_new_unique();
++
++ s->did.unique = did->unique;
++ s->did.xp_id = call_xp_id_copy(did->xp_id);
++ s->write_mode = write_mode;
++
++ call_xp_atomic_inc(&(sl->use_count));
++
++ /* the daemon is registered, but not yet
++ * ready to receive files */
++ __dazuko_change_slot_state(s, DAZUKO_FREE, DAZUKO_FREE);
++ rsbac_pr_debug(adf_daz, "slot[%d] assigned to daemon %s [%d]", s->id, reg_name, did->unique);
++ call_xp_up(&(s->mutex));
++/* UP */
++
++ return 0;
++}
++
++static struct slot* dazuko_get_an_access(struct daemon_id *did)
++{
++ /* The daemon is requesting a filename of a file
++ * to scan. This code will wait until a filename
++ * is available, or until we should be killed.
++ * (killing is done if any errors occur as well
++ * as when the user kills us) */
++
++ /* If a slot is returned, it will be already locked! */
++
++ int i;
++ struct slot *s;
++ struct one_slot_state_not_condition_param cond_p;
++
++tryagain:
++ /* find our slot */
++ s = dazuko_find_slot(did, 1, NULL);
++
++ if (s == NULL)
++ {
++ i = dazuko_register_daemon(did, "_COMPAT", 7, 1);
++ if (i != 0)
++ {
++ rsbac_printk(KERN_INFO "dazuko: unregistered daemon %d attempted to get access\n", did->unique);
++ return NULL;
++ }
++
++ s = dazuko_find_slot(did, 1, NULL);
++ if (s == NULL)
++ {
++ rsbac_printk(KERN_INFO "dazuko: unregistered daemon %d attempted to get access\n", did->unique);
++ return NULL;
++ }
++
++ rsbac_printk(KERN_INFO "dazuko: warning: daemon %d is using a deprecated protocol\n", did->unique);
++ }
++
++ /* the daemon is now ready to receive a file */
++ dazuko_change_slot_state(s, DAZUKO_READY, DAZUKO_READY, 1);
++
++ cond_p.slot = s;
++ cond_p.state = DAZUKO_READY;
++ if (call_xp_wait_until_condition(&wait_daemon_waiting_for_work, one_slot_state_not_condition, &cond_p, 1) != 0)
++ {
++ /* The user has issued an interrupt.
++ * Return an error. The daemon should
++ * unregister itself. */
++
++ DPRINT(("dazuko: daemon %d killed while waiting for work\n", did->unique));
++
++ if (dazuko_change_slot_state(s, DAZUKO_READY, DAZUKO_BROKEN, 1) || dazuko_change_slot_state(s, DAZUKO_WAITING, DAZUKO_BROKEN, 1))
++ {
++ call_xp_notify(&wait_kernel_waiting_for_free_slot);
++ call_xp_notify(&wait_kernel_waiting_while_daemon_works);
++ }
++
++ return NULL;
++ }
++
++ /* slot SHOULD now be in DAZUKO_WAITING state */
++
++ /* we will be working with the slot, so
++ * we need to lock it */
++
++/* DOWN? */
++ if (!dazuko_change_slot_state(s, DAZUKO_WAITING, DAZUKO_WORKING, 0))
++ {
++		/* State transition error. Try again. */
++
++ goto tryagain;
++ }
++
++/* DOWN */
++
++ /* Slot IS in DAZUKO_WORKING state. Copy all the
++ * necessary information to userspace structure. */
++
++ /* IMPORTANT: slot is still locked! */
++
++ return s; /* access is available */
++}
++
++static int dazuko_return_access(struct daemon_id *did, int response, struct slot *s)
++{
++ /* The daemon has finished scanning a file
++ * and has the response to give. The daemon's
++ * slot should be in the DAZUKO_WORKING state. */
++
++ struct one_slot_state_not_condition_param cond_p;
++
++ /* do we already have a slot? */
++ if (s == NULL)
++ {
++ /* find our slot */
++ s = dazuko_find_slot(did, 1, NULL);
++
++ if (s == NULL)
++ {
++ /* It appears the kernel isn't interested
++ * in us or our response. It gave our slot away! */
++
++ DPRINT(("dazuko: daemon %d unexpectedly lost slot\n", did->unique));
++
++ return XP_ERROR_PERMISSION;
++ }
++ }
++
++ /* we will be writing into the slot, so we
++ * need to lock it */
++
++/* DOWN? */
++ if (!dazuko_change_slot_state(s, DAZUKO_WORKING, DAZUKO_DONE, 0))
++ {
++ /* The slot is in the wrong state. We will
++ * assume the kernel has cancelled the file
++ * access. */
++
++ DPRINT(("dazuko: response from daemon %d on slot[%d] not needed\n", did->unique, s->id));
++
++ return 0;
++ }
++
++/* DOWN */
++
++ s->response = response;
++
++ call_xp_up(&(s->mutex));
++/* UP */
++
++ /* wake up any kernel processes that are
++ * waiting for responses */
++ call_xp_notify(&wait_kernel_waiting_while_daemon_works);
++
++ cond_p.slot = s;
++ cond_p.state = DAZUKO_DONE;
++ if (call_xp_wait_until_condition(&wait_daemon_waiting_for_free, one_slot_state_not_condition, &cond_p, 1) != 0)
++ {
++ /* The user has issued an interrupt.
++ * Return an error. The daemon should
++ * unregister itself. */
++
++ DPRINT(("dazuko: daemon %d killed while waiting for response acknowledgement\n", did->unique));
++
++ return XP_ERROR_INTERRUPT;
++ }
++
++ return 0;
++}
++
++static inline int dazuko_isdigit(const char c)
++{
++ return (c >= '0' && c <= '9');
++}
++
++static inline long dazuko_strtol(const char *string)
++{
++ long num = 1;
++ const char *p = string;
++
++ if (string == NULL)
++ return 0;
++
++ switch (*p)
++ {
++ case '-':
++ num = -1;
++ p++;
++ break;
++
++ case '+':
++ p++;
++ break;
++ }
++
++ if (dazuko_isdigit(*p))
++ {
++ num *= *p - '0';
++ p++;
++ }
++ else
++ {
++ return 0;
++ }
++
++ while (dazuko_isdigit(*p))
++ {
++ num *= 10;
++ num += *p - '0';
++ p++;
++ }
++
++ return num;
++}
++
++static inline int dazuko_strlen(const char *string)
++{
++ const char *p;
++
++ if (string == NULL)
++ return -1;
++
++ for (p=string ; *p ; p++)
++ continue;
++
++ return (p - string);
++}
++
++static inline const char* dazuko_strchr(const char *haystack, char needle)
++{
++ const char *p;
++
++ if (haystack == NULL)
++ return NULL;
++
++ for (p=haystack ; *p ; p++)
++ {
++ if (*p == needle)
++ return p;
++ }
++
++ return NULL;
++}
++
++static inline const char* dazuko_strstr(const char *haystack, const char *needle)
++{
++ const char *p1;
++ const char *p2;
++ const char *p3;
++
++ if (haystack == NULL || needle == NULL)
++ return NULL;
++
++ for (p1=haystack ; *p1 ; p1++)
++ {
++ for (p2=needle,p3=p1 ; *p2&&*p3 ; p2++,p3++)
++ {
++ if (*p2 != *p3)
++ break;
++ }
++
++ if (*p2 == 0)
++ return p1;
++ }
++
++ return NULL;
++}
++
++int dazuko_get_value(const char *key, const char *string, char **value)
++{
++ const char *p1;
++ const char *p2;
++ int size;
++
++ if (value == NULL)
++ return -1;
++
++ *value = NULL;
++
++ if (key == NULL || string == NULL)
++ return -1;
++
++ p1 = dazuko_strstr(string, key);
++ if (p1 == NULL)
++ return -1;
++
++ p1 += dazuko_strlen(key);
++
++ for (p2=p1 ; *p2 && *p2!='\n' ; p2++)
++ continue;
++
++ size = (p2 - p1) + 1;
++ *value = rsbac_kmalloc_unlocked(size);
++ if (*value == NULL)
++ return -1;
++
++ memcpy(*value, p1, size - 1);
++ (*value)[size - 1] = 0;
++
++ return 0;
++}
++
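++/* Usage sketch for dazuko_get_value(): it copies everything between the key
++ * and the next newline into a freshly allocated buffer that the caller has
++ * to release again, as done for "\nRA=" in linux_dazuko_device_write():
++ *
++ *	char *value;
++ *
++ *	if (dazuko_get_value("\nRA=", tmpbuffer, &value) == 0)
++ *	{
++ *		... use value ...
++ *		rsbac_kfree(value);
++ *	}
++ */
++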
++static inline void dazuko_clear_replybuffer(struct dazuko_request *request)
++{
++ dazuko_bzero(request->reply_buffer, request->reply_buffer_size);
++ request->reply_buffer_size_used = 0;
++}
++
++static inline void dazuko_close_replybuffer(struct dazuko_request *request)
++{
++ request->reply_buffer[request->reply_buffer_size_used] = 0;
++ request->reply_buffer_size_used++;
++}
++
++static inline void dazuko_add_keyvalue_to_replybuffer(struct dazuko_request *request, const char *key, void *value, char vtype)
++{
++
++#define DAZUKO_VSNPRINT(type, name) dazuko_snprintf(request->reply_buffer + request->reply_buffer_size_used, (request->reply_buffer_size - request->reply_buffer_size_used) - 1, "%s%" #type , key, *((name *)value))
++
++ switch (vtype)
++ {
++ case 'd':
++ DAZUKO_VSNPRINT(d, const int);
++ break;
++
++ case 's':
++ DAZUKO_VSNPRINT(s, const char *);
++ break;
++
++ case 'l':
++ DAZUKO_VSNPRINT(lu, const unsigned long);
++ break;
++
++ default:
++ /* all other types treated as chars */
++ DAZUKO_VSNPRINT(c, const char);
++ break;
++ }
++
++ /* update how much buffer we have used */
++ request->reply_buffer_size_used += strlen(request->reply_buffer + request->reply_buffer_size_used);
++}
++
++static inline int dazuko_printable(char c)
++{
++ /* hopefully this counts for all operating systems! */
++
++ return ((c >= ' ') && (c <= '~') && (c != '\\'));
++}
++
++static inline void dazuko_add_esc_to_replybuffer(struct dazuko_request *request, const char *key, char **filename)
++{
++ int found = 0;
++ char *p_rq;
++ const char *limit;
++ const char *p_fn;
++ unsigned char c;
++
++ /* check for escape characters in filename */
++ for (p_fn=*filename ; *p_fn ; p_fn++)
++ {
++ if (!dazuko_printable(*p_fn))
++ {
++ found = 1;
++ break;
++ }
++ }
++
++ if (found)
++ {
++ /* this is expensive, but it will also almost never occur */
++
++ p_rq = request->reply_buffer + request->reply_buffer_size_used;
++ limit = request->reply_buffer + request->reply_buffer_size - 1;
++
++ dazuko_snprintf(p_rq, limit - p_rq, "%s", key);
++ p_rq += strlen(p_rq);
++
++ for (p_fn=*filename ; *p_fn && (p_rq<limit) ; p_fn++)
++ {
++ if (dazuko_printable(*p_fn))
++ {
++ *p_rq = *p_fn;
++ p_rq++;
++ }
++ else
++ {
++ c = *p_fn & 0xFF;
++ dazuko_snprintf(p_rq, limit - p_rq, "\\x%02x", c);
++ p_rq += strlen(p_rq);
++ }
++ }
++
++ request->reply_buffer_size_used += strlen(request->reply_buffer + request->reply_buffer_size_used);
++ }
++ else
++ {
++ /* no escape characters found */
++
++ dazuko_add_keyvalue_to_replybuffer(request, key, filename, 's');
++ }
++}
++
++static int dazuko_set_option(struct daemon_id *did, int opt, void *param, int len)
++{
++ /* The daemon wants to set a configuration
++ * option in the kernel. */
++
++ struct slot *s;
++ int error;
++
++ /* sanity check */
++ if (len < 0 || len > 8192)
++ return XP_ERROR_PERMISSION;
++
++ /* make sure we are already registered
++ * (or that we don't register twice) */
++
++ /* find our slot */
++ s = dazuko_find_slot(did, 1, NULL);
++
++ switch (opt)
++ {
++ case REGISTER:
++ rsbac_printk(KERN_INFO "dazuko: dazuko_set_option does not support REGISTER (bug!)\n");
++ return XP_ERROR_PERMISSION;
++
++ case UNREGISTER:
++ if (s == NULL)
++ {
++ /* We are not registered! */
++
++ return 0;
++ }
++ break;
++
++ default:
++ if (s == NULL)
++ {
++ error = dazuko_register_daemon(did, "_COMPAT", 7, 1);
++ if (error)
++ {
++ rsbac_printk(KERN_INFO "dazuko: unregistered daemon %d attempted access\n", did->unique);
++ return XP_ERROR_PERMISSION;
++ }
++
++ s = dazuko_find_slot(did, 1, NULL);
++ if (s == NULL)
++ {
++ rsbac_printk(KERN_INFO "dazuko: unregistered daemon %d attempted access\n", did->unique);
++ return XP_ERROR_PERMISSION;
++ }
++
++ rsbac_printk(KERN_INFO "dazuko: warning: daemon %d is using a deprecated protocol\n", did->unique);
++ }
++ break;
++ }
++
++ /* check option type and take the appropriate action */
++ switch (opt)
++ {
++ case UNREGISTER:
++ error = _dazuko_unregister_daemon(did);
++ if (error)
++ return error;
++ break;
++
++ case SET_ACCESS_MASK:
++ memcpy(&access_mask, (char *)param, sizeof(char));
++ break;
++
++ case ADD_INCLUDE_PATH:
++ error = dazuko_insert_path_fs(&incl_paths, (char *)param, len);
++ if (error)
++ return error;
++ break;
++
++ case ADD_EXCLUDE_PATH:
++ error = dazuko_insert_path_fs(&excl_paths, (char *)param, len);
++ if (error)
++ return error;
++ break;
++
++ case REMOVE_ALL_PATHS:
++ dazuko_remove_all_paths();
++ break;
++
++ default:
++ rsbac_printk(KERN_INFO "dazuko: daemon %d requested unknown set %d (possible bug)\n", did->unique, opt);
++ break;
++ }
++
++ return 0;
++}
++
++static int dazuko_handle_request(struct dazuko_request *request, struct xp_daemon_id *xp_id)
++{
++ char *value1;
++ char *value2;
++ int error = 0;
++ int type;
++ struct slot *s;
++ struct daemon_id did;
++
++ if (request == NULL || xp_id == NULL)
++ return -1;
++
++ type = request->type[0] + (256 * request->type[1]);
++
++ switch (type)
++ {
++ case REGISTER:
++ /* read "\nRM=regmode\nGN=group" */
++ /* send "\nID=id" */
++
++ if (request->buffer_size <= 0)
++ return -1;
++
++ if (request->reply_buffer_size <= 0)
++ return -1;
++
++ if (dazuko_get_value("\nGN=", request->buffer, &value1) != 0)
++ return -1;
++
++ if (dazuko_get_value("\nRM=", request->buffer, &value2) != 0)
++ {
++ rsbac_kfree(value1);
++ return -1;
++ }
++
++ did.xp_id = call_xp_id_copy(xp_id);
++ did.unique = 0; /* a unique is not yet assigned */
++
++ error = dazuko_register_daemon(&did, value1, dazuko_strlen(value1), dazuko_strchr(value2, 'W') != NULL);
++
++ dazuko_clear_replybuffer(request);
++ dazuko_add_keyvalue_to_replybuffer(request, "\nID=", &(did.unique), 'd');
++ dazuko_close_replybuffer(request);
++
++ rsbac_kfree(value1);
++ rsbac_kfree(value2);
++ call_xp_id_free(did.xp_id);
++
++ break;
++
++ case UNREGISTER:
++ /* read "\nID=id" */
++
++ if (request->buffer_size <= 0)
++ return -1;
++
++ if (dazuko_get_value("\nID=", request->buffer, &value1) != 0)
++ return -1;
++
++ did.xp_id = call_xp_id_copy(xp_id);
++ did.unique = dazuko_strtol(value1);
++
++ error = dazuko_set_option(&did, UNREGISTER, NULL, 0);
++
++ rsbac_kfree(value1);
++ call_xp_id_free(did.xp_id);
++
++ break;
++
++ case SET_ACCESS_MASK:
++ /* read "\nID=id\nAM=mask" */
++
++ if (request->buffer_size <= 0)
++ return -1;
++
++ if (dazuko_get_value("\nID=", request->buffer, &value1) != 0)
++ return -1;
++
++ if (dazuko_get_value("\nAM=", request->buffer, &value2) != 0)
++ {
++ rsbac_kfree(value1);
++ return -1;
++ }
++
++ access_mask = (char)dazuko_strtol(value2);
++
++ rsbac_kfree(value1);
++ rsbac_kfree(value2);
++
++ break;
++
++ case ADD_INCLUDE_PATH:
++ /* read "\nID=id\nPT=path" */
++
++ if (request->buffer_size <= 0)
++ return -1;
++
++ if (dazuko_get_value("\nID=", request->buffer, &value1) != 0)
++ return -1;
++
++ if (dazuko_get_value("\nPT=", request->buffer, &value2) != 0)
++ {
++ rsbac_kfree(value1);
++ return -1;
++ }
++
++ did.xp_id = call_xp_id_copy(xp_id);
++ did.unique = dazuko_strtol(value1);
++
++ error = dazuko_set_option(&did, ADD_INCLUDE_PATH, value2, dazuko_strlen(value2));
++
++ rsbac_kfree(value1);
++ rsbac_kfree(value2);
++ call_xp_id_free(did.xp_id);
++
++ break;
++
++ case ADD_EXCLUDE_PATH:
++ /* read "\nID=id\nPT=path" */
++
++ if (request->buffer_size <= 0)
++ return -1;
++
++ if (dazuko_get_value("\nID=", request->buffer, &value1) != 0)
++ return -1;
++
++ if (dazuko_get_value("\nPT=", request->buffer, &value2) != 0)
++ {
++ rsbac_kfree(value1);
++ return -1;
++ }
++
++ did.xp_id = call_xp_id_copy(xp_id);
++ did.unique = dazuko_strtol(value1);
++
++ error = dazuko_set_option(&did, ADD_EXCLUDE_PATH, value2, dazuko_strlen(value2));
++
++ rsbac_kfree(value1);
++ rsbac_kfree(value2);
++ call_xp_id_free(did.xp_id);
++
++ break;
++
++ case REMOVE_ALL_PATHS:
++ /* read "\nID=id" */
++
++ if (request->buffer_size <= 0)
++ return -1;
++
++ if (dazuko_get_value("\nID=", request->buffer, &value1) != 0)
++ return -1;
++
++ did.xp_id = call_xp_id_copy(xp_id);
++ did.unique = dazuko_strtol(value1);
++
++ error = dazuko_set_option(&did, REMOVE_ALL_PATHS, NULL, 0);
++
++ rsbac_kfree(value1);
++ call_xp_id_free(did.xp_id);
++
++ break;
++
++ case GET_AN_ACCESS:
++ /* read "\nID=id" */
++ /* send "\nFN=file\nFL=flags\nMD=mode\nUI=uid\nPI=pid" */
++
++ if (request->buffer_size <= 0)
++ return -1;
++
++ if (request->reply_buffer_size <= 0)
++ return -1;
++
++ if (dazuko_get_value("\nID=", request->buffer, &value1) != 0)
++ return -1;
++
++ did.xp_id = call_xp_id_copy(xp_id);
++ did.unique = dazuko_strtol(value1);
++
++ rsbac_kfree(value1);
++
++/* DOWN? */
++ s = dazuko_get_an_access(&did);
++
++ if (s == NULL)
++ {
++ call_xp_id_free(did.xp_id);
++ return XP_ERROR_INTERRUPT;
++ }
++/* DOWN */
++
++ /* Slot IS in DAZUKO_WORKING state. Copy all the
++ * necessary information to userspace structure. */
++
++ dazuko_clear_replybuffer(request);
++ dazuko_add_keyvalue_to_replybuffer(request, "\nEV=", &(s->event), 'd');
++ dazuko_add_esc_to_replybuffer(request, "\nFN=", &(s->filename));
++
++ if (s->event_p.set_uid)
++ dazuko_add_keyvalue_to_replybuffer(request, "\nUI=", &(s->event_p.uid), 'd');
++
++ if (s->event_p.set_pid)
++ dazuko_add_keyvalue_to_replybuffer(request, "\nPI=", &(s->event_p.pid), 'd');
++
++ if (s->event_p.set_flags)
++ dazuko_add_keyvalue_to_replybuffer(request, "\nFL=", &(s->event_p.flags), 'd');
++
++ if (s->event_p.set_mode)
++ dazuko_add_keyvalue_to_replybuffer(request, "\nMD=", &(s->event_p.mode), 'd');
++
++ if (s->file_p.set_size)
++ dazuko_add_keyvalue_to_replybuffer(request, "\nFS=", &(s->file_p.size), 'l');
++
++ if (s->file_p.set_uid)
++ dazuko_add_keyvalue_to_replybuffer(request, "\nFU=", &(s->file_p.uid), 'd');
++
++ if (s->file_p.set_gid)
++ dazuko_add_keyvalue_to_replybuffer(request, "\nFG=", &(s->file_p.gid), 'd');
++
++ if (s->file_p.set_mode)
++ dazuko_add_keyvalue_to_replybuffer(request, "\nFM=", &(s->file_p.mode), 'd');
++
++ if (s->file_p.set_device_type)
++ dazuko_add_keyvalue_to_replybuffer(request, "\nDT=", &(s->file_p.device_type), 'd');
++
++ dazuko_close_replybuffer(request);
++
++/* XXX: What do we do if there is a problem copying back to userspace?! */
++/* dazuko_state_error(s, DAZUKO_WORKING); */
++
++ /* are we in read_only mode? */
++ if (!(s->write_mode))
++ {
++ /* the access is immediately (and at the kernel level)
++ * returned */
++
++ call_xp_up(&(s->mutex));
++/* UP */
++
++ dazuko_return_access(&did, 0, s);
++ }
++ else
++ {
++ call_xp_up(&(s->mutex));
++/* UP */
++ }
++
++ call_xp_id_free(did.xp_id);
++
++ break;
++
++ case RETURN_AN_ACCESS:
++ /* read "\nID=id\nDN=deny" */
++
++ if (request->buffer_size <= 0)
++ return -1;
++
++ if (dazuko_get_value("\nID=", request->buffer, &value1) != 0)
++ return -1;
++
++ if (dazuko_get_value("\nDN=", request->buffer, &value2) != 0)
++ {
++ rsbac_kfree(value1);
++ return -1;
++ }
++
++ did.xp_id = call_xp_id_copy(xp_id);
++ did.unique = dazuko_strtol(value1);
++
++ error = dazuko_return_access(&did, dazuko_strtol(value2), NULL);
++
++ rsbac_kfree(value1);
++ rsbac_kfree(value2);
++ call_xp_id_free(did.xp_id);
++
++ break;
++
++ default:
++ rsbac_printk(KERN_INFO "dazuko: daemon made unknown request %d (possible bug)\n", type);
++
++ break;
++ }
++
++ return error;
++}
++
++int dazuko_handle_user_request(struct dazuko_request *user_request, struct xp_daemon_id *xp_id)
++{
++ int error = 0;
++ struct dazuko_request *request;
++ struct dazuko_request *temp_request;
++
++ if (user_request == NULL || xp_id == NULL)
++ return XP_ERROR_FAULT;
++
++ /* allocate kernel request */
++ request = rsbac_smalloc_unlocked(dazuko_request_slab);
++ if (request == NULL)
++ return XP_ERROR_FAULT;
++
++/* use out0 now */
++
++ /* allocate temp kernel request */
++ temp_request = rsbac_smalloc_unlocked(dazuko_request_slab);
++ if (temp_request == NULL)
++ {
++ error = XP_ERROR_FAULT;
++ goto dazuko_handle_user_request_out0;
++ }
++
++/* use out1 now */
++
++ /* copy in the request */
++ if (call_xp_copyin(user_request, temp_request, sizeof(struct dazuko_request)) != 0)
++ {
++ error = XP_ERROR_FAULT;
++ goto dazuko_handle_user_request_out1;
++ }
++
++ memcpy(request->type, temp_request->type, sizeof(char[2]));
++ request->buffer_size = temp_request->buffer_size;
++
++ /* sanity check */
++ if (request->buffer_size < 0 || request->buffer_size > 8192)
++ {
++ error = XP_ERROR_FAULT;
++ goto dazuko_handle_user_request_out1;
++ }
++
++ request->reply_buffer_size = temp_request->reply_buffer_size;
++
++ /* sanity check */
++ if (request->reply_buffer_size < 0 || request->reply_buffer_size > 8192)
++ {
++ error = XP_ERROR_PERMISSION;
++ goto dazuko_handle_user_request_out1;
++ }
++
++ /* allocate buffer */
++ request->buffer = rsbac_kmalloc_unlocked(request->buffer_size + 1);
++ if (request->buffer == NULL)
++ {
++ error = XP_ERROR_FAULT;
++ goto dazuko_handle_user_request_out1;
++ }
++
++/* use out2 now */
++
++ if (request->reply_buffer_size > 0)
++ {
++ /* allocate reply buffer */
++ request->reply_buffer = rsbac_kmalloc_unlocked(request->reply_buffer_size + 1);
++ if (request->reply_buffer == NULL)
++ {
++ error = XP_ERROR_FAULT;
++ goto dazuko_handle_user_request_out2;
++ }
++
++/* use out3 now */
++
++ request->reply_buffer_size_used = 0;
++ }
++
++ /* copy the buffer from userspace to kernelspace */
++ if (call_xp_copyin(temp_request->buffer, request->buffer, request->buffer_size) != 0)
++ {
++ error = XP_ERROR_FAULT;
++ goto dazuko_handle_user_request_out3;
++ }
++
++ request->buffer[request->buffer_size] = 0;
++
++ error = dazuko_handle_request(request, xp_id);
++
++ if (error == 0 && request->reply_buffer_size > 0)
++ {
++ request->reply_buffer[request->reply_buffer_size] = 0;
++
++ temp_request->reply_buffer_size_used = request->reply_buffer_size_used;
++
++ if (call_xp_copyout(temp_request, user_request, sizeof(struct dazuko_request)) != 0)
++ {
++ error = XP_ERROR_FAULT;
++ goto dazuko_handle_user_request_out3;
++ }
++
++ if (request->reply_buffer_size_used > 0)
++ {
++ if (call_xp_copyout(request->reply_buffer, temp_request->reply_buffer, request->reply_buffer_size_used) != 0)
++ {
++ error = XP_ERROR_FAULT;
++ goto dazuko_handle_user_request_out3;
++ }
++ }
++ }
++
++dazuko_handle_user_request_out3:
++ if (request->reply_buffer_size > 0)
++ rsbac_kfree(request->reply_buffer);
++dazuko_handle_user_request_out2:
++ rsbac_kfree(request->buffer);
++dazuko_handle_user_request_out1:
++ rsbac_sfree(dazuko_request_slab, temp_request);
++dazuko_handle_user_request_out0:
++ rsbac_sfree(dazuko_request_slab, request);
++
++ return error;
++}
++
++int dazuko_handle_user_request_compat12(void *ptr, int cmd, struct xp_daemon_id *xp_id)
++{
++ struct access_compat12 *user_request12;
++ struct access_compat12 *temp_request12;
++ int error = 0;
++ struct slot *s;
++ char *k_param;
++ struct daemon_id did;
++ int temp_length;
++ int temp_int;
++
++ if (ptr == NULL || xp_id == NULL)
++ return XP_ERROR_FAULT;
++
++ did.xp_id = call_xp_id_copy(xp_id);
++ did.unique = -1;
++
++ switch (cmd)
++ {
++ case IOCTL_GET_AN_ACCESS:
++ /* The daemon is requesting a filename of a file
++ * to scan. This code will wait until a filename
++ * is available, or until we should be killed.
++ * (killing is done if any errors occur as well
++ * as when the user kills us) */
++
++ user_request12 = (struct access_compat12 *)ptr;
++
++ error = call_xp_verify_user_writable(user_request12, sizeof(struct access_compat12));
++ if (error)
++ {
++ error = XP_ERROR_FAULT;
++ break;
++ }
++
++/* DOWN? */
++ s = dazuko_get_an_access(&did);
++
++ if (s == NULL)
++ {
++ error = XP_ERROR_INTERRUPT;
++ break;
++ }
++
++/* DOWN */
++
++ /* Slot IS in WORKING state. Copy all the
++ * necessary information to userspace structure. */
++
++ if (s->filenamelength >= DAZUKO_FILENAME_MAX_LENGTH_COMPAT12)
++ {
++ /* filename length overflow :( */
++
++ s->filename[DAZUKO_FILENAME_MAX_LENGTH_COMPAT12 - 1] = 0;
++ temp_length = DAZUKO_FILENAME_MAX_LENGTH_COMPAT12;
++ }
++ else
++ {
++ temp_length = s->filenamelength + 1;
++ }
++
++ temp_request12 = rsbac_smalloc_unlocked(access_compat12_slab);
++ if (temp_request12 == NULL)
++ {
++ error = XP_ERROR_FAULT;
++ }
++ else if (call_xp_copyin(user_request12, temp_request12, sizeof(struct access_compat12)) != 0)
++ {
++ error = XP_ERROR_FAULT;
++ }
++
++ if (error == 0)
++ {
++ temp_request12->event = s->event;
++ temp_request12->o_flags = s->event_p.flags;
++ temp_request12->o_mode = s->event_p.mode;
++ temp_request12->uid = s->event_p.uid;
++ temp_request12->pid = s->event_p.pid;
++ memcpy(temp_request12->filename, s->filename, temp_length);
++
++ if (call_xp_copyout(temp_request12, user_request12, sizeof(struct access_compat12)) != 0)
++ {
++ error = XP_ERROR_FAULT;
++ }
++ }
++
++ call_xp_up(&(s->mutex));
++/* UP */
++
++ if (error)
++ {
++ dazuko_state_error(s, DAZUKO_WORKING);
++ }
++
++ if (temp_request12 != NULL)
++ {
++ rsbac_sfree(access_compat12_slab, temp_request12);
++ }
++
++ break;
++
++ case IOCTL_RETURN_ACCESS:
++ /* The daemon has finished scanning a file
++ * and has the response to give. The daemon's
++ * slot should be in the WORKING state. */
++
++ user_request12 = (struct access_compat12 *)ptr;
++
++ error = call_xp_verify_user_readable(user_request12, sizeof(struct access_compat12));
++ if (error)
++ {
++ error = XP_ERROR_FAULT;
++ break;
++ }
++
++ temp_request12 = rsbac_smalloc_unlocked(access_compat12_slab);
++ if (temp_request12 == NULL)
++ {
++ error = XP_ERROR_FAULT;
++ break;
++ }
++
++ if (call_xp_copyin(user_request12, temp_request12, sizeof(struct access_compat12)) != 0)
++ {
++ error = XP_ERROR_FAULT;
++ }
++
++ temp_int = temp_request12->deny;
++
++ rsbac_sfree(access_compat12_slab, temp_request12);
++
++ error = dazuko_return_access(&did, temp_int, NULL);
++ break;
++
++ case IOCTL_SET_OPTION:
++ /* The daemon wants to set a configuration
++ * option in the kernel. */
++
++ error = call_xp_verify_user_readable(ptr, 2*sizeof(int));
++ if (error)
++ {
++ error = XP_ERROR_FAULT;
++ break;
++ }
++
++ /* copy option type from userspace */
++ if (call_xp_copyin(ptr, &temp_int, sizeof(int)) != 0)
++ {
++ error = XP_ERROR_FAULT;
++ break;
++ }
++
++ ptr = ((char *)ptr + sizeof(int));
++
++ /* copy path length from userspace */
++ if (call_xp_copyin(ptr, &temp_length, sizeof(int)) != 0)
++ {
++ error = XP_ERROR_FAULT;
++ break;
++ }
++
++ /* sanity check */
++ if (temp_length < 0 || temp_length > 4096)
++ {
++ error = XP_ERROR_INVALID;
++ break;
++ }
++
++ ptr = ((char *)ptr + sizeof(int));
++
++ error = call_xp_verify_user_readable(ptr, temp_length);
++ if (error)
++ {
++ error = XP_ERROR_FAULT;
++ break;
++ }
++
++ k_param = rsbac_kmalloc_unlocked(temp_length + 1);
++ if (k_param == NULL)
++ {
++ error = XP_ERROR_FAULT;
++ break;
++ }
++
++ /* We must copy the param from userspace to kernelspace. */
++
++ if (call_xp_copyin(ptr, k_param, temp_length) != 0)
++ {
++ rsbac_kfree(k_param);
++ error = XP_ERROR_FAULT;
++ break;
++ }
++
++ k_param[temp_length] = 0;
++
++ if (temp_int == REGISTER)
++ error = dazuko_register_daemon(&did, k_param, temp_length, 1);
++ else
++ error = dazuko_set_option(&did, temp_int, k_param, temp_length);
++
++ rsbac_kfree(k_param);
++
++ break;
++
++ default:
++ rsbac_printk(KERN_INFO "dazuko: daemon requested unknown device_ioctl %d (possible bug)\n", cmd);
++
++ break;
++ }
++
++ call_xp_id_free(did.xp_id);
++
++ return error;
++}
++
++static struct slot * dazuko_get_and_hold_ready_slot(struct slot_list *sl)
++{
++ /* This is a simple search to find a
++ * slot whose state is DAZUKO_READY. This means
++ * it is able to accept work. If a slot
++ * is found, the slot.mutex is held so
++ * it can be filled with work by the caller.
++ * It is the responsibility of the caller
++ * to RELEASE THE MUTEX. */
++
++ int i;
++ struct slot *s;
++
++ for (i=0 ; i<NUM_SLOTS ; i++)
++ {
++ s = &(sl->slots[i]);
++/* DOWN? */
++ if (dazuko_change_slot_state(s, DAZUKO_READY, DAZUKO_WAITING, 0))
++ {
++/* DOWN */
++ return s;
++ }
++ }
++
++ /* we didn't find a slot that is ready for work */
++
++ return NULL;
++}
++
++static int get_ready_slot_condition(void *param)
++{
++ return ((((struct get_ready_slot_condition_param *)param)->slot = dazuko_get_and_hold_ready_slot(((struct get_ready_slot_condition_param *)param)->slotlist)) != NULL
++ || call_xp_atomic_read(&active) == 0
++ || call_xp_atomic_read(&(((struct get_ready_slot_condition_param *)param)->slotlist->use_count)) == 0);
++}
++
++static int dazuko_run_daemon_on_slotlist(int event, char *filename, int filenamelength, struct event_properties *event_p, struct file_properties *file_p, int prev_response, struct slot_list *sl)
++{
++ /* This is the main function called by the kernel
++ * to work with a daemon. */
++
++ int rc;
++ int unique;
++ struct slot *s;
++ struct get_ready_slot_condition_param cond_p1;
++ struct two_slot_state_not_condition_param cond_p2;
++
++begin:
++ /* we initialize the slot value because
++	 * we cannot guarantee that it will be
++ * assigned a new value BEFORE !active
++ * is checked */
++ s = NULL;
++
++ /* wait for a slot to become ready */
++ cond_p1.slotlist = sl;
++ cond_p1.slot = s;
++ if (call_xp_wait_until_condition(&wait_kernel_waiting_for_free_slot, get_ready_slot_condition, &cond_p1, 0) != 0)
++ {
++ /* The kernel process was killed while
++ * waiting for a slot to become ready.
++ * This is fine. */
++
++ DPRINT(("dazuko: kernel process %d killed while waiting for free slot\n", event_p->pid));
++
++ return -1; /* user interrupted */
++ }
++
++ /* Make sure we have a slot. We may have
++ * gotten past the last wait because we
++ * are no longer active. */
++
++ s = cond_p1.slot;
++
++ if (s == NULL)
++ {
++ /* We were no longer active. We don't
++ * need to initiate a daemon. This also
++ * means we never acquired the lock. */
++
++ return 0; /* allow access */
++ }
++
++/* DOWN */
++
++ /* the slot is already locked at this point */
++
++ /* grab the daemon's unique */
++ unique = s->did.unique;
++
++ /* At this point we have a locked slot. It IS
++ * sitting in the DAZUKO_WAITING state, waiting for
++ * us to give it some work. */
++
++ /* set up the slot to do work */
++ s->filename = filename;
++ s->event = event;
++ s->response = prev_response;
++ s->filenamelength = filenamelength;
++
++ if (event_p == NULL)
++ dazuko_bzero(&(s->event_p), sizeof(struct event_properties));
++ else
++ memcpy(&(s->event_p), event_p, sizeof(struct event_properties));
++
++ if (file_p == NULL)
++ dazuko_bzero(&(s->file_p), sizeof(struct file_properties));
++ else
++ memcpy(&(s->file_p), file_p, sizeof(struct file_properties));
++
++ /* we are done modifying the slot */
++ call_xp_up(&(s->mutex));
++/* UP */
++
++ /* wake up any daemons waiting for work */
++ call_xp_notify(&wait_daemon_waiting_for_work);
++
++ /* wait until the daemon is finished with the slot */
++ cond_p2.slot1 = s;
++ cond_p2.state1 = DAZUKO_WAITING;
++ cond_p2.slot2 = s;
++ cond_p2.state2 = DAZUKO_WORKING;
++ if (call_xp_wait_until_condition(&wait_kernel_waiting_while_daemon_works, two_slot_state_not_condition, &cond_p2, 0) != 0)
++ {
++ /* The kernel process was killed while
++ * waiting for a daemon to process the file.
++ * This is fine. */
++
++ DPRINT(("dazuko: kernel process %d killed while waiting for daemon response\n", event_p->pid));
++
++ /* change the slot's state to let the
++ * daemon know we are not interested
++ * in a response */
++ dazuko_change_slot_state(s, DAZUKO_FREE, DAZUKO_FREE, 1);
++
++ return -1; /* user interrupted */
++ }
++
++ /* we are working with the slot, so
++ * we need to lock it */
++/* DOWN */
++ if (call_xp_down(&(s->mutex)) != 0)
++ {
++ return -1; /* user interrupted */
++ }
++
++ /* make sure this is the right daemon */
++ if (s->did.unique != unique)
++ {
++ /* This is a different daemon than
++ * the one we assigned work to.
++ * We need to scan again. */
++ call_xp_up(&(s->mutex));
++/* UP */
++ goto begin;
++ }
++
++ /* The slot should now be in the DAZUKO_DONE state. */
++ if (!__dazuko_change_slot_state(s, DAZUKO_DONE, DAZUKO_FREE))
++ {
++ /* The daemon was killed while scanning.
++ * We need to scan again. */
++
++ call_xp_up(&(s->mutex));
++/* UP */
++ goto begin;
++ }
++
++ /* grab the response */
++ rc = s->response;
++
++ call_xp_up(&(s->mutex));
++/* UP */
++
++ /* CONGRATULATIONS! You successfully completed a full state cycle! */
++
++ return rc;
++}
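For orientation, the cycle completed above walks the slot states defined in dazuko_xp.h roughly as follows; the daemon-side transitions happen in dazuko_get_an_access() and dazuko_return_access(), which live outside the functions shown here:

    DAZUKO_READY   -> DAZUKO_WAITING   kernel claims the slot and fills in the work item
    DAZUKO_WAITING -> DAZUKO_WORKING   daemon fetches the file via GET_AN_ACCESS
    DAZUKO_WORKING -> DAZUKO_DONE      daemon answers via RETURN_AN_ACCESS
    DAZUKO_DONE    -> DAZUKO_FREE      kernel reads the response in the code above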
++
++static int dazuko_run_daemon(int event, char *filename, int filenamelength, struct event_properties *event_p, struct file_properties *file_p)
++{
++ struct slot_list *sl;
++ int i;
++ int rc = 0;
++ int error;
++
++ if (event_p != NULL)
++ {
++ /* we don't want to throw the same event twice */
++ if (event_p->thrown)
++ return 0;
++ event_p->thrown = 1;
++ }
++
++ for (i=0 ; i<NUM_SLOT_LISTS ; i++)
++ {
++/* DOWN */
++ /* if we are interrupted, we report error */
++ if (call_xp_down(&(slot_lists[i].mutex)) != 0)
++ return XP_ERROR_INTERRUPT;
++
++ sl = slot_lists[i].slot_list;
++
++ call_xp_up(&(slot_lists[i].mutex));
++/* UP */
++
++ if (sl != NULL)
++ {
++ error = dazuko_run_daemon_on_slotlist(event, filename, filenamelength, event_p, file_p, rc, sl);
++
++ if (error < 0)
++ {
++ /* most likely user interrupt */
++ rc = error;
++ break;
++ }
++ else if (error > 0)
++ {
++ /* this daemon wants access blocked */
++ rc = 1;
++ }
++ }
++ }
++
++ return rc;
++}
++
++inline int dazuko_is_our_daemon(struct xp_daemon_id *xp_id)
++{
++ /* Check if the current process is one
++ * of the daemons. */
++
++ struct daemon_id did;
++ int ret;
++
++ did.xp_id = call_xp_id_copy(xp_id);
++ did.unique = -1;
++
++ ret = (dazuko_find_slot(&did, 1, NULL) != NULL);
++
++ call_xp_id_free(did.xp_id);
++
++ return ret;
++}
++
++#ifdef CONFIG_RSBAC_DAZ_SELECT
++static int dazuko_is_selected(struct dazuko_file_struct *kfs)
++{
++ /* Check if the given filename (with path) is
++ * under our include directories but not under
++ * the exclude directories. */
++
++ struct dazuko_file_listnode *cur;
++ struct dazuko_path *path;
++ int selected = 0;
++ int use_aliases = 1;
++
++ if (kfs == NULL)
++ return 0;
++
++ /* If we are interrupted here, we will report that
++ * this file is not selected. This will make the
++ * kernel allow normal access. Is this dangerous? */
++/* LOCK */
++ call_xp_read_lock(&lock_lists);
++
++ if (kfs->aliases == NULL && kfs->filename != NULL)
++ {
++ /* extension is not using aliases */
++
++ use_aliases = 0;
++
++ kfs->aliases = rsbac_smalloc_clear(dazuko_file_listnode_slab);
++ if (kfs->aliases == NULL)
++ {
++ rsbac_printk(KERN_WARNING "dazuko: warning: access not controlled (%s)\n", kfs->filename);
++ return 0;
++ }
++
++ kfs->aliases->filename = kfs->filename;
++ kfs->aliases->filename_length = kfs->filename_length;
++ }
++
++ for (cur=kfs->aliases ; cur ; cur=cur->next)
++ {
++ if (cur->filename != NULL && cur->filename_length > 0)
++ {
++ /* check if filename is under our include paths */
++ for (path=incl_paths ; path ; path=path->next)
++ {
++				/* the include item must not be longer than the given filename */
++ if (path->len <= cur->filename_length)
++ {
++ /* the include item should match the beginning of the given filename */
++ if (memcmp(path->path, cur->filename, path->len) == 0)
++ {
++ kfs->filename = cur->filename;
++ kfs->filename_length = cur->filename_length;
++
++ selected = 1;
++ break;
++ }
++ }
++ }
++
++ /* If we didn't find a path, it isn't in our
++ * include directories. It can't be one of
++ * the selected files to scan. */
++ if (!selected)
++ {
++ continue;
++ }
++
++ /* check if filename is under our exclude paths */
++ for (path=excl_paths ; path ; path=path->next)
++ {
++				/* the exclude item must not be longer than the given filename */
++ if (path->len <= cur->filename_length)
++ {
++ /* the exclude item should match the beginning of the given filename */
++ if (memcmp(path->path, cur->filename, path->len) == 0)
++ {
++ kfs->filename = NULL;
++ kfs->filename_length = 0;
++
++ selected = 0;
++ break;
++ }
++ }
++ }
++
++ /* If we are still selected, then we can stop. */
++ if (selected)
++ break;
++ }
++ }
++
++ call_xp_read_unlock(&lock_lists);
++/* UNLOCK */
++
++ if (!use_aliases)
++ {
++ rsbac_sfree(dazuko_file_listnode_slab, kfs->aliases);
++ kfs->aliases = NULL;
++ }
++
++ return selected;
++}
++#endif
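The selection logic above reduces to a byte-prefix comparison: a path list entry matches when it is no longer than the filename and equals its leading bytes, and a matching exclude entry overrides a matching include entry. A stand-alone userspace sketch of that rule with made-up paths (not part of the module):

    #include <stdio.h>
    #include <string.h>

    /* same test as the memcmp()-based checks in dazuko_is_selected() */
    static int prefix_matches(const char *item, const char *filename)
    {
        size_t len = strlen(item);

        return len <= strlen(filename) && memcmp(item, filename, len) == 0;
    }

    int main(void)
    {
        /* include list: "/home", exclude list: "/home/tmp" */
        printf("%d\n", prefix_matches("/home", "/home/user/mail.mbox"));     /* 1: selected */
        printf("%d\n", prefix_matches("/home/tmp", "/home/user/mail.mbox")); /* 0: exclude does not apply */
        return 0;
    }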
++
++static int dazuko_add_hash(struct xp_file *file, char *filename, int len)
++{
++ /* Add the given file and filename to the linked list
++ * of files to scan once they are closed. */
++
++ struct hash *h;
++
++ /* create a new struct hash structure making room for name also */
++ h = rsbac_kmalloc_unlocked(sizeof(struct hash) + len + 1);
++ if (h == NULL)
++ return XP_ERROR_FAULT;
++
++ /* fill in structure items */
++
++ call_xp_copy_file(&(h->file), file);
++ h->dirty = 0;
++ h->namelen = len;
++ memcpy(h->name, filename, len);
++ h->name[len] = 0;
++
++ /* add the new struct hash item to the head of the
++ * struct hash linked list */
++
++/* LOCK */
++ call_xp_write_lock(&lock_hash);
++ h->next = hash;
++ hash = h;
++ call_xp_write_unlock(&lock_hash);
++/* UNLOCK */
++ return 0;
++}
++
++/* Code based on code from: Swade 12/08/02: Move dirty to end of list */
++static void dazuko_mark_hash_dirty(struct xp_file *file)
++{
++ struct hash *h = NULL;
++ struct hash *entry = NULL;
++ struct hash *prev = NULL;
++ struct hash *prev_entry = NULL;
++
++/* LOCK */
++ call_xp_write_lock(&lock_hash);
++
++ for (h=hash ; h ; h=h->next)
++ {
++ /* not found if hit first dirty entry */
++ if (h->dirty)
++ {
++ entry = NULL;
++ break;
++ }
++
++ if (call_xp_compare_file(&(h->file), file) == 0)
++ {
++ /* we found the entry */
++
++ prev_entry = prev;
++ entry = h;
++ break;
++ }
++
++ prev = h;
++ }
++
++ if (entry)
++ {
++ if (!entry->dirty)
++ {
++ /* mark as dirty */
++ entry->dirty = 1;
++
++ /* If we already are last entry or next
++ * entry dirty, we don't need to move */
++
++ if (entry->next)
++ {
++ if (!entry->next->dirty)
++ {
++ for (h=entry->next ; h ; h=h->next)
++ {
++ if (h->dirty)
++ break;
++
++ prev = h;
++ }
++
++ /* remove from current position */
++ if (prev_entry)
++ prev_entry->next = entry->next;
++ else
++ hash = entry->next;
++
++ if (prev == NULL)
++ {
++ /* insert as first item */
++ entry->next = hash;
++ hash = entry;
++ }
++ else if (h)
++ {
++ /* insert before h (after prev) */
++ entry->next = prev->next;
++ prev->next = entry;
++ }
++ else
++ {
++ /* insert as last item (after prev) */
++ entry->next = NULL;
++ prev->next = entry;
++ }
++ }
++ }
++ }
++ }
++
++ call_xp_write_unlock(&lock_hash);
++/* UNLOCK */
++
++}
++
++static struct hash *dazuko_get_hash(struct xp_file *file)
++{
++ /* Find the given file within our list
++ * and then remove it from the list and
++ * return it. */
++
++ struct hash *prev;
++ struct hash *cur;
++
++/* LOCK */
++ call_xp_write_lock(&lock_hash);
++
++ prev = NULL;
++ cur = hash;
++ while (cur)
++ {
++ if (call_xp_compare_file(&(cur->file), file) == 0)
++ {
++ /* we found the entry */
++
++ /* remove the item from the list */
++ if (!prev)
++ hash = cur->next;
++ else
++ prev->next = cur->next;
++ break;
++ }
++
++ prev = cur;
++ cur = cur->next;
++ }
++
++ call_xp_write_unlock(&lock_hash);
++/* UNLOCK */
++
++ return cur;
++}
++
++static int dazuko_should_scan(struct dazuko_file_struct *kfs)
++{
++ /* Check if we are supposed to scan this file.
++ * This checks for all the correct file types,
++ * permissions, and if it is within the desired
++ * paths to scan. */
++
++ int result = 0;
++
++ /* check if we already know if we scan this file */
++ switch (kfs->should_scan)
++ {
++ /* case 0 means that we do not know yet. This is a little
++ * confusing, because 0 represents uninitialized. However,
++ * the should_scan variable is used in this function ONLY
++ * so this optimization shouldn't cause any problems. */
++
++ case 1:
++ /* we already know it should be scanned */
++ return 1;
++
++ case 2:
++ /* we already know it should not be scanned */
++			return 0;
++ }
++
++ /* make necessary platform-dependent checks */
++ if (call_xp_fill_file_struct(kfs) == 0)
++ {
++#ifdef CONFIG_RSBAC_DAZ_SELECT
++ if (dazuko_is_selected(kfs))
++ {
++#endif
++ /* If we made it this far, we are supposed
++ * to scan this file. We mark it so that
++ * any further immediate inquiries don't have
++ * to do all this work all over again. */
++
++ /* yes, should be scanned */
++ kfs->should_scan = 1;
++
++ result = 1;
++#ifdef CONFIG_RSBAC_DAZ_SELECT
++ }
++ else
++ {
++ /* We will still mark it so that any further
++ * immediate inquiries don't have to do all
++ * this work all over again. */
++
++ /* no, should not be scanned */
++ kfs->should_scan = 2;
++ }
++#endif
++ }
++
++ return result;
++}
++
++inline int dazuko_sys_check(unsigned long event, int daemon_is_allowed, struct xp_daemon_id *xp_id)
++{
++ /* is this event in our mask? */
++ switch (event)
++ {
++ case DAZUKO_ON_OPEN:
++ /* this is a special case because the on_close information needs
++ * to be saved during the on_open event */
++
++ if ((SCAN_ON_OPEN || SCAN_ON_CLOSE || SCAN_ON_CLOSE_MODIFIED) == 0)
++ return -1;
++ break;
++
++ case DAZUKO_ON_CLOSE:
++ /* will need to scan if ON_CLOSE_MODIFIED is in the mask too */
++
++ if ((SCAN_ON_CLOSE || SCAN_ON_CLOSE_MODIFIED) == 0)
++ return -2;
++ break;
++
++ default:
++ if ((access_mask & event) == 0)
++ return -3;
++ break;
++ }
++
++ /* do we have any daemons? */
++ if (call_xp_atomic_read(&active) <= 0)
++ return -4;
++
++ /* should daemons be allowed this event without a scan? */
++ if (daemon_is_allowed)
++ {
++ if (dazuko_is_our_daemon(xp_id))
++ {
++			/* this is one of our daemons, so we will report
++			 * as if this event was not in the mask */
++
++ return -5;
++ }
++ }
++
++ return 0;
++}
++
++inline int dazuko_sys_pre(unsigned long event, struct dazuko_file_struct *kfs, struct xp_file *file, struct event_properties *event_p)
++{
++ /* return codes:
++	 *   XP_ERROR_PERMISSION -> access should be blocked
++	 *   XP_ERROR_INTERRUPT -> access should be blocked (because user interrupted)
++	 *    0 -> access is allowed
++	 *    2 -> unscanned access should not be taken care of
++ */
++
++ int error = 0;
++ struct hash *h = NULL;
++
++ switch (event)
++ {
++ case DAZUKO_ON_OPEN:
++ /* special case, because this pre may be called
++ * in order to record ON_CLOSE events (in post) */
++
++ if (!SCAN_ON_OPEN)
++ return 2;
++ break;
++
++ case DAZUKO_ON_CLOSE:
++ /* handled in post */
++
++ return 2;
++
++ case DAZUKO_ON_CLOSE_MODIFIED:
++ /* (this is really sys_write) always permitted */
++
++ return 2;
++
++ default:
++ break;
++ }
++
++ if (kfs == NULL)
++ {
++ /* kfs is required */
++
++ rsbac_printk(KERN_WARNING "dazuko: kfs=NULL (possible bug)\n");
++
++ return XP_ERROR_PERMISSION;
++ }
++
++ if (file != NULL)
++ {
++ /* we search for the file descriptor first */
++
++/* LOCK */
++ call_xp_read_lock(&lock_hash);
++
++ for (h=hash ; h ; h=h->next)
++ {
++ if (call_xp_compare_file(&(h->file), file) == 0)
++ {
++ /* we found the file descriptor */
++
++ kfs->filename = rsbac_kmalloc_unlocked(h->namelen + 1);
++ if (kfs->filename != NULL)
++ {
++ memcpy(kfs->filename, h->name, h->namelen);
++ kfs->filename[h->namelen] = 0;
++ kfs->filename_length = h->namelen;
++ kfs->should_scan = 1;
++ }
++ else
++ {
++ /* error allocating, so we get out */
++ h = NULL;
++ }
++ break;
++ }
++ }
++
++ call_xp_read_unlock(&lock_hash);
++/* UNLOCK */
++
++ if (h == NULL && kfs->extra_data == NULL)
++ {
++ /* we don't know this file descriptor
++ * and we cannot fallback on name lookups
++ */
++
++ /* we should not scan this file */
++ kfs->should_scan = 2;
++
++ return 0;
++ }
++ }
++
++ /* make sure we should scan this file */
++ if (dazuko_should_scan(kfs))
++ error = dazuko_run_daemon(event, kfs->filename, kfs->filename_length, event_p, &(kfs->file_p));
++ else
++ return 2;
++
++ if (error > 0)
++ {
++ /* access will be blocked */
++
++ /* dazuko_sys_post should NOT be called! */
++
++ return XP_ERROR_PERMISSION;
++ }
++ else if (error < 0)
++ {
++ /* user interrupted */
++
++ /* dazuko_sys_post should NOT be called! */
++
++ return XP_ERROR_INTERRUPT;
++ }
++
++ /* access allowed */
++
++ return 0;
++}
++
++inline int dazuko_sys_post(unsigned long event, struct dazuko_file_struct *kfs, struct xp_file *file, struct event_properties *event_p)
++{
++ struct hash *h = NULL;
++
++ switch (event)
++ {
++ case DAZUKO_ON_OPEN: /* kfs,file required */
++ /* if the file was opened and we are interested
++ * in scanning on close, add this file to our struct hash list */
++
++ if ((call_xp_atomic_read(&active) > 0) && file != NULL && kfs != NULL)
++ {
++ if (SCAN_ON_OPEN || SCAN_ON_CLOSE || SCAN_ON_CLOSE_MODIFIED)
++ {
++ /* make sure we should scan this file */
++ if (dazuko_should_scan(kfs))
++ {
++ /* hash is added if we were given an xp_file */
++ if (file != NULL)
++ dazuko_add_hash(file, kfs->filename, kfs->filename_length);
++
++ /* this is a fallback in case we didn't process the event in "sys_pre" */
++ dazuko_run_daemon(event, kfs->filename, kfs->filename_length, event_p, &(kfs->file_p));
++ }
++ }
++ }
++ break;
++
++ case DAZUKO_ON_CLOSE: /* file,o_flags,o_mode,pid,uid required */
++ if (file != NULL)
++ {
++ /* find hash entry and remove it from list */
++ h = dazuko_get_hash(file);
++
++ /* if we found the file in our list and the file was
++ * successfully closed, we need to scan it */
++ if (h != NULL)
++ {
++ /* determine if we are scanning on close or close_modified */
++
++ /* note that close_modified has priority over just close */
++
++ if (SCAN_ON_CLOSE_MODIFIED && h->dirty)
++ dazuko_run_daemon(DAZUKO_ON_CLOSE_MODIFIED, h->name, h->namelen, event_p, NULL);
++ else if (SCAN_ON_CLOSE)
++ dazuko_run_daemon(DAZUKO_ON_CLOSE, h->name, h->namelen, event_p, NULL);
++
++ /* clean up the struct hash structure */
++ rsbac_kfree(h);
++ }
++ }
++ else
++ {
++ if (SCAN_ON_CLOSE)
++ {
++ if (dazuko_should_scan(kfs))
++ {
++ dazuko_run_daemon(DAZUKO_ON_CLOSE, kfs->filename, kfs->filename_length, event_p, &(kfs->file_p));
++ }
++ }
++ }
++ break;
++
++ case DAZUKO_ON_CLOSE_MODIFIED: /* file required */
++ if (file != NULL)
++ {
++ /* if we actually wrote something and we found the
++ * file in our list, set it as dirty */
++
++ /* Swade 4/24/02: Move to end of clean list */
++ dazuko_mark_hash_dirty(file);
++ }
++ break;
++
++ default:
++ break;
++ }
++
++ return 0;
++}
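Taken together, a platform layer is expected to call these hooks in a fixed order around the intercepted operation. A rough sketch for an ON_OPEN event, assuming the platform code supplies xp_id, kfs, file and ev and maps the XP_ERROR_* values to its own error convention (example_open_hook is a name made up for this sketch):

    int example_open_hook(struct xp_daemon_id *xp_id, struct dazuko_file_struct *kfs,
                          struct xp_file *file, struct event_properties *ev)
    {
        int err;

        /* event not in the access mask, no daemons registered, or caller is a daemon: allow */
        if (dazuko_sys_check(DAZUKO_ON_OPEN, 1, xp_id) != 0)
            return 0;

        err = dazuko_sys_pre(DAZUKO_ON_OPEN, kfs, file, ev);
        if (err != 0 && err != 2)
            return err;    /* denied or interrupted; dazuko_sys_post() must not be called */

        /* ... the platform code lets the open proceed here ... */

        return dazuko_sys_post(DAZUKO_ON_OPEN, kfs, file, ev);
    }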
++
++inline int dazuko_init(void)
++{
++ int i;
++ int error;
++
++#ifdef CONFIG_RSBAC_DAZ_SELECT
++ dazuko_file_listnode_slab = rsbac_slab_create("rsbac_dazuko_file_listnode",
++ sizeof(struct dazuko_file_listnode));
++#endif
++ dazuko_request_slab = rsbac_slab_create("rsbac_dazuko_request",
++ sizeof(struct dazuko_request));
++ access_compat12_slab = rsbac_slab_create("rsbac_dazuko_access_compat12",
++ sizeof(struct access_compat12));
++
++ call_xp_init_mutex(&mutex_unique_count);
++
++ call_xp_init_rwlock(&lock_hash);
++ call_xp_init_rwlock(&lock_lists);
++
++ call_xp_init_queue(&wait_kernel_waiting_for_free_slot);
++ call_xp_init_queue(&wait_daemon_waiting_for_work);
++ call_xp_init_queue(&wait_kernel_waiting_while_daemon_works);
++ call_xp_init_queue(&wait_daemon_waiting_for_free);
++
++ dazuko_bzero(&slot_lists, sizeof(slot_lists));
++
++ for (i=0 ; i<NUM_SLOT_LISTS ; i++)
++ call_xp_init_mutex(&(slot_lists[i].mutex));
++
++ call_xp_atomic_set(&active, 0);
++
++ error = call_xp_sys_hook();
++
++ if (error == 0)
++ rsbac_printk(KERN_INFO "dazuko: loaded, version=%s\n", VERSION);
++
++ return error;
++}
++
++inline int dazuko_exit(void)
++{
++ int error;
++ int i;
++ int j;
++
++ i = call_xp_atomic_read(&active);
++
++ if (i != 0)
++ {
++ rsbac_printk(KERN_INFO "dazuko: warning: trying to remove Dazuko with %d process%s still registered\n", i, i==1 ? "" : "es");
++ return -1;
++ }
++
++ dazuko_remove_all_paths();
++ dazuko_remove_all_hash();
++
++ error = call_xp_sys_unhook();
++
++ if (error == 0)
++ {
++ call_xp_destroy_mutex(&mutex_unique_count);
++
++ call_xp_destroy_rwlock(&lock_hash);
++ call_xp_destroy_rwlock(&lock_lists);
++
++ call_xp_destroy_queue(&wait_kernel_waiting_for_free_slot);
++ call_xp_destroy_queue(&wait_daemon_waiting_for_work);
++ call_xp_destroy_queue(&wait_kernel_waiting_while_daemon_works);
++ call_xp_destroy_queue(&wait_daemon_waiting_for_free);
++
++ for (i=0 ; i<NUM_SLOT_LISTS ; i++)
++ {
++ if (slot_lists[i].slot_list != NULL)
++ {
++ if (call_xp_atomic_read(&(slot_lists[i].slot_list->use_count)) != 0)
++ rsbac_printk(KERN_WARNING "dazuko: slot_list count was not 0 (possible bug)\n");
++
++ for (j=0 ; j<NUM_SLOTS ; j++)
++ {
++ call_xp_destroy_mutex(&(slot_lists[i].slot_list->slots[j].mutex));
++ }
++
++ rsbac_kfree(slot_lists[i].slot_list);
++ slot_lists[i].slot_list = NULL;
++ }
++
++ call_xp_destroy_mutex(&(slot_lists[i].mutex));
++ }
++
++ rsbac_printk(KERN_INFO "dazuko: unloaded, version=%s\n", VERSION);
++ }
++
++ return error;
++}
+diff --git a/rsbac/adf/daz/dazuko_xp.h b/rsbac/adf/daz/dazuko_xp.h
+new file mode 100644
+index 0000000..d755c87
+--- /dev/null
++++ b/rsbac/adf/daz/dazuko_xp.h
+@@ -0,0 +1,225 @@
++/* DazukoXP. Allow cross platform file access control for 3rd-party applications.
++ Written by John Ogness <jogness@antivir.de>
++
++ Copyright (c) 2002, 2003, 2004, 2005 H+BEDV Datentechnik GmbH
++ All rights reserved.
++
++ Redistribution and use in source and binary forms, with or without
++ modification, are permitted provided that the following conditions
++ are met:
++
++ 1. Redistributions of source code must retain the above copyright notice,
++ this list of conditions and the following disclaimer.
++
++ 2. Redistributions in binary form must reproduce the above copyright notice,
++ this list of conditions and the following disclaimer in the documentation
++ and/or other materials provided with the distribution.
++
++ 3. Neither the name of Dazuko nor the names of its contributors may be used
++ to endorse or promote products derived from this software without specific
++ prior written permission.
++
++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
++ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ POSSIBILITY OF SUCH DAMAGE.
++*/
++
++#ifndef DAZUKO_XP_H
++#define DAZUKO_XP_H
++
++#define VERSION "2.0.5"
++
++#include "dazukoio_xp.h"
++
++/* various requests */
++#define SET_ACCESS_MASK 0
++#define ADD_INCLUDE_PATH 1
++#define ADD_EXCLUDE_PATH 2
++#define REGISTER 3
++#define REMOVE_ALL_PATHS 4
++#define UNREGISTER 5
++#define GET_AN_ACCESS 6
++#define RETURN_AN_ACCESS 7
++
++/* slot states */
++#define DAZUKO_FREE 0 /* the daemon is not ready */
++#define DAZUKO_READY 1 /* a daemon waits for something to do */
++#define DAZUKO_WAITING 2 /* a request is waiting to be served */
++#define DAZUKO_WORKING 3 /* daemon is currently in action */
++#define DAZUKO_DONE 4 /* daemon response is available */
++#define DAZUKO_BROKEN 5 /* invalid state (interrupt from ready,waiting) */
++
++/* file types */
++#define DAZUKO_NONE 0
++#define DAZUKO_REGULAR 1
++#define DAZUKO_DIRECTORY 2
++#define DAZUKO_LINK 3
++
++
++/*********************************************************
++ * structures that MUST be implemented by platform-layer *
++ *********************************************************/
++
++/*
++struct xp_file;
++struct xp_mutex;
++struct xp_atomic;
++struct xp_file_struct;
++struct xp_queue;
++struct xp_rwlock;
++struct xp_daemon_id;
++*/
++
++
++/******************************************
++ * structures available to platform-layer *
++ ******************************************/
++
++struct event_properties
++{
++ int thrown;
++
++ int flags;
++ char set_flags;
++ int mode;
++ char set_mode;
++ int uid;
++ char set_uid;
++ int pid;
++ char set_pid;
++};
++
++struct file_properties
++{
++ unsigned long size;
++ char set_size;
++ int uid;
++ char set_uid;
++ int gid;
++ char set_gid;
++ int mode;
++ char set_mode;
++ int device_type;
++ char set_device_type;
++ int type;
++ char set_type;
++};
++
++struct dazuko_file_listnode
++{
++ char *filename;
++ int filename_length;
++ struct dazuko_file_listnode *next;
++};
++
++struct dazuko_file_struct
++{
++ /* A structure designed for simple and
++ * intelligent memory management when
++ * doing filename lookups in the kernel. */
++
++ int should_scan; /* already know we need to scan? */
++ char *filename; /* filename to report (pointer in alias list) */
++ int filename_length; /* length of filename reported */
++ struct dazuko_file_listnode *aliases; /* list of file names (alias names) */
++ struct file_properties file_p; /* properties of file */
++	struct xp_file_struct *extra_data;	/* extra platform-dependent data */
++};
++
++
++/********************************************************
++ * functions that MUST be implemented by platform-layer *
++ ********************************************************/
++
++/* mutex */
++int xp_init_mutex(struct xp_mutex *mutex);
++int xp_down(struct xp_mutex *mutex);
++int xp_up(struct xp_mutex *mutex);
++int xp_destroy_mutex(struct xp_mutex *mutex);
++
++/* read-write lock */
++int xp_init_rwlock(struct xp_rwlock *rwlock);
++int xp_write_lock(struct xp_rwlock *rwlock);
++int xp_write_unlock(struct xp_rwlock *rwlock);
++int xp_read_lock(struct xp_rwlock *rlock);
++int xp_read_unlock(struct xp_rwlock *rlock);
++int xp_destroy_rwlock(struct xp_rwlock *rwlock);
++
++/* wait-notify queue */
++int xp_init_queue(struct xp_queue *queue);
++int xp_wait_until_condition(struct xp_queue *queue, int (*cfunction)(void *), void *cparam, int allow_interrupt);
++int xp_notify(struct xp_queue *queue);
++int xp_destroy_queue(struct xp_queue *queue);
++
++/* memory */
++void* xp_malloc(size_t size);
++int xp_free(void *ptr);
++int xp_copyin(const void *user_src, void *kernel_dest, size_t size);
++int xp_copyout(const void *kernel_src, void *user_dest, size_t size);
++int xp_verify_user_writable(const void *user_ptr, size_t size);
++int xp_verify_user_readable(const void *user_ptr, size_t size);
++
++/* path attribute */
++int xp_is_absolute_path(const char *path);
++
++/* atomic */
++int xp_atomic_set(struct xp_atomic *atomic, int value);
++int xp_atomic_inc(struct xp_atomic *atomic);
++int xp_atomic_dec(struct xp_atomic *atomic);
++int xp_atomic_read(struct xp_atomic *atomic);
++
++/* file descriptor */
++int xp_copy_file(struct xp_file *dest, struct xp_file *src);
++int xp_compare_file(struct xp_file *file1, struct xp_file *file2);
++
++/* system hook */
++int xp_sys_hook(void);
++int xp_sys_unhook(void);
++
++/* file structure */
++int xp_fill_file_struct(struct dazuko_file_struct *dfs);
++
++/* daemon id */
++int xp_id_compare(struct xp_daemon_id *id1, struct xp_daemon_id *id2);
++int xp_id_free(struct xp_daemon_id *id);
++struct xp_daemon_id* xp_id_copy(struct xp_daemon_id *id);
++
++/* output */
++int xp_print(const char *fmt, ...);
++
++/* debug */
++#ifdef DEBUG
++#define DPRINT(fmt) xp_print fmt
++#else
++#define DPRINT(fmt)
++#endif
++
++
++/*****************************************
++ * functions available to platform-layer *
++ *****************************************/
++
++int dazuko_vsnprintf(char *str, size_t size, const char *format, va_list ap);
++int dazuko_snprintf(char *str, size_t size, const char *format, ...);
++int dazuko_is_our_daemon(struct xp_daemon_id *xp_id);
++int dazuko_get_value(const char *key, const char *string, char **value);
++int dazuko_unregister_daemon(struct xp_daemon_id *xp_id);
++int dazuko_handle_user_request(struct dazuko_request *user_request, struct xp_daemon_id *xp_id);
++int dazuko_handle_user_request_compat12(void *ptr, int cmd, struct xp_daemon_id *xp_id);
++int dazuko_get_filename_length(char *filename);
++void dazuko_bzero(void *p, int len);
++int dazuko_sys_check(unsigned long event, int daemon_is_allowed, struct xp_daemon_id *xp_id);
++int dazuko_sys_pre(unsigned long event, struct dazuko_file_struct *kfs, struct xp_file *file, struct event_properties *event_p);
++int dazuko_sys_post(unsigned long event, struct dazuko_file_struct *kfs, struct xp_file *file, struct event_properties *event_p);
++int dazuko_init(void);
++int dazuko_exit(void);
++
++#endif
+diff --git a/rsbac/adf/daz/dazukoio.h b/rsbac/adf/daz/dazukoio.h
+new file mode 100644
+index 0000000..9c8ea39
+--- /dev/null
++++ b/rsbac/adf/daz/dazukoio.h
+@@ -0,0 +1,96 @@
++/* Dazuko Interface. Interface with Dazuko for file access control.
++ Written by John Ogness <jogness@antivir.de>
++
++ Copyright (c) 2002, 2003, 2004 H+BEDV Datentechnik GmbH
++ All rights reserved.
++
++ Redistribution and use in source and binary forms, with or without
++ modification, are permitted provided that the following conditions
++ are met:
++
++ 1. Redistributions of source code must retain the above copyright notice,
++ this list of conditions and the following disclaimer.
++
++ 2. Redistributions in binary form must reproduce the above copyright notice,
++ this list of conditions and the following disclaimer in the documentation
++ and/or other materials provided with the distribution.
++
++ 3. Neither the name of Dazuko nor the names of its contributors may be used
++ to endorse or promote products derived from this software without specific
++ prior written permission.
++
++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
++ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ POSSIBILITY OF SUCH DAMAGE.
++*/
++
++#ifndef DAZUKOIO_H
++#define DAZUKOIO_H
++
++/* event types */
++#define DAZUKO_ON_OPEN 1
++#define DAZUKO_ON_CLOSE 2
++#define DAZUKO_ON_EXEC 4
++#define DAZUKO_ON_CLOSE_MODIFIED 8
++#define DAZUKO_ON_UNLINK 16
++#define DAZUKO_ON_RMDIR 32
++
++struct dazuko_access
++{
++ int deny;
++ int event;
++ char set_event;
++ int flags;
++ char set_flags;
++ int mode;
++ char set_mode;
++ int uid;
++ char set_uid;
++ int pid;
++ char set_pid;
++ char *filename;
++ char set_filename;
++ unsigned long file_size;
++ char set_file_size;
++ int file_uid;
++ char set_file_uid;
++ int file_gid;
++ char set_file_gid;
++ int file_mode;
++ char set_file_mode;
++ int file_device;
++ char set_file_device;
++};
++
++struct dazuko_id;
++typedef struct dazuko_id dazuko_id_t;
++
++/* single-threaded API */
++int dazukoRegister(const char *groupName, const char *mode);
++int dazukoSetAccessMask(unsigned long accessMask);
++int dazukoAddIncludePath(const char *path);
++int dazukoAddExcludePath(const char *path);
++int dazukoRemoveAllPaths(void);
++int dazukoGetAccess(struct dazuko_access **acc);
++int dazukoReturnAccess(struct dazuko_access **acc);
++int dazukoUnregister(void);
++
++/* thread-safe API (as long as each thread has its own "dazuko_id_t") */
++int dazukoRegister_TS(dazuko_id_t **dazuko, const char *groupName, const char *mode);
++int dazukoSetAccessMask_TS(dazuko_id_t *dazuko, unsigned long accessMask);
++int dazukoAddIncludePath_TS(dazuko_id_t *dazuko, const char *path);
++int dazukoAddExcludePath_TS(dazuko_id_t *dazuko, const char *path);
++int dazukoRemoveAllPaths_TS(dazuko_id_t *dazuko);
++int dazukoGetAccess_TS(dazuko_id_t *dazuko, struct dazuko_access **acc);
++int dazukoReturnAccess_TS(dazuko_id_t *dazuko, struct dazuko_access **acc);
++int dazukoUnregister_TS(dazuko_id_t **dazuko);
++
++#endif
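For orientation, a minimal daemon built on the single-threaded API declared above could look like the sketch below; the group name, mode string, event mask and paths are arbitrary examples, and a real daemon adds error and signal handling:

    #include <stdio.h>
    #include "dazukoio.h"

    int main(void)
    {
        struct dazuko_access *acc;

        if (dazukoRegister("ExampleScanner", "r+") != 0)    /* "r+" assumed to request read-write mode */
            return 1;

        dazukoSetAccessMask(DAZUKO_ON_OPEN | DAZUKO_ON_CLOSE);
        dazukoAddIncludePath("/home");
        dazukoAddExcludePath("/home/tmp");

        while (dazukoGetAccess(&acc) == 0)
        {
            if (acc->set_filename)
                printf("event %d on %s\n", acc->event, acc->filename);

            acc->deny = 0;    /* never block anything in this sketch */

            if (dazukoReturnAccess(&acc) != 0)
                break;
        }

        dazukoUnregister();
        return 0;
    }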
+diff --git a/rsbac/adf/daz/dazukoio_xp.h b/rsbac/adf/daz/dazukoio_xp.h
+new file mode 100644
+index 0000000..4f8ccf0
+--- /dev/null
++++ b/rsbac/adf/daz/dazukoio_xp.h
+@@ -0,0 +1,100 @@
++/* DazukoXP Interface. Interface with Dazuko for file access control.
++ Written by John Ogness <jogness@antivir.de>
++
++ Copyright (c) 2002, 2003, 2004 H+BEDV Datentechnik GmbH
++ All rights reserved.
++
++ Redistribution and use in source and binary forms, with or without
++ modification, are permitted provided that the following conditions
++ are met:
++
++ 1. Redistributions of source code must retain the above copyright notice,
++ this list of conditions and the following disclaimer.
++
++ 2. Redistributions in binary form must reproduce the above copyright notice,
++ this list of conditions and the following disclaimer in the documentation
++ and/or other materials provided with the distribution.
++
++ 3. Neither the name of Dazuko nor the names of its contributors may be used
++ to endorse or promote products derived from this software without specific
++ prior written permission.
++
++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
++ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ POSSIBILITY OF SUCH DAMAGE.
++*/
++
++#ifndef DAZUKOIO_XP_H
++#define DAZUKOIO_XP_H
++
++/* various requests */
++#define SET_ACCESS_MASK 0
++#define ADD_INCLUDE_PATH 1
++#define ADD_EXCLUDE_PATH 2
++#define REGISTER 3
++#define REMOVE_ALL_PATHS 4
++#define UNREGISTER 5
++#define GET_AN_ACCESS 6
++#define RETURN_AN_ACCESS 7
++
++/* this is just a large number to "guarantee"
++ to contain the full filename */
++#define DAZUKO_FILENAME_MAX_LENGTH 6144
++
++/* this is the hard-limit file length restriction from
++ the 1.x series */
++#define DAZUKO_FILENAME_MAX_LENGTH_COMPAT12 4095
++
++struct dazuko_request
++{
++ char type[2];
++ int buffer_size;
++ char *buffer;
++ int reply_buffer_size;
++ char *reply_buffer;
++ int reply_buffer_size_used;
++};
++
++struct dazuko_id
++{
++ int device;
++ int dev_major;
++ int id;
++ int write_mode;
++};
++
++/* compat12 ioctls */
++
++#define IOCTL_SET_OPTION 0
++#define IOCTL_GET_AN_ACCESS 1
++#define IOCTL_RETURN_ACCESS 2
++
++/* compat12 structures */
++
++struct access_compat12
++{
++ int deny; /* set to deny file access */
++ int event; /* ON_OPEN, etc */
++ int o_flags; /* access flags */
++ int o_mode; /* access mode */
++ int uid; /* user id */
++ int pid; /* user process id */
++ char filename[DAZUKO_FILENAME_MAX_LENGTH_COMPAT12]; /* accessed file */
++};
++
++struct option_compat12
++{
++ int command;
++ int buffer_length;
++ char buffer[DAZUKO_FILENAME_MAX_LENGTH_COMPAT12];
++};
++
++#endif
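For completeness, this is roughly how a userspace library fills struct dazuko_request for a REGISTER call; the type encoding mirrors request->type[0] + 256 * request->type[1] on the kernel side, the group name is a made-up example, and the way the request actually reaches dazuko_handle_user_request() is platform-specific and lives outside these headers:

    char buffer[] = "\nRM=RW\nGN=ExampleScanner";
    char reply[128];
    struct dazuko_request req;

    req.type[0] = REGISTER % 256;
    req.type[1] = REGISTER / 256;
    req.buffer = buffer;
    req.buffer_size = sizeof(buffer) - 1;    /* both sizes must stay below the 8192 byte sanity limit */
    req.reply_buffer = reply;
    req.reply_buffer_size = sizeof(reply);
    req.reply_buffer_size_used = 0;
    /* after a successful round trip, reply holds something like "\nID=3" */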
+diff --git a/rsbac/adf/ff/Makefile b/rsbac/adf/ff/Makefile
+new file mode 100644
+index 0000000..139b30b
+--- /dev/null
++++ b/rsbac/adf/ff/Makefile
+@@ -0,0 +1,9 @@
++#
++# File: rsbac/adf/ff/Makefile
++#
++# Makefile for the Linux rsbac ff decision module.
++#
++# Author and (c) 1999-2012 Amon Ott <ao@rsbac.org>
++#
++
++obj-y := ff_main.o
+diff --git a/rsbac/adf/ff/ff_main.c b/rsbac/adf/ff/ff_main.c
+new file mode 100644
+index 0000000..12409c7
+--- /dev/null
++++ b/rsbac/adf/ff/ff_main.c
+@@ -0,0 +1,700 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - File Flags */
++/* File: rsbac/adf/ff/main.c */
++/* */
++/* Author and (c) 1999-2009: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 12/Oct/2009 */
++/*************************************************** */
++
++#include <linux/types.h>
++#include <linux/string.h>
++#include <linux/fs.h>
++#include <rsbac/aci.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++#include <rsbac/debug.h>
++
++#include <asm/uaccess.h>
++
++/************************************************* */
++/* Global Variables */
++/************************************************* */
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++
++static enum rsbac_adf_req_ret_t
++ check_flags_ff(enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ rsbac_ff_flags_t flags)
++ {
++ union rsbac_attribute_value_t i_attr_val1;
++
++ /* get target's file flags */
++ if (rsbac_get_attr(SW_FF, target,
++ tid,
++ A_ff_flags,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING "check_flags_ff(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++
++ /* Access is granted, if none of the flags in argument flags is set */
++ if (i_attr_val1.ff_flags & flags)
++ return(NOT_GRANTED);
++ else
++ return(GRANTED);
++ }
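check_flags_ff() grants a request only if none of the flag bits passed in are set on the target; the request handler below simply selects the appropriate mask per request type. A worked illustration, assuming tid identifies an existing FILE target whose ff_flags is FF_read_only:

    check_flags_ff(T_FILE, tid, FF_execute_only | FF_write_only);    /* R_READ_OPEN mask   -> GRANTED     */
    check_flags_ff(T_FILE, tid, FF_read_only | FF_execute_only);     /* R_APPEND_OPEN mask -> NOT_GRANTED */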
++
++/************************************************* */
++/* Externally visible functions */
++/************************************************* */
++
++inline enum rsbac_adf_req_ret_t
++ rsbac_adf_request_ff (enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ enum rsbac_adf_req_ret_t result = DO_NOT_CARE;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ int err=0;
++
++ switch (request)
++ {
++ case R_GET_STATUS_DATA:
++ switch(target)
++ {
++ case T_SCD:
++ switch(tid.scd)
++ {
++ case ST_rsbac_log:
++ case ST_rsbac_remote_log:
++ break;
++ default:
++ return GRANTED;
++ }
++ i_tid.user = owner;
++ if ((err=rsbac_get_attr(SW_FF, T_USER,
++ i_tid,
++ A_ff_role,
++ &i_attr_val1,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_ff(): rsbac_get_attr() returned error %i!\n",err);
++ return(NOT_GRANTED);
++ }
++ if ( (i_attr_val1.system_role == SR_security_officer)
++ || (i_attr_val1.system_role == SR_auditor)
++ )
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++ default:
++ return(DO_NOT_CARE);
++ }
++
++#if defined(CONFIG_RSBAC_FF_UM_PROT)
++ case R_GET_PERMISSIONS_DATA:
++ switch(target)
++ {
++ case T_USER:
++ case T_GROUP:
++ /* Security Officer? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_FF,
++ T_USER,
++ i_tid,
++ A_ff_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_ff()", A_ff_role);
++ return(NOT_GRANTED);
++ }
++ /* if sec_officer, then grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ /* We do not care about */
++ /* all other cases */
++ default: return(DO_NOT_CARE);
++ }
++#endif
++
++ case R_READ:
++ switch(target)
++ {
++ case T_DIR:
++ return(check_flags_ff(target,tid,
++ FF_search_only));
++
++#ifdef CONFIG_RSBAC_RW
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ return(check_flags_ff(target,tid,
++ FF_write_only));
++#endif
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_READ_OPEN:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ return(check_flags_ff(target,tid,
++ FF_execute_only | FF_write_only));
++ case T_DIR:
++ return(check_flags_ff(target,tid,
++ FF_search_only));
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_MAP_EXEC:
++ case R_EXECUTE:
++ switch(target)
++ {
++ case T_FILE:
++ return(check_flags_ff(target,tid,
++ FF_write_only | FF_no_execute | FF_append_only));
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_APPEND_OPEN:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ return(check_flags_ff(target,tid,
++ FF_read_only | FF_execute_only));
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_READ_WRITE_OPEN:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ return(check_flags_ff(target,tid,
++ FF_read_only | FF_execute_only
++ | FF_write_only | FF_append_only));
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_CHDIR:
++ switch(target)
++ {
++ case T_DIR:
++ return(check_flags_ff(target,tid,
++ FF_search_only));
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ /* Creating dir or (pseudo) file IN target dir! */
++ case R_CREATE:
++ switch(target)
++ {
++ case T_DIR:
++ return(check_flags_ff(target,tid,
++ FF_read_only | FF_search_only));
++
++#if defined(CONFIG_RSBAC_FF_UM_PROT)
++ case T_USER:
++ case T_GROUP:
++ /* Security Officer? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_FF,
++ T_USER,
++ i_tid,
++ A_ff_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_ff()", A_ff_role);
++ return(NOT_GRANTED);
++ }
++ /* if sec_officer, then grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++#endif
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_DELETE:
++ case R_RENAME:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ return(check_flags_ff(target,tid,
++ FF_read_only | FF_execute_only | FF_no_delete_or_rename
++ | FF_append_only));
++ case T_DIR:
++ return(check_flags_ff(target,tid,
++ FF_read_only | FF_search_only | FF_no_delete_or_rename));
++
++#if defined(CONFIG_RSBAC_FF_UM_PROT)
++ case T_USER:
++ case T_GROUP:
++ /* Security Officer? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_FF,
++ T_USER,
++ i_tid,
++ A_ff_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_ff()", A_ff_role);
++ return(NOT_GRANTED);
++ }
++ /* if sec_officer, then grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++#endif
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_CHANGE_GROUP:
++ case R_MODIFY_PERMISSIONS_DATA:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ return(check_flags_ff(target,tid,
++ FF_read_only | FF_execute_only | FF_append_only));
++ case T_DIR:
++ return(check_flags_ff(target,tid,
++ FF_read_only | FF_search_only));
++
++#if defined(CONFIG_RSBAC_FF_UM_PROT)
++ case T_USER:
++ case T_GROUP:
++ /* Security Officer? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_FF,
++ T_USER,
++ i_tid,
++ A_ff_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_ff()", A_ff_role);
++ return(NOT_GRANTED);
++ }
++ /* if sec_officer, then grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++#endif
++
++ /* all other cases are undefined */
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ case R_CHANGE_OWNER:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ return(check_flags_ff(target,tid,
++ FF_read_only | FF_execute_only | FF_append_only));
++ case T_DIR:
++ return(check_flags_ff(target,tid,
++ FF_read_only | FF_search_only));
++ /* all other cases are undefined */
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ case R_SEARCH:
++ switch(target)
++ {
++ case T_FILE:
++ case T_DIR:
++ case T_SYMLINK:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ i_tid.user = owner;
++ if ((err = rsbac_get_attr(SW_FF, T_USER,
++ i_tid,
++ A_ff_role,
++ &i_attr_val1, TRUE))) {
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_ff(): rsbac_get_attr() returned error %i!\n",err);
++ return (NOT_GRANTED);
++ }
++ if ((i_attr_val1.system_role == SR_security_officer)
++ || (i_attr_val1.system_role == SR_auditor))
++ return (GRANTED);
++ else
++ return(check_flags_ff(target,tid,
++ FF_no_search));
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ case R_LINK_HARD:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ case T_SYMLINK:
++ return(check_flags_ff(target,tid,
++ FF_read_only | FF_execute_only));
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_MODIFY_ACCESS_DATA:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ return(check_flags_ff(target,tid,
++ FF_read_only | FF_execute_only | FF_append_only));
++ case T_DIR:
++ return(check_flags_ff(target,tid,
++ FF_read_only | FF_search_only));
++
++ /* all other cases are undefined */
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ case R_MODIFY_ATTRIBUTE:
++ switch(attr)
++ {
++ case A_ff_flags:
++ case A_system_role:
++ case A_ff_role:
++ #ifdef CONFIG_RSBAC_FF_AUTH_PROT
++ case A_auth_may_setuid:
++ case A_auth_may_set_cap:
++ case A_auth_start_uid:
++ case A_auth_start_euid:
++ case A_auth_start_gid:
++ case A_auth_start_egid:
++ case A_auth_learn:
++ case A_auth_add_f_cap:
++ case A_auth_remove_f_cap:
++ #endif
++ #ifdef CONFIG_RSBAC_FF_GEN_PROT
++ case A_log_array_low:
++ case A_log_array_high:
++ case A_log_program_based:
++ case A_log_user_based:
++ case A_symlink_add_remote_ip:
++ case A_symlink_add_uid:
++ case A_symlink_add_rc_role:
++ case A_linux_dac_disable:
++ case A_pseudo:
++ case A_fake_root_uid:
++ case A_audit_uid:
++ case A_auid_exempt:
++ case A_remote_ip:
++ case A_vset:
++ case A_program_file:
++ #endif
++ /* All attributes (remove target!) */
++ case A_none:
++ /* Security Officer? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_FF, T_USER,
++ i_tid,
++ A_ff_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_ff(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if sec_officer, then grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ case R_MODIFY_SYSTEM_DATA:
++ switch(target)
++ {
++ case T_SCD:
++ switch(tid.scd)
++ {
++ case ST_rsbac_log:
++ case ST_rsbac_remote_log:
++ break;
++ case ST_kmem:
++ return NOT_GRANTED;
++ default:
++ return GRANTED;
++ }
++ /* Get role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_FF, T_USER,
++ i_tid,
++ A_ff_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_ff(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* grant only for secoff */
++ if ( (i_attr_val1.system_role == SR_security_officer)
++ || (i_attr_val1.system_role == SR_auditor)
++ )
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_MOUNT:
++ case R_UMOUNT:
++ switch(target)
++ {
++ case T_FILE:
++ return(check_flags_ff(target,tid,
++ FF_read_only | FF_execute_only
++ | FF_write_only | FF_append_only | FF_no_mount));
++ case T_DIR:
++ return(check_flags_ff(target,tid,
++ FF_read_only | FF_search_only | FF_no_mount));
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_SWITCH_LOG:
++ switch(target)
++ {
++ case T_NONE:
++ /* test owner's ff_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_FF, T_USER,
++ i_tid,
++ A_ff_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_ff(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* security officer? -> grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_SWITCH_MODULE:
++ switch(target)
++ {
++ case T_NONE:
++ /* we need the switch_target */
++ if(attr != A_switch_target)
++ return NOT_GRANTED;
++ /* do not care for other modules */
++ if( (attr_val.switch_target != SW_FF)
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ && (attr_val.switch_target != SW_SOFTMODE)
++ #endif
++ #ifdef CONFIG_RSBAC_FREEZE
++ && (attr_val.switch_target != SW_FREEZE)
++ #endif
++ #ifdef CONFIG_RSBAC_FF_AUTH_PROT
++ && (attr_val.switch_target != SW_AUTH)
++ #endif
++ )
++ return(DO_NOT_CARE);
++ /* test owner's ff_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_FF, T_USER,
++ i_tid,
++ A_ff_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_ff(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* security officer? -> grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_TRUNCATE:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ return(check_flags_ff(target,tid,
++ FF_read_only | FF_execute_only | FF_append_only));
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_WRITE_OPEN:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ return(check_flags_ff(target,tid,
++ FF_read_only | FF_execute_only | FF_append_only));
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_WRITE:
++ switch(target)
++ {
++ case T_DIR:
++ return(check_flags_ff(target,tid,
++ FF_read_only | FF_search_only));
++
++#ifdef CONFIG_RSBAC_RW
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ return(check_flags_ff(target,tid,
++ FF_read_only | FF_execute_only));
++#endif
++#if defined(CONFIG_RSBAC_FF_UM_PROT)
++ case T_USER:
++ case T_GROUP:
++ /* Security Officer? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_FF,
++ T_USER,
++ i_tid,
++ A_ff_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_ff()", A_ff_role);
++ return(NOT_GRANTED);
++ }
++ /* if sec_officer, then grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++#endif
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++
++/*********************/
++ default: return DO_NOT_CARE;
++ }
++
++ return result;
++ } /* end of rsbac_adf_request_ff() */
++
++
++/******************************************/
++#ifdef CONFIG_RSBAC_SECDEL
++inline rsbac_boolean_t rsbac_need_overwrite_ff(struct dentry * dentry_p)
++ {
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ if( !dentry_p
++ || !dentry_p->d_inode)
++ return FALSE;
++
++ i_tid.file.device = dentry_p->d_sb->s_dev;
++ i_tid.file.inode = dentry_p->d_inode->i_ino;
++ i_tid.file.dentry_p = dentry_p;
++ /* get target's file flags */
++ if (rsbac_get_attr(SW_FF, T_FILE,
++ i_tid,
++ A_ff_flags,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_need_overwrite_ff(): rsbac_get_attr() returned error!\n");
++ return FALSE;
++ }
++
++ /* overwrite, if secure_delete is set */
++ if (i_attr_val1.ff_flags & FF_secure_delete)
++ return TRUE;
++ else
++ return FALSE;
++ }
++#endif
++
++/* end of rsbac/adf/ff/ff_main.c */
+diff --git a/rsbac/adf/jail/Makefile b/rsbac/adf/jail/Makefile
+new file mode 100644
+index 0000000..17d5b98
+--- /dev/null
++++ b/rsbac/adf/jail/Makefile
+@@ -0,0 +1,14 @@
++#
++# File: rsbac/adf/jail/Makefile
++#
++# Makefile for the Linux rsbac jail decision module.
++#
++# Author and (c) 1999-2012 Amon Ott <ao@rsbac.org>
++#
++
++obj-y := jail_syscalls.o
++# decisions only in non-maint mode
++ifneq ($(CONFIG_RSBAC_MAINT),y)
++obj-y += jail_main.o
++endif
++
+diff --git a/rsbac/adf/jail/jail_main.c b/rsbac/adf/jail/jail_main.c
+new file mode 100644
+index 0000000..0c838ee
+--- /dev/null
++++ b/rsbac/adf/jail/jail_main.c
+@@ -0,0 +1,1395 @@
++/**************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - JAIL module */
++/* File: rsbac/adf/jail/jail_main.c */
++/* */
++/* Author and (c) 1999-2011: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 17/Oct/2011 */
++/**************************************************** */
++
++#include <linux/string.h>
++#include <rsbac/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++#include <rsbac/network.h>
++#include <rsbac/debug.h>
++#include <rsbac/jail.h>
++
++/************************************************* */
++/* Global Variables */
++/************************************************* */
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
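++/* Note (assumption based on the standard Linux character device number
++ * assignments, not stated in the original source): char majors 2-4 cover the
++ * BSD pty pairs and the TTY devices (consoles, serial ports), 128-143 the
++ * Unix98 pty masters and slaves, so this helper matches the usual terminal
++ * devices. */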
++static inline rsbac_boolean_t jail_dev_tty(struct rsbac_dev_desc_t dev)
++{
++ if (dev.type != D_char)
++ return FALSE;
++ if (((dev.major >= 2)
++ && (dev.major <= 4)
++ )
++ || ((dev.major >= 128)
++ && (dev.major <= 143)
++ )
++ )
++ return TRUE;
++ else
++ return FALSE;
++}
++
++static rsbac_jail_id_t
++jail_get_id(enum rsbac_target_t target, union rsbac_target_id_t tid)
++{
++ int err;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ if ((err = rsbac_get_attr(SW_JAIL,
++ target,
++ tid, A_jail_id, &i_attr_val1, TRUE))) {
++ rsbac_ds_get_error("jail_get_id()", A_jail_id);
++ return 0;
++ } else
++ return i_attr_val1.jail_id;
++}
++
++static rsbac_jail_id_t jail_get_id_process(rsbac_pid_t pid)
++{
++ int err;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ i_tid.process = pid;
++ if ((err = rsbac_get_attr(SW_JAIL, T_PROCESS,
++ i_tid, A_jail_id, &i_attr_val1, TRUE))) {
++ rsbac_ds_get_error("jail_get_id_process()", A_jail_id);
++ return 0;
++ } else
++ return i_attr_val1.jail_id;
++}
++
++static inline rsbac_jail_id_t jail_get_parent_process(rsbac_pid_t pid)
++{
++ int err;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ i_tid.process = pid;
++ if ((err = rsbac_get_attr(SW_JAIL, T_PROCESS,
++ i_tid, A_jail_parent, &i_attr_val1, TRUE))) {
++ rsbac_ds_get_error("jail_get_parent_process()", A_jail_parent);
++ return 0;
++ } else
++ return i_attr_val1.jail_parent;
++}
++
++#if defined(CONFIG_RSBAC_NET_OBJ)
++static inline rsbac_jail_ip_t jail_get_ip_process(rsbac_pid_t pid)
++{
++ int err;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ i_tid.process = pid;
++ if ((err = rsbac_get_attr(SW_JAIL, T_PROCESS,
++ i_tid, A_jail_ip, &i_attr_val1, TRUE))) {
++ rsbac_ds_get_error("jail_get_ip_process()", A_jail_ip);
++ return 0;
++ } else
++ return i_attr_val1.jail_ip;
++}
++#endif
++
++static rsbac_jail_flags_t jail_get_flags_process(rsbac_pid_t pid)
++{
++ int err;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ i_tid.process = pid;
++ if ((err = rsbac_get_attr(SW_JAIL, T_PROCESS, i_tid,
++ A_jail_flags, &i_attr_val1, TRUE))) {
++ rsbac_ds_get_error("jail_get_flags_process()",
++ A_jail_flags);
++ return 0;
++ } else
++ return i_attr_val1.jail_flags;
++}
++
++static inline rsbac_jail_scd_vector_t jail_get_scd_get_process(rsbac_pid_t pid)
++{
++ int err;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ i_tid.process = pid;
++ if ((err = rsbac_get_attr(SW_JAIL, T_PROCESS, i_tid,
++ A_jail_scd_get, &i_attr_val1, TRUE))) {
++ rsbac_ds_get_error("jail_get_scd_get_process()",
++ A_jail_scd_get);
++ return 0;
++ } else
++ return i_attr_val1.jail_scd_get;
++}
++
++static inline rsbac_jail_scd_vector_t jail_get_scd_modify_process(rsbac_pid_t pid)
++{
++ int err;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ i_tid.process = pid;
++ if ((err = rsbac_get_attr(SW_JAIL, T_PROCESS, i_tid,
++ A_jail_scd_modify,
++ &i_attr_val1, TRUE))) {
++ rsbac_ds_get_error("jail_get_scd_modify_process()",
++ A_jail_scd_modify);
++ return 0;
++ } else
++ return i_attr_val1.jail_scd_modify;
++}
++
++static enum rsbac_adf_req_ret_t
++jail_check_sysrole(rsbac_uid_t owner,
++ enum rsbac_system_role_t role)
++{
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_JAIL, T_USER, i_tid,
++ A_jail_role, &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error("jail_check_sysrole()", A_jail_role);
++ return (NOT_GRANTED);
++ }
++ /* if correct role, then grant */
++ if (i_attr_val1.system_role == role)
++ return (GRANTED);
++ else
++ return (NOT_GRANTED);
++}
++
++#if defined(CONFIG_RSBAC_NET_OBJ)
++enum rsbac_adf_req_ret_t
++jail_check_ip(rsbac_pid_t pid, union rsbac_target_id_t tid)
++{
++ rsbac_jail_ip_t jail_ip;
++ rsbac_jail_flags_t jail_flags;
++
++ if (!tid.netobj.sock_p) {
++ rsbac_printk(KERN_WARNING
++ "jail_check_ip(): NULL sock_p!\n");
++ return NOT_GRANTED;
++ }
++ if (!tid.netobj.sock_p->ops) {
++ return DO_NOT_CARE;
++ }
++ switch (tid.netobj.sock_p->ops->family) {
++ case AF_UNIX:
++ return DO_NOT_CARE;
++
++ case AF_INET:
++ switch (tid.netobj.sock_p->type) {
++ case SOCK_STREAM:
++ case SOCK_DGRAM:
++ case SOCK_RDM:
++ jail_ip = jail_get_ip_process(pid);
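++ /* A jail_ip of INADDR_ANY means this jail carries no
++ * IP restriction, so the bind/connect is not limited. */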
++ if (jail_ip == INADDR_ANY)
++ return GRANTED;
++ jail_flags = jail_get_flags_process(pid);
++ if (tid.netobj.local_addr) {
++ struct sockaddr_in *addr =
++ tid.netobj.local_addr;
++
++ if ((jail_ip == addr->sin_addr.s_addr)
++ || (
++ (jail_flags &
++ JAIL_allow_inet_localhost)
++ && (addr->sin_addr.s_addr ==
++ RSBAC_JAIL_LOCALHOST)
++ )
++#if defined(CONFIG_RSBAC_JAIL_NET_ADJUST)
++ || (
++ (jail_flags &
++ JAIL_auto_adjust_inet_any)
++ && (addr->sin_addr.s_addr ==
++ INADDR_ANY)
++ )
++#endif
++ )
++ return GRANTED;
++ else {
++ rsbac_pr_debug(adf_jail, "local_addr does not match jail_ip -> NOT_GRANTED!\n");
++ return NOT_GRANTED;
++ }
++ } else if ((tid.netobj.remote_addr)
++ && (jail_flags &
++ JAIL_allow_inet_localhost)
++ &&
++ (((struct sockaddr_in *) tid.netobj.
++ remote_addr)->sin_addr.s_addr ==
++ RSBAC_JAIL_LOCALHOST)
++ )
++ return GRANTED;
++ else {
++ if (((jail_ip ==
++ inet_sk(tid.netobj.sock_p->sk)->
++ inet_rcv_saddr)
++ && (jail_ip ==
++ inet_sk(tid.netobj.sock_p->sk)->
++ inet_saddr)
++ )
++ || (
++ (jail_flags &
++ JAIL_allow_inet_localhost)
++ &&
++ ((inet_sk(tid.netobj.sock_p->sk)->
++ inet_saddr == RSBAC_JAIL_LOCALHOST)
++ || (
++ inet_sk(tid.netobj.sock_p->sk)->
++ inet_daddr == RSBAC_JAIL_LOCALHOST)
++ )
++ )
++#if defined(CONFIG_RSBAC_JAIL_NET_ADJUST)
++ || (
++ (jail_flags &
++ JAIL_auto_adjust_inet_any)
++ && (inet_sk(tid.netobj.sock_p->sk)->
++ inet_rcv_saddr == INADDR_ANY)
++ && (inet_sk(tid.netobj.sock_p->sk)->
++ inet_saddr == INADDR_ANY)
++ )
++#endif
++ )
++ return GRANTED;
++ else {
++ rsbac_pr_debug(adf_jail, "sk->inet_rcv_saddr or sk->inet_saddr does not match jail_ip -> NOT_GRANTED!\n");
++ return NOT_GRANTED;
++ }
++ }
++
++ case SOCK_RAW:
++ if (jail_get_flags_process(pid) &
++ JAIL_allow_inet_raw)
++ return GRANTED;
++ else {
++ rsbac_pr_debug(adf_jail, "network type is raw and allow_inet_raw is not set -> NOT_GRANTED!\n");
++ return NOT_GRANTED;
++ }
++
++ default:
++ rsbac_pr_debug(adf_jail, "network type not STREAM, DGRAM, RDM or RAW -> NOT_GRANTED!\n");
++ return NOT_GRANTED;
++ }
++
++ case AF_NETLINK:
++ if (jail_get_flags_process(pid) &
++ (JAIL_allow_all_net_family | JAIL_allow_netlink))
++ return GRANTED;
++ else {
++ rsbac_pr_debug(adf_jail, "network family is NETLINK and neither allow_netlink nor allow_all_net_family is set -> NOT_GRANTED!\n");
++ return NOT_GRANTED;
++ }
++
++ default:
++ if (jail_get_flags_process(pid) &
++ JAIL_allow_all_net_family)
++ return GRANTED;
++ else {
++ rsbac_pr_debug(adf_jail, "network family not UNIX or INET and allow_all_net_family not set -> NOT_GRANTED!\n");
++ return NOT_GRANTED;
++ }
++ }
++}
++#endif
++
++/************************************************* */
++/* Externally visible functions */
++/************************************************* */
++
++enum rsbac_adf_req_ret_t
++rsbac_adf_request_jail(enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++{
++ rsbac_jail_id_t jail_id;
++ rsbac_jail_id_t jail_id_object;
++ rsbac_jail_flags_t jail_flags;
++
++ switch(target) {
++ case T_DEV:
++ switch(request) {
++ case R_SEND:
++ if (jail_get_id_process(caller_pid))
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++ case R_APPEND_OPEN:
++ case R_WRITE_OPEN:
++ if (jail_get_id_process(caller_pid)) {
++ jail_flags =
++ jail_get_flags_process(caller_pid);
++ if (!(jail_flags & JAIL_allow_dev_write))
++ return NOT_GRANTED;
++ else if (jail_dev_tty(tid.dev)
++ && !(jail_flags &
++ JAIL_allow_tty_open)
++ )
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++ } else
++ return GRANTED;
++ case R_READ_OPEN:
++ if (jail_get_id_process(caller_pid)) {
++ jail_flags =
++ jail_get_flags_process(caller_pid);
++ if (!(jail_flags & JAIL_allow_dev_read))
++ return NOT_GRANTED;
++ else if (jail_dev_tty(tid.dev)
++ && !(jail_flags &
++ JAIL_allow_tty_open)
++ )
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++ } else
++ return GRANTED;
++ case R_READ_WRITE_OPEN:
++ if (jail_get_id_process(caller_pid)) {
++ jail_flags =
++ jail_get_flags_process(caller_pid);
++ if (!(jail_flags & JAIL_allow_dev_read)
++ || !(jail_flags & JAIL_allow_dev_write)
++ )
++ return NOT_GRANTED;
++ else if (jail_dev_tty(tid.dev)
++ && !(jail_flags &
++ JAIL_allow_tty_open)
++ )
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++ } else
++ return GRANTED;
++ case R_GET_STATUS_DATA:
++ if (jail_get_id_process(caller_pid)
++ && !(jail_get_flags_process(caller_pid) &
++ JAIL_allow_dev_get_status)
++ )
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++ case R_MODIFY_SYSTEM_DATA:
++ if (jail_get_id_process(caller_pid)
++ && !(jail_get_flags_process(caller_pid) &
++ JAIL_allow_dev_mod_system)
++ )
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++ case R_READ:
++ if (jail_get_id_process(caller_pid)
++ && !(jail_get_flags_process(caller_pid) &
++ JAIL_allow_dev_read)
++ )
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++ case R_WRITE:
++ if (jail_get_id_process(caller_pid)
++ && !(jail_get_flags_process(caller_pid) &
++ JAIL_allow_dev_write)
++ )
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++ default:
++ return DO_NOT_CARE;
++ }
++ case T_DIR:
++ switch(request) {
++ case R_CREATE:
++ if (!jail_get_id_process(caller_pid))
++ return GRANTED;
++ /* no mknod for devices or suid/sgid */
++ if ((attr == A_create_data)
++ && (S_ISCHR(attr_val.create_data.mode)
++ || S_ISBLK(attr_val.create_data.mode)
++ || ((attr_val.create_data.mode & (S_ISUID | S_ISGID))
++ && !(jail_get_flags_process(caller_pid) & JAIL_allow_suid_files)
++ )
++ )
++ )
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++ case R_MODIFY_PERMISSIONS_DATA:
++ if (jail_get_id_process(caller_pid)
++ && (attr == A_mode)
++ && (attr_val.mode & (S_ISUID | S_ISGID))
++ && !(jail_get_flags_process(caller_pid) & JAIL_allow_suid_files)
++ )
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++ default:
++ return DO_NOT_CARE;
++ }
++ case T_FILE:
++ switch(request) {
++ case R_ADD_TO_KERNEL:
++ case R_REMOVE_FROM_KERNEL:
++ if (jail_get_id_process(caller_pid))
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++ case R_MOUNT:
++ case R_UMOUNT:
++ if (!jail_get_id_process(caller_pid)
++ || (jail_get_flags_process(caller_pid) & JAIL_allow_mount)
++ )
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++ case R_MODIFY_PERMISSIONS_DATA:
++ if (jail_get_id_process(caller_pid)
++ && (attr == A_mode)
++ && (attr_val.mode & (S_ISUID | S_ISGID))
++ && !(jail_get_flags_process(caller_pid) & JAIL_allow_suid_files)
++ )
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++ default:
++ return DO_NOT_CARE;
++ }
++ case T_PROCESS:
++ switch(request) {
++ case R_GET_STATUS_DATA:
++ case R_SEND_SIGNAL:
++ case R_MODIFY_SYSTEM_DATA:
++ case R_TRACE:
++ jail_id = jail_get_id_process(caller_pid);
++ if (!jail_id
++ || (jail_id == jail_get_id(target, tid))
++ )
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++ case R_MODIFY_ATTRIBUTE:
++ switch (attr) {
++ case A_jail_id:
++ case A_jail_ip:
++ case A_jail_flags:
++ case A_jail_max_caps:
++ case A_jail_parent:
++ case A_jail_scd_get:
++ case A_jail_scd_modify:
++ /* All attributes (remove target!) */
++ case A_none:
++ if (jail_get_id_process(caller_pid))
++ return NOT_GRANTED;
++
++ /* Security Officer? */
++ return jail_check_sysrole(owner,
++ SR_security_officer);
++ default:
++ return DO_NOT_CARE;
++ }
++ case R_READ_ATTRIBUTE:
++ switch (attr) {
++ case A_jail_id:
++ case A_jail_ip:
++ case A_jail_flags:
++ case A_jail_max_caps:
++ case A_jail_parent:
++ case A_jail_scd_get:
++ case A_jail_scd_modify:
++ /* All attributes (remove target!) */
++ case A_none:
++ if (jail_get_id_process(caller_pid))
++ return NOT_GRANTED;
++
++ /* Security Officer? */
++ if (jail_check_sysrole(owner, SR_administrator) ==
++ NOT_GRANTED)
++ return jail_check_sysrole(owner,
++ SR_security_officer);
++ else
++ return GRANTED;
++ default:
++ return (DO_NOT_CARE);
++ }
++ default:
++ return DO_NOT_CARE;
++ }
++ case T_UNIXSOCK:
++ switch(request) {
++ case R_SEND:
++ case R_CONNECT:
++ case R_LISTEN:
++ case R_ACCEPT:
++ case R_RECEIVE:
++#ifdef CONFIG_RSBAC_RW
++ case R_READ:
++ case R_WRITE:
++#endif
++ case R_BIND:
++ jail_id = jail_get_id_process(caller_pid);
++ if (!jail_id)
++ return GRANTED;
++ if (attr == A_process) {
++ union rsbac_target_id_t i_tid;
++ rsbac_jail_id_t jail_id_parent;
++
++ i_tid.process = attr_val.process;
++ jail_id_parent = jail_get_parent_process(caller_pid);
++ if((jail_id != (jail_id_object = jail_get_id(T_PROCESS, i_tid)))
++ && !((jail_flags = jail_get_flags_process(caller_pid)) & JAIL_allow_external_ipc)
++ && (!(jail_flags & JAIL_allow_parent_ipc)
++ || (jail_id_object != jail_id_parent)
++ )
++ && (!(jail_flags & JAIL_allow_ipc_to_syslog)
++ || (jail_id_object != rsbac_jail_syslog_jail_id)
++ )
++ && (!(jail_get_flags_process(attr_val.process) & JAIL_allow_parent_ipc)
++ || (jail_get_parent_process(attr_val.process) != jail_id)
++ )
++ ) {
++ rsbac_pr_debug(adf_jail,
++ "process jail %u does not match partner process jail %u, parent jail is %u -> NOT_GRANTED!\n",
++ jail_id, jail_id_object, jail_id_parent);
++ return NOT_GRANTED;
++ }
++ } else {
++ if(!(jail_get_flags_process(caller_pid) & JAIL_allow_external_ipc)) {
++ rsbac_pr_debug(adf_jail,
++ "process jail is %u, no allow_ipc and partner process unknown -> NOT_GRANTED!\n",
++ jail_id);
++ return NOT_GRANTED;
++ }
++ }
++ return GRANTED;
++ default:
++ return DO_NOT_CARE;
++ }
++#ifdef CONFIG_RSBAC_NET_OBJ
++ case T_NETOBJ:
++ switch(request) {
++ case R_SEND:
++ case R_RECEIVE:
++ case R_CONNECT:
++ case R_LISTEN:
++ case R_ACCEPT:
++ case R_GET_PERMISSIONS_DATA:
++ case R_MODIFY_PERMISSIONS_DATA:
++ case R_GET_STATUS_DATA:
++ case R_READ:
++ case R_WRITE:
++ case R_BIND:
++ if (!jail_get_id_process(caller_pid))
++ return GRANTED;
++ return (jail_check_ip(caller_pid, tid));
++ case R_CREATE:
++ if (!jail_get_id_process(caller_pid))
++ return GRANTED;
++ if (!tid.netobj.sock_p) {
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_jail(): NULL sock_p on CREATE!\n");
++ return NOT_GRANTED;
++ }
++ if (!tid.netobj.sock_p->ops) {
++ return DO_NOT_CARE;
++ }
++ switch (tid.netobj.sock_p->ops->family) {
++ case AF_UNIX:
++ return (GRANTED);
++
++ case AF_INET:
++ switch (tid.netobj.sock_p->type) {
++ case SOCK_STREAM:
++ case SOCK_DGRAM:
++ case SOCK_RDM:
++ if (tid.netobj.sock_p->sk
++ && (tid.netobj.sock_p->sk->
++ sk_protocol == IPPROTO_RAW)
++ ) {
++ jail_flags =
++ jail_get_flags_process
++ (caller_pid);
++ if (jail_flags &
++ JAIL_allow_inet_raw)
++ return (GRANTED);
++ else
++ return NOT_GRANTED;
++ } else
++ return GRANTED;
++
++ case SOCK_RAW:
++ jail_flags =
++ jail_get_flags_process
++ (caller_pid);
++ if (jail_flags &
++ JAIL_allow_inet_raw)
++ return (GRANTED);
++ else
++ return NOT_GRANTED;
++
++ default:
++ return (NOT_GRANTED);
++ }
++
++ case AF_NETLINK:
++ jail_flags = jail_get_flags_process(caller_pid);
++ if (jail_flags &
++ (JAIL_allow_all_net_family | JAIL_allow_netlink))
++ return GRANTED;
++ else {
++ rsbac_pr_debug(adf_jail, "network family is NETLINK and neither allow_netlink nor allow_all_net_family is set -> NOT_GRANTED!\n");
++ return NOT_GRANTED;
++ }
++ default:
++ jail_flags = jail_get_flags_process(caller_pid);
++ if (jail_flags & JAIL_allow_all_net_family)
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++ }
++ default:
++ return DO_NOT_CARE;
++ }
++#endif /* NET_OBJ */
++ case T_IPC:
++ switch(request) {
++ case R_ALTER:
++ case R_APPEND_OPEN:
++ case R_WRITE_OPEN:
++ case R_READ_OPEN:
++ case R_READ_WRITE_OPEN:
++ case R_DELETE:
++ case R_MODIFY_PERMISSIONS_DATA:
++ case R_GET_STATUS_DATA:
++ jail_id = jail_get_id_process(caller_pid);
++ if (!jail_id
++ || (jail_id == (jail_id_object = jail_get_id(target, tid)))
++ || ((jail_flags = jail_get_flags_process(caller_pid)) &
++ JAIL_allow_external_ipc)
++ || ((jail_flags & JAIL_allow_parent_ipc)
++ && (jail_get_parent_process(caller_pid) == jail_id_object)
++ )
++ || ((jail_flags & JAIL_allow_ipc_to_syslog)
++ && (rsbac_jail_syslog_jail_id == jail_id_object)
++ )
++ )
++ return GRANTED;
++ else {
++ rsbac_pr_debug(adf_jail,
++ "process jail %u does not match IPC object jail %u -> NOT_GRANTED!\n",
++ jail_id, jail_id_object);
++ return NOT_GRANTED;
++ }
++ case R_CREATE:
++ return GRANTED;
++ case R_MODIFY_ATTRIBUTE:
++ switch (attr) {
++ case A_jail_id:
++ /* All attributes (remove target!) */
++ case A_none:
++ if (jail_get_id_process(caller_pid))
++ return NOT_GRANTED;
++
++ /* Security Officer? */
++ return jail_check_sysrole(owner,
++ SR_security_officer);
++ default:
++ return DO_NOT_CARE;
++ }
++ case R_READ_ATTRIBUTE:
++ switch (attr) {
++ case A_jail_id:
++ /* All attributes (remove target!) */
++ case A_none:
++ if (jail_get_id_process(caller_pid))
++ return NOT_GRANTED;
++
++ /* Security Officer? */
++ if (jail_check_sysrole(owner, SR_administrator) ==
++ NOT_GRANTED)
++ return jail_check_sysrole(owner,
++ SR_security_officer);
++ else
++ return GRANTED;
++ default:
++ return (DO_NOT_CARE);
++ }
++ default:
++ jail_id = jail_get_id_process(caller_pid);
++ if (!jail_id)
++ return GRANTED;
++ if((jail_flags = jail_get_flags_process(caller_pid)) &
++ JAIL_allow_external_ipc)
++ return GRANTED;
++ jail_id_object = jail_get_id(target, tid);
++ if((jail_flags & JAIL_allow_parent_ipc)
++ && (jail_get_parent_process(caller_pid) == jail_id_object))
++ return GRANTED;
++ if((attr == A_process)
++ && (jail_get_flags_process(attr_val.process) & JAIL_allow_parent_ipc)
++ && (jail_get_parent_process(attr_val.process) == jail_id))
++ return GRANTED;
++ if((jail_flags & JAIL_allow_ipc_to_syslog)
++ && (rsbac_jail_syslog_jail_id == jail_id_object))
++ return GRANTED;
++ if(jail_id != jail_id_object) {
++ rsbac_pr_debug(adf_jail,
++ "process jail %u does not match IPC object jail %u -> NOT_GRANTED!\n",
++ jail_id, jail_id_object);
++ return NOT_GRANTED;
++ }
++ if (attr == A_process) {
++ union rsbac_target_id_t i_tid;
++ rsbac_jail_id_t jail_id_parent;
++
++ i_tid.process = attr_val.process;
++ jail_id_parent = jail_get_parent_process(caller_pid);
++ if((jail_id != (jail_id_object = jail_get_id(T_PROCESS, i_tid)))
++ && !(jail_flags & JAIL_allow_external_ipc)
++ && (!(jail_flags & JAIL_allow_parent_ipc)
++ || (jail_id_object != jail_id_parent)
++ )
++ ) {
++ rsbac_pr_debug(adf_jail,
++ "process jail %u does not match partner process jail %u, parent jail is %u -> NOT_GRANTED!\n",
++ jail_id, jail_id_object, jail_id_parent);
++ return NOT_GRANTED;
++ }
++ }
++ return GRANTED;
++ }
++ case T_FIFO:
++ case T_SYMLINK:
++ switch(request) {
++ case R_MODIFY_PERMISSIONS_DATA:
++ if (jail_get_id_process(caller_pid)
++ && (attr == A_mode)
++ && (attr_val.mode & (S_ISUID | S_ISGID))
++ && !(jail_get_flags_process(caller_pid) & JAIL_allow_suid_files)
++ )
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++ default:
++ return DO_NOT_CARE;
++ }
++ case T_SCD:
++ switch(request) {
++ case R_MODIFY_PERMISSIONS_DATA:
++ if (jail_get_id_process(caller_pid))
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++ case R_GET_STATUS_DATA:
++ if (jail_get_id_process(caller_pid)) {
++ if (jail_get_scd_get_process(caller_pid) &
++ RSBAC_SCD_VECTOR(tid.scd))
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++ } else
++ return GRANTED;
++ case R_MODIFY_SYSTEM_DATA:
++ if (jail_get_id_process(caller_pid)) {
++ if (jail_get_scd_modify_process(caller_pid)
++ & RSBAC_SCD_VECTOR(tid.scd))
++ return (GRANTED);
++ else
++ return NOT_GRANTED;
++ } else
++ return GRANTED;
++ default:
++ return DO_NOT_CARE;
++ }
++ case T_NONE:
++ switch(request) {
++ case R_ADD_TO_KERNEL:
++ case R_REMOVE_FROM_KERNEL:
++ case R_SHUTDOWN:
++ if (jail_get_id_process(caller_pid))
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++ case R_SWITCH_LOG:
++ if (jail_get_id_process(caller_pid))
++ return NOT_GRANTED;
++ /* test owner's jail_role */
++ return jail_check_sysrole(owner,
++ SR_security_officer);
++ case R_SWITCH_MODULE:
++ /* we need the switch_target */
++ if (attr != A_switch_target)
++ return NOT_GRANTED;
++ /* do not care for other modules */
++ if ((attr_val.switch_target != SW_JAIL)
++#ifdef CONFIG_RSBAC_SOFTMODE
++ && (attr_val.switch_target != SW_SOFTMODE)
++#endif
++#ifdef CONFIG_RSBAC_FREEZE
++ && (attr_val.switch_target != SW_FREEZE)
++#endif
++ )
++ return (DO_NOT_CARE);
++ if (jail_get_id_process(caller_pid))
++ return NOT_GRANTED;
++ /* test owner's jail_role */
++ return jail_check_sysrole(owner,
++ SR_security_officer);
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE
++ /* switching Linux DAC */
++ case R_MODIFY_PERMISSIONS_DATA:
++ if (jail_get_id_process(caller_pid))
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++#endif
++ default:
++ return DO_NOT_CARE;
++ }
++ case T_NETDEV:
++ switch(request) {
++#ifdef CONFIG_RSBAC_JAIL_NET_DEV_PROT
++ case R_MODIFY_SYSTEM_DATA:
++ case R_BIND:
++ if (jail_get_id_process(caller_pid))
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++#endif
++ default:
++ return DO_NOT_CARE;
++ }
++
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ case T_NETTEMP:
++ switch(request) {
++ case R_CREATE:
++ case R_DELETE:
++ case R_WRITE:
++ if (jail_get_id_process(caller_pid))
++ return NOT_GRANTED;
++ return jail_check_sysrole(owner, SR_security_officer);
++ case R_READ:
++ if (jail_get_id_process(caller_pid))
++ return NOT_GRANTED;
++ if (jail_check_sysrole(owner, SR_security_officer)
++ == GRANTED)
++ return GRANTED;
++ return jail_check_sysrole(owner, SR_administrator);
++ default:
++ return DO_NOT_CARE;
++ }
++#endif
++
++ case T_USER:
++ switch(request) {
++ case R_MODIFY_ATTRIBUTE:
++ switch (attr) {
++ case A_system_role:
++ case A_jail_role:
++ /* All attributes (remove target!) */
++ case A_none:
++ if (jail_get_id_process(caller_pid))
++ return NOT_GRANTED;
++
++ /* Security Officer? */
++ return jail_check_sysrole(owner,
++ SR_security_officer);
++ default:
++ return DO_NOT_CARE;
++ }
++ case R_READ_ATTRIBUTE:
++ switch (attr) {
++ case A_system_role:
++ case A_jail_role:
++ /* All attributes (remove target!) */
++ case A_none:
++ if (jail_get_id_process(caller_pid))
++ return NOT_GRANTED;
++
++ /* Security Officer? */
++ if (jail_check_sysrole(owner, SR_administrator) ==
++ NOT_GRANTED)
++ return jail_check_sysrole(owner,
++ SR_security_officer);
++ else
++ return GRANTED;
++ default:
++ return (DO_NOT_CARE);
++ }
++ default:
++ return DO_NOT_CARE;
++ }
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++}
++
++/*****************************************************************************/
++/* If the request returned granted and the operation is performed, */
++/* the following function can be called by the AEF to get all aci set */
++/* correctly. For write accesses that are performed fully within the kernel, */
++/* this is usually not done to prevent extra calls, including R_CLOSE for */
++/* cleaning up. */
++/* The second instance of target specification is the new target, if one has */
++/* been created, otherwise its values are ignored. */
++/* On success, 0 is returned, and an error from rsbac/error.h otherwise. */
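++/* Illustrative call sequence (a sketch, not taken from the source): the AEF
++ * first asks rsbac_adf_request_jail(request, pid, target, tid, attr, value,
++ * owner); only if that returns GRANTED and the operation actually succeeded
++ * does it call rsbac_adf_set_attr_jail() with the same parameters plus the
++ * new target/tid pair, e.g. the freshly created IPC object for R_CREATE. */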
++
++int rsbac_adf_set_attr_jail(enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_target_t new_target,
++ union rsbac_target_id_t new_tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++{
++#ifdef CONFIG_RSBAC_JAIL_NET_ADJUST
++ int err;
++#endif
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_attribute_value_t i_attr_val2;
++
++ switch (request) {
++ case R_CHANGE_OWNER:
++ switch (target) {
++ case T_PROCESS:
++ /* Adjust Linux caps */
++ i_tid.process = caller_pid;
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if (!rsbac_softmode)
++#endif
++ {
++ if (rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_max_caps,
++ &i_attr_val1, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_max_caps);
++ } else {
++ struct cred *override_cred;
++
++ override_cred = prepare_creds();
++ if (!override_cred)
++ return -ENOMEM;
++ override_cred->cap_permitted.cap[0] &= i_attr_val1.jail_max_caps.cap[0];
++ override_cred->cap_effective.cap[0] &= i_attr_val1.jail_max_caps.cap[0];
++ override_cred->cap_inheritable.cap[0] &= i_attr_val1.jail_max_caps.cap[0];
++ override_cred->cap_permitted.cap[1] &= i_attr_val1.jail_max_caps.cap[1];
++ override_cred->cap_effective.cap[1] &= i_attr_val1.jail_max_caps.cap[1];
++ override_cred->cap_inheritable.cap[1] &= i_attr_val1.jail_max_caps.cap[1];
++ commit_creds(override_cred);
++ }
++ }
++ return 0;
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++
++ case R_CLONE:
++ if (target == T_PROCESS) {
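++ /* On fork, every JAIL attribute of the parent is copied to
++ * the child below, so a jailed process cannot shed its jail
++ * by spawning children. */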
++ union rsbac_attribute_value_t i_attr_val3;
++ union rsbac_attribute_value_t i_attr_val4;
++ union rsbac_attribute_value_t i_attr_val5;
++ union rsbac_attribute_value_t i_attr_val6;
++
++ /* Get jail_id from first process */
++ if (rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ tid,
++ A_jail_id,
++ &i_attr_val1, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_id);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Do not copy anything, if not jailed - defaults are fine */
++ if(!i_attr_val1.jail_id)
++ return 0;
++ /* Get jail_ip from first process */
++ if (rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ tid,
++ A_jail_ip,
++ &i_attr_val2, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_ip);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Get jail_flags from first process */
++ if (rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ tid,
++ A_jail_flags,
++ &i_attr_val3, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_flags);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Get jail_max_caps from first process */
++ if (rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ tid,
++ A_jail_max_caps,
++ &i_attr_val4, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_max_caps);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Get jail_scd_get from first process */
++ if (rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ tid,
++ A_jail_scd_get,
++ &i_attr_val5, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_scd_get);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Get jail_scd_modify from first process */
++ if (rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ tid,
++ A_jail_scd_modify,
++ &i_attr_val6, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_scd_modify);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Set jail_id for new process */
++ if (rsbac_set_attr(SW_JAIL,
++ T_PROCESS,
++ new_tid,
++ A_jail_id, i_attr_val1)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_id);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Set jail_ip for new process */
++ if (rsbac_set_attr(SW_JAIL,
++ T_PROCESS,
++ new_tid,
++ A_jail_ip, i_attr_val2)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_ip);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Set jail_flags for new process */
++ if (rsbac_set_attr(SW_JAIL,
++ T_PROCESS,
++ new_tid,
++ A_jail_flags, i_attr_val3)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_flags);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Set jail_max_caps for new process */
++ if (rsbac_set_attr(SW_JAIL,
++ T_PROCESS,
++ new_tid,
++ A_jail_max_caps, i_attr_val4)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_max_caps);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Set jail_scd_get for new process */
++ if (rsbac_set_attr(SW_JAIL,
++ T_PROCESS,
++ new_tid,
++ A_jail_scd_get, i_attr_val5)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_scd_get);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Set jail_scd_modify for new process */
++ if (rsbac_set_attr(SW_JAIL,
++ T_PROCESS,
++ new_tid,
++ A_jail_scd_modify,
++ i_attr_val6)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_scd_modify);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ return 0;
++ } else
++ return 0;
++
++ case R_EXECUTE:
++ switch (target) {
++ case T_FILE:
++ /* Adjust Linux caps */
++ i_tid.process = caller_pid;
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if (!rsbac_softmode)
++#endif
++ {
++ if (rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_max_caps,
++ &i_attr_val1, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_max_caps);
++ } else {
++ struct cred *override_cred;
++
++ override_cred = prepare_creds();
++ if (!override_cred)
++ return -ENOMEM;
++ override_cred->cap_permitted.cap[0] &= i_attr_val1.jail_max_caps.cap[0];
++ override_cred->cap_effective.cap[0] &= i_attr_val1.jail_max_caps.cap[0];
++ override_cred->cap_inheritable.cap[0] &= i_attr_val1.jail_max_caps.cap[0];
++ override_cred->cap_permitted.cap[1] &= i_attr_val1.jail_max_caps.cap[1];
++ override_cred->cap_effective.cap[1] &= i_attr_val1.jail_max_caps.cap[1];
++ override_cred->cap_inheritable.cap[1] &= i_attr_val1.jail_max_caps.cap[1];
++ commit_creds(override_cred);
++ }
++ }
++ return 0;
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++
++ case R_CREATE:
++ switch (target) {
++ case T_IPC:
++ /* Get jail_id from process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_id,
++ &i_attr_val1, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_id);
++ return -RSBAC_EREADFAILED;
++ }
++ if (i_attr_val1.jail_id) {
++ /* Set jail_id for new IPC */
++ if (rsbac_set_attr(SW_JAIL,
++ T_IPC,
++ tid, A_jail_id, i_attr_val1)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_id);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ return 0;
++
++#ifdef CONFIG_RSBAC_JAIL_NET_ADJUST
++ case T_NETOBJ:
++ if (!tid.netobj.sock_p) {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_jail(): NULL sock_p!\n");
++ return 0;
++ }
++ if (!tid.netobj.sock_p->ops) {
++ return 0;
++ }
++ switch (tid.netobj.sock_p->ops->family) {
++ case AF_INET:
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_ip,
++ &i_attr_val1, TRUE))) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_ip);
++ return -RSBAC_EREADFAILED;
++ }
++ if (i_attr_val1.jail_ip == INADDR_ANY)
++ return 0;
++ if ((err = rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_flags,
++ &i_attr_val2, TRUE))) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_flags);
++ return -RSBAC_EREADFAILED;
++ }
++ if (i_attr_val2.
++ jail_flags & JAIL_auto_adjust_inet_any) {
++ inet_sk(tid.netobj.sock_p->sk)->inet_rcv_saddr =
++ i_attr_val1.jail_ip;
++ inet_sk(tid.netobj.sock_p->sk)->inet_saddr =
++ i_attr_val1.jail_ip;
++ }
++ return 0;
++
++ default:
++ break;
++ }
++#endif
++
++ default:
++ return 0;
++ }
++
++ case R_BIND:
++ switch (target) {
++ case T_IPC:
++ /* Get jail_id from process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_id,
++ &i_attr_val1, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_id);
++ return -RSBAC_EREADFAILED;
++ }
++ if (i_attr_val1.jail_id) {
++ /* Set jail_id for new IPC */
++ if (rsbac_set_attr(SW_JAIL,
++ T_IPC,
++ tid, A_jail_id, i_attr_val1)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_id);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ return 0;
++
++#ifdef CONFIG_RSBAC_JAIL_NET_ADJUST
++ case T_NETOBJ:
++ if (!tid.netobj.sock_p) {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_jail(): NULL sock_p!\n");
++ return 0;
++ }
++ if (!tid.netobj.sock_p->ops) {
++ return 0;
++ }
++ switch (tid.netobj.sock_p->ops->family) {
++ case AF_INET:
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_ip,
++ &i_attr_val1, TRUE))) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_ip);
++ return -RSBAC_EREADFAILED;
++ }
++ if (i_attr_val1.jail_ip == INADDR_ANY)
++ return 0;
++ if ((err = rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_flags,
++ &i_attr_val2, TRUE))) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_flags);
++ return -RSBAC_EREADFAILED;
++ }
++ if (i_attr_val2.
++ jail_flags & JAIL_auto_adjust_inet_any) {
++ inet_sk(tid.netobj.sock_p->sk)->inet_rcv_saddr =
++ i_attr_val1.jail_ip;
++ inet_sk(tid.netobj.sock_p->sk)->inet_saddr =
++ i_attr_val1.jail_ip;
++ }
++ return 0;
++
++ default:
++ break;
++ }
++#endif
++ default:
++ return 0;
++ }
++
++ case R_CONNECT:
++ switch (target) {
++ case T_IPC:
++ if (new_target != T_IPC)
++ return 0;
++ /* Get jail_id from old IPC */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_JAIL,
++ T_IPC,
++ tid,
++ A_jail_id,
++ &i_attr_val1, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_id);
++ return -RSBAC_EREADFAILED;
++ }
++ if (i_attr_val1.jail_id) {
++ /* Set jail_id for new IPC */
++ if (rsbac_set_attr(SW_JAIL,
++ T_IPC,
++ new_tid, A_jail_id, i_attr_val1)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_jail()",
++ A_jail_id);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ return 0;
++
++ default:
++ return 0;
++ }
++
++ default:
++ return 0;
++ }
++
++ return 0;
++}
+diff --git a/rsbac/adf/jail/jail_syscalls.c b/rsbac/adf/jail/jail_syscalls.c
+new file mode 100644
+index 0000000..ed06b09
+--- /dev/null
++++ b/rsbac/adf/jail/jail_syscalls.c
+@@ -0,0 +1,301 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - JAIL module */
++/* File: rsbac/adf/jail/jail_syscalls.c */
++/* */
++/* Author and (c) 1999-2012: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 07/May/2012 */
++/*************************************************** */
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/errno.h>
++#include <linux/version.h>
++#include <linux/syscalls.h>
++#include <linux/file.h>
++#include <linux/fdtable.h>
++#include <rsbac/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/error.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/debug.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++#include <rsbac/network.h>
++#include <rsbac/jail.h>
++#include <asm/uaccess.h>
++
++static rsbac_jail_id_t next_id = 1;
++
++/* Create a jail for current process */
++/* Note: It is allowed to create jails within jails, but with restrictions */
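++/* Illustrative invocation (hypothetical values, reached through the RSBAC
++ * jail syscall from userspace, not part of the original source):
++ * rsbac_jail_sys_jail(RSBAC_JAIL_VERSION, "/srv/jail", <jail IPv4 address>,
++ * JAIL_allow_inet_localhost, max_caps, scd_get, scd_modify)
++ * chroot()s the caller to /srv/jail, binds its networking to the given
++ * address plus loopback, and limits its capabilities and SCD access; the
++ * jail attributes are then inherited by all of its children. */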
++int rsbac_jail_sys_jail(rsbac_version_t version,
++ char __user * path,
++ rsbac_jail_ip_t ip,
++ rsbac_jail_flags_t flags,
++ rsbac_cap_vector_t max_caps,
++ rsbac_jail_scd_vector_t scd_get,
++ rsbac_jail_scd_vector_t scd_modify)
++{
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ int err = 0;
++ rsbac_jail_id_t parent = 0;
++#ifdef CONFIG_RSBAC_NET
++ int chk_addr_ret;
++#endif
++
++ if(version != RSBAC_JAIL_VERSION)
++ return -RSBAC_EINVALIDVERSION;
++
++#ifdef CONFIG_RSBAC_NET
++ chk_addr_ret = inet_addr_type(&init_net, ip);
++ if (ip != INADDR_ANY &&
++ chk_addr_ret != RTN_LOCAL &&
++ chk_addr_ret != RTN_MULTICAST &&
++ chk_addr_ret != RTN_BROADCAST)
++ return -EADDRNOTAVAIL;
++#endif
++
++ /* Get jail_id for this process */
++ i_tid.process = task_pid(current);
++ if (rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_id,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_jail_sys_jail()", A_jail_id);
++ return(-RSBAC_EREADFAILED);
++ }
++
++ if (i_attr_val1.jail_id)
++ { /* this process is already in a jail -> limit ip and flags */
++ parent = i_attr_val1.jail_id;
++ if (rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_flags,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_jail_sys_jail()", A_jail_flags);
++ return(-RSBAC_EREADFAILED);
++ }
++
++ flags &= i_attr_val1.jail_flags | JAIL_allow_parent_ipc;
++ if (rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_scd_get,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_jail_sys_jail()", A_jail_scd_get);
++ return(-RSBAC_EREADFAILED);
++ }
++
++ scd_get &= i_attr_val1.jail_scd_get;
++ if (rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_scd_modify,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_jail_sys_jail()", A_jail_scd_modify);
++ return(-RSBAC_EREADFAILED);
++ }
++
++ scd_modify &= i_attr_val1.jail_scd_modify;
++ if (rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_ip,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_jail_sys_jail()", A_jail_ip);
++ return(-RSBAC_EREADFAILED);
++ }
++
++ if(i_attr_val1.jail_ip)
++ ip = i_attr_val1.jail_ip;
++
++ if (rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_max_caps,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_jail_sys_jail()", A_jail_max_caps);
++ return(-RSBAC_EREADFAILED);
++ }
++
++ max_caps.cap[0] &= i_attr_val1.jail_max_caps.cap[0];
++ max_caps.cap[1] &= i_attr_val1.jail_max_caps.cap[1];
++ }
++
++ /* check syslog id */
++ if(flags & JAIL_this_is_syslog) {
++ if( rsbac_jail_syslog_jail_id
++ && rsbac_jail_exists(rsbac_jail_syslog_jail_id)
++ )
++ return -RSBAC_EEXISTS;
++ }
++
++ if(path)
++ {
++ mm_segment_t oldfs;
++ struct file * file;
++ struct files_struct *files = current->files;
++ struct fdtable *fdt;
++ int fd;
++
++ err = sys_chroot(path);
++ if(err)
++ return err;
++ /* Set current user space to kernel space, because sys_chdir() takes name */
++ /* from user space */
++ oldfs = get_fs();
++ set_fs(KERNEL_DS);
++ err = sys_chdir("/");
++ /* Set current user space back to user space */
++ set_fs(oldfs);
++
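++ /* Any directory fd opened before the chroot() above could later be
++ * used with fchdir() to step outside the new root, so walk the fd
++ * table and close every open directory fd (see the log message below). */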
++restart:
++ rcu_read_lock();
++ fdt = files_fdtable(files);
++
++ for(fd=0; fd < fdt->max_fds; fd++)
++ {
++ file = fcheck(fd);
++ if( file
++ && file->f_dentry
++ && file->f_dentry->d_inode
++ && S_ISDIR(file->f_dentry->d_inode->i_mode)
++ )
++ {
++ char * filename;
++
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ filename = rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN + 4);
++ if(filename)
++ rsbac_get_full_path(file->f_dentry, filename, CONFIG_RSBAC_MAX_PATH_LEN);
++#else
++ filename = rsbac_kmalloc(RSBAC_MAXNAMELEN + 4);
++ if(filename)
++ rsbac_get_full_path(file->f_dentry, filename, RSBAC_MAXNAMELEN);
++#endif
++
++ rsbac_printk(KERN_INFO
++ "rsbac_jail_sys_jail(): avoid possible chroot breakout by closing open dir fd %u, inode %u, device %02u:%02u, path %s\n",
++ fd,
++ file->f_dentry->d_inode->i_ino,
++ MAJOR(file->f_dentry->d_sb->s_dev),
++ MINOR(file->f_dentry->d_sb->s_dev),
++ filename);
++ if(filename)
++ rsbac_kfree(filename);
++
++ rcu_read_unlock();
++ sys_close(fd);
++ goto restart;
++ }
++ }
++ rcu_read_unlock();
++ }
++
++ /* Set jail_id for this process - number might wrap, so better check */
++ i_attr_val1.jail_id = next_id++;
++ while (!i_attr_val1.jail_id || rsbac_jail_exists(i_attr_val1.jail_id))
++ i_attr_val1.jail_id = next_id++;
++
++ if (rsbac_set_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_id,
++ i_attr_val1))
++ {
++ rsbac_ds_set_error("rsbac_jail_sys_jail()", A_jail_id);
++ return(-RSBAC_EWRITEFAILED);
++ }
++
++ if (flags & JAIL_this_is_syslog) {
++ rsbac_jail_syslog_jail_id = i_attr_val1.jail_id;
++ }
++
++ /* Set jail_parent for this process */
++ i_attr_val1.jail_parent = parent;
++ if (rsbac_set_attr(SW_JAIL, T_PROCESS, i_tid, A_jail_parent, i_attr_val1)) {
++ rsbac_ds_set_error("rsbac_jail_sys_jail()", A_jail_parent);
++ return (-RSBAC_EWRITEFAILED);
++ }
++
++ /* Set jail_ip for this process */
++ i_attr_val1.jail_ip = ip;
++ if (rsbac_set_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_ip,
++ i_attr_val1))
++ {
++ rsbac_ds_set_error("rsbac_jail_sys_jail()", A_jail_ip);
++ return(-RSBAC_EWRITEFAILED);
++ }
++
++ /* Set jail_flags for this process */
++ i_attr_val1.jail_flags = flags;
++ if (rsbac_set_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_flags,
++ i_attr_val1))
++ {
++ rsbac_ds_set_error("rsbac_jail_sys_jail()", A_jail_flags);
++ return(-RSBAC_EWRITEFAILED);
++ }
++
++ /* Set jail_max_caps for this process */
++ i_attr_val1.jail_max_caps.cap[0] = max_caps.cap[0];
++ i_attr_val1.jail_max_caps.cap[1] = max_caps.cap[1];
++ if (rsbac_set_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_max_caps,
++ i_attr_val1))
++ {
++ rsbac_ds_set_error("rsbac_jail_sys_jail()", A_jail_max_caps);
++ return(-RSBAC_EWRITEFAILED);
++ }
++
++ /* Set jail_scd_get for this process */
++ i_attr_val1.jail_scd_get = scd_get;
++ if (rsbac_set_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_scd_get,
++ i_attr_val1))
++ {
++ rsbac_ds_set_error("rsbac_jail_sys_jail()", A_jail_scd_get);
++ return(-RSBAC_EWRITEFAILED);
++ }
++
++ /* Set jail_scd_modify for this process */
++ i_attr_val1.jail_scd_modify = scd_modify;
++ if (rsbac_set_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_scd_modify,
++ i_attr_val1))
++ {
++ rsbac_ds_set_error("rsbac_jail_sys_jail()", A_jail_scd_modify);
++ return(-RSBAC_EWRITEFAILED);
++ }
++ return err;
++}
+diff --git a/rsbac/adf/mac/Makefile b/rsbac/adf/mac/Makefile
+new file mode 100644
+index 0000000..fdc3655
+--- /dev/null
++++ b/rsbac/adf/mac/Makefile
+@@ -0,0 +1,13 @@
++#
++# File: rsbac/adf/mac/Makefile
++#
++# Makefile for the Linux rsbac mac decision module.
++#
++# Author and (c) 1999-2012 Amon Ott <ao@rsbac.org>
++#
++
++obj-y := mac_syscalls.o
++# decisions only in non-maint mode
++ifneq ($(CONFIG_RSBAC_MAINT),y)
++obj-y += mac_main.o
++endif
+diff --git a/rsbac/adf/mac/mac_main.c b/rsbac/adf/mac/mac_main.c
+new file mode 100644
+index 0000000..14cc950
+--- /dev/null
++++ b/rsbac/adf/mac/mac_main.c
+@@ -0,0 +1,4900 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - Mandatory Access Control */
++/* File: rsbac/adf/mac/mac_main.c */
++/* */
++/* Author and (c) 1999-2011: Amon Ott <ao@rsbac.org> */
++/* MAC_LIGHT Modifications (c) 2000 Stanislav Ievlev */
++/* and (c) 2001 Amon Ott */
++/* */
++/* Last modified: 12/Jul/2011 */
++/*************************************************** */
++
++#include <linux/string.h>
++#include <rsbac/aci.h>
++#include <rsbac/mac.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++#include <rsbac/debug.h>
++#include <rsbac/rkmem.h>
++
++/************************************************* */
++/* Global Variables */
++/************************************************* */
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++static enum rsbac_adf_req_ret_t
++mac_check_role(rsbac_uid_t owner,
++ enum rsbac_system_role_t role)
++{
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_MAC,
++ T_USER,
++ i_tid, A_mac_role, &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error("mac_check_role", A_mac_role);
++ return (NOT_GRANTED);
++ }
++ /* if correct role, then grant */
++ if (i_attr_val1.system_role == role)
++ return (GRANTED);
++ else {
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: wrong mac_role %u -> NOT_GRANTED!\n",
++ current->pid, current->comm,
++ i_attr_val1.system_role);
++ return (NOT_GRANTED);
++ }
++}
++
++/* auto_write() */
++/* This function builds a decision for write-only access based on */
++/* ss-property and *-property. The Subject is given by process-id pid, */
++/* its attributes are taken from the data structures module. */
++/* For the object, only security_level is given to become independent */
++/* from different object/target types. */
++/* If attribute mac_auto is set, the current_security_level is changed */
++/* within min_write and max_read boundaries to allow for more accesses.*/
++/* If set_level is TRUE, the current_security_level and read/write */
++/* boundaries are set to appropriate values, otherwise they are only   */
++/* checked. This provides only one function for decision and attribute */
++/* setting. */
++/* Trusted processes (attr. mac_trusted set) are always granted write */
++/* access. */
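++
++/* A minimal sketch of the write decision described above, reduced to the
++ * level dimension and using hypothetical ex_* names in place of the real
++ * rsbac_* types and flags. In the real code the category vector is checked
++ * the same way with bitmask subset tests, and an object-side mac_file_flags
++ * fallback is consulted before denying. */
++#if 0	/* illustration only, never compiled */
++struct ex_subject {
++	unsigned int min_level, max_level;	/* clearance range */
++	unsigned int curr;			/* current (floating) level */
++	unsigned int max_read;			/* highest level read so far */
++	unsigned int min_write;			/* lowest level written so far */
++	unsigned int auto_flag:1, write_up:1, write_down:1, trusted:1;
++};
++
++static int ex_may_write(struct ex_subject *s, unsigned int obj)
++{
++	if (obj < s->min_level || obj > s->max_level)
++		return 0;		/* outside the clearance range */
++	if (obj == s->curr)
++		return 1;		/* same level: nothing to do */
++	if (s->auto_flag && (obj > s->curr || obj >= s->max_read)) {
++		/* float the current level to the object and keep the
++		 * low-water mark of written levels up to date */
++		s->curr = obj;
++		if (s->curr < s->min_write)
++			s->min_write = s->curr;
++		return 1;
++	}
++	if (obj > s->curr)		/* writing up */
++		return s->write_up || s->trusted;
++	return s->write_down || s->trusted;	/* writing down */
++}
++#endif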
++
++static enum rsbac_adf_req_ret_t
++auto_write_attr(rsbac_pid_t pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t t_level_attr,
++ enum rsbac_attribute_t t_cat_attr,
++ rsbac_boolean_t set_level)
++{
++ rsbac_security_level_t curr_level;
++ rsbac_mac_category_vector_t curr_categories;
++ rsbac_security_level_t target_sec_level;
++ rsbac_mac_category_vector_t target_categories;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t attr_val1;
++ union rsbac_attribute_value_t attr_val2;
++ rsbac_mac_process_flags_t flags;
++ rsbac_boolean_t mac_auto_used_level = FALSE;
++ rsbac_boolean_t mac_auto_used_cat = FALSE;
++ rsbac_boolean_t raise_object_level = FALSE;
++ rsbac_boolean_t raise_object_cat = FALSE;
++
++ /* first check for mac_override, which allows everything */
++ i_tid.process = pid;
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_mac_process_flags, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_write", A_none);
++ return (NOT_GRANTED);
++ }
++ flags = attr_val1.mac_process_flags;
++ if (flags & MAC_override)
++ return GRANTED;
++
++ /* Get current security level */
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_current_sec_level, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_write", A_none);
++ return (NOT_GRANTED);
++ }
++ curr_level = attr_val1.security_level;
++ /* Get current categories */
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_mac_curr_categories, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_write", A_none);
++ return (NOT_GRANTED);
++ }
++ curr_categories = attr_val1.mac_categories;
++ /* Get target security level */
++ if (rsbac_get_attr(SW_MAC, target, tid, t_level_attr, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_write", A_none);
++ return (NOT_GRANTED);
++ }
++ target_sec_level = attr_val1.security_level;
++ /* Get target categories */
++ if (rsbac_get_attr(SW_MAC, target, tid, t_cat_attr, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_write", A_none);
++ return (NOT_GRANTED);
++ }
++ target_categories = attr_val1.mac_categories;
++
++ if (target_sec_level > curr_level) {
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_security_level, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_write", A_none);
++ return (NOT_GRANTED);
++ }
++ if (attr_val1.security_level < target_sec_level) {
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: security_level %u under target_sec_level %u, no override -> NOT_GRANTED!\n",
++ current->pid, current->comm,
++ attr_val1.security_level,
++ target_sec_level);
++ return (NOT_GRANTED);
++ }
++ /* curr_level < target_level <= max_level -> need mac_auto,
++ * write_up, trusted (at process)
++ * or shared (at object) */
++ if (flags & MAC_auto)
++ mac_auto_used_level = TRUE;
++ else {
++ if (!(flags & MAC_write_up)
++ && !(flags & MAC_trusted)
++ ) {
++ /* Try mac_file_flags on the target,
++ * if FD object */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_file_flags, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("mac_auto_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if ((attr_val1.
++ mac_file_flags & MAC_write_up)
++ || (attr_val1.
++ mac_file_flags &
++ MAC_trusted)
++ ) {
++ break;
++ }
++ /* fall through */
++
++ default:
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: current security_level %u under target_sec_level %u, no auto, write_up, trusted -> NOT_GRANTED!\n",
++ current->pid,
++ current->comm,
++ curr_level,
++ target_sec_level);
++ return (NOT_GRANTED);
++ }
++ }
++ }
++ } else if (target_sec_level < curr_level) {
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_min_security_level, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_write", A_none);
++ return (NOT_GRANTED);
++ }
++ if (attr_val1.security_level > target_sec_level) {
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: min_security_level %u over target_sec_level %u, no override -> NOT_GRANTED!\n",
++ current->pid,
++ current->comm, attr_val1.security_level,
++ target_sec_level);
++ return (NOT_GRANTED);
++ }
++ /* min_level <= target_level < curr_level -> need mac_auto,
++ * write_down or trusted */
++ if (flags & MAC_auto) {
++ /* check max_read boundary */
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_max_read_open, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (attr_val1.security_level > target_sec_level) {
++ if (!(flags & MAC_write_down)
++ && !(flags & MAC_trusted)
++ ) {
++ /* Try mac_file_flags on the target,
++ * if FD object */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_file_flags, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("mac_auto_write",
++ A_none);
++ return
++ (NOT_GRANTED);
++ }
++ if ((attr_val1.
++ mac_file_flags &
++ MAC_write_down)
++ || (attr_val1.
++ mac_file_flags &
++ MAC_trusted)
++ ) {
++ if (attr_val1.
++ mac_file_flags
++ & MAC_auto) {
++ raise_object_level
++ = TRUE;
++ }
++ break;
++ }
++ /* fall through */
++
++ default:
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: max_read_open %u over target_sec_level %u, no write_down or trusted -> NOT_GRANTED!\n",
++ current->pid,
++ current->comm,
++ attr_val1.
++ security_level,
++ target_sec_level);
++ return (NOT_GRANTED);
++ }
++ }
++ } else
++ mac_auto_used_level = TRUE;
++ } else {
++ if (!(flags & MAC_write_down)
++ && !(flags & MAC_trusted)
++ ) {
++ /* Try mac_file_flags on the target,
++ * if FD object */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_file_flags, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("mac_auto_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if ((attr_val1.
++ mac_file_flags &
++ MAC_write_down)
++ || (attr_val1.
++ mac_file_flags &
++ MAC_trusted)
++ ) {
++ if (attr_val1.
++ mac_file_flags &
++ MAC_auto) {
++ raise_object_level
++ = TRUE;
++ }
++ break;
++ }
++ /* fall through */
++
++ default:
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: current security_level %u over target_sec_level %u, no auto, write_down or trusted -> NOT_GRANTED!\n",
++ current->pid,
++ current->comm,
++ curr_level,
++ target_sec_level);
++ return (NOT_GRANTED);
++ }
++ }
++ }
++ }
++
++ if ((target_categories & curr_categories) != target_categories) {
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_mac_categories, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_write", A_none);
++ return (NOT_GRANTED);
++ }
++ if ((target_categories & attr_val1.mac_categories) !=
++ target_categories) {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_adf_mac) {
++ char *tmp =
++ rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ char *tmp2 =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp2) {
++ u64tostrmac(tmp,
++ attr_val1.
++ mac_categories);
++ u64tostrmac(tmp2,
++ target_categories);
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: max_categories %s under target categories %s, no override -> NOT_GRANTED!\n",
++ current->pid,
++ current->comm,
++ tmp, tmp2);
++ rsbac_kfree(tmp2);
++ }
++ rsbac_kfree(tmp);
++ }
++ }
++#endif
++ return (NOT_GRANTED);
++ }
++ /* curr_categories < target_categories <= max_categories -> need mac_auto,
++ * write_up or trusted */
++ if (flags & MAC_auto)
++ mac_auto_used_cat = TRUE;
++ else {
++ if (!(flags & MAC_write_up)
++ && !(flags & MAC_trusted)
++ ) {
++ /* Try mac_file_flags on the target,
++ * if FD object */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_file_flags, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("mac_auto_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if ((attr_val1.
++ mac_file_flags & MAC_write_up)
++ || (attr_val1.
++ mac_file_flags &
++ MAC_trusted)
++ )
++ break;
++ /* fall through */
++
++ default:
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_adf_mac) {
++ char *tmp =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ char *tmp2 =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp2) {
++ u64tostrmac
++ (tmp,
++ curr_categories);
++ u64tostrmac
++ (tmp2,
++ target_categories);
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: curr_categories %s under target categories %s, no auto, write_up or trusted -> NOT_GRANTED!\n",
++ current->
++ pid,
++ current->
++ comm,
++ tmp,
++ tmp2);
++ rsbac_kfree
++ (tmp2);
++ }
++ rsbac_kfree(tmp);
++ }
++ }
++#endif
++ return (NOT_GRANTED);
++ }
++ }
++ }
++ } else
++ if ((target_categories & curr_categories) != curr_categories) {
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_mac_min_categories, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_write", A_none);
++ return (NOT_GRANTED);
++ }
++ if ((target_categories & attr_val1.mac_categories) !=
++ attr_val1.mac_categories) {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_adf_mac) {
++ char *tmp =
++ rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ char *tmp2 =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp2) {
++ u64tostrmac(tmp,
++ attr_val1.
++ mac_categories);
++ u64tostrmac(tmp2,
++ target_categories);
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: min_categories %s over target categories %s, no override -> NOT_GRANTED!\n",
++ current->pid,
++ current->comm,
++ tmp, tmp2);
++ rsbac_kfree(tmp2);
++ }
++ rsbac_kfree(tmp);
++ }
++ }
++#endif
++ return (NOT_GRANTED);
++ }
++ /* min_level <= target_level < curr_level -> need mac_auto,
++ * write_down or trusted */
++ if (flags & MAC_auto) {
++ /* check max_read boundary */
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_max_read_categories, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if ((target_categories & attr_val1.
++ mac_categories) != attr_val1.mac_categories) {
++ if (!(flags & MAC_write_down)
++ && !(flags & MAC_trusted)
++ ) {
++ /* Try mac_file_flags on the target,
++ * if FD object */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_file_flags, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("mac_auto_write",
++ A_none);
++ return
++ (NOT_GRANTED);
++ }
++ if ((attr_val1.
++ mac_file_flags &
++ MAC_write_down)
++ || (attr_val1.
++ mac_file_flags &
++ MAC_trusted)
++ ) {
++ if (attr_val1.
++ mac_file_flags
++ & MAC_auto) {
++ raise_object_cat
++ = TRUE;
++ }
++ break;
++ }
++ /* fall through */
++
++ default:
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_adf_mac) {
++ char *tmp =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ char *tmp2
++ =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp2) {
++ u64tostrmac
++ (tmp,
++ attr_val1.
++ mac_categories);
++ u64tostrmac
++ (tmp2,
++ target_categories);
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: max_read_categories %s over target categories %s, no write_down or trusted -> NOT_GRANTED!\n",
++ current->
++ pid,
++ current->
++ comm,
++ tmp,
++ tmp2);
++ rsbac_kfree
++ (tmp2);
++ }
++ rsbac_kfree
++ (tmp);
++ }
++ }
++#endif
++ return (NOT_GRANTED);
++ }
++ }
++ } else
++ mac_auto_used_cat = TRUE;
++ } else {
++ if (!(flags & MAC_write_down)
++ && !(flags & MAC_trusted)
++ ) {
++ /* Try mac_file_flags on the target, if FD object */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_file_flags, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("mac_auto_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if ((attr_val1.
++ mac_file_flags &
++ MAC_write_down)
++ || (attr_val1.
++ mac_file_flags &
++ MAC_trusted)
++ ) {
++ if (attr_val1.
++ mac_file_flags &
++ MAC_auto) {
++ raise_object_cat =
++ TRUE;
++ }
++ break;
++ }
++ /* fall through */
++
++ default:
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_adf_mac) {
++ char *tmp =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ char *tmp2 =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp2) {
++ u64tostrmac
++ (tmp,
++ curr_categories);
++ u64tostrmac
++ (tmp2,
++ target_categories);
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: curr_categories %s over target categories %s, no auto, write_down or trusted -> NOT_GRANTED!\n",
++ current->
++ pid,
++ current->
++ comm,
++ tmp,
++ tmp2);
++ rsbac_kfree
++ (tmp2);
++ }
++ rsbac_kfree(tmp);
++ }
++ }
++#endif
++ return (NOT_GRANTED);
++ }
++ }
++ }
++ }
++
++ /* grant area */
++
++ /* adjust current_sec_level and min_write_level, */
++ /* if set_level is true and mac_auto has been used */
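++	/* (min_write_open is the low-water mark of levels written via mac_auto
++	 *  and later bounds how far reads may float the level up; max_read_open
++	 *  is the matching high-water mark of reads and bounds floating down
++	 *  for writes.) */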
++ if (set_level && (mac_auto_used_level || raise_object_level)
++ ) {
++#ifdef CONFIG_RSBAC_MAC_LOG_LEVEL_CHANGE
++ {
++ char *target_type_name;
++ char *target_id_name;
++
++ target_type_name = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (target_type_name) {
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ target_id_name
++ =
++ rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN
++ + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++#else
++ target_id_name =
++ rsbac_kmalloc(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++#endif
++ if (target_id_name) {
++ get_target_name(target_type_name,
++ target,
++ target_id_name,
++ tid);
++
++ if (mac_auto_used_level) {
++ rsbac_printk(KERN_INFO "mac_auto_write(): Changing process %u (%.15s, owner %u) current level from %u to %u for %s %s\n",
++ pid,
++ current->comm,
++ current_uid(),
++ curr_level,
++ target_sec_level,
++ target_type_name,
++ target_id_name);
++ } else {
++ rsbac_printk(KERN_INFO "mac_auto_write(): Process %u (%.15s, owner %u): Raising object level from %u to %u for %s %s\n",
++ pid,
++ current->comm,
++ current_uid(),
++ target_sec_level,
++ curr_level,
++ target_type_name,
++ target_id_name);
++ }
++ rsbac_kfree(target_id_name);
++ }
++ rsbac_kfree(target_type_name);
++ }
++ }
++#endif
++ if (mac_auto_used_level) {
++ i_tid.process = pid;
++ attr_val1.current_sec_level = target_sec_level;
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_min_write_open, &attr_val2, TRUE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (attr_val1.min_write_open <
++ attr_val2.min_write_open) {
++ if (rsbac_set_attr(SW_MAC, T_PROCESS, i_tid, A_min_write_open, attr_val1)) { /* failed! */
++ rsbac_ds_set_error
++ ("mac_auto_write", A_none);
++ return (NOT_GRANTED);
++ }
++ }
++ if (rsbac_set_attr(SW_MAC, T_PROCESS, i_tid, A_current_sec_level, attr_val1)) { /* failed! */
++ rsbac_ds_set_error("mac_auto_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ } else {
++ attr_val1.security_level = curr_level;
++ if (rsbac_set_attr(SW_MAC, target, tid, A_security_level, attr_val1)) { /* failed! */
++ rsbac_ds_set_error("mac_auto_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ }
++ }
++ /* adjust current_categories and min_write_categories, */
++ /* if set_level is true and mac_auto has been used */
++ if (set_level && (mac_auto_used_cat || raise_object_cat)
++ ) {
++#ifdef CONFIG_RSBAC_MAC_LOG_LEVEL_CHANGE
++ {
++ char *target_type_name =
++ rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (target_type_name) {
++ char *target_id_name;
++
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ target_id_name
++ =
++ rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN
++ + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++#else
++ target_id_name =
++ rsbac_kmalloc(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++#endif
++ if (target_id_name) {
++ char *tmp1 =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++ if (tmp1) {
++ char *tmp2 =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++ if (tmp2) {
++ get_target_name
++ (target_type_name,
++ target,
++ target_id_name,
++ tid);
++
++ if (mac_auto_used_cat) {
++ rsbac_printk
++ (KERN_INFO "mac_auto_write(): Changing process %u (%.15s, owner %u) current categories from %s to %s for %s %s\n",
++ pid,
++ current->
++ comm,
++ current_uid(),
++ u64tostrmac
++ (tmp1,
++ curr_categories),
++ u64tostrmac
++ (tmp2,
++ target_categories),
++ target_type_name,
++ target_id_name);
++ } else {
++ rsbac_printk
++ (KERN_INFO "mac_auto_write(): Process %u (%.15s, owner %u): raising current categories from %s to %s for %s %s\n",
++ pid,
++ current->
++ comm,
++ current_uid(),
++ u64tostrmac
++ (tmp2,
++ target_categories),
++ u64tostrmac
++ (tmp1,
++ curr_categories),
++ target_type_name,
++ target_id_name);
++ }
++ rsbac_kfree(tmp2);
++ }
++ rsbac_kfree(tmp1);
++ }
++ rsbac_kfree(target_id_name);
++ }
++ rsbac_kfree(target_type_name);
++ }
++ }
++#endif
++ if (mac_auto_used_cat) {
++ i_tid.process = pid;
++ attr_val1.mac_categories = target_categories;
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_min_write_categories, &attr_val2, TRUE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if ((attr_val1.mac_categories & attr_val2.
++ mac_categories)
++ != attr_val2.mac_categories) {
++ if (rsbac_set_attr(SW_MAC, T_PROCESS, i_tid, A_min_write_categories, attr_val1)) { /* failed! */
++ rsbac_ds_set_error
++ ("mac_auto_write", A_none);
++ return (NOT_GRANTED);
++ }
++ }
++ if (rsbac_set_attr(SW_MAC, T_PROCESS, i_tid, A_mac_curr_categories, attr_val1)) { /* failed! */
++ rsbac_ds_set_error("mac_auto_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ } else {
++ attr_val1.mac_categories = curr_categories;
++ if (rsbac_set_attr(SW_MAC, target, tid, A_mac_categories, attr_val1)) { /* failed! */
++ rsbac_ds_set_error("mac_auto_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ }
++ }
++
++ /* Everything done, so return */
++ return (GRANTED);
++}
++
++static enum rsbac_adf_req_ret_t
++auto_write(rsbac_pid_t pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid, rsbac_boolean_t set_level)
++{
++ return auto_write_attr(pid,
++ target,
++ tid,
++ A_security_level,
++ A_mac_categories, set_level);
++}
++
++/* auto_read() */
++/* This function works similar to auto_write() */
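++
++/* A minimal sketch of the matching read decision (simple-security property),
++ * again with hypothetical ex_* names: reading up needs mac_auto (bounded by
++ * the lowest level written so far), read_up or - with
++ * CONFIG_RSBAC_MAC_TRUSTED_READ - trusted; reading down is always allowed. */
++#if 0	/* illustration only, never compiled */
++static int ex_may_read(unsigned int curr, unsigned int max_level,
++		       unsigned int min_write, unsigned int obj,
++		       int auto_flag, int read_up, int trusted_read)
++{
++	if (obj > max_level)
++		return 0;		/* above the clearance range */
++	if (obj > curr) {		/* reading up */
++		if (auto_flag && obj <= min_write)
++			return 1;	/* current level may float up */
++		return read_up || trusted_read;
++	}
++	return 1;			/* reading down or at level */
++}
++#endif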
++
++static enum rsbac_adf_req_ret_t
++auto_read_attr(rsbac_pid_t pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t t_level_attr,
++ enum rsbac_attribute_t t_cat_attr,
++ rsbac_boolean_t set_level)
++{
++ rsbac_security_level_t curr_level;
++ rsbac_mac_category_vector_t curr_categories;
++ rsbac_security_level_t target_sec_level;
++ rsbac_mac_category_vector_t target_categories;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t attr_val1;
++ union rsbac_attribute_value_t attr_val2;
++ rsbac_mac_process_flags_t flags;
++ rsbac_boolean_t mac_auto_used_level = FALSE;
++ rsbac_boolean_t mac_auto_used_cat = FALSE;
++ rsbac_boolean_t set_level_level = FALSE;
++ rsbac_boolean_t set_level_cat = FALSE;
++
++ /* first check for mac_override, which allows everything */
++ i_tid.process = pid;
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_mac_process_flags, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read", A_none);
++ return (NOT_GRANTED);
++ }
++ flags = attr_val1.mac_process_flags;
++ if (flags & MAC_override)
++ return GRANTED;
++
++ /* Get current security level */
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_current_sec_level, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read", A_none);
++ return (NOT_GRANTED);
++ }
++ curr_level = attr_val1.security_level;
++ /* Get current categories */
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_mac_curr_categories, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read", A_none);
++ return (NOT_GRANTED);
++ }
++ curr_categories = attr_val1.mac_categories;
++ /* Get target security level */
++ if (rsbac_get_attr(SW_MAC, target, tid, t_level_attr, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read", A_none);
++ return (NOT_GRANTED);
++ }
++ target_sec_level = attr_val1.security_level;
++ /* Get target categories */
++ if (rsbac_get_attr(SW_MAC, target, tid, t_cat_attr, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read", A_none);
++ return (NOT_GRANTED);
++ }
++ target_categories = attr_val1.mac_categories;
++
++ if (target_sec_level > curr_level) {
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_security_level, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read", A_none);
++ return (NOT_GRANTED);
++ }
++ if (attr_val1.security_level < target_sec_level) {
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: security_level %u under target_sec_level %u, no override -> NOT_GRANTED!\n",
++ current->pid, current->comm,
++ attr_val1.security_level,
++ target_sec_level);
++ return (NOT_GRANTED);
++ }
++ /* curr_level < target_level <= max_level -> need mac_auto, read_up or trusted (with read option) */
++ if (flags & MAC_auto) {
++ /* check min_write boundary */
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_min_write_open, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (attr_val1.security_level < target_sec_level) {
++ if (!(flags & MAC_read_up)
++#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ
++ && !(flags & MAC_trusted)
++#endif
++ ) {
++ /* Try mac_file_flags on the target, if FD object */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_file_flags, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("mac_auto_read",
++ A_none);
++ return
++ (NOT_GRANTED);
++ }
++ if ((attr_val1.
++ mac_file_flags &
++ MAC_read_up)
++#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ
++ || (attr_val1.
++ mac_file_flags &
++ MAC_trusted)
++#endif
++ ) {
++ break;
++ }
++ /* fall through */
++
++ default:
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: min_write_open %u under target_sec_level %u, no read_up or trusted -> NOT_GRANTED!\n",
++ current->pid,
++ current->comm,
++ attr_val1.
++ security_level,
++ target_sec_level);
++ return (NOT_GRANTED);
++ }
++ }
++ } else {
++ mac_auto_used_level = TRUE;
++ set_level_level = TRUE;
++ }
++ } else {
++ if (!(flags & MAC_read_up)
++#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ
++ && !(flags & MAC_trusted)
++#endif
++ ) {
++ /* Try mac_file_flags on the target, if FD object */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_file_flags, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("mac_auto_read",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if ((attr_val1.
++ mac_file_flags & MAC_read_up)
++#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ
++ || (attr_val1.
++ mac_file_flags &
++ MAC_trusted)
++#endif
++ ) {
++ break;
++ }
++ /* fall through */
++
++ default:
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: current level %u under target_sec_level %u, no auto, read_up or trusted -> NOT_GRANTED!\n",
++ current->pid,
++ current->comm,
++ curr_level,
++ target_sec_level);
++ return (NOT_GRANTED);
++ }
++ }
++ }
++ } else if (target_sec_level < curr_level) {
++ if (flags & MAC_auto) {
++ mac_auto_used_level = TRUE;
++ }
++ }
++ if ((target_categories & curr_categories) != target_categories) {
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_mac_categories, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read", A_none);
++ return (NOT_GRANTED);
++ }
++ if ((target_categories & attr_val1.mac_categories) !=
++ target_categories) {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_adf_mac) {
++ char *tmp =
++ rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ char *tmp2 =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp2) {
++ u64tostrmac(tmp,
++ attr_val1.
++ mac_categories);
++ u64tostrmac(tmp2,
++ target_categories);
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: max_categories %s under target categories %s, no override -> NOT_GRANTED!\n",
++ current->pid,
++ current->comm,
++ tmp, tmp2);
++ rsbac_kfree(tmp2);
++ }
++ rsbac_kfree(tmp);
++ }
++ }
++#endif
++ return (NOT_GRANTED);
++ }
++ /* curr_categories < target_categories <= max_categories -> need mac_auto,
++ * read_up or trusted */
++ if (flags & MAC_auto) {
++ /* check min_write boundary */
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_min_write_categories, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if ((target_categories & attr_val1.
++ mac_categories) != target_categories) {
++ if (!(flags & MAC_read_up)
++#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ
++ && !(flags & MAC_trusted)
++#endif
++ ) {
++ /* Try mac_file_flags on the target,
++ * if FD object */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_file_flags, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("mac_auto_read",
++ A_none);
++ return
++ (NOT_GRANTED);
++ }
++ if ((attr_val1.
++ mac_file_flags &
++ MAC_read_up)
++#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ
++ || (attr_val1.
++ mac_file_flags &
++ MAC_trusted)
++#endif
++ ) {
++ break;
++ }
++ /* fall through */
++
++ default:
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_adf_mac) {
++ char *tmp =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ char *tmp2
++ =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp2) {
++ u64tostrmac
++ (tmp,
++ attr_val1.
++ mac_categories);
++ u64tostrmac
++ (tmp2,
++ target_categories);
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: min_write_categories %s under target categories %s, no read_up or trusted with read option -> NOT_GRANTED!\n",
++ current->
++ pid,
++ current->
++ comm,
++ tmp,
++ tmp2);
++ rsbac_kfree
++ (tmp2);
++ }
++ rsbac_kfree
++ (tmp);
++ }
++ }
++#endif
++ return (NOT_GRANTED);
++ }
++ }
++ } else {
++ mac_auto_used_cat = TRUE;
++ set_level_cat = TRUE;
++ }
++ } else {
++ if (!(flags & MAC_read_up)
++#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ
++ && !(flags & MAC_trusted)
++#endif
++ ) {
++ /* Try mac_file_flags on the target,
++ * if FD object */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_file_flags, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("mac_auto_read",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if ((attr_val1.
++ mac_file_flags & MAC_read_up)
++#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ
++ || (attr_val1.
++ mac_file_flags &
++ MAC_trusted)
++#endif
++ ) {
++ break;
++ }
++ /* fall through */
++
++ default:
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_adf_mac) {
++ char *tmp =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ char *tmp2 =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp2) {
++ u64tostrmac
++ (tmp,
++ curr_categories);
++ u64tostrmac
++ (tmp2,
++ target_categories);
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: curr_categories %s under target categories %s, no auto, read_up or trusted with read option -> NOT_GRANTED!\n",
++ current->
++ pid,
++ current->
++ comm,
++ tmp,
++ tmp2);
++ rsbac_kfree
++ (tmp2);
++ }
++ rsbac_kfree(tmp);
++ }
++ }
++#endif
++ return (NOT_GRANTED);
++ }
++ }
++ }
++ } else
++ if ((target_categories & curr_categories) != curr_categories) {
++ if (flags & MAC_auto) {
++			mac_auto_used_cat = TRUE;
++ }
++ }
++
++ /* grant area */
++
++ /* adjust current_sec_level and max_read_level, */
++ /* if set_level is true and mac_auto has been used */
++ if (set_level && mac_auto_used_level) {
++ i_tid.process = pid;
++ attr_val1.current_sec_level = target_sec_level;
++ if (set_level_level) {
++#ifdef CONFIG_RSBAC_MAC_LOG_LEVEL_CHANGE
++ char *target_type_name;
++ char *target_id_name;
++
++ target_type_name = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (target_type_name) {
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ target_id_name
++ =
++ rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN
++ + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++#else
++ target_id_name =
++ rsbac_kmalloc(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++#endif
++ if (target_id_name) {
++ get_target_name(target_type_name,
++ target,
++ target_id_name,
++ tid);
++
++ rsbac_printk(KERN_INFO "mac_auto_read(): Changing process %u (%.15s, owner %u) current level from %u to %u for %s %s\n",
++ pid,
++ current->comm,
++ current_uid(),
++ curr_level,
++ target_sec_level,
++ target_type_name,
++ target_id_name);
++ rsbac_kfree(target_id_name);
++ }
++ rsbac_kfree(target_type_name);
++ }
++#endif
++ if (rsbac_set_attr(SW_MAC, T_PROCESS, i_tid, A_current_sec_level, attr_val1)) { /* failed! */
++ rsbac_ds_set_error("mac_auto_read",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ }
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_max_read_open, &attr_val2, TRUE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read", A_none);
++ return (NOT_GRANTED);
++ }
++ if (attr_val1.max_read_open > attr_val2.max_read_open) {
++ if (rsbac_set_attr(SW_MAC, T_PROCESS, i_tid, A_max_read_open, attr_val1)) { /* failed! */
++ rsbac_ds_set_error("mac_auto_read",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ }
++ }
++ /* adjust current_categories and max_read_categories, */
++ /* if set_level is true and mac_auto has been used */
++ if (set_level && mac_auto_used_cat) {
++ i_tid.process = pid;
++ attr_val1.mac_categories = target_categories;
++ if (set_level_cat) {
++#ifdef CONFIG_RSBAC_MAC_LOG_LEVEL_CHANGE
++ char *target_type_name =
++ rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (target_type_name) {
++ char *target_id_name;
++
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ target_id_name
++ =
++ rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN
++ + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++#else
++ target_id_name =
++ rsbac_kmalloc(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++#endif
++ if (target_id_name) {
++ char *tmp1 =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++ if (tmp1) {
++ char *tmp2 =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++ if (tmp2) {
++ get_target_name
++ (target_type_name,
++ target,
++ target_id_name,
++ tid);
++
++ rsbac_printk
++							    (KERN_INFO "mac_auto_read(): Changing process %u (%.15s, owner %u) current categories from %s to %s for %s %s\n",
++ pid,
++ current->comm,
++ current_uid(),
++ u64tostrmac
++ (tmp1,
++ curr_categories),
++ u64tostrmac
++ (tmp2,
++ target_categories),
++ target_type_name,
++ target_id_name);
++ rsbac_kfree(tmp2);
++ }
++ rsbac_kfree(tmp1);
++ }
++ rsbac_kfree(target_id_name);
++ }
++ rsbac_kfree(target_type_name);
++ }
++#endif
++ if (rsbac_set_attr(SW_MAC, T_PROCESS, i_tid, A_mac_curr_categories, attr_val1)) { /* failed! */
++ rsbac_ds_set_error("mac_auto_read",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ }
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_max_read_categories, &attr_val2, TRUE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read", A_none);
++ return (NOT_GRANTED);
++ }
++ if ((attr_val1.mac_categories & attr_val2.mac_categories)
++ != attr_val1.mac_categories) {
++ if (rsbac_set_attr(SW_MAC, T_PROCESS, i_tid, A_max_read_categories, attr_val1)) { /* failed! */
++ rsbac_ds_set_error("mac_auto_read",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ }
++ }
++
++ /* Everything done, so return */
++ return (GRANTED);
++}
++
++static enum rsbac_adf_req_ret_t
++auto_read(rsbac_pid_t pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid, rsbac_boolean_t set_level)
++{
++ return auto_read_attr(pid,
++ target,
++ tid,
++ A_security_level,
++ A_mac_categories, set_level);
++}
++
++
++/* auto-read-write() */
++/* combines auto-read and auto-write */
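++
++/* A minimal sketch of the combined check, hypothetical names as above: a
++ * read-write open must satisfy both directions at once, so with mac_auto the
++ * object level has to lie between the high-water mark of reads and the
++ * low-water mark of writes; otherwise only trusted (or the explicit
++ * write_up/read_up/write_down combinations handled in the real function) can
++ * grant it. Clearance-range and category checks are omitted here. */
++#if 0	/* illustration only, never compiled */
++static int ex_may_read_write(unsigned int curr, unsigned int obj,
++			     unsigned int max_read, unsigned int min_write,
++			     int auto_flag, int trusted)
++{
++	if (obj == curr)
++		return 1;
++	if (auto_flag && obj >= max_read && obj <= min_write)
++		return 1;		/* level may float to obj */
++	return trusted;			/* explicit flag combinations omitted */
++}
++#endif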
++
++static enum rsbac_adf_req_ret_t
++auto_read_write_attr(rsbac_pid_t pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t t_level_attr,
++ enum rsbac_attribute_t t_cat_attr,
++ rsbac_boolean_t set_level)
++{
++ rsbac_security_level_t curr_level;
++ rsbac_mac_category_vector_t curr_categories;
++ rsbac_security_level_t target_sec_level;
++ rsbac_mac_category_vector_t target_categories;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t attr_val1;
++ union rsbac_attribute_value_t attr_val2;
++ rsbac_mac_process_flags_t flags;
++ rsbac_boolean_t mac_auto_used_level = FALSE;
++ rsbac_boolean_t mac_auto_used_cat = FALSE;
++ rsbac_boolean_t raise_object_level = FALSE;
++ rsbac_boolean_t raise_object_cat = FALSE;
++
++ /* first check for mac_override, which allows everything */
++ i_tid.process = pid;
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_mac_process_flags, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read_write", A_none);
++ return (NOT_GRANTED);
++ }
++ flags = attr_val1.mac_process_flags;
++ if (flags & MAC_override)
++ return GRANTED;
++
++ /* Get current security level */
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_current_sec_level, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read_write", A_none);
++ return (NOT_GRANTED);
++ }
++ curr_level = attr_val1.security_level;
++ /* Get current categories */
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_mac_curr_categories, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read_write", A_none);
++ return (NOT_GRANTED);
++ }
++ curr_categories = attr_val1.mac_categories;
++ /* Get target security level */
++ if (rsbac_get_attr(SW_MAC, target, tid, t_level_attr, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read_write", A_none);
++ return (NOT_GRANTED);
++ }
++ target_sec_level = attr_val1.security_level;
++ /* Get target categories */
++ if (rsbac_get_attr(SW_MAC, target, tid, t_cat_attr, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read_write", A_none);
++ return (NOT_GRANTED);
++ }
++ target_categories = attr_val1.mac_categories;
++
++ if (target_sec_level > curr_level) {
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_security_level, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read_write", A_none);
++ return (NOT_GRANTED);
++ }
++ if (attr_val1.security_level < target_sec_level) {
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: security_level %u under target_sec_level %u, no override -> NOT_GRANTED!\n",
++ current->pid, current->comm,
++ attr_val1.security_level,
++ target_sec_level);
++ return (NOT_GRANTED);
++ }
++ /* curr_level < target_level <= max_level */
++ /* -> need mac_auto, (write_up && read_up)
++ * or trusted (with read option) */
++ if (flags & MAC_auto) {
++ /* check min_write boundary */
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_min_write_open, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (attr_val1.security_level < target_sec_level) {
++ if (!
++ ((flags & MAC_write_up)
++ && (flags & MAC_read_up))
++#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ
++&& !(flags & MAC_trusted)
++#endif
++ ) {
++ /* Try mac_file_flags on the target, if FD object */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_file_flags, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("mac_auto_read_write",
++ A_none);
++ return
++ (NOT_GRANTED);
++ }
++ if (((attr_val1.
++ mac_file_flags &
++ MAC_write_up)
++ && (attr_val1.
++ mac_file_flags &
++ MAC_read_up)
++ )
++#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ
++ || (flags &
++ MAC_trusted)
++#endif
++ ) {
++ break;
++ }
++ /* fall through */
++
++ default:
++						rsbac_pr_debug(adf_mac, "pid %u/%.15s: min_write_open %u under target_sec_level %u, no (write_up && read_up) or trusted -> NOT_GRANTED!\n",
++ current->pid,
++ current->comm,
++ attr_val1.
++ security_level,
++ target_sec_level);
++ return (NOT_GRANTED);
++ }
++ }
++ } else
++ mac_auto_used_level = TRUE;
++ } else {
++ if (!
++ ((flags & MAC_write_up)
++ && (flags & MAC_read_up))
++#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ
++&& !(flags & MAC_trusted)
++#endif
++ ) {
++ /* Try mac_file_flags on the target, if FD object */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_file_flags, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (((attr_val1.
++ mac_file_flags &
++ MAC_write_up)
++ && (attr_val1.
++ mac_file_flags &
++ MAC_read_up)
++ )
++#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ
++ || (flags & MAC_trusted)
++#endif
++ ) {
++ break;
++ }
++ /* fall through */
++
++ default:
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: current level %u under target_sec_level %u, no auto, (write_up && read_up) or trusted -> NOT_GRANTED!\n",
++ current->pid,
++ current->comm,
++ curr_level,
++ target_sec_level);
++ return (NOT_GRANTED);
++ }
++ }
++ }
++ } else if (target_sec_level < curr_level) {
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_min_security_level, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read_write", A_none);
++ return (NOT_GRANTED);
++ }
++ if (attr_val1.security_level > target_sec_level) {
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: min_security_level %u over target_sec_level %u, no override -> NOT_GRANTED!\n",
++ current->pid,
++ current->comm, attr_val1.security_level,
++ target_sec_level);
++ return (NOT_GRANTED);
++ }
++ /* min_level <= target_level < curr_level -> need mac_auto,
++ * write_down or trusted */
++ if (flags & MAC_auto) {
++ /* check max_read boundary */
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_max_read_open, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (attr_val1.security_level > target_sec_level) {
++ if (!(flags & MAC_write_down)
++ && !(flags & MAC_trusted)
++ ) {
++ /* Try mac_file_flags on the target,
++ * if FD object */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_file_flags, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("mac_auto_read_write",
++ A_none);
++ return
++ (NOT_GRANTED);
++ }
++ if ((attr_val1.
++ mac_file_flags &
++ MAC_write_down)
++ || (attr_val1.
++ mac_file_flags &
++ MAC_trusted)
++ ) {
++ if (attr_val1.
++ mac_file_flags
++ & MAC_auto) {
++ raise_object_level
++ = TRUE;
++ }
++ break;
++ }
++ /* fall through */
++
++ default:
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: max_read_open %u over target_sec_level %u, no write_down or trusted -> NOT_GRANTED!\n",
++ current->pid,
++ current->comm,
++ attr_val1.
++ security_level,
++ target_sec_level);
++ return (NOT_GRANTED);
++ }
++ }
++ } else
++ mac_auto_used_level = TRUE;
++ } else {
++ if (!(flags & MAC_write_down)
++ && !(flags & MAC_trusted)
++ ) {
++ /* Try mac_file_flags on the target,
++ * if FD object */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_file_flags, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if ((attr_val1.
++ mac_file_flags &
++ MAC_write_down)
++ || (attr_val1.
++ mac_file_flags &
++ MAC_trusted)
++ ) {
++ if (attr_val1.
++ mac_file_flags &
++ MAC_auto) {
++ raise_object_level
++ = TRUE;
++ }
++ break;
++ }
++ /* fall through */
++
++ default:
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: current security_level %u over target_sec_level %u, no auto, write_down or trusted -> NOT_GRANTED!\n",
++ current->pid,
++ current->comm,
++ curr_level,
++ target_sec_level);
++ return (NOT_GRANTED);
++ }
++ }
++ }
++ }
++ if ((target_categories & curr_categories) != target_categories) {
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_mac_categories, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read_write", A_none);
++ return (NOT_GRANTED);
++ }
++ if ((target_categories & attr_val1.mac_categories) !=
++ target_categories) {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_adf_mac) {
++ char *tmp =
++ rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ char *tmp2 =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp2) {
++ u64tostrmac(tmp,
++ attr_val1.
++ mac_categories);
++ u64tostrmac(tmp2,
++ target_categories);
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: max_categories %s under target categories %s, no override -> NOT_GRANTED!\n",
++ current->pid,
++ current->comm,
++ tmp, tmp2);
++ rsbac_kfree(tmp2);
++ }
++ rsbac_kfree(tmp);
++ }
++ }
++#endif
++ return (NOT_GRANTED);
++ }
++ /* curr_categories < target_categories <= max_categories */
++ /* -> need mac_auto, (read_up && write_up) or
++ * trusted (with read option) */
++ if (flags & MAC_auto) {
++ /* check min_write boundary */
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_min_write_categories, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if ((target_categories & attr_val1.
++ mac_categories) != target_categories) {
++ if (!
++ ((flags & MAC_write_up)
++ && (flags & MAC_read_up))
++#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ
++&& !(flags & MAC_trusted)
++#endif
++ ) {
++ /* Try mac_file_flags on the target,
++ * if FD object */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_file_flags, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("mac_auto_read_write",
++ A_none);
++ return
++ (NOT_GRANTED);
++ }
++ if (((attr_val1.
++ mac_file_flags &
++ MAC_write_up)
++ && (attr_val1.
++ mac_file_flags &
++ MAC_read_up)
++ )
++#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ
++ || (flags &
++ MAC_trusted)
++#endif
++ ) {
++ break;
++ }
++ /* fall through */
++
++ default:
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_adf_mac) {
++ char *tmp =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ char *tmp2
++ =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp2) {
++ u64tostrmac
++ (tmp,
++ attr_val1.
++ mac_categories);
++ u64tostrmac
++ (tmp2,
++ target_categories);
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: min_write_categories %s under target categories %s, no (read_up and write_up) or trusted with read option -> NOT_GRANTED!\n",
++ current->
++ pid,
++ current->
++ comm,
++ tmp,
++ tmp2);
++ rsbac_kfree
++ (tmp2);
++ }
++ rsbac_kfree
++ (tmp);
++ }
++ }
++#endif
++ return (NOT_GRANTED);
++ }
++ }
++ } else
++ mac_auto_used_cat = TRUE;
++ } else {
++ if (!
++ ((flags & MAC_write_up)
++ && (flags & MAC_read_up))
++#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ
++&& !(flags & MAC_trusted)
++#endif
++ ) {
++ /* Try mac_file_flags on the target,
++ * if FD object */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_file_flags, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (((attr_val1.
++ mac_file_flags &
++ MAC_write_up)
++ && (attr_val1.
++ mac_file_flags &
++ MAC_read_up)
++ )
++#ifdef CONFIG_RSBAC_MAC_TRUSTED_READ
++ || (flags & MAC_trusted)
++#endif
++ ) {
++ break;
++ }
++ /* fall through */
++
++ default:
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_adf_mac) {
++ char *tmp =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ char *tmp2 =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp2) {
++ u64tostrmac
++ (tmp,
++ curr_categories);
++ u64tostrmac
++ (tmp2,
++ target_categories);
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: curr_categories %s under target categories %s, no auto, (read_up and write_up) or trusted -> NOT_GRANTED!\n",
++ current->
++ pid,
++ current->
++ comm,
++ tmp,
++ tmp2);
++ rsbac_kfree
++ (tmp2);
++ }
++ rsbac_kfree(tmp);
++ }
++ }
++#endif
++ return (NOT_GRANTED);
++ }
++ }
++ }
++ } else
++ if ((target_categories & curr_categories) != curr_categories) {
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_mac_min_categories, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read_write", A_none);
++ return (NOT_GRANTED);
++ }
++ if ((target_categories & attr_val1.mac_categories) !=
++ attr_val1.mac_categories) {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_adf_mac) {
++ char *tmp =
++ rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ char *tmp2 =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp2) {
++ u64tostrmac(tmp,
++ attr_val1.
++ mac_categories);
++ u64tostrmac(tmp2,
++ target_categories);
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: min_categories %s over target categories %s, no override -> NOT_GRANTED!\n",
++ current->pid,
++ current->comm,
++ tmp, tmp2);
++ rsbac_kfree(tmp2);
++ }
++ rsbac_kfree(tmp);
++ }
++ }
++#endif
++ return (NOT_GRANTED);
++ }
++ /* min_level <= target_level < curr_level -> need mac_auto,
++ * write_down or trusted */
++ if (flags & MAC_auto) {
++ /* check max_read boundary */
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_max_read_categories, &attr_val1, FALSE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if ((target_categories & attr_val1.
++ mac_categories) != attr_val1.mac_categories) {
++ if (!(flags & MAC_write_down)
++ && !(flags & MAC_trusted)
++ ) {
++ /* Try mac_file_flags on the target,
++ * if FD object */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_file_flags, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("mac_auto_read_write",
++ A_none);
++ return
++ (NOT_GRANTED);
++ }
++ if ((attr_val1.
++ mac_file_flags &
++ MAC_write_down)
++ || (attr_val1.
++ mac_file_flags &
++ MAC_trusted)
++ ) {
++ if (attr_val1.
++ mac_file_flags
++ & MAC_auto) {
++ raise_object_cat
++ = TRUE;
++ }
++ break;
++ }
++ /* fall through */
++
++ default:
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_adf_mac) {
++ char *tmp =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ char *tmp2
++ =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp2) {
++ u64tostrmac
++ (tmp,
++ attr_val1.
++ mac_categories);
++ u64tostrmac
++ (tmp2,
++ target_categories);
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: max_read_categories %s over target categories %s, no write_down or trusted -> NOT_GRANTED!\n",
++ current->
++ pid,
++ current->
++ comm,
++ tmp,
++ tmp2);
++ rsbac_kfree
++ (tmp2);
++ }
++ rsbac_kfree
++ (tmp);
++ }
++ }
++#endif
++ return (NOT_GRANTED);
++ }
++ }
++ } else
++ mac_auto_used_cat = TRUE;
++ } else {
++ if (!(flags & MAC_write_down)
++ && !(flags & MAC_trusted)
++ ) {
++ /* Try mac_file_flags on the target,
++ * if FD object */
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_file_flags, &attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if ((attr_val1.
++ mac_file_flags &
++ MAC_write_down)
++ || (attr_val1.
++ mac_file_flags &
++ MAC_trusted)
++ ) {
++ if (attr_val1.
++ mac_file_flags &
++ MAC_auto) {
++ raise_object_cat =
++ TRUE;
++ }
++ break;
++ }
++ /* fall through */
++
++ default:
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_adf_mac) {
++ char *tmp =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ char *tmp2 =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++
++ if (tmp2) {
++ u64tostrmac
++ (tmp,
++ curr_categories);
++ u64tostrmac
++ (tmp2,
++ target_categories);
++ rsbac_pr_debug(adf_mac, "pid %u/%.15s: curr_categories %s over target categories %s, no auto, write_down or trusted -> NOT_GRANTED!\n",
++ current->
++ pid,
++ current->
++ comm,
++ tmp,
++ tmp2);
++ rsbac_kfree
++ (tmp2);
++ }
++ rsbac_kfree(tmp);
++ }
++ }
++#endif
++ return (NOT_GRANTED);
++ }
++ }
++ }
++ }
++
++ /* grant area */
++
++ /* adjust current_sec_level and min_write_level, */
++ /* if set_level is true and mac_auto has been used */
++ if (set_level && (mac_auto_used_level || raise_object_level)
++ ) {
++#ifdef CONFIG_RSBAC_MAC_LOG_LEVEL_CHANGE
++ {
++ char *target_type_name;
++ char *target_id_name;
++
++ target_type_name = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (target_type_name) {
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ target_id_name
++ =
++ rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN
++ + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++#else
++ target_id_name =
++ rsbac_kmalloc(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++#endif
++ if (target_id_name) {
++ get_target_name(target_type_name,
++ target,
++ target_id_name,
++ tid);
++
++ if (mac_auto_used_level) {
++ rsbac_printk(KERN_INFO "mac_auto_read_write(): Changing process %u (%.15s, owner %u) current level from %u to %u for %s %s\n",
++ pid,
++ current->comm,
++ current_uid(),
++ curr_level,
++ target_sec_level,
++ target_type_name,
++ target_id_name);
++ } else {
++ rsbac_printk(KERN_INFO "mac_auto_read_write(): Process %u (%.15s, owner %u): Raising object level from %u to %u for %s %s\n",
++ pid,
++ current->comm,
++ current_uid(),
++ target_sec_level,
++ curr_level,
++ target_type_name,
++ target_id_name);
++ }
++ rsbac_kfree(target_id_name);
++ }
++ rsbac_kfree(target_type_name);
++ }
++ }
++#endif
++ if (mac_auto_used_level) {
++ i_tid.process = pid;
++ attr_val1.current_sec_level = target_sec_level;
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_min_write_open, &attr_val2, TRUE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (attr_val1.min_write_open <
++ attr_val2.min_write_open) {
++ if (rsbac_set_attr(SW_MAC, T_PROCESS, i_tid, A_min_write_open, attr_val1)) { /* failed! */
++ rsbac_ds_set_error
++ ("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ }
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_max_read_open, &attr_val2, TRUE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (attr_val1.max_read_open >
++ attr_val2.max_read_open) {
++ if (rsbac_set_attr(SW_MAC, T_PROCESS, i_tid, A_max_read_open, attr_val1)) { /* failed! */
++ rsbac_ds_set_error
++ ("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ }
++ if (rsbac_set_attr(SW_MAC, T_PROCESS, i_tid, A_current_sec_level, attr_val1)) { /* failed! */
++ rsbac_ds_set_error("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ } else {
++ attr_val1.security_level = curr_level;
++ if (rsbac_set_attr(SW_MAC, target, tid, A_security_level, attr_val1)) { /* failed! */
++ rsbac_ds_set_error("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ }
++ }
++ /* adjust current_categories and min_write_categories, */
++ /* if set_level is true and mac_auto has been used */
++ if (set_level && (mac_auto_used_cat || raise_object_cat)
++ ) {
++#ifdef CONFIG_RSBAC_MAC_LOG_LEVEL_CHANGE
++ {
++ char *target_type_name =
++ rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (target_type_name) {
++ char *target_id_name;
++
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ target_id_name
++ =
++ rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN
++ + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++#else
++ target_id_name =
++ rsbac_kmalloc(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++#endif
++ if (target_id_name) {
++ char *tmp1 =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++ if (tmp1) {
++ char *tmp2 =
++ rsbac_kmalloc
++ (RSBAC_MAXNAMELEN);
++ if (tmp2) {
++ get_target_name
++ (target_type_name,
++ target,
++ target_id_name,
++ tid);
++
++ if (mac_auto_used_cat) {
++ rsbac_printk
++ (KERN_INFO "mac_auto_read_write(): Changing process %u (%.15s, owner %u) current categories from %s to %s for %s %s\n",
++ pid,
++ current->
++ comm,
++ current_uid(),
++ u64tostrmac
++ (tmp1,
++ curr_categories),
++ u64tostrmac
++ (tmp2,
++ target_categories),
++ target_type_name,
++ target_id_name);
++ } else {
++ rsbac_printk
++ (KERN_INFO "mac_auto_read_write(): Process %u (%.15s, owner %u): raising current categories from %s to %s for %s %s\n",
++ pid,
++ current->
++ comm,
++ current_uid(),
++ u64tostrmac
++ (tmp2,
++ target_categories),
++ u64tostrmac
++ (tmp1,
++ curr_categories),
++ target_type_name,
++ target_id_name);
++ }
++ rsbac_kfree(tmp2);
++ }
++ rsbac_kfree(tmp1);
++ }
++ rsbac_kfree(target_id_name);
++ }
++ rsbac_kfree(target_type_name);
++ }
++ }
++#endif
++ if (mac_auto_used_cat) {
++ i_tid.process = pid;
++ attr_val1.mac_categories = target_categories;
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_min_write_categories, &attr_val2, TRUE)) { /* failed! */
++				rsbac_ds_get_error("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if ((attr_val1.mac_categories & attr_val2.
++ mac_categories)
++ != attr_val2.mac_categories) {
++ if (rsbac_set_attr(SW_MAC, T_PROCESS, i_tid, A_min_write_categories, attr_val1)) { /* failed! */
++ rsbac_ds_set_error
++ ("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ }
++ if (rsbac_get_attr(SW_MAC, T_PROCESS, i_tid, A_max_read_categories, &attr_val2, TRUE)) { /* failed! */
++ rsbac_ds_get_error("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if ((attr_val1.mac_categories & attr_val2.
++ mac_categories)
++ != attr_val1.mac_categories) {
++ if (rsbac_set_attr(SW_MAC, T_PROCESS, i_tid, A_max_read_categories, attr_val1)) { /* failed! */
++ rsbac_ds_set_error
++ ("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ }
++ if (rsbac_set_attr(SW_MAC, T_PROCESS, i_tid, A_mac_curr_categories, attr_val1)) { /* failed! */
++ rsbac_ds_set_error("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ } else {
++ attr_val1.mac_categories = curr_categories;
++ if (rsbac_set_attr(SW_MAC, target, tid, A_mac_categories, attr_val1)) { /* failed! */
++ rsbac_ds_set_error("mac_auto_read_write",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ }
++ }
++
++ /* Everything done, so return */
++ return (GRANTED);
++}
++
++static enum rsbac_adf_req_ret_t
++auto_read_write(rsbac_pid_t pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid, rsbac_boolean_t set_level)
++{
++ return auto_read_write_attr(pid,
++ target,
++ tid,
++ A_security_level,
++ A_mac_categories, set_level);
++}
++
++/************************************************* */
++/* Externally visible functions */
++/************************************************* */
++
++inline enum rsbac_adf_req_ret_t
++rsbac_adf_request_mac(enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++{
++ enum rsbac_adf_req_ret_t result = DO_NOT_CARE;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++#if defined(CONFIG_RSBAC_MAC_NET_OBJ_PROT)
++ union rsbac_attribute_value_t i_attr_val2;
++#endif
++
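++	/* Dispatch on request type: file, IPC and network accesses go through
++	 * the auto read/write level checks, administrative requests are
++	 * decided by the owner's mac_role. */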
++ switch (request) {
++ case R_ADD_TO_KERNEL:
++ switch (target) {
++ case T_FILE:
++ case T_DEV:
++ case T_NONE:
++ /* test owner's mac_role */
++ return mac_check_role(owner, SR_administrator);
++
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_ALTER:
++ /* only for IPC */
++ if (target == T_IPC) {
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++ } else
++ /* all other targets are unknown */
++ return (DO_NOT_CARE);
++ break;
++
++ case R_APPEND_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++ break;
++ case T_IPC:
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++ break;
++ case T_DEV:
++ /* Only check for devices with mac_check set */
++ if (rsbac_get_attr(SW_MAC,
++ T_DEV,
++ tid,
++ A_mac_check,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error("rsbac_adf_request_mac",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (!i_attr_val1.mac_check)
++ return (DO_NOT_CARE);
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++ break;
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_CHANGE_GROUP:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++ case T_IPC:
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++
++#if defined(CONFIG_RSBAC_MAC_UM_PROT)
++ case T_USER:
++ /* Security Officer? */
++ return mac_check_role(owner, SR_security_officer);
++#endif
++ /* We do not care about */
++ /* all other cases */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_CHANGE_OWNER:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++
++ case T_IPC:
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_CHDIR:
++ switch (target) {
++ case T_DIR:
++ /* and perform auto-read without setting attributes */
++ return (auto_read(caller_pid, target, tid, FALSE));
++ break;
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_CREATE:
++ switch (target) {
++ /* Creating dir or (pseudo) file IN target dir! */
++ case T_DIR:
++#ifdef CONFIG_RSBAC_MAC_LIGHT
++ return GRANTED;
++#else
++ /* Mode of created item is ignored! */
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++#endif
++ break;
++
++#ifdef CONFIG_RSBAC_MAC_NET_OBJ_PROT
++ case T_NETTEMP:
++ return mac_check_role(owner, SR_security_officer);
++
++ case T_NETOBJ:
++ /* and perform auto-write without setting attributes */
++ return (auto_write_attr(caller_pid,
++ target,
++ tid,
++ A_local_sec_level,
++ A_local_mac_categories,
++ FALSE));
++#endif
++
++#if defined(CONFIG_RSBAC_MAC_UM_PROT)
++ case T_USER:
++ case T_GROUP:
++ /* Security Officer? */
++ return mac_check_role(owner, SR_security_officer);
++#endif
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_DELETE:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++ break;
++ case T_IPC:
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++ break;
++
++#ifdef CONFIG_RSBAC_MAC_NET_OBJ_PROT
++ case T_NETTEMP:
++ return mac_check_role(owner, SR_security_officer);
++#endif
++#if defined(CONFIG_RSBAC_MAC_UM_PROT)
++ case T_USER:
++ case T_GROUP:
++ /* Security Officer? */
++ return mac_check_role(owner, SR_security_officer);
++#endif
++
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_EXECUTE:
++ case R_MAP_EXEC:
++ switch (target) {
++ case T_FILE:
++ /* and perform auto-read without setting attributes */
++ return (auto_read(caller_pid, target, tid, FALSE));
++
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_GET_PERMISSIONS_DATA:
++ switch (target) {
++#if defined(CONFIG_RSBAC_MAC_UM_PROT)
++ case T_USER:
++ case T_GROUP:
++ /* Security Officer? */
++ return mac_check_role(owner, SR_security_officer);
++#endif
++#ifdef CONFIG_RSBAC_MAC_NET_OBJ_PROT
++ case T_NETOBJ:
++ /* and perform auto-read without setting attributes */
++ return (auto_read_attr(caller_pid,
++ target,
++ tid,
++ A_local_sec_level,
++ A_local_mac_categories,
++ FALSE));
++#endif
++
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_GET_STATUS_DATA:
++ switch (target) {
++ case T_SCD:
++		/* target rsbac_log? grant for secoff and auditor only */
++ if (tid.scd != ST_rsbac_log)
++ return (GRANTED);
++ /* Secoff? */
++ if (mac_check_role(owner, SR_security_officer) ==
++ NOT_GRANTED)
++ return mac_check_role(owner, SR_auditor);
++ else
++ return GRANTED;
++
++ case T_PROCESS:
++ /* perform auto-read without setting attributes */
++ return (auto_read_attr(caller_pid,
++ target,
++ tid,
++ A_current_sec_level,
++ A_mac_curr_categories,
++ FALSE));
++
++#ifdef CONFIG_RSBAC_MAC_NET_OBJ_PROT
++ case T_NETOBJ:
++ /* and perform auto-read without setting attributes */
++ return (auto_read_attr(caller_pid,
++ target,
++ tid,
++ A_local_sec_level,
++ A_local_mac_categories,
++ FALSE));
++#endif
++
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_LINK_HARD:
++ switch (target) {
++ case T_FILE:
++ case T_FIFO:
++ case T_SYMLINK:
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++ break;
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_MODIFY_ACCESS_DATA:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++ break;
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_MODIFY_ATTRIBUTE:
++ switch (attr) {
++ case A_security_level:
++ case A_initial_security_level:
++ case A_local_sec_level:
++ case A_remote_sec_level:
++ case A_min_security_level:
++ case A_mac_categories:
++ case A_mac_initial_categories:
++ case A_local_mac_categories:
++ case A_remote_mac_categories:
++ case A_mac_min_categories:
++ case A_mac_user_flags:
++ case A_mac_process_flags:
++ case A_mac_file_flags:
++ case A_system_role:
++ case A_mac_role:
++ case A_current_sec_level:
++ case A_mac_curr_categories:
++ case A_min_write_open:
++ case A_max_read_open:
++ case A_min_write_categories:
++ case A_max_read_categories:
++ case A_mac_check:
++ case A_mac_auto:
++ case A_mac_prop_trusted:
++ case A_symlink_add_mac_level:
++#ifdef CONFIG_RSBAC_MAC_GEN_PROT
++ case A_pseudo:
++ case A_log_array_low:
++ case A_log_array_high:
++ case A_local_log_array_low:
++ case A_local_log_array_high:
++ case A_remote_log_array_low:
++ case A_remote_log_array_high:
++ case A_log_program_based:
++ case A_log_user_based:
++ case A_symlink_add_remote_ip:
++ case A_symlink_add_uid:
++ case A_linux_dac_disable:
++ case A_fake_root_uid:
++ case A_audit_uid:
++ case A_auid_exempt:
++ case A_remote_ip:
++ case A_kernel_thread:
++ case A_vset:
++ case A_program_file:
++#endif
++#ifdef CONFIG_RSBAC_MAC_AUTH_PROT
++ case A_auth_may_setuid:
++ case A_auth_may_set_cap:
++ case A_auth_start_uid:
++ case A_auth_start_euid:
++ case A_auth_start_gid:
++ case A_auth_start_egid:
++ case A_auth_learn:
++ case A_auth_add_f_cap:
++ case A_auth_remove_f_cap:
++ case A_auth_last_auth:
++#endif
++ /* All attributes (remove target!) */
++ case A_none:
++ /* Security Officer? */
++ return mac_check_role(owner, SR_security_officer);
++
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_MODIFY_PERMISSIONS_DATA:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ case T_IPC:
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++ break;
++
++ case T_SCD:
++#ifdef CONFIG_RSBAC_USER_MOD_IOPERM
++ if (tid.scd == ST_ioports)
++ return GRANTED;
++#endif
++ /* Security Officer? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_MAC,
++ T_USER,
++ i_tid,
++ A_mac_role,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error("rsbac_adf_request_mac",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ /* if sec_officer, then grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return (GRANTED);
++ /* For booting: if administrator and ioports, then grant */
++ if ((i_attr_val1.system_role == SR_administrator)
++ && (tid.scd == ST_ioports))
++ return (GRANTED);
++ else
++ return (NOT_GRANTED);
++
++#ifdef CONFIG_RSBAC_MAC_NET_OBJ_PROT
++ case T_NETOBJ:
++ /* and perform auto-write without setting attributes */
++ return (auto_write_attr(caller_pid,
++ target,
++ tid,
++ A_local_sec_level,
++ A_local_mac_categories,
++ FALSE));
++#endif
++
++#if defined(CONFIG_RSBAC_MAC_UM_PROT)
++ case T_USER:
++ case T_GROUP:
++ /* Security Officer? */
++ return mac_check_role(owner, SR_security_officer);
++#endif
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE
++ /* switching Linux DAC */
++ case T_NONE:
++ /* Security Officer? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_MAC,
++ T_USER,
++ i_tid,
++ A_mac_role,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error("rsbac_adf_request_mac",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ /* if sec_officer, then grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return (GRANTED);
++ else
++ return (NOT_GRANTED);
++#endif
++
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_MODIFY_SYSTEM_DATA:
++ switch (target) {
++ case T_SCD:
++ /* target rlimit? no problem, but needed -> grant */
++ if ( (tid.scd == ST_rlimit)
++ || (tid.scd == ST_priority)
++ || (tid.scd == ST_mlock)
++ )
++ return (GRANTED);
++ /* Get role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_MAC,
++ T_USER,
++ i_tid,
++ A_mac_role,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error("rsbac_adf_request_mac",
++ A_none);
++ return NOT_GRANTED;
++ }
++ /* if rsbaclog: grant only for secoff and auditor */
++ if (tid.scd == ST_rsbac_log) {
++ if ((i_attr_val1.system_role ==
++ SR_security_officer)
++ || (i_attr_val1.system_role ==
++ SR_auditor)
++ )
++ return (GRANTED);
++ else
++ return (NOT_GRANTED);
++ }
++		/* if rsbac_remote_log: grant only for secoff */
++ if (tid.scd == ST_rsbac_remote_log) {
++ if ((i_attr_val1.system_role ==
++ SR_security_officer)
++ )
++ return (GRANTED);
++ else
++ return (NOT_GRANTED);
++ }
++		/* if rsbac: grant for secoff and admin */
++ if (tid.scd == ST_rsbac) {
++ if ((i_attr_val1.system_role ==
++ SR_security_officer)
++ || (i_attr_val1.system_role ==
++ SR_administrator)
++ )
++ return (GRANTED);
++ else
++ return (NOT_GRANTED);
++ }
++ /* if administrator, then grant */
++ if (i_attr_val1.system_role == SR_administrator)
++ return (GRANTED);
++ else
++ return (NOT_GRANTED);
++
++ case T_DEV:
++ if (tid.dev.type == D_block)
++ return mac_check_role(owner,
++ SR_administrator);
++ else
++ return DO_NOT_CARE;
++
++ case T_PROCESS:
++ /* and perform auto-write without setting attributes */
++ return (auto_write_attr(caller_pid,
++ target,
++ tid,
++ A_current_sec_level,
++ A_mac_curr_categories,
++ FALSE));
++
++#ifdef CONFIG_RSBAC_MAC_NET_DEV_PROT
++ case T_NETDEV:
++ return mac_check_role(owner, SR_administrator);
++#endif
++#ifdef CONFIG_RSBAC_MAC_NET_OBJ_PROT
++ case T_NETOBJ:
++ /* and perform auto-write without setting attributes */
++ return (auto_write_attr(caller_pid,
++ target,
++ tid,
++ A_local_sec_level,
++ A_local_mac_categories,
++ FALSE));
++#endif
++
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_MOUNT:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_DEV:
++ /* test owner's mac_role: Administrator? */
++#ifndef CONFIG_RSBAC_MAC_LIGHT
++ if (mac_check_role(owner, SR_administrator) ==
++ NOT_GRANTED)
++ return (NOT_GRANTED);
++#endif
++ /* test read-write access to mount dir / dev: */
++ /* and perform auto-read(-write) without setting of attributes */
++ if ((target == T_DEV)
++ && (attr == A_mode)
++ && (attr_val.mode & MS_RDONLY))
++ return (auto_read(caller_pid,
++ target, tid, FALSE));
++ else
++ return (auto_read_write(caller_pid,
++ target,
++ tid, FALSE));
++
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_READ:
++ switch (target) {
++ case T_DIR:
++#ifdef CONFIG_RSBAC_RW
++ case T_IPC:
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++#endif
++ /* and perform auto-read without setting attributes */
++ return (auto_read(caller_pid, target, tid, FALSE));
++ break;
++
++#ifdef CONFIG_RSBAC_RW
++ case T_DEV:
++ /* Only check for devices with mac_check set */
++ if (rsbac_get_attr(SW_MAC,
++ T_DEV,
++ tid,
++ A_mac_check,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error("rsbac_adf_request_mac",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (!i_attr_val1.mac_check)
++ return (DO_NOT_CARE);
++ /* and perform auto-read without setting attributes */
++ return (auto_read(caller_pid, target, tid, FALSE));
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_MAC_NET_OBJ_PROT)
++ case T_NETTEMP:
++ if (mac_check_role(owner, SR_security_officer) ==
++ GRANTED)
++ return GRANTED;
++ return mac_check_role(owner, SR_administrator);
++
++ case T_NETOBJ:
++ /* and perform auto-read without setting attributes */
++ return (auto_read_attr(caller_pid,
++ target,
++ tid,
++ A_remote_sec_level,
++ A_remote_mac_categories,
++ FALSE));
++#endif
++
++#if defined(CONFIG_RSBAC_MAC_UM_PROT)
++ case T_USER:
++ /* Security Officer or Admin? */
++ if (mac_check_role(owner, SR_security_officer) ==
++ GRANTED)
++ return GRANTED;
++ else
++ return mac_check_role(owner,
++ SR_administrator);
++#endif
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++
++ case R_READ_ATTRIBUTE:
++ switch (attr) {
++ case A_owner:
++ case A_security_level:
++ case A_local_sec_level:
++ case A_remote_sec_level:
++ case A_min_security_level:
++ case A_mac_categories:
++ case A_local_mac_categories:
++ case A_remote_mac_categories:
++ case A_mac_min_categories:
++ case A_pseudo:
++ case A_system_role:
++ case A_mac_role:
++ case A_current_sec_level:
++ case A_min_write_open:
++ case A_max_read_open:
++ case A_mac_user_flags:
++ case A_mac_process_flags:
++ case A_mac_check:
++ case A_mac_auto:
++ case A_mac_prop_trusted:
++ case A_mac_file_flags:
++ case A_initial_security_level:
++ case A_mac_initial_categories:
++ case A_symlink_add_mac_level:
++#ifdef CONFIG_RSBAC_MAC_GEN_PROT
++ case A_log_array_low:
++ case A_log_array_high:
++ case A_log_program_based:
++ case A_log_user_based:
++ case A_symlink_add_remote_ip:
++ case A_symlink_add_uid:
++ case A_fake_root_uid:
++ case A_audit_uid:
++ case A_auid_exempt:
++ case A_remote_ip:
++ case A_kernel_thread:
++ case A_vset:
++ case A_program_file:
++#endif
++#ifdef CONFIG_RSBAC_MAC_AUTH_PROT
++ case A_auth_may_setuid:
++ case A_auth_may_set_cap:
++ case A_auth_start_uid:
++ case A_auth_start_euid:
++ case A_auth_start_gid:
++ case A_auth_start_egid:
++ case A_auth_learn:
++ case A_auth_last_auth:
++#endif
++		/* Security Officer or Admin? */
++ if (mac_check_role(owner, SR_security_officer) ==
++ GRANTED)
++ return GRANTED;
++ else
++ return mac_check_role(owner,
++ SR_administrator);
++
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_READ_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_IPC:
++ /* and perform auto-read without setting attributes */
++ return (auto_read(caller_pid, target, tid, FALSE));
++ break;
++ case T_DEV:
++ /* Only check for devices with mac_check set */
++ if (rsbac_get_attr(SW_MAC,
++ T_DEV,
++ tid,
++ A_mac_check,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error("rsbac_adf_request_mac",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (!i_attr_val1.mac_check)
++ return (DO_NOT_CARE);
++ /* and perform auto-read without setting attributes */
++ return (auto_read(caller_pid, target, tid, FALSE));
++ break;
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_READ_WRITE_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_IPC:
++ /* and perform auto-read-write without setting attributes */
++ return (auto_read_write(caller_pid,
++ target, tid, FALSE));
++
++ case T_DEV:
++ /* Only check for devices with mac_check set */
++ if (rsbac_get_attr(SW_MAC,
++ T_DEV,
++ tid,
++ A_mac_check,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error("rsbac_adf_request_mac",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (!i_attr_val1.mac_check)
++ return (DO_NOT_CARE);
++ /* and perform auto-read-write without setting attributes */
++ return (auto_read_write(caller_pid,
++ target, tid, FALSE));
++
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_REMOVE_FROM_KERNEL:
++ switch (target) {
++ case T_FILE:
++ case T_DEV:
++ case T_NONE:
++ /* test owner's mac_role */
++ return mac_check_role(owner, SR_administrator);
++
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_SHUTDOWN:
++ switch (target) {
++ case T_NONE:
++ /* test owner's mac_role */
++ return mac_check_role(owner, SR_administrator);
++
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_RENAME:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ /* and perform auto-write without setting attributes */
++ result = auto_write(caller_pid,
++ target, tid, FALSE);
++ /* if parent dir might change, convert inherit to explicit level/cat:
++ get and set effective value */
++ if (((result == GRANTED)
++ || (result == DO_NOT_CARE)
++ )
++ && ((attr != A_new_dir_dentry_p)
++ || (attr_val.new_dir_dentry_p !=
++ tid.file.dentry_p->d_parent)
++ )
++ ) {
++ if (rsbac_get_attr(SW_MAC, target, tid, A_security_level, &i_attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("rsbac_adf_request_mac",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (rsbac_set_attr(SW_MAC, target, tid, A_security_level, i_attr_val1)) { /* failed! */
++ rsbac_ds_set_error
++ ("rsbac_adf_request_mac",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (rsbac_get_attr(SW_MAC, target, tid, A_mac_categories, &i_attr_val1, TRUE)) { /* failed! */
++ rsbac_ds_get_error
++ ("rsbac_adf_request_mac",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (rsbac_set_attr(SW_MAC, target, tid, A_mac_categories, i_attr_val1)) { /* failed! */
++ rsbac_ds_set_error
++ ("rsbac_adf_request_mac",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ }
++ return result;
++ break;
++#if defined(CONFIG_RSBAC_MAC_UM_PROT)
++ case T_USER:
++ case T_GROUP:
++ /* Security Officer? */
++ return mac_check_role(owner, SR_security_officer);
++#endif
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++
++ case R_SEARCH:
++ switch (target) {
++ case T_DIR:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ /* and perform auto-read without setting attributes */
++ return (auto_read(caller_pid, target, tid, FALSE));
++ break;
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_SEND_SIGNAL:
++ switch (target) {
++ case T_PROCESS:
++ /* and perform auto-write without setting attributes */
++ return (auto_write_attr(caller_pid,
++ target,
++ tid,
++ A_current_sec_level,
++ A_mac_curr_categories,
++ FALSE));
++
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_SWITCH_LOG:
++ switch (target) {
++ case T_NONE:
++ /* test owner's mac_role */
++ return mac_check_role(owner, SR_security_officer);
++
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_SWITCH_MODULE:
++ switch (target) {
++ case T_NONE:
++ /* we need the switch_target */
++ if (attr != A_switch_target)
++ return NOT_GRANTED;
++ /* do not care for other modules */
++ if ((attr_val.switch_target != SW_MAC)
++#ifdef CONFIG_RSBAC_MAC_AUTH_PROT
++ && (attr_val.switch_target != SW_AUTH)
++#endif
++#ifdef CONFIG_RSBAC_SOFTMODE
++ && (attr_val.switch_target != SW_SOFTMODE)
++#endif
++#ifdef CONFIG_RSBAC_FREEZE
++ && (attr_val.switch_target != SW_FREEZE)
++#endif
++ )
++ return (DO_NOT_CARE);
++ /* test owner's mac_role */
++ return mac_check_role(owner, SR_security_officer);
++
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_TRACE:
++ switch (target) {
++ case T_PROCESS:
++ /* and perform auto-read-write without setting attributes */
++ return (auto_read_write_attr(caller_pid,
++ target,
++ tid,
++ A_current_sec_level,
++ A_mac_curr_categories,
++ FALSE));
++
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_TRUNCATE:
++ switch (target) {
++ case T_FILE:
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++ break;
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_UMOUNT:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_DEV:
++#ifdef CONFIG_RSBAC_MAC_LIGHT
++ return (GRANTED);
++#else
++ return mac_check_role(owner, SR_administrator);
++#endif
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_WRITE:
++ switch (target) {
++ case T_DIR:
++ case T_IPC:
++#ifdef CONFIG_RSBAC_RW
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++#endif
++ /* Mode of created item is ignored! */
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++
++
++#ifdef CONFIG_RSBAC_RW
++ case T_DEV:
++ /* Only check for devices with mac_check set */
++ if (rsbac_get_attr(SW_MAC,
++ T_DEV,
++ tid,
++ A_mac_check,
++ &i_attr_val1, FALSE)) {
++ rsbac_ds_get_error("rsbac_adf_request_mac",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (!i_attr_val1.mac_check)
++ return (DO_NOT_CARE);
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_MAC_NET_OBJ_PROT)
++ case T_NETTEMP:
++ return mac_check_role(owner, SR_security_officer);
++
++ case T_NETOBJ:
++ /* test write access to target: get its sec_level */
++ if (rsbac_get_attr(SW_MAC,
++ target,
++ tid,
++ A_remote_sec_level,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error("rsbac_adf_request_mac",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (rsbac_get_attr(SW_MAC,
++ target,
++ tid,
++ A_remote_mac_categories,
++ &i_attr_val2, TRUE)) {
++ rsbac_ds_get_error("rsbac_adf_request_mac",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ /* and perform auto-write without setting attributes */
++ return (auto_write_attr(caller_pid,
++ target,
++ tid,
++ A_remote_sec_level,
++ A_remote_mac_categories,
++ FALSE));
++#endif
++
++#if defined(CONFIG_RSBAC_MAC_UM_PROT)
++ case T_USER:
++ case T_GROUP:
++ /* Security Officer? */
++ return mac_check_role(owner, SR_security_officer);
++#endif
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_WRITE_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_IPC:
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++ break;
++ case T_DEV:
++ /* Only check for devices with mac_check set */
++ if (rsbac_get_attr(SW_MAC,
++ T_DEV,
++ tid,
++ A_mac_check,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error("rsbac_adf_request_mac",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (!i_attr_val1.mac_check)
++ return (DO_NOT_CARE);
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_SEND:
++ switch (target) {
++ case T_DEV:
++ /* Only check for devices with mac_check set */
++ if (rsbac_get_attr(SW_MAC,
++ T_DEV,
++ tid,
++ A_mac_check,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error("rsbac_adf_request_mac",
++ A_none);
++ return (NOT_GRANTED);
++ }
++ if (!i_attr_val1.mac_check)
++ return (DO_NOT_CARE);
++ /* and perform auto-write without setting attributes */
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++
++ case T_UNIXSOCK:
++ return (auto_write(caller_pid,
++ target, tid, FALSE));
++
++#if defined(CONFIG_RSBAC_MAC_NET_OBJ_PROT)
++ case T_NETOBJ:
++ /* and perform auto-read-write without setting attributes */
++ return (auto_read_write_attr(caller_pid,
++ target,
++ tid,
++ A_remote_sec_level,
++ A_remote_mac_categories,
++ FALSE));
++
++#endif
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_BIND:
++ case R_LISTEN:
++ switch (target) {
++ case T_UNIXSOCK:
++ return (auto_read(caller_pid,
++ target, tid, FALSE));
++#if defined(CONFIG_RSBAC_MAC_NET_OBJ_PROT)
++ case T_NETOBJ:
++ /* and perform auto-read-write without setting attributes */
++ return (auto_read_write_attr(caller_pid,
++ target,
++ tid,
++ A_local_sec_level,
++ A_local_mac_categories,
++ FALSE));
++
++ /* all other cases are unknown */
++#endif
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ case R_ACCEPT:
++ case R_CONNECT:
++ case R_RECEIVE:
++ switch (target) {
++ case T_UNIXSOCK:
++ return (auto_read_write(caller_pid,
++ target, tid, FALSE));
++#if defined(CONFIG_RSBAC_MAC_NET_OBJ_PROT)
++ case T_NETOBJ:
++ /* and perform auto-read-write without setting attributes */
++ return (auto_read_write_attr(caller_pid,
++ target,
++ tid,
++ A_remote_sec_level,
++ A_remote_mac_categories,
++ FALSE));
++#endif
++ /* all other cases are unknown */
++ default:
++ return (DO_NOT_CARE);
++ }
++
++ default:
++ return DO_NOT_CARE;
++ }
++
++ return result;
++}
++
++
++/*****************************************************************************/
++/* If the request returned granted and the operation is performed, */
++/* the following function can be called by the AEF to get all aci set */
++/* correctly. For write accesses that are performed fully within the kernel, */
++/* this is usually not done to prevent extra calls, including R_CLOSE for */
++/* cleaning up. Because of this, the write boundary is not adjusted - there */
++/* is no user-level writing anyway... */
++/* The second instance of target specification is the new target, if one has */
++/* been created, otherwise its values are ignored. */
++/* On success, 0 is returned, and an error from rsbac/error.h otherwise. */
++
++inline int rsbac_adf_set_attr_mac(enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_target_t new_target,
++ union rsbac_target_id_t new_tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++{
++ enum rsbac_adf_req_ret_t result = DO_NOT_CARE;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_attribute_value_t i_attr_val2;
++ union rsbac_attribute_value_t i_attr_val3;
++ union rsbac_attribute_value_t i_attr_val4;
++ union rsbac_attribute_value_t i_attr_val5;
++ union rsbac_attribute_value_t i_attr_val6;
++ union rsbac_attribute_value_t i_attr_val7;
++ union rsbac_attribute_value_t i_attr_val8;
++ union rsbac_attribute_value_t i_attr_val9;
++ rsbac_boolean_t inherit;
++
++ switch (request) {
++ case R_APPEND_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_IPC:
++ /* test write access to target: get its sec_level */
++ if ((target == T_FILE)
++ || (target == T_FIFO)
++ )
++ inherit = TRUE;
++ else
++ inherit = FALSE;
++ if (rsbac_get_attr(SW_MAC,
++ target,
++ tid,
++ A_security_level,
++ &i_attr_val1, inherit)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (rsbac_get_attr(SW_MAC,
++ target,
++ tid,
++ A_mac_categories,
++ &i_attr_val2, inherit)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* and perform auto-write with setting attributes */
++ result = auto_write(caller_pid, target, tid, TRUE);
++ if ((result == GRANTED) || (result == DO_NOT_CARE))
++ return 0;
++ else
++ return (-RSBAC_EDECISIONMISMATCH);
++ break;
++ case T_DEV:
++ /* Only check for devices with mac_check set */
++ if (rsbac_get_attr(SW_MAC,
++ T_DEV,
++ tid,
++ A_mac_check,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (!i_attr_val1.mac_check)
++ return 0;
++ /* and perform auto-write with setting attributes */
++ result = auto_write(caller_pid, target, tid, TRUE);
++ if ((result == GRANTED) || (result == DO_NOT_CARE))
++ return 0;
++ else
++ return (-RSBAC_EDECISIONMISMATCH);
++ break;
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++
++ case R_CHANGE_OWNER:
++ switch (target) {
++ /* Changing process owner affects access decisions, */
++ /* so attributes have to be adjusted. */
++ case T_PROCESS:
++ /* For target process there MUST be a new owner specified */
++ if (attr != A_owner)
++ return (-RSBAC_EINVALIDATTR);
++
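++		/* Recompute the process MAC attributes from the new owner:
++		 * copy level and categories, clamp the min_write / max_read
++		 * boundaries, keep the current level and categories within the
++		 * new [min, max] range and rebuild the process flags. */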
++ /* Get owner-sec-level and mac_categories for new owner */
++ i_tid.user = attr_val.owner;
++ if (rsbac_get_attr(SW_MAC,
++ T_USER,
++ i_tid,
++ A_security_level,
++ &i_attr_val2, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (rsbac_get_attr(SW_MAC,
++ T_USER,
++ i_tid,
++ A_mac_categories,
++ &i_attr_val3, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* set owner-sec-level and mac_categories for process to new values */
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_security_level,
++ i_attr_val2)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_mac_categories,
++ i_attr_val3)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Get min_write_open and min_write_categories of process */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_min_write_open,
++ &i_attr_val4, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_min_write_categories,
++ &i_attr_val5, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* adjust min_write_open and min_write_categories, if too high */
++ if (i_attr_val2.security_level <
++ i_attr_val4.min_write_open) {
++ i_attr_val4.min_write_open =
++ i_attr_val2.security_level;
++ if (rsbac_set_attr
++ (SW_MAC, T_PROCESS, tid, A_min_write_open,
++ i_attr_val4)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac",
++ A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ }
++ /* does process have categories in min_write
++			 * that the new owner does not have? */
++ /* If yes, throw them out. */
++ if ((i_attr_val3.mac_categories & i_attr_val5.
++ mac_categories)
++ != i_attr_val5.mac_categories) {
++ i_attr_val5.mac_categories &=
++ i_attr_val3.mac_categories;
++ if (rsbac_set_attr
++ (SW_MAC, T_PROCESS, tid,
++ A_min_write_categories,
++ i_attr_val5)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac",
++ A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ }
++ /* Get owner-initial-sec-level and
++ * mac_initial_categories for new owner */
++ /* These values will be adjusted by
++ * max_read / min_write and then used as */
++ /* new current level/categories. */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_MAC,
++ T_USER,
++ i_tid,
++ A_initial_security_level,
++ &i_attr_val6, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_initial_security_level,
++ i_attr_val6)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++#if 0
++ /* restrict current_level to be a maximum of min_write */
++ if (i_attr_val6.security_level >
++ i_attr_val4.min_write_open)
++ i_attr_val6.security_level =
++ i_attr_val4.min_write_open;
++#endif
++ if (rsbac_get_attr(SW_MAC,
++ T_USER,
++ i_tid,
++ A_mac_initial_categories,
++ &i_attr_val7, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_mac_initial_categories,
++ i_attr_val7)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++#if 0
++ /* restrict current_categories to be a maximum of min_write */
++ if ((i_attr_val7.mac_categories & i_attr_val5.
++ mac_categories) != i_attr_val7.mac_categories)
++ i_attr_val7.mac_categories &=
++ i_attr_val5.mac_categories;
++#endif
++ /* Get owner-min-sec-level and mac_min_categories for new owner */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_MAC,
++ T_USER,
++ i_tid,
++ A_min_security_level,
++ &i_attr_val8, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (rsbac_get_attr(SW_MAC,
++ T_USER,
++ i_tid,
++ A_mac_min_categories,
++ &i_attr_val9, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* set owner-sec-level and mac_categories for process to new values */
++ /* owner is set by main dispatcher! */
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_min_security_level,
++ i_attr_val8)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_mac_min_categories,
++ i_attr_val9)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Get max_read_open and max_read_categories of process */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_max_read_open,
++ &i_attr_val4, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_max_read_categories,
++ &i_attr_val5, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* adjust max_read_open and max_read_categories, if too low */
++ if (i_attr_val8.security_level >
++ i_attr_val4.max_read_open) {
++ i_attr_val4.max_read_open =
++ i_attr_val8.security_level;
++ if (rsbac_set_attr
++ (SW_MAC, T_PROCESS, tid, A_max_read_open,
++ i_attr_val4)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac",
++ A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ }
++#if 0
++ /* adjust current sec level to a minimum of max_read */
++ if (i_attr_val6.security_level <
++ i_attr_val4.max_read_open)
++ i_attr_val6.security_level =
++ i_attr_val4.max_read_open;
++#endif
++ /* but never set it over new max_level or under new min_level */
++ if (i_attr_val6.security_level >
++ i_attr_val2.security_level)
++ i_attr_val6.security_level =
++ i_attr_val2.security_level;
++ else if (i_attr_val6.security_level <
++ i_attr_val8.security_level)
++ i_attr_val6.security_level =
++ i_attr_val8.security_level;
++ if (rsbac_set_attr
++ (SW_MAC, T_PROCESS, tid, A_current_sec_level,
++ i_attr_val6)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++
++			/* does the new owner have categories in min_categories
++			   that the process max_read does not have? */
++ /* If yes, add them. */
++ if ((i_attr_val9.mac_categories & i_attr_val5.
++ mac_categories)
++ != i_attr_val9.mac_categories) {
++ i_attr_val5.mac_categories |=
++ i_attr_val9.mac_categories;
++ if (rsbac_set_attr
++ (SW_MAC, T_PROCESS, tid,
++ A_max_read_categories, i_attr_val5)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac",
++ A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ }
++#if 0
++ /* adjust current categories to include all from max_read (from initial) */
++ if ((i_attr_val7.mac_categories & i_attr_val5.
++ mac_categories) != i_attr_val5.mac_categories)
++ i_attr_val7.mac_categories |=
++ i_attr_val5.mac_categories;
++#endif
++ /* but never set it over new max_cats or under new min_cats */
++ if ((i_attr_val7.mac_categories & i_attr_val3.
++ mac_categories) != i_attr_val7.mac_categories)
++ i_attr_val7.mac_categories &=
++ i_attr_val3.mac_categories;
++ else if ((i_attr_val7.mac_categories & i_attr_val9.
++ mac_categories) !=
++ i_attr_val9.mac_categories)
++ i_attr_val7.mac_categories |=
++ i_attr_val9.mac_categories;
++ if (rsbac_set_attr
++ (SW_MAC, T_PROCESS, tid, A_mac_curr_categories,
++ i_attr_val7)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++
++ /* Get mac_user_flags from user */
++ i_tid.user = attr_val.owner;
++ if (rsbac_get_attr(SW_MAC,
++ T_USER,
++ i_tid,
++ A_mac_user_flags,
++ &i_attr_val3, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ i_attr_val1.mac_process_flags =
++ i_attr_val3.mac_user_flags;
++ /* adjust flags - first get old process flags */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_mac_process_flags,
++ &i_attr_val2, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if ((i_attr_val2.
++ mac_process_flags & MAC_program_auto)
++ && (i_attr_val3.
++ mac_user_flags & MAC_allow_auto)
++ )
++ i_attr_val1.mac_process_flags |= MAC_auto;
++
++ i_attr_val1.mac_process_flags &= RSBAC_MAC_P_FLAGS;
++
++ if (!(i_attr_val1.mac_process_flags & MAC_trusted)) {
++ if (rsbac_mac_p_truset_member
++ (caller_pid, owner))
++ i_attr_val1.mac_process_flags |=
++ MAC_trusted;
++ }
++ /* Set mac_process_flags on process */
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_mac_process_flags,
++ i_attr_val1)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* OK, we are ready */
++ return 0;
++
++ /* We do not care about other cases here */
++ default:
++ return 0;
++ }
++
++ case R_CLONE:
++ if (target == T_PROCESS) {
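++		/* Copy the complete MAC state from the parent process to the
++		 * new process: levels, categories, read/write boundaries,
++		 * initial and minimum values, flags and the trusted user set. */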
++ /* Get owner-sec-level from first process */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_security_level,
++ &i_attr_val2, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Get current-sec-level from first process... */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_current_sec_level,
++ &i_attr_val3, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Get min_write_open from first process */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_min_write_open,
++ &i_attr_val4, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Get max_read_open from first process */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_max_read_open,
++ &i_attr_val5, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Get mac_process_flags from first process */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_mac_process_flags,
++ &i_attr_val7, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Set owner_sec_level for new process */
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ new_tid,
++ A_security_level,
++ i_attr_val2)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Set current_sec_level for new process */
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ new_tid,
++ A_current_sec_level,
++ i_attr_val3)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Set min_write_open for new process */
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ new_tid,
++ A_min_write_open,
++ i_attr_val4)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Set max_read_open for new process */
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ new_tid,
++ A_max_read_open, i_attr_val5)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Set mac_process_flags for new process */
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ new_tid,
++ A_mac_process_flags,
++ i_attr_val7)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++
++ /* Get mac_categories from first process */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_mac_categories,
++ &i_attr_val2, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Get mac_curr_categories from first process... */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_mac_curr_categories,
++ &i_attr_val3, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Get min_write_categories from first process */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_min_write_categories,
++ &i_attr_val4, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Get max_read_categories from first process */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_max_read_categories,
++ &i_attr_val5, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Get initial_sec_level from first process */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_initial_security_level,
++ &i_attr_val6, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Get initial_categories from first process */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_mac_initial_categories,
++ &i_attr_val7, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Set mac_categories for new process */
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ new_tid,
++ A_mac_categories,
++ i_attr_val2)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Set mac_curr_categories for new process */
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ new_tid,
++ A_mac_curr_categories,
++ i_attr_val3)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Set min_write_categories for new process */
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ new_tid,
++ A_min_write_categories,
++ i_attr_val4)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Set max_read_categories for new process */
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ new_tid,
++ A_max_read_categories,
++ i_attr_val5)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Set initial_security_level for new process */
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ new_tid,
++ A_initial_security_level,
++ i_attr_val6)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Set initial_categories for new process */
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ new_tid,
++ A_mac_initial_categories,
++ i_attr_val7)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Get owner-min_sec-level/cat from first process */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_min_security_level,
++ &i_attr_val2, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_mac_min_categories,
++ &i_attr_val3, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Set min_security_level for new process */
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ new_tid,
++ A_min_security_level,
++ i_attr_val2)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Set min_categories for new process */
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ new_tid,
++ A_mac_min_categories,
++ i_attr_val3)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ if (rsbac_mac_copy_pp_truset
++ (tid.process, new_tid.process)) {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_mac(): rsbac_mac_copy_pp_truset() returned error!\n");
++ return (-RSBAC_EWRITEFAILED);
++ }
++ return 0;
++ } else
++ return 0;
++
++ case R_CREATE:
++ switch (target) {
++ /* Creating dir or (pseudo) file IN target dir! */
++ case T_DIR:
++ /* Mode of created item is ignored! */
++ /* and perform auto-write without(!)
++ * setting of attributes - no need */
++ /* -> decision consistency check only */
++ /* only check, if not MAC_LIGHT */
++#ifndef CONFIG_RSBAC_MAC_LIGHT
++ result = auto_write(caller_pid,
++ target, tid, FALSE);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return (-RSBAC_EDECISIONMISMATCH);
++#endif
++ /* test write access to target: get its sec_level */
++ if (rsbac_get_attr(SW_MAC,
++ T_DIR,
++ tid,
++ A_security_level,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (rsbac_get_attr(SW_MAC,
++ T_DIR,
++ tid,
++ A_mac_categories,
++ &i_attr_val2, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Get current_sec_level from process (initialized to owner_sec_level)... */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ i_tid,
++ A_current_sec_level,
++ &i_attr_val3, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++#ifdef CONFIG_RSBAC_MAC_SMART_INHERIT
++ /* Only set, if different than inherited value */
++ if (i_attr_val3.security_level !=
++ i_attr_val1.security_level)
++#endif
++ /* Set security-level for new item */
++ if (rsbac_set_attr(SW_MAC,
++ new_target,
++ new_tid,
++ A_security_level,
++ i_attr_val3)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac",
++ A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Get current_categories from process (initialized to owner_categories)... */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ i_tid,
++ A_mac_curr_categories,
++ &i_attr_val3, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++#ifdef CONFIG_RSBAC_MAC_SMART_INHERIT
++ /* Only set, if different than inherited value */
++ if (i_attr_val3.mac_categories !=
++ i_attr_val2.mac_categories)
++#endif
++ /* Set mac_categories for new item */
++ if (rsbac_set_attr(SW_MAC,
++ new_target,
++ new_tid,
++ A_mac_categories,
++ i_attr_val3)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac",
++ A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ return 0;
++ break;
++
++ case T_IPC:
++ i_tid.process = caller_pid;
++ /* Get current-sec-level from process... */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ i_tid,
++ A_current_sec_level,
++ &i_attr_val1, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Set security-level for this ipc item */
++ if (rsbac_set_attr(SW_MAC,
++ T_IPC,
++ tid,
++ A_security_level,
++ i_attr_val1)) {
++			rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Get mac_curr_categories from process... */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ i_tid,
++ A_mac_curr_categories,
++ &i_attr_val1, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Set curr_categories for new item */
++ if (rsbac_set_attr(SW_MAC,
++ T_IPC,
++ tid,
++ A_mac_categories,
++ i_attr_val1)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ return 0;
++ break;
++
++#ifdef CONFIG_RSBAC_MAC_NET_OBJ_PROT
++ case T_NETOBJ:
++ i_tid.process = caller_pid;
++ /* Get current-sec-level from process... */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ i_tid,
++ A_current_sec_level,
++ &i_attr_val1, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Set local security-level for this netobj item */
++ if (rsbac_set_attr(SW_MAC,
++ target,
++ tid,
++ A_local_sec_level,
++ i_attr_val1)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* Get mac_curr_categories from process... */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ i_tid,
++ A_mac_curr_categories,
++ &i_attr_val1, FALSE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ /* Set local curr_categories for new item */
++ if (rsbac_set_attr(SW_MAC,
++ target,
++ tid,
++ A_local_mac_categories,
++ i_attr_val1)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ return 0;
++ break;
++#endif
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++
++ /* removal of targets is done in main adf dispatcher! */
++ case R_DELETE:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ case T_IPC:
++ /* and perform auto-write without(!) setting of attributes */
++ /* - no information flow apart from missing file */
++ /* -> decision consistency check only */
++ result = auto_write(caller_pid,
++ target, tid, FALSE);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return (-RSBAC_EDECISIONMISMATCH);
++ else
++ return 0;
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++
++ case R_EXECUTE:
++ switch (target) {
++ case T_FILE:
++ /* copy trusted user list from file to process */
++ if (rsbac_mac_copy_fp_truset(tid.file, caller_pid)) {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_mac(): rsbac_mac_copy_fp_truset() returned error!\n");
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* perform auto-read with setting of attributes */
++ result = auto_read(caller_pid, target, tid, TRUE);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return (-RSBAC_EDECISIONMISMATCH);
++
++ /* reset current_sec_level, mac_auto, min_write_open */
++ /* and max_read_open for process */
++ i_tid.process = caller_pid;
++
++#ifdef CONFIG_RSBAC_MAC_RESET_CURR
++			/* First, reset current_sec_level and mac_curr_categories to the process's initial level and categories */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ i_tid,
++ A_initial_security_level,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ i_tid,
++ A_mac_initial_categories,
++ &i_attr_val2, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ i_tid,
++ A_current_sec_level,
++ i_attr_val1)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ i_tid,
++ A_mac_curr_categories,
++ i_attr_val2)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++#endif
++#if 0
++ /* Now, set min_write_open to process owner's seclevel */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ i_tid,
++ A_security_level,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++#endif
++ i_attr_val1.min_write_open = SL_max;
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ i_tid,
++ A_min_write_open,
++ i_attr_val1)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++#if 0
++ /* Next, set min_write_categories to process owner's mac_categories */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ i_tid,
++ A_mac_categories,
++ &i_attr_val2, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++#endif
++ i_attr_val2.mac_categories =
++ RSBAC_MAC_MAX_CAT_VECTOR;
++ if (rsbac_set_attr
++ (SW_MAC, T_PROCESS, i_tid, A_min_write_categories,
++ i_attr_val2)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* reset max_read boundary */
++#if 0
++ /* Get owner-min-sec-level and mac_min_categories for owner */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ i_tid,
++ A_min_security_level,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ i_tid,
++ A_mac_min_categories,
++ &i_attr_val2, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++#endif
++ i_attr_val1.max_read_open = SL_min;
++ i_attr_val2.mac_categories =
++ RSBAC_MAC_MIN_CAT_VECTOR;
++ if (rsbac_set_attr
++ (SW_MAC, T_PROCESS, i_tid, A_max_read_open,
++ i_attr_val1)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ /* reset category max_read boundary */
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ i_tid,
++ A_max_read_categories,
++ i_attr_val2)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++			/* set flags: combine the program's mac_auto setting with the owner's user flags and trusted state */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ i_tid,
++ A_mac_process_flags,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (rsbac_get_attr(SW_MAC,
++ target,
++ tid,
++ A_mac_auto,
++ &i_attr_val2, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (i_attr_val2.mac_auto) {
++ i_attr_val1.mac_process_flags |=
++ MAC_program_auto;
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_MAC,
++ T_USER,
++ i_tid,
++ A_mac_user_flags,
++ &i_attr_val2, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac",
++ A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (i_attr_val2.
++ mac_user_flags & MAC_allow_auto)
++ i_attr_val1.mac_process_flags |=
++ MAC_auto;
++ else
++ i_attr_val1.mac_process_flags &=
++ ~MAC_auto;
++ i_tid.process = caller_pid;
++ } else {
++ i_attr_val1.mac_process_flags &=
++ ~MAC_program_auto;
++ i_attr_val1.mac_process_flags &= ~MAC_auto;
++ }
++ if (rsbac_get_attr(SW_MAC,
++ T_FILE,
++ tid,
++ A_mac_prop_trusted,
++ &i_attr_val3, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (!(i_attr_val3.mac_prop_trusted)
++ || !(i_attr_val1.
++ mac_process_flags & MAC_trusted)
++ ) {
++ if (rsbac_mac_p_truset_member
++ (caller_pid, owner))
++ i_attr_val1.mac_process_flags |=
++ MAC_trusted;
++ else {
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_MAC,
++ T_USER,
++ i_tid,
++ A_mac_user_flags,
++ &i_attr_val2,
++ TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac",
++ A_none);
++ return
++ (-RSBAC_EREADFAILED);
++ }
++ if (i_attr_val2.
++ mac_user_flags & MAC_trusted)
++ i_attr_val1.
++ mac_process_flags |=
++ MAC_trusted;
++ else
++ i_attr_val1.
++ mac_process_flags &=
++ ~MAC_trusted;
++ i_tid.process = caller_pid;
++ }
++ }
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ i_tid,
++ A_mac_process_flags,
++ i_attr_val1)) {
++ rsbac_ds_set_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EWRITEFAILED);
++ }
++ return 0;
++
++ /* all other cases */
++ default:
++ return 0;
++ }
++
++ case R_MOUNT:
++ switch (target) {
++ case T_DIR:
++ case T_DEV:
++ /* and perform auto-read(-write) with setting of attributes */
++ if ((target == T_DEV)
++ && (attr == A_mode)
++ && (attr_val.mode & MS_RDONLY))
++ result = auto_read(caller_pid,
++ target, tid, TRUE);
++ else
++ result = auto_read_write(caller_pid,
++ target,
++ tid, TRUE);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return (-RSBAC_EDECISIONMISMATCH);
++ else
++ return 0;
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++
++ case R_READ:
++ switch (target) {
++ case T_DIR:
++#ifdef CONFIG_RSBAC_RW
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_IPC:
++#endif
++ /* and perform auto-read with setting of attributes */
++ result = auto_read(caller_pid, target, tid, TRUE);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return (-RSBAC_EDECISIONMISMATCH);
++ return 0;
++
++#ifdef CONFIG_RSBAC_RW
++ case T_DEV:
++ /* Only check for devices with mac_check set */
++ if (rsbac_get_attr(SW_MAC,
++ T_DEV,
++ tid,
++ A_mac_check,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (!i_attr_val1.mac_check)
++ return 0;
++ /* and perform auto-read with setting of attributes */
++ result = auto_read(caller_pid, target, tid, TRUE);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return (-RSBAC_EDECISIONMISMATCH);
++ return 0;
++#endif
++
++#ifdef CONFIG_RSBAC_MAC_NET_OBJ_PROT
++ case T_NETOBJ:
++ /* and perform auto-read with setting of attributes */
++ result = auto_read_attr(caller_pid,
++ target,
++ tid,
++ A_remote_sec_level,
++ A_remote_mac_categories,
++ TRUE);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return (-RSBAC_EDECISIONMISMATCH);
++ return 0;
++#endif
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++
++ case R_READ_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_IPC:
++ /* and perform auto-read with setting attributes */
++ result = auto_read(caller_pid, target, tid, TRUE);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return (-RSBAC_EDECISIONMISMATCH);
++ return 0;
++
++ case T_DEV:
++ /* Only check for devices with mac_check set */
++ if (rsbac_get_attr(SW_MAC,
++ T_DEV,
++ tid,
++ A_mac_check,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (!i_attr_val1.mac_check)
++ return 0;
++ /* and perform auto-read with setting attributes */
++ result = auto_read(caller_pid, target, tid, TRUE);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return (-RSBAC_EDECISIONMISMATCH);
++ return 0;
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++
++ case R_READ_WRITE_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_IPC:
++ /* and perform auto-read-write with setting of attributes */
++ result = auto_read_write(caller_pid,
++ target, tid, TRUE);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return (-RSBAC_EDECISIONMISMATCH);
++ return 0;
++
++ case T_DEV:
++ /* Only check for devices with mac_check set */
++ if (rsbac_get_attr(SW_MAC,
++ T_DEV,
++ tid,
++ A_mac_check,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (!i_attr_val1.mac_check)
++ return 0;
++ /* and perform auto-read-write with setting of attributes */
++ result = auto_read_write(caller_pid,
++ target, tid, TRUE);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return (-RSBAC_EDECISIONMISMATCH);
++ return 0;
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++
++ case R_SEARCH:
++ switch (target) {
++ case T_DIR:
++ case T_SYMLINK:
++ /* and perform auto-read with setting of attributes */
++ result = auto_read(caller_pid, target, tid, TRUE);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return (-RSBAC_EDECISIONMISMATCH);
++ return 0;
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++
++ case R_TRACE:
++ switch (target) {
++ case T_PROCESS:
++ /* and perform auto-read-write with setting attributes */
++ result = auto_read_write_attr(caller_pid,
++ target,
++ tid,
++ A_current_sec_level,
++ A_mac_curr_categories,
++ TRUE);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return (-RSBAC_EDECISIONMISMATCH);
++ return 0;
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++
++ case R_WRITE:
++ case R_WRITE_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_IPC:
++ /* and perform auto-write with setting attributes */
++ result = auto_write(caller_pid, target, tid, TRUE);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return (-RSBAC_EDECISIONMISMATCH);
++ return 0;
++
++ case T_DEV:
++ /* Only check for devices with mac_check set */
++ if (rsbac_get_attr(SW_MAC,
++ T_DEV,
++ tid,
++ A_mac_check,
++ &i_attr_val1, TRUE)) {
++ rsbac_ds_get_error
++ ("rsbac_adf_set_attr_mac", A_none);
++ return (-RSBAC_EREADFAILED);
++ }
++ if (!i_attr_val1.mac_check)
++ return 0;
++ /* and perform auto-write with setting attributes */
++ result = auto_write(caller_pid, target, tid, TRUE);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return (-RSBAC_EDECISIONMISMATCH);
++ return 0;
++
++#ifdef CONFIG_RSBAC_MAC_NET_OBJ_PROT
++ case T_NETOBJ:
++ /* and perform auto-write with setting attributes */
++ result = auto_write_attr(caller_pid,
++ target,
++ tid,
++ A_remote_sec_level,
++ A_remote_mac_categories,
++ TRUE);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return (-RSBAC_EDECISIONMISMATCH);
++ return 0;
++#endif
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++
++#ifdef CONFIG_RSBAC_MAC_NET_OBJ_PROT
++ case R_BIND:
++ case R_LISTEN:
++ switch (target) {
++ case T_NETOBJ:
++ /* and perform auto-write with setting attributes */
++ result = auto_write_attr(caller_pid,
++ target,
++ tid,
++ A_local_sec_level,
++ A_local_mac_categories,
++ TRUE);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return (-RSBAC_EDECISIONMISMATCH);
++ return 0;
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++
++ case R_ACCEPT:
++ case R_CONNECT:
++ case R_SEND:
++ case R_RECEIVE:
++ switch (target) {
++ case T_NETOBJ:
++ /* and perform auto-write with setting attributes */
++ result = auto_write_attr(caller_pid,
++ target,
++ tid,
++ A_remote_sec_level,
++ A_remote_mac_categories,
++ TRUE);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return (-RSBAC_EDECISIONMISMATCH);
++ return 0;
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++#endif
++ default:
++ return 0;
++ }
++
++ return 0;
++}
+diff --git a/rsbac/adf/mac/mac_syscalls.c b/rsbac/adf/mac/mac_syscalls.c
+new file mode 100644
+index 0000000..bfd5687
+--- /dev/null
++++ b/rsbac/adf/mac/mac_syscalls.c
+@@ -0,0 +1,709 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - Mandatory Access Control */
++/* File: rsbac/adf/mac/mac_syscalls.c */
++/* */
++/* Author and (c) 1999-2011: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 12/Jul/2011 */
++/*************************************************** */
++
++#include <linux/string.h>
++#include <rsbac/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/mac.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++#include <rsbac/debug.h>
++#include <rsbac/rkmem.h>
++
++/************************************************* */
++/* Global Variables */
++/************************************************* */
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++#ifndef CONFIG_RSBAC_MAINT
++static int
++ mac_sys_check_role(enum rsbac_system_role_t role)
++ {
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ i_tid.user = current_uid();
++ if (rsbac_get_attr(SW_MAC,
++ T_USER,
++ i_tid,
++ A_mac_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("mac_sys_check_role", A_mac_role);
++ return -EPERM;
++ }
++ /* if correct role, then grant */
++ if (i_attr_val1.system_role == role)
++ return 0;
++ else
++ return -EPERM;
++ }
++#endif
++
++/************************************************* */
++/* Externally visible functions */
++/************************************************* */
++
++/*****************************************************************************/
++/* This function allows processes to set their own current security level */
++/* via sys_rsbac_mac_set_curr_seclevel() system call. */
++/* The level must stay within the min_write_open/max_read_open boundary and */
++/* must not be greater than owner_sec_level. Setting current_sec_level by */
++/* this function also turns off auto-levelling via mac_auto. */
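++/* Illustrative sketch (hypothetical helper, not defined anywhere in this */
++/* patch): assuming SL_min and the process boundaries checked below allow */
++/* it, a caller could lower its current level while keeping its category */
++/* set by passing RSBAC_MAC_MIN_CAT_VECTOR, which leaves categories */
++/* unchanged; a nonzero return (e.g. -EPERM) means the checks failed: */
++#if 0
++static int example_lower_curr_level(void)
++{
++	return rsbac_mac_set_curr_level(SL_min, RSBAC_MAC_MIN_CAT_VECTOR);
++}
++#endif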
++
++int rsbac_mac_set_curr_level(rsbac_security_level_t level,
++ rsbac_mac_category_vector_t categories)
++ {
++ union rsbac_target_id_t tid;
++ union rsbac_attribute_value_t attr_val1;
++#ifndef CONFIG_RSBAC_MAINT
++ rsbac_mac_process_flags_t flags;
++#endif
++
++ if( (level > SL_max)
++ && (level != SL_none)
++ )
++ return -RSBAC_EINVALIDVALUE;
++
++ tid.process = task_pid(current);
++
++#ifndef CONFIG_RSBAC_MAINT
++ /* check flags */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_mac_process_flags,
++ &attr_val1,
++ FALSE))
++ { /* failed! */
++ rsbac_ds_get_error("rsbac_mac_set_curr_level", A_none);
++ return(-RSBAC_EREADFAILED);
++ }
++ flags = attr_val1.mac_process_flags;
++ if( !(flags & MAC_auto)
++ && !(flags & MAC_trusted)
++ && !(flags & MAC_override)
++ )
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: no auto, trusted or override -> not granted \n",
++ current_uid(),
++ current->pid,
++ current->comm);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_MAC]
++ #endif
++ )
++ #endif
++ return -EPERM;
++ }
++
++ /* override allows full range */
++ if(!(flags & MAC_override))
++ {
++ if(level != SL_none)
++ {
++ /* get maximum security level */
++ tid.process = task_pid(current);
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_security_level,
++ &attr_val1,
++ FALSE))
++ { /* failed! */
++ rsbac_ds_get_error("rsbac_mac_set_curr_level", A_none);
++ return(-RSBAC_EREADFAILED);
++ }
++ /* if level is too high -> error */
++ if (level > attr_val1.security_level)
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested level %u over max level %u, no override -> not granted \n",
++ current_uid(),
++ current->pid,
++ current->comm,
++ level,
++ attr_val1.security_level);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_MAC]
++ #endif
++ )
++ #endif
++ return -EPERM;
++ }
++ /* get minimum security level */
++ tid.process = task_pid(current);
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_min_security_level,
++ &attr_val1,
++ FALSE))
++ { /* failed! */
++ rsbac_ds_get_error("rsbac_mac_set_curr_level", A_none);
++ return(-RSBAC_EREADFAILED);
++ }
++ /* if level is too low -> error */
++ if (level < attr_val1.security_level)
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested level %u under min level %u, no override -> not granted \n",
++ current_uid(),
++ current->pid,
++ current->comm,
++ level,
++ attr_val1.security_level);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_MAC]
++ #endif
++ )
++ #endif
++ return -EPERM;
++ }
++
++ /* auto needed? -> stay inside boundaries */
++ if(!(flags & MAC_trusted))
++ {
++ /* check against upper/write boundary */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_min_write_open,
++ &attr_val1,
++ FALSE))
++ { /* failed! */
++ rsbac_ds_get_error("rsbac_mac_set_curr_level", A_none);
++ return(-RSBAC_EREADFAILED);
++ }
++ if (level > attr_val1.min_write_open)
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested level %u over min_write_open %u, no override or trusted -> not granted \n",
++ current_uid(),
++ current->pid,
++ current->comm,
++ level,
++ attr_val1.min_write_open);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_MAC]
++ #endif
++ )
++ #endif
++ return -EPERM;
++ }
++
++ /* check against lower/read boundary */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_max_read_open,
++ &attr_val1,
++ FALSE))
++ { /* failed! */
++ rsbac_ds_get_error("rsbac_mac_set_curr_level", A_none);
++ return(-RSBAC_EREADFAILED);
++ }
++ if (level < attr_val1.max_read_open)
++ return(-EPERM);
++ }
++ }
++ if(categories != RSBAC_MAC_MIN_CAT_VECTOR)
++ {
++ /* get maximum categories */
++ tid.process = task_pid(current);
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_mac_categories,
++ &attr_val1,
++ FALSE))
++ { /* failed! */
++ rsbac_ds_get_error("rsbac_mac_set_curr_level", A_none);
++ return(-RSBAC_EREADFAILED);
++ }
++ /* if categories are no subset -> error */
++ if ((categories & attr_val1.mac_categories) != categories)
++ {
++ char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(tmp2)
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested categories %s over max categories %s, no override -> not granted \n",
++ current_uid(),
++ current->pid,
++ current->comm,
++ u64tostrmac(tmp, categories),
++ u64tostrmac(tmp2, attr_val1.mac_categories));
++ rsbac_kfree(tmp2);
++ }
++ rsbac_kfree(tmp);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_MAC]
++ #endif
++ )
++ #endif
++ return -EPERM;
++ }
++ /* get minimum categories */
++ tid.process = task_pid(current);
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_mac_min_categories,
++ &attr_val1,
++ FALSE))
++ { /* failed! */
++ rsbac_ds_get_error("rsbac_mac_set_curr_level", A_none);
++ return(-RSBAC_EREADFAILED);
++ }
++ /* if level is too low -> error */
++ if ((categories & attr_val1.mac_categories) != attr_val1.mac_categories)
++ {
++ char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(tmp2)
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested categories %s under min categories %s, no override -> not granted \n",
++ current_uid(),
++ current->pid,
++ current->comm,
++ u64tostrmac(tmp, categories),
++ u64tostrmac(tmp2, attr_val1.mac_categories));
++ rsbac_kfree(tmp2);
++ }
++ rsbac_kfree(tmp);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_MAC]
++ #endif
++ )
++ #endif
++ return -EPERM;
++ }
++
++ /* auto needed? -> stay inside boundaries */
++ if(!(flags & MAC_trusted))
++ {
++ /* check against upper/write boundary */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_min_write_categories,
++ &attr_val1,
++ FALSE))
++ { /* failed! */
++ rsbac_ds_get_error("rsbac_mac_set_curr_level", A_none);
++ return(-RSBAC_EREADFAILED);
++ }
++ if ((categories & attr_val1.mac_categories) != categories)
++ {
++ char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(tmp2)
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested categories %s over min_write categories %s, no override or trusted -> not granted \n",
++ current_uid(),
++ current->pid,
++ current->comm,
++ u64tostrmac(tmp, categories),
++ u64tostrmac(tmp2, attr_val1.mac_categories));
++ rsbac_kfree(tmp2);
++ }
++ rsbac_kfree(tmp);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_MAC]
++ #endif
++ )
++ #endif
++ return -EPERM;
++ }
++ /* check against lower/read boundary */
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_max_read_categories,
++ &attr_val1,
++ FALSE))
++ { /* failed! */
++ rsbac_ds_get_error("rsbac_mac_set_curr_level", A_none);
++ return(-RSBAC_EREADFAILED);
++ }
++ if ((categories & attr_val1.mac_categories) != attr_val1.mac_categories)
++ {
++ char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ char * tmp2 = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(tmp2)
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_mac_set_curr_level(): uid %u, pid %u/%.15s: requested categories %s under max_read categories %s, no override or trusted -> not granted \n",
++ current_uid(),
++ current->pid,
++ current->comm,
++ u64tostrmac(tmp, categories),
++ u64tostrmac(tmp2, attr_val1.mac_categories));
++ rsbac_kfree(tmp2);
++ }
++ rsbac_kfree(tmp);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_MAC]
++ #endif
++ )
++ #endif
++ return -EPERM;
++ }
++ }
++ }
++ }
++#endif /* ifndef CONFIG_RSBAC_MAINT */
++
++ /* OK, checks passed: set values */
++ if(level != SL_none)
++ {
++ attr_val1.current_sec_level = level;
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_current_sec_level,
++ attr_val1))
++ { /* failed! */
++ rsbac_ds_set_error("rsbac_mac_set_curr_level", A_none);
++ return(-RSBAC_EWRITEFAILED);
++ }
++ }
++ if(categories != RSBAC_MAC_MIN_CAT_VECTOR)
++ {
++ attr_val1.mac_categories = categories;
++ if (rsbac_set_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_mac_curr_categories,
++ attr_val1))
++ { /* failed! */
++ rsbac_ds_set_error("rsbac_mac_set_curr_level", A_none);
++ return(-RSBAC_EWRITEFAILED);
++ }
++ }
++ return(0);
++ }
++
++/* getting own levels as well - no restrictions */
++int rsbac_mac_get_curr_level(rsbac_security_level_t * level_p,
++ rsbac_mac_category_vector_t * categories_p)
++ {
++ union rsbac_target_id_t tid;
++ union rsbac_attribute_value_t attr_val;
++
++ tid.process = task_pid(current);
++ if(level_p)
++ {
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_current_sec_level,
++ &attr_val,
++ FALSE))
++ { /* failed! */
++ rsbac_ds_get_error("rsbac_mac_get_curr_level", A_none);
++ return(-RSBAC_EREADFAILED);
++ }
++ *level_p = attr_val.current_sec_level;
++ }
++ if(categories_p)
++ {
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_mac_curr_categories,
++ &attr_val,
++ FALSE))
++ { /* failed! */
++ rsbac_ds_get_error("rsbac_mac_get_curr_level", A_none);
++ return(-RSBAC_EREADFAILED);
++ }
++ *categories_p = attr_val.mac_categories;
++ }
++ return 0;
++ }
++
++int rsbac_mac_get_max_level(rsbac_security_level_t * level_p,
++ rsbac_mac_category_vector_t * categories_p)
++ {
++ union rsbac_target_id_t tid;
++ union rsbac_attribute_value_t attr_val;
++
++ tid.process = task_pid(current);
++ if(level_p)
++ {
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_security_level,
++ &attr_val,
++ FALSE))
++ { /* failed! */
++ rsbac_ds_get_error("rsbac_mac_get_max_level", A_none);
++ return(-RSBAC_EREADFAILED);
++ }
++ *level_p = attr_val.security_level;
++ }
++ if(categories_p)
++ {
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_mac_categories,
++ &attr_val,
++ FALSE))
++ { /* failed! */
++ rsbac_ds_get_error("rsbac_mac_get_max_level", A_none);
++ return(-RSBAC_EREADFAILED);
++ }
++ *categories_p = attr_val.mac_categories;
++ }
++ return 0;
++ }
++
++
++int rsbac_mac_get_min_level(rsbac_security_level_t * level_p,
++ rsbac_mac_category_vector_t * categories_p)
++ {
++ union rsbac_target_id_t tid;
++ union rsbac_attribute_value_t attr_val;
++
++ tid.process = task_pid(current);
++ if(level_p)
++ {
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_min_security_level,
++ &attr_val,
++ FALSE))
++ { /* failed! */
++ rsbac_ds_get_error("rsbac_mac_get_min_level", A_none);
++ return(-RSBAC_EREADFAILED);
++ }
++ *level_p = attr_val.security_level;
++ }
++ if(categories_p)
++ {
++ if (rsbac_get_attr(SW_MAC,
++ T_PROCESS,
++ tid,
++ A_mac_min_categories,
++ &attr_val,
++ FALSE))
++ { /* failed! */
++ rsbac_ds_get_error("rsbac_mac_get_min_level", A_none);
++ return(-RSBAC_EREADFAILED);
++ }
++ *categories_p = attr_val.mac_categories;
++ }
++ return 0;
++ }
++
++int rsbac_mac_add_p_tru(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ rsbac_uid_t uid,
++ rsbac_time_t ttl)
++ {
++/* check only in non-maint mode */
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_MAC
++ if(rsbac_switch_mac)
++#endif
++ {
++ if(mac_sys_check_role(SR_security_officer))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_mac_add_p_tru(): adding MAC trusted user %u to process %u denied for process %u!\n",
++ uid,
++ pid,
++ current->pid);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_MAC]
++ #endif
++ )
++ #endif
++ return(-EPERM);
++ }
++ }
++#endif
++
++ /* OK, check passed. Add the trusted user entry. */
++ if(rsbac_mac_add_to_p_truset(ta_number, pid, uid, ttl))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_mac_add_p_tru(): rsbac_mac_add_to_p_truset() returned error!\n");
++ return(-RSBAC_EWRITEFAILED);
++ }
++ return 0;
++ }
++
++int rsbac_mac_remove_p_tru(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ rsbac_uid_t uid)
++ {
++/* check only in non-maint mode */
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_MAC
++ if(rsbac_switch_mac)
++#endif
++ {
++ if(mac_sys_check_role(SR_security_officer))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_mac_remove_p_tru(): removing MAC trusted user %u from process %u denied for process %u!\n",
++ uid,
++ pid,
++ current->pid);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_MAC]
++ #endif
++ )
++ #endif
++ return(-EPERM);
++ }
++ }
++#endif
++ /* OK, check passed. Try to remove the trusted user */
++ return(rsbac_mac_remove_from_p_truset(ta_number, pid, uid));
++ }
++
++int rsbac_mac_add_f_tru(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_mac_file_t file,
++ rsbac_uid_t uid,
++ rsbac_time_t ttl)
++ {
++/* check only in non-maint mode */
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_MAC
++ if(rsbac_switch_mac)
++#endif
++ {
++ if(mac_sys_check_role(SR_security_officer))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_mac_add_f_tru(): adding MAC trusted user %u to file %u on device %02u:%02u denied for process %u!\n",
++ uid,
++ file.inode,
++ MAJOR(file.device),
++ MINOR(file.device),
++ current->pid);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_MAC]
++ #endif
++ )
++ #endif
++ return(-EPERM);
++ }
++ }
++#endif
++
++ if(rsbac_mac_add_to_f_truset(ta_number, file, uid, ttl))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_mac_add_f_tru(): rsbac_mac_add_to_f_truset() returned error!\n");
++ return(-RSBAC_EWRITEFAILED);
++ }
++ return 0;
++ }
++
++int rsbac_mac_remove_f_tru(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_mac_file_t file,
++ rsbac_uid_t uid)
++ {
++/* check only in non-maint mode */
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_MAC
++ if(rsbac_switch_mac)
++#endif
++ {
++ if(mac_sys_check_role(SR_security_officer))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_mac_remove_f_tru(): removing MAC trusted user %u from file %u on device %02u:%02u denied for process %u!\n",
++ uid,
++ file.inode,
++ MAJOR(file.device),
++ MINOR(file.device),
++ current->pid);
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_MAC]
++ #endif
++ )
++ #endif
++ return(-EPERM);
++ }
++ }
++#endif
++
++ return(rsbac_mac_remove_from_f_truset(ta_number, file, uid));
++ }
++
++
++/* end of rsbac/adf/mac/mac_syscalls.c */
+diff --git a/rsbac/adf/pax/Makefile b/rsbac/adf/pax/Makefile
+new file mode 100644
+index 0000000..14181cf
+--- /dev/null
++++ b/rsbac/adf/pax/Makefile
+@@ -0,0 +1,9 @@
++#
++# File: rsbac/adf/pax/Makefile
++#
++# Makefile for the Linux rsbac pax decision module.
++#
++# Author and (c) 1999-2012 Amon Ott <ao@rsbac.org>
++#
++
++obj-y := pax_main.o
+diff --git a/rsbac/adf/pax/pax_main.c b/rsbac/adf/pax/pax_main.c
+new file mode 100644
+index 0000000..277c8ba
+--- /dev/null
++++ b/rsbac/adf/pax/pax_main.c
+@@ -0,0 +1,241 @@
++/**************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - PAX */
++/* File: rsbac/adf/pax/pax_main.c */
++/* */
++/* Author and (c) 1999-2008: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 26/Feb/2008 */
++/**************************************************** */
++
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <rsbac/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++#include <rsbac/pax_getname.h>
++#include <rsbac/debug.h>
++
++/************************************************* */
++/* Global Variables */
++/************************************************* */
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++/************************************************* */
++/* Externally visible functions */
++/************************************************* */
++
++/**** PaX set flags func ****/
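++/* Called from the PaX exec path with the new program's linux_binprm: the */
++/* function looks up the pax_flags attribute of the executed file in the */
++/* PAX module and copies only the bits covered by RSBAC_PAX_ALL_FLAGS */
++/* into current->mm->pax_flags, leaving all other flag bits untouched. */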
++#if defined(CONFIG_RSBAC_PAX) && (defined(CONFIG_PAX_HAVE_ACL_FLAGS) || defined(CONFIG_PAX_HOOK_ACL_FLAGS))
++
++#include <linux/binfmts.h>
++
++#if defined(CONFIG_PAX_HAVE_ACL_FLAGS)
++void pax_set_initial_flags(struct linux_binprm * bprm)
++#else
++void rsbac_pax_set_flags_func(struct linux_binprm * bprm)
++#endif
++ {
++ int err;
++ union rsbac_target_id_t tid;
++ union rsbac_attribute_value_t attr_val;
++
++ if(!rsbac_is_initialized())
++ return;
++ tid.file.device = bprm->file->f_dentry->d_sb->s_dev;
++ tid.file.inode = bprm->file->f_dentry->d_inode->i_ino;
++ tid.file.dentry_p = bprm->file->f_dentry;
++ err = rsbac_get_attr(SW_PAX,
++ T_FILE,
++ tid,
++ A_pax_flags,
++ &attr_val,
++ TRUE);
++ if(!err)
++ {
++ pax_check_flags(&attr_val.pax_flags);
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pax)
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pax_set_flags_func(): changing flags for process %u from %lx to %lx from device %02u:%02u inode %u\n",
++ current->pid,
++ current->mm->pax_flags & RSBAC_PAX_ALL_FLAGS,
++ attr_val.pax_flags,
++ MAJOR(tid.file.device),MINOR(tid.file.device),
++ tid.file.inode);
++ }
++#endif
++ /* Set flags for process */
++ current->mm->pax_flags = (current->mm->pax_flags & ~RSBAC_PAX_ALL_FLAGS) | attr_val.pax_flags;
++ }
++ else
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pax_set_flags_func(): get_data for device %02u:%02u, inode %u returned error %i!\n",
++ MAJOR(tid.file.device),
++ MINOR(tid.file.device),
++ tid.file.inode,
++ err);
++ }
++ }
++#endif
++
++
++inline enum rsbac_adf_req_ret_t
++ rsbac_adf_request_pax (enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ switch (request)
++ {
++ case R_MODIFY_ATTRIBUTE:
++ switch(attr)
++ {
++ case A_system_role:
++ case A_pax_role:
++ case A_pax_flags:
++ /* All attributes (remove target!) */
++ case A_none:
++ /* Security Officer? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_PAX,
++ T_USER,
++ i_tid,
++ A_pax_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_pax()", A_pax_role);
++ return(NOT_GRANTED);
++ }
++ /* if sec_officer, then grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ case R_READ_ATTRIBUTE:
++ switch(attr)
++ {
++ case A_system_role:
++ case A_pax_role:
++ case A_pax_flags:
++ /* All attributes (remove target!) */
++ case A_none:
++ /* Security Officer or Admin? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_PAX,
++ T_USER,
++ i_tid,
++ A_pax_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_pax()", A_pax_role);
++ return(NOT_GRANTED);
++ }
++ /* if sec_officer, then grant */
++ if( (i_attr_val1.system_role == SR_security_officer)
++ || (i_attr_val1.system_role == SR_administrator)
++ )
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ case R_SWITCH_LOG:
++ switch(target)
++ {
++ case T_NONE:
++ /* test owner's pax_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_PAX,
++ T_USER,
++ i_tid,
++ A_pax_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_pax()", A_pax_role);
++ return(NOT_GRANTED);
++ }
++ /* security officer? -> grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ /* all other cases are unknown */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_SWITCH_MODULE:
++ switch(target)
++ {
++ case T_NONE:
++ /* we need the switch_target */
++ if(attr != A_switch_target)
++ return NOT_GRANTED;
++ /* do not care for other modules */
++ if( (attr_val.switch_target != SW_PAX)
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ && (attr_val.switch_target != SW_SOFTMODE)
++ #endif
++ #ifdef CONFIG_RSBAC_FREEZE
++ && (attr_val.switch_target != SW_FREEZE)
++ #endif
++ )
++ return(DO_NOT_CARE);
++ /* test owner's pax_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_PAX,
++ T_USER,
++ i_tid,
++ A_pax_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_pax()", A_pax_role);
++ return(NOT_GRANTED);
++ }
++ /* security officer? -> grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ /* all other cases are unknown */
++ default: return(DO_NOT_CARE);
++ }
++
++/*********************/
++ default: return DO_NOT_CARE;
++ }
++
++ return DO_NOT_CARE;
++ } /* end of rsbac_adf_request_pax() */
++
++
++/* end of rsbac/adf/pax/pax_main.c */
+diff --git a/rsbac/adf/pm/Makefile b/rsbac/adf/pm/Makefile
+new file mode 100644
+index 0000000..e74040b
+--- /dev/null
++++ b/rsbac/adf/pm/Makefile
+@@ -0,0 +1,13 @@
++#
++# File: rsbac/adf/pm/Makefile
++#
++# Makefile for the Linux rsbac pm decision module.
++#
++# Author and (c) 1999-2012 Amon Ott <ao@rsbac.org>
++#
++
++obj-y := pm_syscalls.o
++# decisions only in non-maint mode
++ifneq ($(CONFIG_RSBAC_MAINT),y)
++obj-y += pm_main.o
++endif
+diff --git a/rsbac/adf/pm/pm_main.c b/rsbac/adf/pm/pm_main.c
+new file mode 100644
+index 0000000..036bc97
+--- /dev/null
++++ b/rsbac/adf/pm/pm_main.c
+@@ -0,0 +1,3182 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - Privacy Model */
++/* File: rsbac/adf/pm/pm_main.c */
++/* */
++/* Author and (c) 1999-2009: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 06/Oct/2009 */
++/*************************************************** */
++
++#include <linux/string.h>
++#include <rsbac/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/error.h>
++#include <rsbac/debug.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++#include <rsbac/pm.h>
++
++/************************************************* */
++/* Global Variables */
++/************************************************* */
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++static rsbac_pm_purpose_id_t
++ get_ipc_purpose(struct rsbac_ipc_t ipc_id)
++ {
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ /* get pm_ipc_purpose of given ipc */
++ i_tid.ipc = ipc_id;
++ if (rsbac_get_attr(SW_PM,
++ T_IPC,
++ i_tid,
++ A_pm_ipc_purpose,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "get_ipc_purpose(): rsbac_get_attr() returned error!\n");
++ return(0);
++ }
++ return(i_attr_val1.pm_ipc_purpose);
++ }
++
++static enum rsbac_adf_req_ret_t
++ tp_check(rsbac_pid_t caller_pid)
++ {
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ /* get pm_process_type of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_process_type,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "tp_check(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ if(i_attr_val1.pm_process_type == PP_TP)
++ return(NOT_GRANTED);
++ else
++ return(DO_NOT_CARE);
++ }
++
++/* This function does the actual checking for */
++/* necessary(access) and (purpose-binding or consent). */
++/* Additionally, information flow checking is done via input and output */
++/* purpose sets. */
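++/* In short, access is GRANTED only if: */
++/* 1. the requested accesses are contained in the necessary accesses (NA) */
++/* entry for (current_task, object_class, tp), */
++/* 2. the purpose of current_task is in the purpose set of the class, or */
++/* a consent entry exists for that purpose and the file, and */
++/* 3. the information flow checks succeed: for read, the class purpose */
++/* set must be a superset of the process output purpose set; for */
++/* write, a subset of the process input purpose set. */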
++static enum rsbac_adf_req_ret_t
++ na_and_pp_or_cs( rsbac_pid_t caller_pid,
++ struct rsbac_fs_file_t file,
++ rsbac_pm_accesses_t acc)
++ {
++ rsbac_pm_task_id_t task;
++ rsbac_pm_object_class_id_t object_class;
++ rsbac_pm_tp_id_t tp;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_pm_target_id_t i_pm_tid;
++ union rsbac_pm_data_value_t i_data_val1;
++ union rsbac_pm_data_value_t i_data_val2;
++ union rsbac_pm_set_id_t i_pm_set_id;
++ union rsbac_pm_set_member_t i_pm_set_member;
++ int error;
++
++ /* get object_class of file */
++ i_tid.file = file;
++ if (rsbac_get_attr(SW_PM,
++ T_FILE,
++ i_tid,
++ A_pm_object_class,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "na_and_pp_or_cs(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ object_class = i_attr_val1.pm_object_class;
++ /* if there is no class for this file, this is an error! */
++ /* (all personal data must have a class assigned, and this */
++ /* function must never be called for anything else) */
++ if(!object_class)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_WARNING
++ "na_and_pp_or_cs(): personal_data with NIL class!\n");
++#endif
++ return(NOT_GRANTED);
++ }
++
++ /* get current_task of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_current_task,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "na_and_pp_or_cs(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ task = i_attr_val1.pm_current_task;
++ if(!task)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "na_and_pp_or_cs(): no current_task for calling process trying to access personal_data\n");
++#endif
++ return(NOT_GRANTED);
++ }
++
++ /* get pm_tp of caller-process */
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_tp,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "na_and_pp_or_cs(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ tp = i_attr_val1.pm_tp;
++ if(!tp)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "na_and_pp_or_cs(): calling process trying to access personal_data has no TP-id\n");
++#endif
++ return(NOT_GRANTED);
++ }
++
++ /* get necessary accesses */
++ i_pm_tid.na.task = task;
++ i_pm_tid.na.object_class = object_class;
++ i_pm_tid.na.tp = tp;
++ if ((error = rsbac_pm_get_data(0,
++ PMT_NA,
++ i_pm_tid,
++ PD_accesses,
++ &i_data_val1)))
++ {
++ if(error != -RSBAC_EINVALIDTARGET)
++ rsbac_printk(KERN_WARNING
++ "na_and_pp_or_cs(): rsbac_pm_get_data() returned error %i!\n",
++ error);
++ return(NOT_GRANTED);
++ }
++ /* is requested access mode included in access mask? */
++ if((acc & i_data_val1.accesses) != acc)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "na_and_pp_or_cs(): requested access mode is not necessary\n");
++#endif
++ return(NOT_GRANTED);
++ }
++
++ /* OK, access is necessary -> check (purpose-bind or consent) */
++ /* first try purpose-binding */
++
++ /* get purpose-id of current_task */
++ i_pm_tid.task = task;
++ if ((error = rsbac_pm_get_data(0,
++ PMT_TASK,
++ i_pm_tid,
++ PD_purpose,
++ &i_data_val1)))
++ {
++ rsbac_printk(KERN_WARNING
++ "na_and_pp_or_cs(): rsbac_get_data() for current_TASK/purpose returned error %i!\n",
++ error);
++ return(NOT_GRANTED);
++ }
++ if(!i_data_val1.purpose)
++ {
++ rsbac_printk(KERN_WARNING
++ "na_and_pp_or_cs(): task %i has NIL purpose!\n",task);
++ return(NOT_GRANTED);
++ }
++ /* get purpose-set-id of class */
++ i_pm_tid.object_class = object_class;
++ if ((error = rsbac_pm_get_data(0,
++ PMT_CLASS,
++ i_pm_tid,
++ PD_pp_set,
++ &i_data_val2)))
++ {
++ if(error != -RSBAC_EINVALIDTARGET)
++ rsbac_printk(KERN_WARNING
++ "na_and_pp_or_cs(): rsbac_pm_get_data() returned error %i!\n",
++ error);
++ return(NOT_GRANTED);
++ }
++ /* OK, if task's purpose is in class's purpose_set */
++ i_pm_set_id.pp_set = i_data_val2.pp_set;
++ i_pm_set_member.pp = i_data_val1.purpose;
++ if (!rsbac_pm_set_member(0,PS_PP,i_pm_set_id,i_pm_set_member))
++ { /* purpose binding failed -> try consent */
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "na_and_pp_or_cs(): purpose of current_task of calling process is NOT in purpose set of class of file -> trying consent\n");
++#endif
++ i_pm_tid.cs.purpose = i_data_val1.purpose;
++ i_pm_tid.cs.file = file;
++ if(!rsbac_pm_exists(0,PMT_CS,i_pm_tid))
++ { /* neither pp-binding, nor consent -> do not grant */
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "na_and_pp_or_cs(): there is no consent for this purpose for file\n");
++#endif
++ return(NOT_GRANTED);
++ }
++ }
++
++ /* information flow check */
++
++ /* read access: is purpose set of class of file superset of process */
++ /* output purpose set? If not -> do not grant access */
++ /* (Output purpose set id is process id) */
++ if( (acc & RSBAC_PM_A_READ)
++ && !rsbac_pm_pp_superset(i_pm_set_id.pp_set, caller_pid) )
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "na_and_pp_or_cs(): failed information flow check for read access\n");
++#endif
++ return(NOT_GRANTED);
++ }
++
++ /* write access: is purpose set of class of file subset of process */
++ /* input purpose set? If not -> do not grant access */
++ /* (Input purpose set id is also process id) */
++ if( (acc & RSBAC_PM_A_WRITE_TO_FILE)
++ && !rsbac_pm_pp_subset(i_pm_set_id.pp_set, caller_pid) )
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "na_and_pp_or_cs(): failed information flow check for write access\n");
++#endif
++ return(NOT_GRANTED);
++ }
++
++ /* OK, all checks done. GRANT! */
++ return(GRANTED);
++ }
++
++/* reduced version for IPC objects */
++static enum rsbac_adf_req_ret_t
++ na_and_pp_ipc( rsbac_pm_task_id_t task,
++ rsbac_pid_t caller_pid,
++ rsbac_pm_accesses_t acc,
++ struct rsbac_ipc_t ipc_id)
++ {
++ rsbac_pm_tp_id_t tp;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_pm_target_id_t i_pm_tid;
++ union rsbac_pm_data_value_t i_data_val1;
++ union rsbac_pm_set_id_t i_pm_set_id;
++ union rsbac_pm_set_member_t i_pm_set_member;
++ int error;
++
++ if(!task)
++ return(NOT_GRANTED);
++
++ /* get pm_tp of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_tp,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "na_and_pp_ipc(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ tp = i_attr_val1.pm_tp;
++ if(!tp)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "na_and_pp_ipc(): calling process trying to access ipc has task, but no TP-id\n");
++#endif
++ return(NOT_GRANTED);
++ }
++
++ /* get necessary accesses */
++ i_pm_tid.na.task = task;
++ i_pm_tid.na.object_class = RSBAC_PM_IPC_OBJECT_CLASS_ID;
++ i_pm_tid.na.tp = tp;
++ if ((error = rsbac_pm_get_data(0,
++ PMT_NA,
++ i_pm_tid,
++ PD_accesses,
++ &i_data_val1)))
++ {
++ if(error != -RSBAC_EINVALIDTARGET)
++ rsbac_printk(KERN_WARNING
++ "na_and_pp_ipc(): rsbac_pm_get_data() returned error %i!\n",
++ error);
++ return(NOT_GRANTED);
++ }
++ /* is requested access mode included in access mask? */
++ if((acc & i_data_val1.accesses) != acc)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "na_and_pp_ipc(): requested access mode is not necessary\n");
++#endif
++ return(NOT_GRANTED);
++ }
++
++ /* OK, access is necessary -> check purpose-bind */
++ /* get purpose-id of current_task */
++ i_pm_tid.task = task;
++ if ((error = rsbac_pm_get_data(0,
++ PMT_TASK,
++ i_pm_tid,
++ PD_purpose,
++ &i_data_val1)))
++ {
++ rsbac_printk(KERN_WARNING
++ "na_and_pp_ipc(): rsbac_get_data() for current_TASK/purpose returned error %i!\n",
++ error);
++ return(NOT_GRANTED);
++ }
++ if(!i_data_val1.purpose)
++ {
++ rsbac_printk(KERN_WARNING
++ "na_and_pp_ipc(): task %i has NIL purpose!\n",task);
++ return(NOT_GRANTED);
++ }
++ /* get ipc_purpose of IPC-object */
++ i_tid.ipc = ipc_id;
++ if (rsbac_get_attr(SW_PM,
++ T_IPC,
++ i_tid,
++ A_pm_ipc_purpose,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "na_and_pp_ipc(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++
++ /* grant, if task's purpose is ipc's ipc_purpose or if */
++ /* IPC-pp is NIL and access is read-only */
++ if (!( (i_data_val1.purpose == i_attr_val1.pm_ipc_purpose)
++ || (!i_data_val1.purpose && !(acc & RSBAC_PM_A_WRITING) ) ) )
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "na_and_pp_ipc(): purpose of current_task of calling process is NOT ipc_purpose\n");
++#endif
++ return(NOT_GRANTED);
++ }
++ /* information flow check */
++
++ /* read access: is purpose of ipc object NIL or no other purpose in */
++ /* output purpose set? If not -> do not grant access */
++ /* (Output purpose set id is process id) */
++ if( (acc & RSBAC_PM_A_READ)
++ && i_attr_val1.pm_ipc_purpose
++ && !rsbac_pm_pp_only(i_attr_val1.pm_ipc_purpose, caller_pid) )
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "na_and_pp_ipc(): failed information flow check for read access\n");
++#endif
++ return(NOT_GRANTED);
++ }
++
++ /* write access: is purpose of ipc in */
++ /* input purpose set? If not -> do not grant access */
++ /* (Input purpose set id is also process id) */
++ if(acc & RSBAC_PM_A_WRITE_TO_FILE)
++ {
++ i_pm_set_id.in_pp_set = caller_pid;
++ i_pm_set_member.pp = i_attr_val1.pm_ipc_purpose;
++ if (!rsbac_pm_set_member(0, PS_IN_PP, i_pm_set_id, i_pm_set_member) )
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "na_and_pp_or_cs(): failed information flow check for write access\n");
++#endif
++ return(NOT_GRANTED);
++ }
++ }
++ /* OK, all checks done. GRANT! */
++ return(GRANTED);
++ }
++
++
++static enum rsbac_adf_req_ret_t
++ na_ipc(rsbac_pm_task_id_t task,
++ rsbac_pid_t caller_pid,
++ rsbac_pm_accesses_t acc)
++ {
++ rsbac_pm_tp_id_t tp;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_pm_target_id_t i_pm_tid;
++ union rsbac_pm_data_value_t i_data_val1;
++ int error;
++
++ if(!task)
++ return(NOT_GRANTED);
++
++ /* get pm_tp of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_tp,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "na_ipc(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ tp = i_attr_val1.pm_tp;
++ if(!tp)
++ return(NOT_GRANTED);
++
++ /* get necessary accesses */
++ i_pm_tid.na.task = task;
++ i_pm_tid.na.object_class = RSBAC_PM_IPC_OBJECT_CLASS_ID;
++ i_pm_tid.na.tp = tp;
++ if ((error = rsbac_pm_get_data(0,
++ PMT_NA,
++ i_pm_tid,
++ PD_accesses,
++ &i_data_val1)))
++ {
++ if(error != -RSBAC_EINVALIDTARGET)
++ rsbac_printk(KERN_WARNING
++ "na_ipc(): rsbac_pm_get_data() returned error %i!\n",
++ error);
++ return(NOT_GRANTED);
++ }
++ /* is requested access mode included in access mask? */
++ if((acc & i_data_val1.accesses) == acc)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++ }
++
++static enum rsbac_adf_req_ret_t
++ na_dev(rsbac_pid_t caller_pid,
++ rsbac_pm_accesses_t acc,
++ struct rsbac_dev_desc_t dev)
++ {
++ rsbac_pm_tp_id_t tp;
++ rsbac_pm_task_id_t task;
++ rsbac_pm_object_class_id_t object_class;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_pm_target_id_t i_pm_tid;
++ union rsbac_pm_data_value_t i_data_val1;
++ int error;
++
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_current_task,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "na_dev(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ task = i_attr_val1.pm_current_task;
++ /* if current_task = NIL -> do not grant */
++ if(!task)
++ {
++ return(NOT_GRANTED);
++ }
++
++ /* get pm_tp of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_tp,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "na_dev(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ tp = i_attr_val1.pm_tp;
++ if(!tp)
++ return(NOT_GRANTED);
++
++ /* get pm_object_class of dev target */
++ i_tid.dev = dev;
++ if (rsbac_get_attr(SW_PM,
++ T_DEV,
++ i_tid,
++ A_pm_object_class,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "na_dev(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ object_class = i_attr_val1.pm_object_class;
++
++ /* get necessary accesses */
++ i_pm_tid.na.task = task;
++ i_pm_tid.na.object_class = object_class;
++ i_pm_tid.na.tp = tp;
++ if ((error = rsbac_pm_get_data(0,
++ PMT_NA,
++ i_pm_tid,
++ PD_accesses,
++ &i_data_val1)))
++ {
++ if(error != -RSBAC_EINVALIDTARGET)
++ rsbac_printk(KERN_WARNING
++ "na_dev(): rsbac_pm_get_data() returned error %i!\n",
++ error);
++ return(NOT_GRANTED);
++ }
++ /* is requested access mode included in access mask? */
++ if((acc & i_data_val1.accesses) == acc)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++ }
++
++/* This function does the adjustment of input- and output-purpose-set of */
++/* the calling process according to type of access and purpose set of class */
++/* of file. */
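++/* For read access the input purpose set of the process becomes its */
++/* intersection with the purpose set of the class of the file; for write */
++/* access the output purpose set of the process becomes its union with */
++/* that purpose set. Both sets are keyed by the process id. */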
++static int
++ adjust_in_out_pp( rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ struct rsbac_fs_file_t file,
++ rsbac_pm_accesses_t acc)
++ {
++ rsbac_pm_object_class_id_t object_class;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_pm_target_id_t i_pm_tid;
++ union rsbac_pm_data_value_t i_data_val1;
++ int error;
++
++ /* get pm_object_type of file */
++ i_tid.file = file;
++ if (rsbac_get_attr(SW_PM,
++ target,
++ i_tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "adjust_in_out_pp(): rsbac_get_attr() returned error!\n");
++ return(-RSBAC_EREADFAILED);
++ }
++ /* we only adjust for personal_data */
++ if(i_attr_val1.pm_object_type != PO_personal_data)
++ return(0);
++
++ /* only personal_data left -> */
++ /* get object_class of file */
++ i_tid.file = file;
++ if (rsbac_get_attr(SW_PM,
++ target,
++ i_tid,
++ A_pm_object_class,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "adjust_in_out_pp(): rsbac_get_attr() returned error!\n");
++ return(-RSBAC_EREADFAILED);
++ }
++ object_class = i_attr_val1.pm_object_class;
++ /* if there is no class for this file, this is an error! */
++ /* (all personal data must have a class assigned, and here */
++ /* must never be anything else) */
++ if(!object_class)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_WARNING
++ "adjust_in_out_pp(): personal_data with NIL class!\n");
++#endif
++ return(-RSBAC_EINVALIDVALUE);
++ }
++
++ /* get pp_set-id of class */
++ i_pm_tid.object_class = object_class;
++ if ((error = rsbac_pm_get_data(0,
++ PMT_CLASS,
++ i_pm_tid,
++ PD_pp_set,
++ &i_data_val1)))
++ {
++ if(error != -RSBAC_EINVALIDTARGET)
++ rsbac_printk(KERN_WARNING
++ "adjust_in_out_pp(): rsbac_pm_get_data() returned error %i!\n",
++ error);
++ else
++ rsbac_printk(KERN_WARNING
++ "adjust_in_out_pp(): class %i of file does not exist!\n",
++ object_class);
++ return(-RSBAC_EREADFAILED);
++ }
++
++ /* adjust information flow check boundaries */
++
++ /* read access: create intersection of input-purpose-set of process and */
++ /* purpose-set of class of file in input-purpose-set of process */
++ /* (Input purpose set id is process id) */
++ if( (acc & RSBAC_PM_A_READ)
++ && rsbac_pm_pp_intersec(i_data_val1.pp_set, caller_pid) )
++ {
++ rsbac_printk(KERN_WARNING
++ "adjust_in_out_pp(): call to rsbac_pm_pp_intersec failed\n");
++ error = -RSBAC_EWRITEFAILED;
++ }
++
++ /* write access: create union of output-purpose-set of process and */
++ /* purpose-set of class of file in output-purpose-set of process */
++ /* (Output purpose set id is process id) */
++ if( (acc & RSBAC_PM_A_WRITE_TO_FILE)
++ && rsbac_pm_pp_union(i_data_val1.pp_set, caller_pid) )
++ {
++ rsbac_printk(KERN_WARNING
++ "adjust_in_out_pp(): call to rsbac_pm_pp_union failed\n");
++ error = -RSBAC_EWRITEFAILED;
++ }
++
++ /* OK, everything is done. */
++ return(error);
++ }
++
++/* This function does the adjustment of input- and output-purpose-set of */
++/* the calling process according to type of access and ipc-purpose of ipc */
++/* object. */
++static int
++ adjust_in_out_pp_ipc( rsbac_pid_t caller_pid,
++ struct rsbac_ipc_t ipc,
++ rsbac_pm_accesses_t acc)
++ {
++ union rsbac_pm_set_id_t i_pm_set_id;
++ union rsbac_pm_set_member_t i_pm_set_member;
++ rsbac_pm_purpose_id_t i_pm_pp;
++ int error = 0;
++
++ /* get IPC-purpose */
++ i_pm_pp = get_ipc_purpose(ipc);
++ /* if ipc_purpose is 0, this cannot be a TP -> no access to personal data */
++ /* -> no flow control */
++ if(!i_pm_pp)
++ return(0);
++
++ /* adjust information flow check boundaries */
++
++ /* read access: create intersection of input-purpose-set of process and */
++ /* purpose-set of ipc in input-purpose-set of process -> clear set and */
++ /* add ipc-purpose, because ipc-purpose must have been in it at decision */
++ /* (Input purpose set id is process id) */
++ if(acc & RSBAC_PM_A_READ)
++ {
++ i_pm_set_id.in_pp_set = caller_pid;
++ /* if set does not exist, create it */
++ if( !rsbac_pm_set_exist(0,PS_IN_PP, i_pm_set_id)
++ && rsbac_pm_create_set(0,PS_IN_PP, i_pm_set_id) )
++ {
++ rsbac_printk(KERN_WARNING
++ "adjust_in_out_pp_ipc(): call to rsbac_pm_create_set returned error\n");
++ error = -RSBAC_EWRITEFAILED;
++ }
++ if(rsbac_pm_clear_set(0,PS_IN_PP, i_pm_set_id) )
++ {
++ rsbac_printk(KERN_WARNING
++ "adjust_in_out_pp_ipc(): call to rsbac_pm_clear_set returned error\n");
++ error = -RSBAC_EWRITEFAILED;
++ }
++ i_pm_set_member.pp = i_pm_pp;
++ if(rsbac_pm_add_to_set(0,PS_IN_PP, i_pm_set_id, i_pm_set_member) )
++ {
++ rsbac_printk(KERN_WARNING
++ "adjust_in_out_pp_ipc(): call to rsbac_pm_add_to_set returned error\n");
++ error = -RSBAC_EWRITEFAILED;
++ }
++ }
++
++ /* write access: create union of output-purpose-set of process and */
++ /* purpose-set of ipc in output-purpose-set of process -> */
++ /* add ipc-purpose to output-purpose-set */
++ /* (Output purpose set id is process id) */
++ if(acc & RSBAC_PM_A_WRITE_TO_FILE)
++ {
++ i_pm_set_id.out_pp_set = caller_pid;
++ /* if set does not exist, create it */
++ if( !rsbac_pm_set_exist(0,PS_OUT_PP, i_pm_set_id)
++ && rsbac_pm_create_set(0,PS_OUT_PP, i_pm_set_id) )
++ {
++ rsbac_printk(KERN_WARNING
++ "adjust_in_out_pp_ipc(): call to rsbac_pm_create_set returned error\n");
++ error = -RSBAC_EWRITEFAILED;
++ }
++ /* add ipc_purpose to set */
++ i_pm_set_member.pp = i_pm_pp;
++ if(rsbac_pm_add_to_set(0,PS_OUT_PP, i_pm_set_id, i_pm_set_member) )
++ {
++ rsbac_printk(KERN_WARNING
++ "adjust_in_out_pp_ipc(): call to rsbac_pm_add_to_set returned error\n");
++ error = -RSBAC_EWRITEFAILED;
++ }
++ }
++
++ /* OK, everything is done. */
++ return(error);
++ }
++
++
++/************************************************* */
++/* Externally visible functions */
++/************************************************* */
++
++inline enum rsbac_adf_req_ret_t
++ rsbac_adf_request_pm (enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ enum rsbac_adf_req_ret_t result = DO_NOT_CARE;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_attribute_value_t i_attr_val2;
++ union rsbac_pm_target_id_t i_pm_tid;
++ union rsbac_pm_data_value_t i_data_val1;
++ union rsbac_pm_set_id_t i_pm_set_id;
++ union rsbac_pm_set_member_t i_pm_set_member;
++ rsbac_pm_purpose_id_t i_pm_pp;
++ int error;
++
++ switch (request)
++ {
++ case R_ADD_TO_KERNEL:
++ switch(target)
++ {
++ case T_FILE:
++ case T_DEV:
++ case T_NONE:
++ /* test owner's pm_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_PM,
++ T_USER,
++ i_tid,
++ A_pm_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* only administrators are allowed to do this */
++ if (i_attr_val1.pm_role != PR_system_admin)
++ return(NOT_GRANTED);
++ else
++ return(GRANTED);
++
++ /* all other cases */
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ case R_APPEND_OPEN:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ target,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* no append_open on TPs */
++ if(i_attr_val1.pm_object_type == PO_TP)
++ return(NOT_GRANTED);
++ /* TPs must not write on other than personal_data */
++ if(i_attr_val1.pm_object_type != PO_personal_data)
++ return(tp_check(caller_pid));
++
++ /* only personal_data left -> */
++ /* check necessary && (purpose_bind || consent) */
++ return(na_and_pp_or_cs(caller_pid,
++ tid.file,
++ RSBAC_PM_A_APPEND));
++ break;
++
++ /* Appending to devices is no problem here */
++ case T_DEV:
++ return(DO_NOT_CARE);
++
++ case T_IPC:
++ /* get IPC-purpose */
++ i_pm_pp = get_ipc_purpose(tid.ipc);
++ /* if IPC-pp is NIL -> process type must be NIL */
++ if(!i_pm_pp)
++ {
++ /* get process-type of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_process_type,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ if(i_attr_val1.pm_process_type == PP_TP)
++ return(NOT_GRANTED);
++ else
++ return(GRANTED);
++ }
++ /* OK, we do have an IPC-purpose */
++ /* get current_task of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_current_task,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if current_task = NIL -> do not grant */
++ if(!i_attr_val1.pm_current_task)
++ {
++ return(NOT_GRANTED);
++ }
++ /* check necessary && purpose_bind */
++ return(na_and_pp_ipc(i_attr_val1.pm_current_task,
++ caller_pid,
++ RSBAC_PM_A_APPEND,
++ tid.ipc));
++ break;
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_CHANGE_GROUP:
++ switch(target)
++ {
++ /* We do not care about process or user groups */
++ /* all other cases */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_CHANGE_OWNER:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ case T_SYMLINK:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ target,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* no access on TPs and personal_data*/
++ if( (i_attr_val1.pm_object_type == PO_TP)
++ || (i_attr_val1.pm_object_type == PO_personal_data))
++ return(NOT_GRANTED);
++ else
++ return(GRANTED);
++ break;
++
++          /* processes may only be given to another user if */
++          /* current_task is authorized for that user. */
++ /* If CONFIG_RSBAC_PM_ROLE_PROT is set, only changing */
++ /* to or from pm_role general_user is allowed. */
++ case T_PROCESS:
++ /* get current_task of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_current_task,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if task = NIL: no problem, grant */
++ if(!i_attr_val1.pm_current_task)
++ return(GRANTED);
++
++ /* get task_set_id of process-owner */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_PM,
++ T_USER,
++ i_tid,
++ A_pm_task_set,
++ &i_attr_val2,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if user has no set of authorized tasks -> do not grant */
++ if(!i_attr_val2.pm_task_set)
++ return(NOT_GRANTED);
++
++ /* grant, if task is in owner's authorized task_set */
++ i_pm_set_id.task_set = i_attr_val2.pm_task_set;
++ i_pm_set_member.task = i_attr_val1.pm_current_task;
++ if (rsbac_pm_set_member(0,PS_TASK,i_pm_set_id,i_pm_set_member))
++ return(GRANTED);
++ /* else: don't... */
++ else
++ return(NOT_GRANTED);
++
++          /* Change-owner without a target or for other targets: do not care */
++ case T_DIR:
++ case T_IPC:
++ case T_NONE:
++ return(DO_NOT_CARE);
++ /* all other cases are undefined */
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ case R_CLONE:
++ if (target == T_PROCESS)
++ {
++ /* get process_type of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_process_type,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* cloning is only allowed for normal processes */
++ if(i_attr_val1.pm_process_type == PP_none)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++ }
++ else
++ return(DO_NOT_CARE);
++
++ case R_CREATE:
++ switch(target)
++ {
++ /* Creating dir or (pseudo) file IN target dir! */
++ case T_DIR:
++ /* get process_type of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_process_type,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* we only care for TPs here */
++ if(i_attr_val1.pm_process_type != PP_TP)
++ return(DO_NOT_CARE);
++
++ /* get current_task of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_current_task,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ if(!i_attr_val1.pm_current_task)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_adf_request_pm(): no current_task for calling process trying to access personal_data\n");
++#endif
++ return(NOT_GRANTED);
++ }
++
++ /* get pm_tp of caller-process */
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_tp,
++ &i_attr_val2,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ if(!i_attr_val2.pm_tp)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_adf_request_pm(): calling process trying to access personal_data has no TP-id\n");
++#endif
++ return(NOT_GRANTED);
++ }
++
++ /* get necessary accesses for NIL class */
++ i_pm_tid.na.task = i_attr_val1.pm_current_task;
++ i_pm_tid.na.object_class = 0;
++ i_pm_tid.na.tp = i_attr_val2.pm_tp;
++ if ((error = rsbac_pm_get_data(0,
++ PMT_NA,
++ i_pm_tid,
++ PD_accesses,
++ &i_data_val1)))
++ {
++ if(error != -RSBAC_EINVALIDTARGET)
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_pm_get_data() returned error %i!\n",
++ error);
++ return(NOT_GRANTED);
++ }
++ /* is requested access mode included in access mask? */
++ if(!(RSBAC_PM_A_CREATE & i_data_val1.accesses))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_adf_request_pm(): requested access mode CREATE for class NIL is not necessary\n");
++#endif
++ return(NOT_GRANTED);
++ }
++
++ /* OK, create is necessary -> grant */
++ return(GRANTED);
++ break;
++
++ case T_IPC:
++ /* get current_task of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_current_task,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if current_task = NIL, do not care */
++ if(!i_attr_val1.pm_current_task)
++ return(DO_NOT_CARE);
++
++ /* check necessary */
++ return(na_ipc(i_attr_val1.pm_current_task,
++ caller_pid,
++ RSBAC_PM_A_CREATE));
++ break;
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_DELETE:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ case T_SYMLINK:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ target,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if TP: only TP_Manager */
++ if(i_attr_val1.pm_object_type == PO_TP)
++ {
++ /* test owner's pm_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_PM,
++ T_USER,
++ i_tid,
++ A_pm_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ if(i_attr_val1.pm_role == PR_tp_manager)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++ }
++ /* do not care for other than personal_data */
++ if(i_attr_val1.pm_object_type != PO_personal_data)
++ return(DO_NOT_CARE);
++
++ /* check necessary && (purpose_bind || consent) */
++ /* (in fact, necessary means allowed here) */
++ return(na_and_pp_or_cs(caller_pid,
++ tid.file,
++ RSBAC_PM_A_DELETE));
++ break;
++
++ case T_IPC:
++ /* get current_task of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_current_task,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if current_task = NIL, ipc_purpose must be NIL */
++ if(!i_attr_val1.pm_current_task)
++ {
++ if(!get_ipc_purpose(tid.ipc))
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++ }
++ /* check necessary && purpose_bind */
++ return(na_and_pp_ipc(i_attr_val1.pm_current_task,
++ caller_pid,
++ RSBAC_PM_A_DELETE,
++ tid.ipc));
++ break;
++
++ case T_DIR:
++ return(DO_NOT_CARE);
++ break;
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_EXECUTE:
++ case R_MAP_EXEC:
++ switch(target)
++ {
++ case T_FILE:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ T_FILE,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if not TP: do not care */
++ if(i_attr_val1.pm_object_type != PO_TP)
++ return(DO_NOT_CARE);
++
++ /* get pm_tp of target */
++ if (rsbac_get_attr(SW_PM,
++ T_FILE,
++ tid,
++ A_pm_tp,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if no tp: error! */
++ if(!i_attr_val1.pm_tp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): file with object_type TP has no tp_id!\n");
++ return(NOT_GRANTED);
++ }
++ /* get current_task of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_current_task,
++ &i_attr_val2,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if there is no current task, do not grant */
++ if(!i_attr_val2.pm_current_task)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_adf_request_pm(): no current_task for process trying to execute TP\n");
++#endif
++ return(NOT_GRANTED);
++ }
++ /* get tp_set_id of current_task */
++ i_pm_tid.task = i_attr_val2.pm_current_task;
++ if ((error = rsbac_pm_get_data(0,
++ PMT_TASK,
++ i_pm_tid,
++ PD_tp_set,
++ &i_data_val1)))
++ {
++ if(error != -RSBAC_EINVALIDTARGET)
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_pm_get_data() returned error %i!\n",
++ error);
++ return(NOT_GRANTED);
++ }
++ /* if there is no tp set, do not grant */
++ if(!i_data_val1.tp_set)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_adf_request_pm(): no tp_set for current_task of process trying to execute TP\n");
++#endif
++ return(NOT_GRANTED);
++ }
++
++ /* grant, if file's tp is in process-current-task's */
++ /* authorized tp_set */
++ i_pm_set_id.tp_set = i_data_val1.tp_set;
++ i_pm_set_member.tp = i_attr_val1.pm_tp;
++ if (rsbac_pm_set_member(0,PS_TP,i_pm_set_id,i_pm_set_member))
++ return(GRANTED);
++ /* else: don't... */
++ else
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_adf_request_pm(): tp %i of file is not in tp_set %i of current_task %i of process\n",
++ i_attr_val1.pm_tp, i_data_val1.tp_set, i_attr_val2.pm_current_task);
++#endif
++ return(NOT_GRANTED);
++ }
++
++ /* all other cases are undefined */
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ case R_GET_STATUS_DATA:
++ switch(target)
++ {
++ case T_SCD:
++ /* target rsbac_log? only for secoff and dataprot */
++ if (tid.scd != ST_rsbac_log)
++ return(GRANTED);
++ /* Secoff or dataprot? */
++ i_tid.user = owner;
++ if ((error=rsbac_get_attr(SW_PM,
++ T_USER,
++ i_tid,
++ A_pm_role,
++ &i_attr_val1,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error %i!\n",
++ error);
++ return(NOT_GRANTED);
++ }
++ /* grant only for secoff and dataprot */
++ if ( (i_attr_val1.pm_role == PR_security_officer)
++ || (i_attr_val1.pm_role == PR_data_protection_officer)
++ )
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++ default:
++ return(DO_NOT_CARE);
++ };
++
++ case R_LINK_HARD:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ case T_SYMLINK:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ target,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++            /* if OT = TP or OT = personal_data -> do not grant, else grant */
++ if( (i_attr_val1.pm_object_type == PO_TP)
++ || (i_attr_val1.pm_object_type == PO_personal_data))
++ return(NOT_GRANTED);
++ else
++ return(GRANTED);
++ break;
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_MODIFY_ACCESS_DATA:
++ case R_RENAME:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ case T_SYMLINK:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ target,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++
++ /* if personal_data -> do not grant */
++ if(i_attr_val1.pm_object_type == PO_personal_data)
++ return(NOT_GRANTED);
++ /* alternative: check necessary && (purpose_bind || consent) */
++ /* return(na_and_pp_or_cs(caller_pid,
++ tid.file,
++ RSBAC_PM_A_WRITE)); */
++
++ /* if TP: only TP_Manager, else: do not care */
++ if(i_attr_val1.pm_object_type != PO_TP)
++ return(DO_NOT_CARE);
++ /* test owner's pm_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_PM,
++ T_USER,
++ i_tid,
++ A_pm_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ if(i_attr_val1.pm_role == PR_tp_manager)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++ break;
++
++ case T_DIR:
++ return(DO_NOT_CARE);
++ break;
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_MODIFY_ATTRIBUTE:
++ switch(attr)
++ {
++ /* all pm relevant attributes are changed via sys_rsbac_pm */
++ /* using tickets in most cases -> deny here */
++ case A_pm_object_type:
++ case A_pm_tp:
++ case A_pm_role:
++ case A_pm_process_type:
++ case A_pm_current_task:
++ case A_pm_object_class:
++ case A_pm_ipc_purpose:
++ case A_pm_program_type:
++ case A_pm_task_set:
++ #ifdef CONFIG_RSBAC_PM_GEN_PROT
++ case A_owner:
++ case A_pseudo:
++ case A_vset:
++ case A_program_file:
++ #endif
++ #ifdef CONFIG_RSBAC_PM_AUTH_PROT
++ case A_auth_may_setuid:
++ case A_auth_may_set_cap:
++ case A_auth_start_uid:
++ case A_auth_start_euid:
++ case A_auth_start_gid:
++ case A_auth_start_egid:
++ case A_auth_last_auth:
++ #endif
++ return(NOT_GRANTED);
++ /* All attributes (remove target!) */
++ case A_none:
++ #ifdef CONFIG_RSBAC_PM_AUTH_PROT
++ case A_auth_add_f_cap:
++ case A_auth_remove_f_cap:
++ case A_auth_learn:
++ #endif
++ switch(target)
++ { /* special care for pm-relevant files and devs*/
++ case T_FILE:
++ case T_FIFO:
++ case T_DEV:
++ /* get object_type */
++ if (rsbac_get_attr(SW_PM,
++ target,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if OT is PM-relevant -> do not grant */
++ if( (i_attr_val1.pm_object_type != PO_none)
++ && (i_attr_val1.pm_object_type != PO_non_personal_data))
++ return(NOT_GRANTED);
++ else
++ return(GRANTED);
++
++ /* we do not care for dirs or symlinks */
++ case T_DIR:
++ case T_SYMLINK:
++ return(DO_NOT_CARE);
++
++ /* we do care for users, and if PM is active, we use */
++ /* tickets to delete user attributes, so do not grant.*/
++ /* take care: if other models are active, their */
++ /* additional restrictions are not met! */
++ case T_USER:
++ return(NOT_GRANTED);
++
++ /* no removing of process attributes */
++ case T_PROCESS:
++ return(NOT_GRANTED);
++
++ case T_IPC:
++ /* get ipc_purpose */
++ if (rsbac_get_attr(SW_PM,
++ T_IPC,
++ tid,
++ A_pm_ipc_purpose,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if a purpose is set -> do not grant, else: who cares? */
++ if(i_attr_val1.pm_ipc_purpose)
++ return(NOT_GRANTED);
++ else
++ return(GRANTED);
++
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ #ifdef CONFIG_RSBAC_PM_GEN_PROT
++ case A_log_array_low:
++ case A_log_array_high:
++ case A_log_program_based:
++ case A_log_user_based:
++ case A_symlink_add_uid:
++ case A_symlink_add_remote_ip:
++ case A_fake_root_uid:
++ case A_audit_uid:
++ case A_auid_exempt:
++ /* test owner's pm_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_PM,
++ T_USER,
++ i_tid,
++ A_pm_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* security officer? -> grant */
++ if (i_attr_val1.pm_role == PR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++ #endif
++
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ case R_MODIFY_PERMISSIONS_DATA:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ target,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if TP: only TP_Manager, else: do not care */
++ if(i_attr_val1.pm_object_type != PO_TP)
++ return(DO_NOT_CARE);
++ /* test owner's pm_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_PM,
++ T_USER,
++ i_tid,
++ A_pm_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ if(i_attr_val1.pm_role == PR_tp_manager)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++ break;
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_MODIFY_SYSTEM_DATA:
++ switch(target)
++ {
++ case T_SCD:
++          /* target rlimit or mlock? no problem, but needed -> grant */
++ if (tid.scd == ST_rlimit || tid.scd == ST_mlock)
++ return(GRANTED);
++ /* Administrator? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_PM,
++ T_USER,
++ i_tid,
++ A_pm_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if rsbac_log: grant only for secoff and dataprot */
++ if(tid.scd == ST_rsbac_log)
++ {
++ if ( (i_attr_val1.pm_role == PR_security_officer)
++ || (i_attr_val1.pm_role == PR_data_protection_officer)
++ )
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++ }
++          /* if rsbac_remote_log: grant only for secoff and dataprot */
++ if(tid.scd == ST_rsbac_remote_log)
++ {
++ if ( (i_attr_val1.pm_role == PR_security_officer)
++ || (i_attr_val1.pm_role == PR_data_protection_officer)
++ )
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++ }
++ /* other scds: if administrator, then grant */
++ if (i_attr_val1.pm_role == PR_system_admin)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_MOUNT:
++ switch(target)
++ {
++ case T_FILE:
++ case T_DIR:
++ case T_DEV:
++ /* Administrator? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_PM,
++ T_USER,
++ i_tid,
++ A_pm_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if administrator, then grant */
++ if (i_attr_val1.pm_role == PR_system_admin)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_READ:
++ switch(target)
++ {
++#ifdef CONFIG_RSBAC_RW
++ case T_FILE:
++ case T_FIFO:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ target,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++            /* no read on TPs */
++ if(i_attr_val1.pm_object_type == PO_TP)
++ return(NOT_GRANTED);
++ /* do not care for other than personal_data */
++ if(i_attr_val1.pm_object_type != PO_personal_data)
++ return(DO_NOT_CARE);
++
++ /* check necessary && (purpose_bind || consent) */
++ return(na_and_pp_or_cs(caller_pid,
++ tid.file,
++ RSBAC_PM_A_READ));
++ break;
++
++ case T_DEV:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ T_DEV,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++            /* check read only on devs containing personal_data */
++ if(i_attr_val1.pm_object_type != PO_personal_data)
++ return(DO_NOT_CARE);
++ /* check necessary && purpose_bind */
++ return(na_dev(caller_pid,
++ RSBAC_PM_A_READ,
++ tid.dev));
++
++#ifdef CONFIG_RSBAC_RW_SOCK
++ case T_IPC:
++ /* get current_task of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_current_task,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if current_task = NIL, ipc_purpose must be NIL */
++ if(!i_attr_val1.pm_current_task)
++ {
++ if(!get_ipc_purpose(tid.ipc))
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++ }
++ /* check necessary && purpose_bind */
++ return(na_and_pp_ipc(i_attr_val1.pm_current_task,
++ caller_pid,
++ RSBAC_PM_A_READ,
++ tid.ipc));
++ break;
++#endif /* RW_SOCK */
++#endif /* RW */
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_READ_ATTRIBUTE:
++ switch(attr)
++ {
++ case A_pm_object_type:
++ case A_pm_tp:
++ case A_pm_role:
++ case A_pm_process_type:
++ case A_pm_current_task:
++ case A_pm_object_class:
++ case A_pm_ipc_purpose:
++ case A_pm_program_type:
++ case A_pm_task_set:
++ #ifdef CONFIG_RSBAC_PM_GEN_PROT
++ case A_owner:
++ case A_pseudo:
++ case A_log_array_low:
++ case A_log_array_high:
++ case A_log_program_based:
++ case A_log_user_based:
++ case A_symlink_add_remote_ip:
++ case A_symlink_add_uid:
++ case A_fake_root_uid:
++ case A_audit_uid:
++ case A_auid_exempt:
++ case A_vset:
++ case A_program_file:
++ #endif
++ #ifdef CONFIG_RSBAC_PM_AUTH_PROT
++ case A_auth_may_setuid:
++ case A_auth_may_set_cap:
++ case A_auth_start_uid:
++ case A_auth_start_euid:
++ case A_auth_start_gid:
++ case A_auth_start_egid:
++ case A_auth_learn:
++ case A_auth_last_auth:
++ #endif
++ /* Security Officer or Data Protection Officer? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_PM,
++ T_USER,
++ i_tid,
++ A_pm_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if sec_officer or data_prot_off, then grant */
++ if( (i_attr_val1.pm_role == PR_security_officer)
++ || (i_attr_val1.pm_role == PR_data_protection_officer))
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ case R_READ_OPEN:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ target,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* no read_open on TPs */
++ if(i_attr_val1.pm_object_type == PO_TP)
++ return(NOT_GRANTED);
++ /* do not care for other than personal_data */
++ if(i_attr_val1.pm_object_type != PO_personal_data)
++ return(DO_NOT_CARE);
++
++ /* check necessary && (purpose_bind || consent) */
++ return(na_and_pp_or_cs(caller_pid,
++ tid.file,
++ RSBAC_PM_A_READ));
++ break;
++
++ case T_DEV:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ T_DEV,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* check read_open only on devs containing personal_data */
++ if(i_attr_val1.pm_object_type != PO_personal_data)
++ return(DO_NOT_CARE);
++ /* check necessary && purpose_bind */
++ return(na_dev(caller_pid,
++ RSBAC_PM_A_READ,
++ tid.dev));
++
++ case T_IPC:
++ /* get current_task of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_current_task,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if current_task = NIL, ipc_purpose must be NIL */
++ if(!i_attr_val1.pm_current_task)
++ {
++ if(!get_ipc_purpose(tid.ipc))
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++ }
++ /* check necessary && purpose_bind */
++ return(na_and_pp_ipc(i_attr_val1.pm_current_task,
++ caller_pid,
++ RSBAC_PM_A_READ,
++ tid.ipc));
++ break;
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_READ_WRITE_OPEN:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ target,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* no read_write_open on TPs */
++ if(i_attr_val1.pm_object_type == PO_TP)
++ return(NOT_GRANTED);
++ /* TPs must not write on other than personal_data */
++ if(i_attr_val1.pm_object_type != PO_personal_data)
++ return(tp_check(caller_pid));
++
++ /* check necessary && (purpose_bind || consent) */
++ return(na_and_pp_or_cs(caller_pid,
++ tid.file,
++ RSBAC_PM_A_READ | RSBAC_PM_A_WRITE));
++ break;
++
++ case T_DEV:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ T_DEV,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* check read_write_open only on devs containing personal_data or TPs*/
++ if( (i_attr_val1.pm_object_type != PO_personal_data)
++ && (i_attr_val1.pm_object_type != PO_TP) )
++ return(DO_NOT_CARE);
++ /* check necessary && purpose_bind */
++ return(na_dev(caller_pid,
++ RSBAC_PM_A_READ | RSBAC_PM_A_WRITE,
++ tid.dev));
++
++ case T_IPC:
++ /* get IPC-purpose */
++ i_pm_pp = get_ipc_purpose(tid.ipc);
++ /* if IPC-pp is NIL -> process type must be NIL */
++ if(!i_pm_pp)
++ {
++ /* get process-type of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_process_type,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ if(i_attr_val1.pm_process_type == PP_TP)
++ return(NOT_GRANTED);
++ else
++ return(GRANTED);
++ }
++ /* OK, we do have an IPC-purpose */
++ /* get current_task of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_current_task,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if current_task = NIL -> do not grant */
++ if(!i_attr_val1.pm_current_task)
++ {
++ return(NOT_GRANTED);
++ }
++ /* check necessary && purpose_bind */
++ return(na_and_pp_ipc(i_attr_val1.pm_current_task,
++ caller_pid,
++ RSBAC_PM_A_READ | RSBAC_PM_A_WRITE,
++ tid.ipc));
++ break;
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_REMOVE_FROM_KERNEL:
++ switch(target)
++ {
++ case T_FILE:
++ case T_DEV:
++ case T_NONE:
++ /* test owner's pm_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_PM,
++ T_USER,
++ i_tid,
++ A_pm_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* only administrators are allowed to do this */
++ if (i_attr_val1.pm_role != PR_system_admin)
++ return(NOT_GRANTED);
++ /* That's it */
++ return(GRANTED);
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++/* case R_RENAME: see R_MODIFY_ACCESS_DATA */
++
++ case R_SEND_SIGNAL:
++ switch(target)
++ {
++ case T_PROCESS:
++ /* TPs are not allowed to send signals */
++ /* get process_type of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_process_type,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* we do not allow TPs here */
++ if(i_attr_val1.pm_process_type == PP_TP)
++ return(NOT_GRANTED);
++
++            /* signals to TPs are restricted to tp_managers to prevent */
++ /* inconsistencies */
++ /* get process_type of target-process */
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ tid,
++ A_pm_process_type,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* we only care for TPs here */
++ if(i_attr_val1.pm_process_type != PP_TP)
++ return(DO_NOT_CARE);
++
++ /* test owner's pm_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_PM,
++ T_USER,
++ i_tid,
++ A_pm_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* only tp_managers are allowed to do this */
++ if (i_attr_val1.pm_role != PR_tp_manager)
++ return(NOT_GRANTED);
++ /* That's it */
++ return(GRANTED);
++
++ /* all other cases are undefined */
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ case R_SHUTDOWN:
++ switch(target)
++ {
++ case T_NONE:
++ /* test owner's pm_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_PM,
++ T_USER,
++ i_tid,
++ A_pm_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* only administrators are allowed to do this */
++ if (i_attr_val1.pm_role != PR_system_admin)
++ return(NOT_GRANTED);
++ /* That's it */
++ return(GRANTED);
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_SWITCH_LOG:
++ switch(target)
++ {
++ case T_NONE:
++ /* test owner's pm_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_PM,
++ T_USER,
++ i_tid,
++ A_pm_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* security officer? -> grant */
++ if (i_attr_val1.pm_role == PR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_SWITCH_MODULE:
++ switch(target)
++ {
++ case T_NONE:
++ /* we need the switch_target */
++ if(attr != A_switch_target)
++ return NOT_GRANTED;
++ /* deny PM to be switched, do not care for others */
++ if( (attr_val.switch_target == SW_PM)
++ #ifdef CONFIG_RSBAC_PM_AUTH_PROT
++ || (attr_val.switch_target == SW_AUTH)
++ #endif
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ || (attr_val.switch_target == SW_SOFTMODE)
++ #endif
++ #ifdef CONFIG_RSBAC_FREEZE
++ || (attr_val.switch_target == SW_FREEZE)
++ #endif
++ )
++ return(NOT_GRANTED);
++ else
++ return(DO_NOT_CARE);
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ /* notify only, handled by adf-dispatcher */
++ case R_TERMINATE:
++ if (target == T_PROCESS)
++ { /* Remove input and output purpose set of process */
++ i_pm_set_id.in_pp_set = tid.process;
++ rsbac_pm_remove_set(0, PS_IN_PP, i_pm_set_id);
++ i_pm_set_id.out_pp_set = tid.process;
++ rsbac_pm_remove_set(0, PS_OUT_PP, i_pm_set_id);
++ return(GRANTED);
++ }
++ else
++ return(DO_NOT_CARE);
++
++ case R_TRACE:
++ switch(target)
++ {
++ case T_PROCESS:
++ /* get process_type of calling process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_process_type,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* do not grant for TPs */
++ if(i_attr_val1.pm_process_type == PP_TP)
++ return(NOT_GRANTED);
++
++ /* get process_type of target-process */
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ tid,
++ A_pm_process_type,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* do not grant for TPs */
++ if(i_attr_val1.pm_process_type == PP_TP)
++ return(NOT_GRANTED);
++
++ /* neither P1 nor P2 is TP -> grant */
++ return(GRANTED);
++
++ /* all other cases are undefined */
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ case R_TRUNCATE:
++ switch(target)
++ {
++ case T_FILE:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ T_FILE,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++            /* no truncate on TPs */
++ if(i_attr_val1.pm_object_type == PO_TP)
++ return(NOT_GRANTED);
++ /* TPs must not write on other than personal_data */
++ if(i_attr_val1.pm_object_type != PO_personal_data)
++ return(tp_check(caller_pid));
++
++ /* check necessary && (purpose_bind || consent) */
++ return(na_and_pp_or_cs(caller_pid,
++ tid.file,
++ RSBAC_PM_A_WRITE));
++ break;
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_UMOUNT:
++ switch(target)
++ {
++ case T_FILE:
++ case T_DIR:
++ case T_DEV:
++ /* Administrator? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_PM,
++ T_USER,
++ i_tid,
++ A_pm_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if administrator, then grant */
++ if (i_attr_val1.pm_role == PR_system_admin)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_WRITE:
++ switch(target)
++ {
++#ifdef CONFIG_RSBAC_RW
++ case T_FILE:
++ case T_FIFO:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ target,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++            /* no write on TPs */
++ if(i_attr_val1.pm_object_type == PO_TP)
++ return(NOT_GRANTED);
++ /* TPs must not write on other than personal_data */
++ if(i_attr_val1.pm_object_type != PO_personal_data)
++ return(tp_check(caller_pid));
++
++ /* check necessary && (purpose_bind || consent) */
++ return(na_and_pp_or_cs(caller_pid,
++ tid.file,
++ RSBAC_PM_A_WRITE));
++ break;
++
++ case T_DEV:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ T_DEV,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++            /* check write only on devs containing personal_data or TPs */
++ if( (i_attr_val1.pm_object_type != PO_personal_data)
++ && (i_attr_val1.pm_object_type != PO_TP) )
++ return(DO_NOT_CARE);
++ /* check necessary && purpose_bind */
++ return(na_dev(caller_pid,
++ RSBAC_PM_A_WRITE,
++ tid.dev));
++
++#ifdef CONFIG_RSBAC_RW_SOCK
++ case T_IPC:
++ /* get IPC-purpose */
++ i_pm_pp = get_ipc_purpose(tid.ipc);
++ /* if IPC-pp is NIL -> process type must be NIL */
++ if(!i_pm_pp)
++ {
++ /* get process-type of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_process_type,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ if(i_attr_val1.pm_process_type == PP_TP)
++ return(NOT_GRANTED);
++ else
++ return(GRANTED);
++ }
++ /* OK, we do have an IPC-purpose */
++ /* get current_task of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_current_task,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if current_task = NIL -> do not grant */
++ if(!i_attr_val1.pm_current_task)
++ {
++ return(NOT_GRANTED);
++ }
++ /* check necessary && purpose_bind */
++ return(na_and_pp_ipc(i_attr_val1.pm_current_task,
++ caller_pid,
++ RSBAC_PM_A_WRITE,
++ tid.ipc));
++ break;
++#endif
++#endif
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_WRITE_OPEN:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ target,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++            /* no write_open on TPs */
++ if(i_attr_val1.pm_object_type == PO_TP)
++ return(NOT_GRANTED);
++ /* TPs must not write on other than personal_data */
++ if(i_attr_val1.pm_object_type != PO_personal_data)
++ return(tp_check(caller_pid));
++
++ /* check necessary && (purpose_bind || consent) */
++ return(na_and_pp_or_cs(caller_pid,
++ tid.file,
++ RSBAC_PM_A_WRITE));
++ break;
++
++ case T_DEV:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ T_DEV,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* check write_open only on devs containing personal_data or TPs*/
++ if( (i_attr_val1.pm_object_type != PO_personal_data)
++ && (i_attr_val1.pm_object_type != PO_TP) )
++ return(DO_NOT_CARE);
++ /* check necessary && purpose_bind */
++ return(na_dev(caller_pid,
++ RSBAC_PM_A_WRITE,
++ tid.dev));
++
++ case T_IPC:
++ /* get IPC-purpose */
++ i_pm_pp = get_ipc_purpose(tid.ipc);
++ /* if IPC-pp is NIL -> process type must be NIL */
++ if(!i_pm_pp)
++ {
++ /* get process-type of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_process_type,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ if(i_attr_val1.pm_process_type == PP_TP)
++ return(NOT_GRANTED);
++ else
++ return(GRANTED);
++ }
++ /* OK, we do have an IPC-purpose */
++ /* get current_task of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_current_task,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_request_pm(): rsbac_get_attr() returned error!\n");
++ return(NOT_GRANTED);
++ }
++ /* if current_task = NIL -> do not grant */
++ if(!i_attr_val1.pm_current_task)
++ {
++ return(NOT_GRANTED);
++ }
++ /* check necessary && purpose_bind */
++ return(na_and_pp_ipc(i_attr_val1.pm_current_task,
++ caller_pid,
++ RSBAC_PM_A_WRITE,
++ tid.ipc));
++ break;
++
++ /* all other cases are undefined */
++ default: return(DO_NOT_CARE);
++ }
++
++
++/*********************/
++ default: return DO_NOT_CARE;
++ }
++
++ return(result);
++ } /* end of rsbac_adf_request_pm() */
++
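++/* Editorial sketch, not part of the original RSBAC 1.4.6 sources: the value */
++/* returned by rsbac_adf_request_pm() above is meant to be combined by the */
++/* ADF dispatcher with the results of the other active decision modules. */
++/* The helper below only illustrates the assumed precedence (NOT_GRANTED */
++/* over GRANTED over DO_NOT_CARE); its name pm_example_combine() is invented */
++/* and the block is compiled out. */
++#if 0
++static inline enum rsbac_adf_req_ret_t
++  pm_example_combine(enum rsbac_adf_req_ret_t a,
++                     enum rsbac_adf_req_ret_t b)
++  {
++    if ((a == NOT_GRANTED) || (b == NOT_GRANTED))
++      return NOT_GRANTED;
++    if ((a == GRANTED) || (b == GRANTED))
++      return GRANTED;
++    return DO_NOT_CARE;
++  }
++#endif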
++
++/*****************************************************************************/
++/* If the request has been granted and the operation is actually performed, */
++/* the following function can be called by the AEF to get all ACI set */
++/* correctly. For write accesses that are performed fully within the kernel, */
++/* this is usually skipped to avoid extra calls, including R_CLOSE for */
++/* cleaning up. */
++/* The second target specification is the new target, if one has been */
++/* created; otherwise its values are ignored. */
++/* On success, 0 is returned, an error from rsbac/error.h otherwise. */
++/* (An illustrative call sequence is sketched after this function.) */
++
++int rsbac_adf_set_attr_pm(
++ enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_target_t new_target,
++ union rsbac_target_id_t new_tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_attribute_value_t i_attr_val2;
++ union rsbac_attribute_value_t i_attr_val3;
++ union rsbac_attribute_value_t i_attr_val4;
++ union rsbac_pm_target_id_t i_pm_tid;
++ union rsbac_pm_data_value_t i_data_val1;
++ int error;
++
++ switch (request)
++ {
++ case R_APPEND_OPEN:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ return(adjust_in_out_pp(caller_pid,
++ target,
++ tid.file,
++ RSBAC_PM_A_APPEND));
++ case T_IPC:
++ return(adjust_in_out_pp_ipc(caller_pid,
++ tid.ipc,
++ RSBAC_PM_A_APPEND));
++ case T_DEV:
++ return(0);
++ default:
++ return(0);
++ }
++#ifdef CONFIG_RSBAC_RW
++ case R_READ:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ return(adjust_in_out_pp(caller_pid,
++ target,
++ tid.file,
++ RSBAC_PM_A_READ));
++#ifdef CONFIG_RSBAC_RW_SOCK
++ case T_IPC:
++ return(adjust_in_out_pp_ipc(caller_pid,
++ tid.ipc,
++ RSBAC_PM_A_READ));
++#endif
++ default:
++ return(0);
++ }
++#endif
++ case R_READ_OPEN:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ return(adjust_in_out_pp(caller_pid,
++ target,
++ tid.file,
++ RSBAC_PM_A_READ));
++ case T_IPC:
++ return(adjust_in_out_pp_ipc(caller_pid,
++ tid.ipc,
++ RSBAC_PM_A_READ));
++ case T_DIR:
++ case T_DEV:
++ return(0);
++ default:
++ return(0);
++ }
++ case R_READ_WRITE_OPEN:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ return(adjust_in_out_pp(caller_pid,
++ target,
++ tid.file,
++ RSBAC_PM_A_READ | RSBAC_PM_A_WRITE));
++ case T_IPC:
++ return(adjust_in_out_pp_ipc(caller_pid,
++ tid.ipc,
++ RSBAC_PM_A_READ | RSBAC_PM_A_WRITE));
++ case T_DEV:
++ return(0);
++ default:
++ return(0);
++ }
++
++#ifdef CONFIG_RSBAC_RW
++ case R_WRITE:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ return(adjust_in_out_pp(caller_pid,
++ target,
++ tid.file,
++ RSBAC_PM_A_WRITE));
++#ifdef CONFIG_RSBAC_RW_SOCK
++ case T_IPC:
++ return(adjust_in_out_pp_ipc(caller_pid,
++ tid.ipc,
++ RSBAC_PM_A_WRITE));
++#endif
++ default:
++ return(0);
++ }
++#endif
++
++ case R_WRITE_OPEN:
++ switch(target)
++ {
++ case T_FILE:
++ case T_FIFO:
++ return(adjust_in_out_pp(caller_pid,
++ target,
++ tid.file,
++ RSBAC_PM_A_WRITE));
++ case T_DEV:
++ return(0);
++
++ case T_IPC:
++ return(adjust_in_out_pp_ipc(caller_pid,
++ tid.ipc,
++ RSBAC_PM_A_WRITE));
++ default:
++ return(0);
++ }
++
++ case R_CLONE:
++ if (target == T_PROCESS)
++ {
++ /* Get owner from first process (provided on call) */
++ i_attr_val1.owner = owner;
++ /* Get pm_tp from first process */
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ tid,
++ A_pm_tp,
++ &i_attr_val2,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_pm(): rsbac_get_attr() returned error!\n");
++ return(-RSBAC_EREADFAILED);
++ }
++ /* Get pm_current_task from first process... */
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ tid,
++ A_pm_current_task,
++ &i_attr_val3,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_pm(): rsbac_get_attr() returned error!\n");
++ return(-RSBAC_EREADFAILED);
++ }
++ /* Get pm_process_type from first process */
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ tid,
++ A_pm_process_type,
++ &i_attr_val4,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_pm(): rsbac_get_attr() returned error!\n");
++ return(-RSBAC_EREADFAILED);
++ }
++ /* Set pm_tp for new process */
++ if (rsbac_set_attr(SW_PM,
++ T_PROCESS,
++ new_tid,
++ A_pm_tp,
++ i_attr_val2))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_pm(): rsbac_set_attr() returned error!\n");
++ return(-RSBAC_EWRITEFAILED);
++ }
++ /* Set pm_current_task for new process */
++ if (rsbac_set_attr(SW_PM,
++ T_PROCESS,
++ new_tid,
++ A_pm_current_task,
++ i_attr_val3))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_pm(): rsbac_set_attr() returned error!\n");
++ return(-RSBAC_EWRITEFAILED);
++ }
++ /* Set pm_process_type for new process */
++ if (rsbac_set_attr(SW_PM,
++ T_PROCESS,
++ new_tid,
++ A_pm_process_type,
++ i_attr_val4))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_pm(): rsbac_set_attr() returned error!\n");
++ return(-RSBAC_EWRITEFAILED);
++ }
++ return(0);
++ }
++ else
++ return(0);
++
++ case R_CREATE:
++ switch(target)
++ {
++ /* Creating dir or (pseudo) file IN target dir! */
++ case T_DIR:
++ /* Mode of created item is ignored! */
++
++ /* Is calling process a TP? */
++ /* get process_type of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_process_type,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++                  "rsbac_adf_set_attr_pm(): rsbac_get_attr() returned error!\n");
++                  return(-RSBAC_EREADFAILED);
++ }
++ /* if TP: Set pm_object_class to purpose default class for new item */
++ if(i_attr_val1.pm_process_type == PP_TP)
++ {
++ /* get current_task of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_current_task,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_pm(): rsbac_get_attr() returned error!\n");
++                  return(-RSBAC_EREADFAILED);
++ }
++ if(!i_attr_val1.pm_current_task)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_adf_set_attr_pm(): no current_task for calling process trying to access personal_data\n");
++#endif
++                      return(-RSBAC_EREADFAILED);
++ }
++ /* get purpose of current_task */
++ i_pm_tid.task = i_attr_val1.pm_current_task;
++ if ((error = rsbac_pm_get_data(0,
++ PMT_TASK,
++ i_pm_tid,
++ PD_purpose,
++ &i_data_val1)))
++ {
++ if(error != -RSBAC_EINVALIDTARGET)
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_pm(): rsbac_pm_get_data() returned error %i!\n",
++ error);
++ return(error);
++ }
++ /* if there is no purpose, return error */
++ if(!i_data_val1.purpose)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_adf_set_attr_pm(): no purpose for current_task of process trying to execute TP\n");
++#endif
++                      return(-RSBAC_EREADFAILED);
++ }
++ /* get def_class of purpose of current_task */
++ i_pm_tid.pp = i_data_val1.purpose;
++ if ((error = rsbac_pm_get_data(0,
++ PMT_PP,
++ i_pm_tid,
++ PD_def_class,
++ &i_data_val1)))
++ {
++ if(error != -RSBAC_EINVALIDTARGET)
++ rsbac_printk(KERN_WARNING
++ "rsbac_adf_set_attr_pm(): rsbac_pm_get_data() returned error %i!\n",
++ error);
++ return(error);
++ }
++ i_attr_val1.pm_object_class = i_data_val1.def_class;
++ }
++ else /* calling process is no TP */
++ /* set class to NIL */
++ i_attr_val1.pm_object_class = 0;
++
++ if (rsbac_get_attr(SW_PM,
++ new_target,
++ new_tid,
++ A_pm_object_class,
++ &i_attr_val2,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++                 "rsbac_adf_set_attr_pm(): rsbac_get_attr() returned error!\n");
++ return(-RSBAC_EREADFAILED);
++ }
++ if(i_attr_val1.pm_object_class != i_attr_val2.pm_object_class)
++ {
++ if (rsbac_set_attr(SW_PM,
++ new_target,
++ new_tid,
++ A_pm_object_class,
++ i_attr_val1))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_pm(): rsbac_set_attr() returned error!\n");
++ return(-RSBAC_EWRITEFAILED);
++ }
++ }
++ /* Set pm_tp for new item */
++ i_attr_val1.pm_tp = 0;
++ if (rsbac_get_attr(SW_PM,
++ new_target,
++ new_tid,
++ A_pm_tp,
++ &i_attr_val2,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++                 "rsbac_adf_set_attr_pm(): rsbac_get_attr() returned error!\n");
++ return(-RSBAC_EREADFAILED);
++ }
++ if(i_attr_val1.pm_tp != i_attr_val2.pm_tp)
++ {
++ if (rsbac_set_attr(SW_PM,
++ new_target,
++ new_tid,
++ A_pm_tp,
++ i_attr_val1))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_pm(): rsbac_set_attr() returned error!\n");
++ return(-RSBAC_EWRITEFAILED);
++ }
++ }
++
++ /* get process_type of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_process_type,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++                 "rsbac_adf_set_attr_pm(): rsbac_get_attr() returned error!\n");
++ return(-RSBAC_EREADFAILED);
++ }
++ /* Set pm_object_type for new item */
++ if(new_target == T_DIR)
++ i_attr_val1.pm_object_type = PO_dir;
++ else
++ /* files: if process is TP, set to personal_data */
++ /* to prevent unrestricted access */
++ if(i_attr_val1.pm_process_type == PP_TP)
++ i_attr_val1.pm_object_type = PO_personal_data;
++ else
++ i_attr_val1.pm_object_type = PO_none;
++ if (rsbac_get_attr(SW_PM,
++ new_target,
++ new_tid,
++ A_pm_object_type,
++ &i_attr_val2,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++                 "rsbac_adf_set_attr_pm(): rsbac_get_attr() returned error!\n");
++ return(-RSBAC_EREADFAILED);
++ }
++ if(i_attr_val1.pm_object_type != i_attr_val2.pm_object_type)
++ {
++ if (rsbac_set_attr(SW_PM,
++ new_target,
++ new_tid,
++ A_pm_object_type,
++ i_attr_val1))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_pm(): rsbac_set_attr() returned error!\n");
++ return(-RSBAC_EWRITEFAILED);
++ }
++ }
++ return(0);
++ break;
++
++ case T_IPC:
++ /* Set pm_ipc_purpose for new item */
++ /* get current_task of caller-process */
++ i_tid.process = caller_pid;
++ if (rsbac_get_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_current_task,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++                 "rsbac_adf_set_attr_pm(): rsbac_get_attr() returned error!\n");
++ return(-RSBAC_EREADFAILED);
++ }
++ /* if current_task = NIL, ipc_purpose must be NIL */
++ if(!i_attr_val1.pm_current_task)
++ i_attr_val1.pm_ipc_purpose = 0;
++ else
++ {
++ /* get purpose of current_task */
++ i_pm_tid.task = i_attr_val1.pm_current_task;
++ if ((error = rsbac_pm_get_data(0,
++ PMT_TASK,
++ i_pm_tid,
++ PD_purpose,
++ &i_data_val1)))
++ {
++ if(error == -RSBAC_EINVALIDTARGET)
++ rsbac_printk(KERN_WARNING
++                    "rsbac_adf_set_attr_pm(): pm_current_task of calling process is invalid!\n");
++                  else
++                    rsbac_printk(KERN_WARNING
++                      "rsbac_adf_set_attr_pm(): rsbac_pm_get_data() returned error %i!\n",
++ error);
++ return(-RSBAC_EREADFAILED);
++ }
++ i_attr_val1.pm_ipc_purpose = i_data_val1.purpose;
++ }
++ if (rsbac_get_attr(SW_PM,
++ target,
++ tid,
++ A_pm_ipc_purpose,
++ &i_attr_val2,
++ FALSE))
++ {
++ rsbac_printk(KERN_WARNING
++                 "rsbac_adf_set_attr_pm(): rsbac_get_attr() returned error!\n");
++ return(-RSBAC_EREADFAILED);
++ }
++ if(i_attr_val1.pm_ipc_purpose != i_attr_val2.pm_ipc_purpose)
++ {
++ if (rsbac_set_attr(SW_PM,
++ target,
++ tid,
++ A_pm_ipc_purpose,
++ i_attr_val1))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_pm(): rsbac_set_attr() returned error!\n");
++ return(-RSBAC_EWRITEFAILED);
++ }
++ }
++ return(0);
++ break;
++
++ /* all other cases are undefined */
++ default:
++ return(0);
++ }
++
++ case R_EXECUTE:
++ switch(target)
++ {
++ case T_FILE:
++ /* get pm_object_type of target */
++ if (rsbac_get_attr(SW_PM,
++ T_FILE,
++ tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++                 "rsbac_adf_set_attr_pm(): rsbac_get_attr() returned error!\n");
++ return(-RSBAC_EREADFAILED);
++ }
++ /* if not TP: do nothing */
++ if(i_attr_val1.pm_object_type != PO_TP)
++ return(0);
++
++ /* get pm_tp of target */
++ if (rsbac_get_attr(SW_PM,
++ T_FILE,
++ tid,
++ A_pm_tp,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++                 "rsbac_adf_set_attr_pm(): rsbac_get_attr() returned error!\n");
++ return(-RSBAC_EREADFAILED);
++ }
++ /* if no tp: error! */
++ if(!i_attr_val1.pm_tp)
++ {
++ rsbac_printk(KERN_WARNING
++                 "rsbac_adf_set_attr_pm(): file with object_type TP has no tp_id!\n");
++ return(-RSBAC_EINVALIDVALUE);
++ }
++ /* Set pm_tp for this process */
++ i_tid.process = caller_pid;
++ if (rsbac_set_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_tp,
++ i_attr_val1))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_pm(): rsbac_set_attr() returned error!\n");
++ return(-RSBAC_EWRITEFAILED);
++ }
++ /* Set pm_process_type for this process */
++ i_attr_val1.pm_process_type = PP_TP;
++ if (rsbac_set_attr(SW_PM,
++ T_PROCESS,
++ i_tid,
++ A_pm_process_type,
++ i_attr_val1))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_pm(): rsbac_set_attr() returned error!\n");
++ return(-RSBAC_EWRITEFAILED);
++ }
++ return(0);
++
++ /* all other cases are undefined */
++ default:
++ return(0);
++ }
++
++/*********************/
++
++ default: return 0;
++ }
++
++ return 0;
++ } /* end of rsbac_adf_set_attr_pm() */
++
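++/* Editorial sketch, not part of the original RSBAC 1.4.6 sources: per the */
++/* comment above rsbac_adf_set_attr_pm(), an AEF hook is expected to ask for */
++/* a decision first and to update the PM attributes only after the granted */
++/* operation has really been performed. The hook name aef_example_read_open() */
++/* is invented and the block is compiled out; all other identifiers are taken */
++/* from this file. */
++#if 0
++static int aef_example_read_open(rsbac_pid_t caller_pid,
++                                 rsbac_uid_t owner,
++                                 union rsbac_target_id_t tid)
++  {
++    union rsbac_target_id_t none_tid;
++    union rsbac_attribute_value_t none_val;
++
++    memset(&none_tid, 0, sizeof(none_tid));
++    memset(&none_val, 0, sizeof(none_val));
++    if (rsbac_adf_request_pm(R_READ_OPEN, caller_pid, T_FILE, tid,
++                             A_none, none_val, owner) == NOT_GRANTED)
++      return -EPERM;
++    /* ... perform the actual open here ... */
++    return rsbac_adf_set_attr_pm(R_READ_OPEN, caller_pid, T_FILE, tid,
++                                 T_NONE, none_tid, A_none, none_val, owner);
++  }
++#endif
++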
++/******************************************/
++#ifdef CONFIG_RSBAC_SECDEL
++rsbac_boolean_t rsbac_need_overwrite_pm(struct dentry * dentry_p)
++ {
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ if( !dentry_p
++ || !dentry_p->d_inode)
++ return FALSE;
++
++ i_tid.file.device = dentry_p->d_sb->s_dev;
++ i_tid.file.inode = dentry_p->d_inode->i_ino;
++ i_tid.file.dentry_p = dentry_p;
++ /* get target's file flags */
++ if (rsbac_get_attr(SW_PM,
++ T_FILE,
++ i_tid,
++ A_pm_object_type,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_need_overwrite_pm(): rsbac_get_attr() returned error!\n");
++ return FALSE;
++ }
++
++ /* overwrite, if personal data */
++ if (i_attr_val1.pm_object_type == PO_personal_data)
++ return TRUE;
++ else
++ return FALSE;
++ }
++#endif
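++
++/* Editorial sketch, not part of the original RSBAC 1.4.6 sources: the helper */
++/* above lets the secure-deletion code (CONFIG_RSBAC_SECDEL) decide per file */
++/* whether its data blocks have to be overwritten before they are released. */
++/* The caller name below is invented and the block is compiled out. */
++#if 0
++static void secdel_example(struct dentry * dentry_p)
++  {
++    if (rsbac_need_overwrite_pm(dentry_p))
++      {
++        /* ... overwrite the file's data blocks before freeing them ... */
++      }
++  }
++#endif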
++
++/* end of rsbac/adf/pm/main.c */
+diff --git a/rsbac/adf/pm/pm_syscalls.c b/rsbac/adf/pm/pm_syscalls.c
+new file mode 100644
+index 0000000..6124ced
+--- /dev/null
++++ b/rsbac/adf/pm/pm_syscalls.c
+@@ -0,0 +1,3305 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - Privacy Model */
++/* File: rsbac/adf/pm/syscalls.c */
++/* */
++/* Author and (c) 1999-2012: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 07/May/2012 */
++/*************************************************** */
++
++#include <linux/string.h>
++#include <rsbac/aci.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/stat.h>
++#include <linux/syscalls.h>
++#include <linux/fdtable.h>
++#include <linux/namei.h>
++#include <linux/file.h>
++#include <linux/mount.h>
++#include <rsbac/pm_types.h>
++#include <rsbac/pm.h>
++#include <rsbac/pm_getname.h>
++#include <rsbac/error.h>
++#include <rsbac/debug.h>
++#include <rsbac/helpers.h>
++#include <rsbac/adf.h>
++#include <rsbac/adf_main.h>
++
++/************************************************* */
++/* Global Variables */
++/************************************************* */
++
++/************************************************* */
++/* Declarations */
++/************************************************* */
++
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++static int pm_get_file(const char * name,
++ enum rsbac_target_t * target_p,
++ union rsbac_target_id_t * tid_p)
++ {
++ int error = 0;
++ struct dentry * dentry_p;
++ struct path path;
++
++ /* get file dentry */
++ if ((error = user_lpath(name, &path)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG "pm_get_file(): call to user_lpath() returned %i\n", error);
++#endif
++ return -RSBAC_EINVALIDTARGET;
++ }
++ dentry_p = path.dentry;
++ if (!dentry_p->d_inode)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "pm_get_file(): file not found\n");
++#endif
++ return -RSBAC_EINVALIDTARGET;
++ }
++ if(S_ISREG(dentry_p->d_inode->i_mode))
++ {
++ /* copy device and inode */
++ tid_p->file.device = dentry_p->d_sb->s_dev;
++ tid_p->file.inode = dentry_p->d_inode->i_ino;
++ tid_p->file.dentry_p = dentry_p;
++ *target_p = T_FILE;
++ }
++ else if(S_ISFIFO(dentry_p->d_inode->i_mode))
++ {
++ /* copy device and inode */
++ tid_p->file.device = dentry_p->d_sb->s_dev;
++ tid_p->file.inode = dentry_p->d_inode->i_ino;
++ tid_p->file.dentry_p = dentry_p;
++ *target_p = T_FIFO;
++ }
++ else if(S_ISBLK(dentry_p->d_inode->i_mode))
++ {
++ /* copy dev data */
++ tid_p->dev.type = D_block;
++ tid_p->dev.major = RSBAC_MAJOR(dentry_p->d_inode->i_rdev);
++ tid_p->dev.minor = RSBAC_MINOR(dentry_p->d_inode->i_rdev);
++ *target_p = T_DEV;
++ }
++ else if(S_ISCHR(dentry_p->d_inode->i_mode))
++ {
++ /* copy dev data */
++ tid_p->dev.type = D_char;
++ tid_p->dev.major = RSBAC_MAJOR(dentry_p->d_inode->i_rdev);
++ tid_p->dev.minor = RSBAC_MINOR(dentry_p->d_inode->i_rdev);
++ *target_p = T_DEV;
++ }
++ else
++ error = -RSBAC_EINVALIDTARGET;
++ /* and free inode */
++ dput(dentry_p);
++ /* return */
++ return error;
++ }
++
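++/* Editorial sketch, not part of the original RSBAC 1.4.6 sources: how the */
++/* helper above could be used to resolve a user-supplied path name into an */
++/* RSBAC target before a PM attribute lookup. pm_example_resolve() is an */
++/* invented name and the block is compiled out. */
++#if 0
++static int pm_example_resolve(const char * name)
++  {
++    enum rsbac_target_t target;
++    union rsbac_target_id_t tid;
++    int error;
++
++    error = pm_get_file(name, &target, &tid);
++    if (error)
++      return error;
++    /* target is now T_FILE, T_FIFO or T_DEV and tid has been filled in */
++    return 0;
++  }
++#endif
++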
++/************************************************** */
++/* Externally visible functions */
++/************************************************* */
++
++/*****************************************************************************/
++/* This function is called via sys_rsbac_pm() system call */
++/* and serves as a dispatcher for all PM dependent system calls.            */
++
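++/* Ticket workflow (overview; the ticket id and values below are only an
++ * illustration):
++ *
++ *   1. A data protection officer registers the intended change:
++ *        param.create_ticket.id            = 42;
++ *        param.create_ticket.function_type = PTF_add_na;
++ *        param.create_ticket.function_param.add_na = ...;
++ *        param.create_ticket.valid_for     = 3600;    (seconds)
++ *        rsbac_pm(ta_number, PF_create_ticket, param, 0);
++ *
++ *   2. A security officer later executes the change, quoting the ticket:
++ *        param.add_na = ...;    (must match the stored ticket)
++ *        rsbac_pm(ta_number, PF_add_na, param, 42);
++ */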
++int rsbac_pm(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_pm_function_type_t function,
++ union rsbac_pm_function_param_t param,
++ rsbac_pm_tkt_id_t tkt)
++ {
++ union rsbac_pm_all_data_value_t all_data;
++ enum rsbac_target_t target;
++ union rsbac_target_id_t tid;
++ union rsbac_attribute_value_t attr_val;
++ union rsbac_pm_target_id_t pm_tid;
++ union rsbac_pm_target_id_t pm_tid2;
++ union rsbac_pm_data_value_t data_val;
++ int error = 0;
++ rsbac_uid_t owner;
++ enum rsbac_pm_role_t role;
++ struct rsbac_pm_purpose_list_item_t pp_set;
++ union rsbac_pm_set_id_t pm_set_id;
++ union rsbac_pm_set_member_t pm_set_member;
++ union rsbac_pm_tkt_internal_function_param_t tkt_i_function_param;
++ struct rsbac_fs_file_t file;
++ struct rsbac_dev_desc_t dev;
++ char tmp[80];
++ struct timespec now = CURRENT_TIME;
++ rsbac_boolean_t class_exists = FALSE;
++
++/* No processing possible before init (called at boot time) */
++ if (!rsbac_is_initialized())
++ return -RSBAC_ENOTINITIALIZED;
++
++ get_pm_function_type_name(tmp,function);
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_ds_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): called for function %s (No.%i)\n",
++ tmp,function);
++#endif
++ /* Getting basic information about caller */
++ /* only useful for real process, not idle or init */
++ if (current->pid > 1)
++ owner = current_uid();
++ else /* caller_pid <= 1 -> kernel or init are always owned by root */
++ owner = 0;
++
++ /* getting owner's pm_role from rsbac system */
++ tid.user = owner;
++ error = rsbac_ta_get_attr(ta_number,SW_PM,T_USER,tid,A_pm_role,&attr_val,TRUE);
++ if (error)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* something weird happened */
++ }
++ role = attr_val.pm_role;
++
++ switch(function)
++ {
++ case PF_create_ticket:
++ /* check, whether this ticket id already exists */
++ pm_tid.tkt = param.create_ticket.id;
++ if(rsbac_pm_exists(ta_number,
++ PMT_TKT,
++ pm_tid))
++ return -RSBAC_EEXISTS;
++
++ /* Check caller's pm_role, if needed, get file id for filename from */
++ /* param.x.filename, and copy params to tkt_internal_func_params. */
++ /* This part depends on the function the ticket shall be for. */
++ switch(param.create_ticket.function_type)
++ {
++ case PTF_add_na:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ tkt_i_function_param.add_na
++ = param.create_ticket.function_param.add_na;
++ break;
++
++ case PTF_delete_na:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ tkt_i_function_param.delete_na
++ = param.create_ticket.function_param.delete_na;
++ break;
++
++ case PTF_add_task:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ tkt_i_function_param.add_task
++ = param.create_ticket.function_param.add_task;
++ break;
++
++ case PTF_delete_task:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ tkt_i_function_param.delete_task
++ = param.create_ticket.function_param.delete_task;
++ break;
++
++ case PTF_add_object_class:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ /* class-id 0, IPC and DEV are used internally, reject */
++ if( !param.create_ticket.function_param.add_object_class.id
++ || (param.create_ticket.function_param.add_object_class.id
++ == RSBAC_PM_IPC_OBJECT_CLASS_ID)
++ || (param.create_ticket.function_param.add_object_class.id
++ == RSBAC_PM_DEV_OBJECT_CLASS_ID))
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): add_object_class: reserved class-id 0, %u or %u requested!\n",
++ RSBAC_PM_IPC_OBJECT_CLASS_ID,
++ RSBAC_PM_DEV_OBJECT_CLASS_ID);
++ return -RSBAC_EINVALIDVALUE;
++ }
++ /* copy class-id */
++ tkt_i_function_param.tkt_add_object_class.id
++ = param.create_ticket.function_param.add_object_class.id;
++ /* init pp_set-id for this ticket to 0 */
++ tkt_i_function_param.tkt_add_object_class.pp_set
++ = 0;
++ /* get purposes from user space and add them to set */
++ if(param.create_ticket.function_param.add_object_class.pp_list_p)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_ds_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): getting pp_list from user space\n");
++#endif
++ /* set a unique pp_set-id for this ticket (negative tkt-id) */
++ pm_set_id.pp_set = -param.create_ticket.id;
++ if((error = rsbac_pm_create_set(ta_number,PS_PP,pm_set_id)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_create_set() for PP returned error %i",
++ error);
++ return -RSBAC_EWRITEFAILED;
++ }
++ rsbac_get_user(&pp_set,
++ param.create_ticket.function_param.add_object_class.pp_list_p,
++ sizeof(pp_set));
++ pm_set_member.pp = pp_set.id;
++ if((error = rsbac_pm_add_to_set(ta_number,PS_PP,pm_set_id,pm_set_member)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_add_to_set() for PP returned error %i",
++ error);
++ rsbac_pm_remove_set(ta_number,PS_PP,pm_set_id);
++ return -RSBAC_EWRITEFAILED;
++ }
++
++ while(pp_set.next)
++ {
++ rsbac_get_user(&pp_set,
++ pp_set.next,
++ sizeof(pp_set));
++ pm_set_member.pp = pp_set.id;
++ if((error = rsbac_pm_add_to_set(ta_number,PS_PP,pm_set_id,pm_set_member)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_add_to_set() for PP returned error %i",
++ error);
++ rsbac_pm_remove_set(ta_number,PS_PP,pm_set_id);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ tkt_i_function_param.tkt_add_object_class.pp_set
++ = -param.create_ticket.id;
++ }
++ break;
++
++ case PTF_delete_object_class:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ tkt_i_function_param.delete_object_class
++ = param.create_ticket.function_param.delete_object_class;
++ break;
++
++ case PTF_add_authorized_tp:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ tkt_i_function_param.add_authorized_tp
++ = param.create_ticket.function_param.add_authorized_tp;
++ break;
++
++ case PTF_delete_authorized_tp:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ tkt_i_function_param.delete_authorized_tp
++ = param.create_ticket.function_param.delete_authorized_tp;
++ break;
++
++ case PTF_add_consent:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ /* get file id */
++ if ((error = pm_get_file(param.create_ticket.function_param.add_consent.filename,
++ &target,
++ &tid)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): call to pm_get_file() returned error %i\n",
++ error);
++#endif
++ return -RSBAC_EINVALIDTARGET;
++ }
++ /* target must be file */
++ if(target != T_FILE)
++ return -RSBAC_EINVALIDTARGET;
++ tkt_i_function_param.tkt_add_consent.file = tid.file;
++ tkt_i_function_param.tkt_add_consent.purpose
++ = param.create_ticket.function_param.add_consent.purpose;
++ break;
++
++ case PTF_delete_consent:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ /* get file id */
++ if ((error = pm_get_file(param.create_ticket.function_param.delete_consent.filename,
++ &target,
++ &tid)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): call to pm_get_file() returned error %i\n",
++ error);
++#endif
++ return -RSBAC_EINVALIDTARGET;
++ }
++ /* target must be file */
++ if(target != T_FILE)
++ return -RSBAC_EINVALIDTARGET;
++ tkt_i_function_param.tkt_delete_consent.file = tid.file;
++ tkt_i_function_param.tkt_delete_consent.purpose
++ = param.create_ticket.function_param.delete_consent.purpose;
++ break;
++
++ case PTF_add_purpose:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ tkt_i_function_param.add_purpose
++ = param.create_ticket.function_param.add_purpose;
++ break;
++
++ case PTF_delete_purpose:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ tkt_i_function_param.delete_purpose
++ = param.create_ticket.function_param.delete_purpose;
++ break;
++
++ case PTF_add_responsible_user:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ tkt_i_function_param.add_responsible_user
++ = param.create_ticket.function_param.add_responsible_user;
++ break;
++
++ case PTF_delete_responsible_user:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ tkt_i_function_param.delete_responsible_user
++ = param.create_ticket.function_param.delete_responsible_user;
++ break;
++
++ case PTF_delete_user_aci:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ tkt_i_function_param.delete_user_aci.id
++ = param.create_ticket.function_param.delete_user_aci.id;
++ break;
++
++ case PTF_set_role:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ tkt_i_function_param.set_role
++ = param.create_ticket.function_param.set_role;
++ break;
++
++ case PTF_set_object_class:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ /* get file id */
++ if ((error = pm_get_file(param.create_ticket.function_param.set_object_class.filename,
++ &target,
++ &tid)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): call to pm_get_file() returned error %i\n",
++ error);
++#endif
++ return -RSBAC_EINVALIDTARGET;
++ }
++ /* target must be file */
++ if( (target != T_FILE)
++ && (target != T_FIFO)
++ )
++ return -RSBAC_EINVALIDTARGET;
++ tkt_i_function_param.tkt_set_object_class.file = tid.file;
++ tkt_i_function_param.tkt_set_object_class.object_class
++ = param.create_ticket.function_param.set_object_class.object_class;
++ break;
++
++#ifdef CONFIG_RSBAC_SWITCH_PM
++ case PTF_switch_pm:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ tkt_i_function_param.switch_pm
++ = param.create_ticket.function_param.switch_pm;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_AUTH
++ case PTF_switch_auth:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ tkt_i_function_param.switch_auth
++ = param.create_ticket.function_param.switch_auth;
++ break;
++#endif
++
++ case PTF_set_device_object_type:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ /* get file id */
++ if ((error = pm_get_file(param.create_ticket.function_param.set_device_object_type.filename,
++ &target,
++ &tid)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): call to pm_get_file() returned error %i\n",
++ error);
++#endif
++ return -RSBAC_EINVALIDTARGET;
++ }
++ /* target must be dev */
++ if(target != T_DEV)
++ return -RSBAC_EINVALIDTARGET;
++ tkt_i_function_param.tkt_set_device_object_type.dev = tid.dev;
++ tkt_i_function_param.tkt_set_device_object_type.object_type
++ = param.create_ticket.function_param.set_device_object_type.object_type;
++ tkt_i_function_param.tkt_set_device_object_type.object_class
++ = param.create_ticket.function_param.set_device_object_type.object_class;
++ break;
++
++ case PTF_set_auth_may_setuid:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ /* get file id */
++ if ((error = pm_get_file(param.create_ticket.function_param.set_auth_may_setuid.filename,
++ &target,
++ &tid)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): call to pm_get_file() returned error %i\n",
++ error);
++#endif
++ return -RSBAC_EINVALIDTARGET;
++ }
++ /* target must be file */
++ if(target != T_FILE)
++ return -RSBAC_EINVALIDTARGET;
++ tkt_i_function_param.tkt_set_auth_may_setuid.file = tid.file;
++ tkt_i_function_param.tkt_set_auth_may_setuid.value
++ = param.create_ticket.function_param.set_auth_may_setuid.value;
++ break;
++
++ case PTF_set_auth_may_set_cap:
++ if(role != PR_data_protection_officer)
++ return -RSBAC_EPERM;
++ /* get file id */
++ if ((error = pm_get_file(param.create_ticket.function_param.set_auth_may_set_cap.filename,
++ &target,
++ &tid)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): call to pm_get_file() returned error %i\n",
++ error);
++#endif
++ return -RSBAC_EINVALIDTARGET;
++ }
++          /* target must be file */
++ if(target != T_FILE)
++ return -RSBAC_EINVALIDTARGET;
++ tkt_i_function_param.tkt_set_auth_may_set_cap.file = tid.file;
++ tkt_i_function_param.tkt_set_auth_may_set_cap.value
++ = param.create_ticket.function_param.set_auth_may_set_cap.value;
++ break;
++
++ case PTF_add_authorized_task:
++ case PTF_delete_authorized_task:
++ /* copy parameters */
++ if(param.create_ticket.function_type
++ == PTF_add_authorized_task)
++ {
++ tkt_i_function_param.add_authorized_task
++ = param.create_ticket.function_param.add_authorized_task;
++ }
++ else
++ {
++ tkt_i_function_param.delete_authorized_task
++ = param.create_ticket.function_param.delete_authorized_task;
++ }
++ /* DPOs are OK */
++ if(role == PR_data_protection_officer)
++ break;
++ /* if not DPO: */
++ /* is process owner responsible user for target task? */
++ /* get ru_set_id for target task */
++ if(param.create_ticket.function_type
++ == PTF_add_authorized_task)
++ {
++ pm_tid.task
++ = param.create_ticket.function_param.add_authorized_task.task;
++ }
++ else
++ {
++ pm_tid.task
++ = param.create_ticket.function_param.delete_authorized_task.task;
++ }
++ if((error = rsbac_pm_get_data(ta_number,
++ PMT_TASK,
++ pm_tid,
++ PD_ru_set,
++ &data_val)))
++ return -RSBAC_EREADFAILED;
++ /* if ru_set is 0, there is no responsible user -> error */
++ if(!data_val.ru_set)
++ return -RSBAC_EPERM;
++ /* check, whether owner is responsible user for this task */
++ pm_set_id.ru_set = data_val.ru_set;
++ pm_set_member.ru = owner;
++ if(!rsbac_pm_set_member(ta_number,PS_RU,pm_set_id,pm_set_member))
++ {
++ /* illegal issuer -> delete ticket */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++ /* OK, test passed */
++ break;
++
++ default:
++ /* anything else should never be issued */
++ return -RSBAC_EINVALIDVALUE;
++ }
++
++ /* all checks passed -> add ticket */
++ all_data.tkt.id = param.create_ticket.id;
++ all_data.tkt.issuer = owner;
++ all_data.tkt.function_type = param.create_ticket.function_type;
++ all_data.tkt.function_param = tkt_i_function_param;
++ all_data.tkt.valid_until = param.create_ticket.valid_for + now.tv_sec;
++ error = rsbac_pm_add_target(ta_number,
++ PMT_TKT,
++ all_data);
++ if(error && (param.create_ticket.function_type == PTF_add_object_class))
++ {
++ rsbac_pm_remove_set(ta_number,PS_PP,pm_set_id);
++ }
++ return error;
++ /* end of create_ticket */
++
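++    /* All ticket-consuming cases below follow the same pattern: the caller
++     * must be a security officer, the referenced ticket must match the
++     * supplied parameters field by field and must have been issued by a
++     * data protection officer, and the ticket is removed before the change
++     * is applied so that it cannot be replayed.
++     */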
++ case PF_add_na:
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_add_na)
++ || (all_data.tkt.function_param.add_na.task
++ != param.add_na.task)
++ || (all_data.tkt.function_param.add_na.object_class
++ != param.add_na.object_class)
++ || (all_data.tkt.function_param.add_na.tp
++ != param.add_na.tp)
++ || (all_data.tkt.function_param.add_na.accesses
++ != param.add_na.accesses) )
++ return -RSBAC_EPERM;
++
++ /* check, whether task exists */
++ pm_tid2.task = param.add_na.task;
++ if(!rsbac_pm_exists(ta_number,
++ PMT_TASK,
++ pm_tid2))
++ return -RSBAC_EINVALIDVALUE;
++ /* check, whether class exists (not for IPC, DEV and NIL) */
++ if( param.add_na.object_class
++ && (param.add_na.object_class != RSBAC_PM_IPC_OBJECT_CLASS_ID)
++ && (param.add_na.object_class != RSBAC_PM_DEV_OBJECT_CLASS_ID))
++ {
++ pm_tid2.object_class = param.add_na.object_class;
++ if(!rsbac_pm_exists(ta_number,
++ PMT_CLASS,
++ pm_tid2))
++ return -RSBAC_EINVALIDVALUE;
++ }
++ /* check, whether tp exists */
++ pm_tid2.tp = param.add_na.tp;
++ if(!rsbac_pm_exists(ta_number,
++ PMT_TP,
++ pm_tid2))
++ return -RSBAC_EINVALIDVALUE;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> remove target */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* check: lookup NA accesses for this id */
++ pm_tid.na.task = param.add_na.task;
++ pm_tid.na.object_class = param.add_na.object_class;
++ pm_tid.na.tp = param.add_na.tp;
++ error = rsbac_pm_get_data(ta_number,
++ PMT_NA,
++ pm_tid,
++ PD_accesses,
++ &data_val);
++ switch(error)
++ { /* if 0 -> found -> set accesses to new value */
++ case 0:
++ data_val.accesses = param.add_na.accesses;
++ rsbac_pm_set_data(ta_number,
++ PMT_NA,
++ pm_tid,
++ PD_accesses,
++ data_val);
++ return 0;
++
++ /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found -> add */
++ case -RSBAC_EINVALIDTARGET:
++ case -RSBAC_ENOTFOUND:
++ all_data.na.task = param.add_na.task;
++ all_data.na.object_class = param.add_na.object_class;
++ all_data.na.tp = param.add_na.tp;
++ all_data.na.accesses = param.add_na.accesses;
++ if((error = rsbac_pm_add_target(ta_number,
++ PMT_NA,
++ all_data)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_add_target() for NA returned error %i",
++ error);
++ return error; /* execution failed */
++ }
++ return 0;
++
++ default:
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_data() for NA/accesses returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution failed */
++ }
++
++ case PF_delete_na:
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_delete_na)
++ || (all_data.tkt.function_param.delete_na.task
++ != param.delete_na.task)
++ || (all_data.tkt.function_param.delete_na.object_class
++ != param.delete_na.object_class)
++ || (all_data.tkt.function_param.delete_na.tp
++ != param.delete_na.tp)
++ || (all_data.tkt.function_param.delete_na.accesses
++ != param.delete_na.accesses) )
++ return -RSBAC_EPERM;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> remove target */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ /* remove NA */
++ pm_tid.na.task = param.delete_na.task;
++ pm_tid.na.object_class = param.delete_na.object_class;
++ pm_tid.na.tp = param.delete_na.tp;
++ return(rsbac_pm_remove_target(ta_number,
++ PMT_NA,
++ pm_tid));
++
++ case PF_add_task:
++ /* task-id 0 is used internally, reject */
++ if(!param.add_task.id)
++ return -RSBAC_EINVALIDVALUE;
++ /* purpose-id 0 is invalid, reject */
++ if(!param.add_task.purpose)
++ return -RSBAC_EINVALIDVALUE;
++
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_add_task)
++ || (all_data.tkt.function_param.add_task.id
++ != param.add_task.id)
++ || (all_data.tkt.function_param.add_task.purpose
++ != param.add_task.purpose) )
++ return -RSBAC_EPERM;
++
++ /* check, whether purpose exists */
++ pm_tid2.pp = param.add_task.purpose;
++ if(!rsbac_pm_exists(ta_number,
++ PMT_PP,
++ pm_tid2))
++ return -RSBAC_EINVALIDVALUE;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> remove target */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* try to add task */
++ all_data.task.id = param.add_task.id;
++ all_data.task.purpose = param.add_task.purpose;
++ all_data.task.tp_set = 0;
++ all_data.task.ru_set = 0;
++ return(rsbac_pm_add_target(ta_number,
++ PMT_TASK,
++ all_data));
++
++ case PF_delete_task:
++ /* task-id 0 is used internally, reject */
++      if(!param.delete_task.id)
++ return -RSBAC_EINVALIDVALUE;
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_delete_task)
++ || (all_data.tkt.function_param.delete_task.id
++ != param.delete_task.id) )
++ return -RSBAC_EPERM;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> remove target */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* try to delete task */
++ pm_tid.task = param.delete_task.id;
++ return(rsbac_pm_remove_target(ta_number,
++ PMT_TASK,
++ pm_tid));
++
++ case PF_add_object_class:
++ /* class-id 0/NIL, IPC and DEV are used internally, reject */
++ if( !param.add_object_class.id
++ || (param.add_object_class.id == RSBAC_PM_IPC_OBJECT_CLASS_ID)
++ || (param.add_object_class.id == RSBAC_PM_DEV_OBJECT_CLASS_ID))
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): add_object_class: reserved class-id 0, %u or %u requested!\n",
++ RSBAC_PM_IPC_OBJECT_CLASS_ID,
++ RSBAC_PM_DEV_OBJECT_CLASS_ID);
++ return -RSBAC_EINVALIDVALUE;
++ }
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_add_object_class)
++ || (all_data.tkt.function_param.tkt_add_object_class.id
++ != param.add_object_class.id) )
++ return -RSBAC_EPERM;
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> remove target */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* check purposes in ticket against those provided */
++ if(param.add_object_class.pp_list_p)
++ {
++ if(!all_data.tkt.function_param.tkt_add_object_class.pp_set)
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): add_object_class: no purpose in tkt\n");
++ return -RSBAC_EINVALIDVALUE;
++ }
++ pm_set_id.pp_set = all_data.tkt.function_param.tkt_add_object_class.pp_set;
++ rsbac_get_user(&pp_set,
++ param.add_object_class.pp_list_p,
++ sizeof(pp_set));
++ pm_set_member.pp = pp_set.id;
++ if(!rsbac_pm_set_member(ta_number,PS_PP,pm_set_id,pm_set_member))
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): add_object_class: first purpose-id %i not in tkt-set\n",
++ pp_set.id);
++ return -RSBAC_EINVALIDVALUE;
++ }
++
++ while(pp_set.next)
++ {
++ rsbac_get_user(&pp_set,
++ pp_set.next,
++ sizeof(pp_set));
++ pm_set_member.pp = pp_set.id;
++ if(!rsbac_pm_set_member(ta_number,PS_PP,pm_set_id,pm_set_member))
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): add_object_class: purpose-id %i not in tkt-set\n",
++ pp_set.id);
++ return -RSBAC_EINVALIDVALUE;
++ }
++ }
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated */
++ /* calls and memory waste. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* check, whether class exists */
++ pm_tid.object_class = param.add_object_class.id;
++ class_exists = rsbac_pm_exists(ta_number,PMT_CLASS, pm_tid);
++ if(!class_exists)
++ {
++ /* try to add class */
++ all_data.object_class.id = param.add_object_class.id;
++ all_data.object_class.pp_set = 0;
++ if((error = rsbac_pm_add_target(ta_number,
++ PMT_CLASS,
++ all_data)))
++ return error;
++ }
++
++ /* get purposes from user space and add them to set */
++ if(param.add_object_class.pp_list_p)
++ {
++ pm_set_id.pp_set = param.add_object_class.id;
++ if(!class_exists)
++ {
++ if(rsbac_pm_create_set(ta_number,PS_PP,pm_set_id))
++ return -RSBAC_EWRITEFAILED;
++ }
++ else
++ {
++ if(rsbac_pm_clear_set(ta_number,PS_PP,pm_set_id))
++ return -RSBAC_EWRITEFAILED;
++ }
++
++ rsbac_get_user(&pp_set,
++ param.add_object_class.pp_list_p,
++ sizeof(pp_set));
++ pm_set_member.pp = pp_set.id;
++ if(rsbac_pm_add_to_set(ta_number,PS_PP,pm_set_id,pm_set_member))
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): add_object_class: could not add first purpose-id %i to pp_set\n",
++ pp_set.id);
++ return -RSBAC_EWRITEFAILED;
++ }
++
++ while(pp_set.next)
++ {
++ rsbac_get_user(&pp_set,
++ pp_set.next,
++ sizeof(pp_set));
++ pm_set_member.pp = pp_set.id;
++ if(rsbac_pm_add_to_set(ta_number,PS_PP,pm_set_id,pm_set_member))
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): add_object_class: could not add purpose-id %i to pp_set\n",
++ pp_set.id);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ /* notify class item of its pp_set_id */
++ pm_tid.object_class = param.add_object_class.id;
++ data_val.pp_set = param.add_object_class.id;
++ if((error = rsbac_pm_set_data(ta_number,
++ PMT_CLASS,
++ pm_tid,
++ PD_pp_set,
++ data_val)))
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): add_object_class: could not set pp_set_id for class\n");
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ /* ready */
++ return 0;
++
++ case PF_delete_object_class:
++ /* class-id 0/NIL, IPC and DEV are used internally, reject */
++ if( !param.delete_object_class.id
++ || (param.delete_object_class.id == RSBAC_PM_IPC_OBJECT_CLASS_ID)
++ || (param.delete_object_class.id == RSBAC_PM_DEV_OBJECT_CLASS_ID))
++ return -RSBAC_EINVALIDVALUE;
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_delete_object_class)
++ || (all_data.tkt.function_param.delete_object_class.id
++ != param.delete_object_class.id) )
++ return -RSBAC_EPERM;
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> remove target */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* try to delete class */
++ pm_tid.object_class = param.delete_object_class.id;
++ return(rsbac_pm_remove_target(ta_number,
++ PMT_CLASS,
++ pm_tid));
++
++ case PF_add_authorized_tp:
++ /* task-id 0 and tp-id 0 are used internally, reject */
++ if(!param.add_authorized_tp.task || !param.add_authorized_tp.tp)
++ return -RSBAC_EINVALIDVALUE;
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_add_authorized_tp)
++ || (all_data.tkt.function_param.add_authorized_tp.task
++ != param.add_authorized_tp.task)
++ || (all_data.tkt.function_param.add_authorized_tp.tp
++ != param.add_authorized_tp.tp) )
++ return -RSBAC_EPERM;
++
++ /* check, whether task exists */
++ pm_tid2.task = param.add_authorized_tp.task;
++ if(!rsbac_pm_exists(ta_number,
++ PMT_TASK,
++ pm_tid2))
++ return -RSBAC_EINVALIDVALUE;
++ /* check, whether tp exists */
++ pm_tid2.tp = param.add_authorized_tp.tp;
++ if(!rsbac_pm_exists(ta_number,
++ PMT_TP,
++ pm_tid2))
++ return -RSBAC_EINVALIDVALUE;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> remove target */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* try to add tp to tp_set of task */
++ /* lookup tp_set_id for this task */
++ pm_tid.task = param.add_authorized_tp.task;
++ if((error = rsbac_pm_get_data(ta_number,
++ PMT_TASK,
++ pm_tid,
++ PD_tp_set,
++ &data_val)))
++ return -RSBAC_EREADFAILED;
++      /* if tp_set is 0, create it and record the new set id in the task data */
++ if(!data_val.tp_set)
++ {
++ pm_set_id.tp_set = param.add_authorized_tp.task;
++ if((error = rsbac_pm_create_set(ta_number,
++ PS_TP,
++ pm_set_id)))
++ return error;
++ data_val.tp_set = param.add_authorized_tp.task;
++ if((error = rsbac_pm_set_data(ta_number,
++ PMT_TASK,
++ pm_tid,
++ PD_tp_set,
++ data_val)))
++ return -RSBAC_EWRITEFAILED;
++ }
++
++ /* now that we know the set exists, try to add tp to it */
++ pm_set_id.tp_set = data_val.tp_set;
++ pm_set_member.tp = param.add_authorized_tp.tp;
++ if(rsbac_pm_add_to_set(ta_number,PS_TP,pm_set_id,pm_set_member))
++ return -RSBAC_EWRITEFAILED;
++ else
++ /* ready */
++ return 0;
++
++ case PF_delete_authorized_tp:
++ /* task-id 0 and tp-id 0 are used internally, reject */
++ if(!param.delete_authorized_tp.task || !param.delete_authorized_tp.tp)
++ return -RSBAC_EINVALIDVALUE;
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_delete_authorized_tp)
++ || (all_data.tkt.function_param.delete_authorized_tp.task
++ != param.delete_authorized_tp.task)
++ || (all_data.tkt.function_param.delete_authorized_tp.tp
++ != param.delete_authorized_tp.tp) )
++ return -RSBAC_EPERM;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> remove target */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* try to remove tp from tp_set of task */
++ /* lookup tp_set_id for this task */
++ pm_tid.task = param.delete_authorized_tp.task;
++ if((error = rsbac_pm_get_data(ta_number,
++ PMT_TASK,
++ pm_tid,
++ PD_tp_set,
++ &data_val)))
++ return -RSBAC_EREADFAILED;
++ /* if tp_set is 0, there are no tps to delete -> return */
++ if(!data_val.tp_set)
++ return -RSBAC_EINVALIDVALUE;
++
++ /* now that we know the set exists, try to remove tp from it */
++ pm_set_id.tp_set = data_val.tp_set;
++ pm_set_member.tp = param.delete_authorized_tp.tp;
++ if(rsbac_pm_remove_from_set(ta_number,PS_TP,pm_set_id,pm_set_member))
++ return -RSBAC_EWRITEFAILED;
++ else
++ /* ready */
++ return 0;
++
++ case PF_add_consent:
++ /* purpose_id 0 is used internally, reject */
++ if(!param.add_consent.purpose)
++ return -RSBAC_EINVALIDVALUE;
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* get file id */
++ if ((error = pm_get_file(param.add_consent.filename, &target, &tid)) < 0)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): call to pm_get_file() returned error %i\n",
++ error);
++#endif
++ return -RSBAC_EINVALIDTARGET;
++ }
++ /* target must be file */
++ if(target != T_FILE)
++ return -RSBAC_EINVALIDTARGET;
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_add_consent)
++ || (RSBAC_MAJOR(all_data.tkt.function_param.tkt_add_consent.file.device)
++ != RSBAC_MAJOR(tid.file.device))
++ || (RSBAC_MINOR(all_data.tkt.function_param.tkt_add_consent.file.device)
++ != RSBAC_MINOR(tid.file.device))
++ || (all_data.tkt.function_param.tkt_add_consent.file.inode
++ != tid.file.inode)
++ || (all_data.tkt.function_param.tkt_add_consent.purpose
++ != param.add_consent.purpose) )
++ return -RSBAC_EPERM;
++ file = tid.file;
++ /* check, whether purpose exists */
++ pm_tid2.pp = param.add_consent.purpose;
++ if(!rsbac_pm_exists(ta_number,
++ PMT_PP,
++ pm_tid2))
++ return -RSBAC_EINVALIDVALUE;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> remove target */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* check, whether this consent exists */
++ pm_tid.cs.file = file;
++ pm_tid.cs.purpose = param.add_consent.purpose;
++ if(rsbac_pm_exists(ta_number,
++ PMT_CS,
++ pm_tid))
++ return -RSBAC_EEXISTS;
++ /* consent does not exist, try to add it */
++ all_data.cs.file = file;
++ all_data.cs.purpose = param.add_consent.purpose;
++ return(rsbac_pm_add_target(ta_number,PMT_CS,all_data));
++
++ case PF_delete_consent:
++ /* purpose_id 0 is used internally, reject */
++ if(!param.delete_consent.purpose)
++ return -RSBAC_EINVALIDVALUE;
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* get file id */
++      if ((error = pm_get_file(param.delete_consent.filename, &target, &tid)) < 0)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): call to pm_get_file() returned error %i\n",
++ error);
++#endif
++ return -RSBAC_EINVALIDTARGET;
++ }
++ /* target must be file */
++ if(target != T_FILE)
++ return -RSBAC_EINVALIDTARGET;
++ file=tid.file;
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_delete_consent)
++ || (RSBAC_MAJOR(all_data.tkt.function_param.tkt_delete_consent.file.device)
++ != RSBAC_MAJOR(file.device))
++ || (RSBAC_MINOR(all_data.tkt.function_param.tkt_delete_consent.file.device)
++ != RSBAC_MINOR(file.device))
++ || (all_data.tkt.function_param.tkt_delete_consent.file.inode
++ != file.inode)
++ || (all_data.tkt.function_param.tkt_delete_consent.purpose
++ != param.delete_consent.purpose) )
++ return -RSBAC_EPERM;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> remove target */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* try to delete this consent */
++ pm_tid.cs.file = file;
++ pm_tid.cs.purpose = param.delete_consent.purpose;
++ return(rsbac_pm_remove_target(ta_number,
++ PMT_CS,
++ pm_tid));
++
++ case PF_add_purpose:
++ /* purpose_id 0, classes 0, IPC and DEV are used internally, reject */
++ if( !param.add_purpose.id
++ || !param.add_purpose.def_class
++ || (param.add_purpose.def_class
++ == RSBAC_PM_IPC_OBJECT_CLASS_ID)
++ || (param.add_purpose.def_class
++ == RSBAC_PM_DEV_OBJECT_CLASS_ID) )
++ return -RSBAC_EINVALIDVALUE;
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_add_purpose)
++ || (all_data.tkt.function_param.add_purpose.id
++ != param.add_purpose.id)
++ || (all_data.tkt.function_param.add_purpose.def_class
++ != param.add_purpose.def_class) )
++ return -RSBAC_EPERM;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> remove target */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* if def_class does not exist, try to create it */
++ pm_tid.object_class = param.add_purpose.def_class;
++ if(!rsbac_pm_exists(ta_number,
++ PMT_CLASS,
++ pm_tid))
++ {
++ /* try to add class */
++ all_data.object_class.id = param.add_purpose.def_class;
++ all_data.object_class.pp_set = 0;
++ if((error = rsbac_pm_add_target(ta_number,
++ PMT_CLASS,
++ all_data)))
++ return error;
++ }
++
++ /* try to add purpose */
++ all_data.pp.id = param.add_purpose.id;
++ all_data.pp.def_class = param.add_purpose.def_class;
++ if((error = rsbac_pm_add_target(ta_number,
++ PMT_PP,
++ all_data)))
++ return error;
++
++ /* add purpose to purpose-set of class */
++ /* lookup pp_set_id for this class */
++ pm_tid.object_class = param.add_purpose.def_class;
++ if((error = rsbac_pm_get_data(ta_number,
++ PMT_CLASS,
++ pm_tid,
++ PD_pp_set,
++ &data_val)))
++ return -RSBAC_EREADFAILED;
++ /* if no pp-set: create it and set it in class structure */
++ if(!data_val.pp_set)
++ {
++ pm_set_id.pp_set = param.add_purpose.def_class;
++ if(rsbac_pm_create_set(ta_number,PS_PP,pm_set_id))
++ return -RSBAC_EWRITEFAILED;
++ data_val.pp_set = param.add_purpose.def_class;
++ if((error = rsbac_pm_set_data(ta_number,
++ PMT_CLASS,
++ pm_tid,
++ PD_pp_set,
++ data_val)))
++ return -RSBAC_EWRITEFAILED;
++ }
++ /* now that we know the set exists, try to add purpose to it */
++ pm_set_id.pp_set = data_val.pp_set;
++ pm_set_member.pp = param.add_purpose.id;
++ if(rsbac_pm_add_to_set(ta_number,PS_PP,pm_set_id,pm_set_member))
++ return -RSBAC_EWRITEFAILED;
++ else
++ /* ready */
++ return 0;
++
++ case PF_delete_purpose:
++ /* purpose_id 0 is used internally, reject */
++ if(!param.delete_purpose.id)
++ return -RSBAC_EINVALIDVALUE;
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_delete_purpose)
++ || (all_data.tkt.function_param.delete_purpose.id
++ != param.delete_purpose.id) )
++ return -RSBAC_EPERM;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> delete ticket */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* try to delete this purpose */
++ pm_tid.pp = param.delete_purpose.id;
++ return(rsbac_pm_remove_target(ta_number,
++ PMT_PP,
++ pm_tid));
++
++ case PF_add_responsible_user:
++ /* task_id 0 is used internally, reject */
++ if(!param.add_responsible_user.task)
++ return -RSBAC_EINVALIDVALUE;
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_add_responsible_user)
++ || (all_data.tkt.function_param.add_responsible_user.user
++ != param.add_responsible_user.user)
++ || (all_data.tkt.function_param.add_responsible_user.task
++ != param.add_responsible_user.task) )
++ return -RSBAC_EPERM;
++
++ /* check, whether task exists */
++ pm_tid2.task = param.add_responsible_user.task;
++ if(!rsbac_pm_exists(ta_number,
++ PMT_TASK,
++ pm_tid2))
++ return -RSBAC_EINVALIDVALUE;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> delete ticket */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* try to add user to ru_set of task */
++
++ /* lookup ru_set_id for this task */
++ pm_tid.task = param.add_responsible_user.task;
++ if((error = rsbac_pm_get_data(ta_number,
++ PMT_TASK,
++ pm_tid,
++ PD_ru_set,
++ &data_val)))
++ return -RSBAC_EREADFAILED;
++      /* if ru_set is 0, create it and record the new set id in the task data */
++ if(!data_val.ru_set)
++ {
++ pm_set_id.ru_set = param.add_responsible_user.task;
++ if((error = rsbac_pm_create_set(ta_number,
++ PS_RU,
++ pm_set_id)))
++ return error;
++ data_val.ru_set = param.add_responsible_user.task;
++ if((error = rsbac_pm_set_data(ta_number,
++ PMT_TASK,
++ pm_tid,
++ PD_ru_set,
++ data_val)))
++ return -RSBAC_EWRITEFAILED;
++ }
++
++ /* now that we know the set exists, try to add ru to it */
++ pm_set_id.ru_set = data_val.ru_set;
++ pm_set_member.ru = param.add_responsible_user.user;
++ if(rsbac_pm_add_to_set(ta_number,PS_RU,pm_set_id,pm_set_member))
++ return -RSBAC_EWRITEFAILED;
++ else
++ /* ready */
++ return 0;
++
++ case PF_delete_responsible_user:
++ /* task_id 0 is used internally, reject */
++ if(!param.delete_responsible_user.task)
++ return -RSBAC_EINVALIDVALUE;
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_delete_responsible_user)
++ || (all_data.tkt.function_param.delete_responsible_user.user
++ != param.delete_responsible_user.user)
++ || (all_data.tkt.function_param.delete_responsible_user.task
++ != param.delete_responsible_user.task) )
++ return -RSBAC_EPERM;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> delete ticket */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++      /* try to remove user from ru_set of task */
++ /* lookup ru_set_id for this task */
++ pm_tid.task = param.delete_responsible_user.task;
++ if((error = rsbac_pm_get_data(ta_number,
++ PMT_TASK,
++ pm_tid,
++ PD_ru_set,
++ &data_val)))
++ return -RSBAC_EREADFAILED;
++ /* if ru_set is 0, there is nothing to delete */
++ if(!data_val.ru_set)
++ return -RSBAC_EINVALIDVALUE;
++
++ /* now that we know the set exists, try to remove ru from it */
++ pm_set_id.ru_set = data_val.ru_set;
++ pm_set_member.ru = param.delete_responsible_user.user;
++ if(rsbac_pm_remove_from_set(ta_number,PS_RU,pm_set_id,pm_set_member))
++ return -RSBAC_EWRITEFAILED;
++ else
++ /* ready */
++ return 0;
++
++ case PF_delete_user_aci:
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_delete_user_aci)
++ || (all_data.tkt.function_param.delete_user_aci.id
++ != param.delete_user_aci.id) )
++ return -RSBAC_EPERM;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> delete ticket */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all checks done. Now remove aci. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ tid.user = param.delete_user_aci.id;
++ rsbac_ta_remove_target(ta_number,T_USER,tid);
++ return 0;
++
++ case PF_set_role:
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_set_role)
++ || (all_data.tkt.function_param.set_role.user
++ != param.set_role.user)
++ || (all_data.tkt.function_param.set_role.role
++ != param.set_role.role) )
++ return -RSBAC_EPERM;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> delete ticket */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* try to set role */
++ tid.user = param.set_role.user;
++ attr_val.pm_role = param.set_role.role;
++ return(rsbac_ta_set_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ attr_val));
++
++ case PF_set_object_class:
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* get file id */
++ if ((error = pm_get_file(param.set_object_class.filename, &target, &tid)) < 0)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): call to pm_get_file() returned error %i\n",
++ error);
++#endif
++ return -RSBAC_EINVALIDTARGET;
++ }
++ /* target must be file */
++ if( (target != T_FILE)
++ && (target != T_FIFO)
++ )
++ return -RSBAC_EINVALIDTARGET;
++ file=tid.file;
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_set_object_class)
++ || (RSBAC_MAJOR(all_data.tkt.function_param.tkt_set_object_class.file.device)
++ != RSBAC_MAJOR(file.device))
++ || (RSBAC_MINOR(all_data.tkt.function_param.tkt_set_object_class.file.device)
++ != RSBAC_MINOR(file.device))
++ || (all_data.tkt.function_param.tkt_set_object_class.file.inode
++ != file.inode)
++ || (all_data.tkt.function_param.tkt_set_object_class.object_class
++ != param.set_object_class.object_class) )
++ return -RSBAC_EPERM;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> delete ticket */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* get old pm_object_type */
++ tid.file = file;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ target,
++ tid,
++ A_pm_object_type,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for FILE/FIFO/pm_object_type returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ switch(attr_val.pm_object_type)
++ {
++ case PO_personal_data:
++ case PO_none:
++ case PO_non_personal_data:
++ break;
++ default:
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* set new pm_object_type */
++ if(param.set_object_class.object_class)
++ attr_val.pm_object_type = PO_personal_data;
++ else
++ attr_val.pm_object_type = PO_non_personal_data;
++ if((error = rsbac_ta_set_attr(ta_number,
++ SW_PM,
++ target,
++ tid,
++ A_pm_object_type,
++ attr_val)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_set_attr() for FILE/pm_object_type returned error %i",
++ error);
++ return -RSBAC_EWRITEFAILED;
++ }
++ /* set new pm_object_class */
++ attr_val.pm_object_class = param.set_object_class.object_class;
++ if((error = rsbac_ta_set_attr(ta_number,
++ SW_PM,
++ target,
++ tid,
++ A_pm_object_class,
++ attr_val)))
++ {
++ rsbac_printk(KERN_WARNING
++                       "rsbac_pm(): rsbac_set_attr() for FILE/pm_object_class returned error %i",
++ error);
++ return -RSBAC_EWRITEFAILED;
++ }
++ /* ready */
++ return 0;
++
++#ifdef CONFIG_RSBAC_SWITCH_PM
++ case PF_switch_pm:
++ /* only values 0 and 1 are allowed */
++ if(param.switch_pm.value && (param.switch_pm.value != 1))
++ return -RSBAC_EINVALIDVALUE;
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_switch_pm)
++ || (all_data.tkt.function_param.switch_pm.value
++ != param.switch_pm.value))
++ return -RSBAC_EPERM;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> delete ticket */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* switch pm-module */
++      rsbac_printk(KERN_WARNING "rsbac_pm(): switching RSBAC module PM (No. %i) to %i!\n",
++ SW_PM, param.switch_pm.value);
++ rsbac_switch_pm = param.switch_pm.value;
++ return 0;
++
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_AUTH
++ case PF_switch_auth:
++ /* only values 0 and 1 are allowed */
++ if(param.switch_auth.value && (param.switch_auth.value != 1))
++ return -RSBAC_EINVALIDVALUE;
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_switch_auth)
++ || (all_data.tkt.function_param.switch_auth.value
++ != param.switch_auth.value))
++ return -RSBAC_EPERM;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> delete ticket */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all own checks done. Call ADF for other modules. */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG "rsbac_pm(): calling ADF int\n");
++#endif
++ tid.dummy = 0;
++ attr_val.switch_target = SW_AUTH;
++ if (!rsbac_adf_request_int(R_SWITCH_MODULE,
++ task_pid(current),
++ T_NONE,
++ &tid,
++ A_switch_target,
++ &attr_val,
++ SW_PM))
++ {
++ return -EPERM;
++ }
++
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* switch auth module */
++          rsbac_printk(KERN_WARNING "rsbac_pm(): switching RSBAC module AUTH (No. %i) to %i!\n",
++ SW_AUTH, param.switch_auth.value);
++ rsbac_switch_auth = param.switch_auth.value;
++ return 0;
++#endif /* SWITCH_AUTH */
++
++ case PF_set_device_object_type:
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* get file id */
++ if ((error = pm_get_file(param.set_device_object_type.filename, &target, &tid)) < 0)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): call to pm_get_file() returned error %i\n",
++ error);
++#endif
++ return -RSBAC_EINVALIDTARGET;
++ }
++ /* target must be dev */
++ if(target != T_DEV)
++ return -RSBAC_EINVALIDTARGET;
++ dev=tid.dev;
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_set_device_object_type)
++ || (all_data.tkt.function_param.tkt_set_device_object_type.dev.type
++ != dev.type)
++ || (all_data.tkt.function_param.tkt_set_device_object_type.dev.major
++ != dev.major)
++ || (all_data.tkt.function_param.tkt_set_device_object_type.dev.minor
++ != dev.minor)
++ || (all_data.tkt.function_param.tkt_set_device_object_type.object_type
++ != param.set_device_object_type.object_type)
++ || (all_data.tkt.function_param.tkt_set_device_object_type.object_class
++ != param.set_device_object_type.object_class) )
++ return -RSBAC_EPERM;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> delete ticket */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ switch(param.set_device_object_type.object_type)
++ {
++ case PO_personal_data:
++ case PO_none:
++ case PO_TP:
++ case PO_non_personal_data:
++ break;
++ default:
++ return -RSBAC_EINVALIDVALUE;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* set new pm_object_type */
++ tid.dev = dev;
++ attr_val.pm_object_type = param.set_device_object_type.object_type;
++ if((error = rsbac_ta_set_attr(ta_number,
++ SW_PM,
++ T_DEV,
++ tid,
++ A_pm_object_type,
++ attr_val)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_set_attr() for DEV/pm_object_type returned error %i",
++ error);
++ return -RSBAC_EWRITEFAILED;
++ }
++ /* set new pm_object_class */
++ attr_val.pm_object_class = param.set_device_object_type.object_class;
++ if((error = rsbac_ta_set_attr(ta_number,
++ SW_PM,
++ T_DEV,
++ tid,
++ A_pm_object_class,
++ attr_val)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_set_attr() for DEV/pm_object_class returned error %i",
++ error);
++ return -RSBAC_EWRITEFAILED;
++ }
++ /* ready */
++ return 0;
++
++#ifdef CONFIG_RSBAC_AUTH
++ case PF_set_auth_may_setuid:
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* get file id */
++ if ((error = pm_get_file(param.set_auth_may_setuid.filename, &target, &tid)) < 0)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): call to pm_get_file() returned error %i\n",
++ error);
++#endif
++ return -RSBAC_EINVALIDTARGET;
++ }
++ /* target must be file */
++ if( (target != T_FILE)
++ && (target != T_FIFO)
++ )
++ return -RSBAC_EINVALIDTARGET;
++ file=tid.file;
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_set_auth_may_setuid)
++ || (RSBAC_MAJOR(all_data.tkt.function_param.tkt_set_auth_may_setuid.file.device)
++ != RSBAC_MAJOR(file.device))
++ || (RSBAC_MINOR(all_data.tkt.function_param.tkt_set_auth_may_setuid.file.device)
++ != RSBAC_MINOR(file.device))
++ || (all_data.tkt.function_param.tkt_set_auth_may_setuid.file.inode
++ != file.inode)
++ || (all_data.tkt.function_param.tkt_set_auth_may_setuid.value
++ != param.set_auth_may_setuid.value)
++ )
++ return -RSBAC_EPERM;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> delete ticket */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ switch(param.set_auth_may_setuid.value)
++ {
++ case FALSE:
++ case TRUE:
++ break;
++ default:
++ return -RSBAC_EINVALIDVALUE;
++ }
++ /* OK, all own checks done. Call ADF for other modules. */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG "rsbac_pm(): calling ADF int\n");
++#endif
++ tid.file = file;
++ attr_val.auth_may_setuid = param.set_auth_may_setuid.value;
++ if (!rsbac_adf_request_int(R_MODIFY_ATTRIBUTE,
++ task_pid(current),
++ T_FILE,
++ &tid,
++ A_auth_may_setuid,
++ &attr_val,
++ SW_PM))
++ {
++ return -EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* set new auth_may_setuid */
++ if((error = rsbac_ta_set_attr(ta_number,
++ SW_AUTH,
++ T_FILE,
++ tid,
++ A_auth_may_setuid,
++ attr_val)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_set_attr() for FILE/auth_may_setuid returned error %i",
++ error);
++ return -RSBAC_EWRITEFAILED;
++ }
++ /* ready */
++ return 0;
++
++ case PF_set_auth_may_set_cap:
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* get file id */
++ if ((error = pm_get_file(param.set_auth_may_set_cap.filename, &target, &tid)) < 0)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): call to pm_get_file() returned error %i\n",
++ error);
++#endif
++ return -RSBAC_EINVALIDTARGET;
++ }
++ /* target must be file */
++ if(target != T_FILE)
++ return -RSBAC_EINVALIDTARGET;
++ file=tid.file;
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_set_auth_may_set_cap)
++ || (RSBAC_MAJOR(all_data.tkt.function_param.tkt_set_auth_may_set_cap.file.device)
++ != RSBAC_MAJOR(file.device))
++ || (RSBAC_MINOR(all_data.tkt.function_param.tkt_set_auth_may_set_cap.file.device)
++ != RSBAC_MINOR(file.device))
++ || (all_data.tkt.function_param.tkt_set_auth_may_set_cap.file.inode
++ != file.inode)
++ || (all_data.tkt.function_param.tkt_set_auth_may_set_cap.value
++ != param.set_auth_may_set_cap.value)
++ )
++ return -RSBAC_EPERM;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> delete ticket */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ switch(param.set_auth_may_set_cap.value)
++ {
++ case FALSE:
++ case TRUE:
++ break;
++ default:
++ return -RSBAC_EINVALIDVALUE;
++ }
++ /* OK, all own checks done. Call ADF for other modules. */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG "rsbac_pm(): calling ADF int\n");
++#endif
++ tid.file = file;
++ attr_val.auth_may_set_cap = param.set_auth_may_set_cap.value;
++ if (!rsbac_adf_request_int(R_MODIFY_ATTRIBUTE,
++ task_pid(current),
++ T_FILE,
++ &tid,
++ A_auth_may_set_cap,
++ &attr_val,
++ SW_PM))
++ {
++ return -EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++
++ /* set new auth_may_set_cap */
++ if((error = rsbac_ta_set_attr(ta_number,
++ SW_AUTH,
++ T_FILE,
++ tid,
++ A_auth_may_set_cap,
++ attr_val)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_set_attr() for FILE/auth_may_set_cap returned error %i",
++ error);
++ return -RSBAC_EWRITEFAILED;
++ }
++ /* ready */
++ return 0;
++#endif /* CONFIG_RSBAC_AUTH */
++
++/************/
++
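++    /* add/delete_authorized_task: as with the ticket functions above, the
++     * ticket is verified against the call parameters and consumed before
++     * the change is applied; for add_authorized_task the issuer may also
++     * be a responsible user of the task instead of a DPO. */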
++ case PF_add_authorized_task:
++ /* task_id 0 is used internally, reject */
++ if(!param.add_authorized_task.task)
++ return -RSBAC_EINVALIDVALUE;
++ if(role != PR_security_officer)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): caller of add_authorized_task is not SO\n");
++#endif
++ return -RSBAC_EPERM;
++ }
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i\n",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_add_authorized_task)
++ || (all_data.tkt.function_param.add_authorized_task.user
++ != param.add_authorized_task.user)
++ || (all_data.tkt.function_param.add_authorized_task.task
++ != param.add_authorized_task.task) )
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_aef_pm)
++ {
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): calling add_authorized_task with invalid ticket\n");
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): tkt-task: %i, tkt-user: %i, call-task: %i, call-user: %i\n",
++                      all_data.tkt.function_param.add_authorized_task.task,
++                      all_data.tkt.function_param.add_authorized_task.user,
++ param.add_authorized_task.task,
++ param.add_authorized_task.user);
++ }
++#endif
++ return -RSBAC_EPERM;
++ }
++
++ /* check, whether task exists */
++ pm_tid2.task = param.add_authorized_task.task;
++ if(!rsbac_pm_exists(ta_number,
++ PMT_TASK,
++ pm_tid2))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): calling add_authorized_task with invalid task id\n");
++#endif
++ return -RSBAC_EINVALIDVALUE;
++ }
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i\n",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ { /* no dpo? -> responsible user? */
++ /* get ru_set_id for this task */
++ pm_tid.task = param.add_authorized_task.task;
++ if((error = rsbac_pm_get_data(ta_number,
++ PMT_TASK,
++ pm_tid,
++ PD_ru_set,
++ &data_val)))
++ return -RSBAC_EREADFAILED;
++ /* if ru_set is 0, there is no responsible user -> error */
++ if(!data_val.ru_set)
++ {
++ /* illegal issuer -> delete ticket */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): calling add_authorized_task with invalid ticket issuer (no set)\n");
++#endif
++ return -RSBAC_EPERM;
++ }
++ /* check, whether issuer is responsible user for this task */
++ pm_set_id.ru_set = data_val.ru_set;
++ pm_set_member.ru = all_data.tkt.issuer;
++ if(!rsbac_pm_set_member(ta_number,PS_RU,pm_set_id,pm_set_member))
++ {
++ /* illegal issuer -> delete ticket */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): calling add_authorized_task with invalid ticket issuer\n");
++#endif
++ return -RSBAC_EPERM;
++ }
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ /* try to add task to task_set of user */
++ /* lookup task_set_id for this user */
++ tid.user = param.add_authorized_task.user;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_task_set,
++ &attr_val,
++ TRUE)))
++ return -RSBAC_EREADFAILED;
++ /* if pm_task_set is 0, it must be created and notified to task-data */
++ if(!attr_val.pm_task_set)
++ { /* set task_set_id to user-id */
++ pm_set_id.task_set = param.add_authorized_task.user;
++ /* 0 is reserved -> take another one for root */
++ if(!pm_set_id.task_set)
++ pm_set_id.task_set = RSBAC_PM_ROOT_TASK_SET_ID;
++ if((error = rsbac_pm_create_set(ta_number,
++ PS_TASK,
++ pm_set_id)))
++ return error;
++ attr_val.pm_task_set = pm_set_id.task_set;
++ if((error = rsbac_ta_set_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_task_set,
++ attr_val)))
++ return -RSBAC_EWRITEFAILED;
++ }
++
++ /* now that we know the set exists, try to add task to it */
++ pm_set_id.task_set = attr_val.pm_task_set;
++ pm_set_member.task = param.add_authorized_task.task;
++ if(rsbac_pm_add_to_set(ta_number,PS_TASK,pm_set_id,pm_set_member))
++ return -RSBAC_EWRITEFAILED;
++ else
++ /* ready */
++ return 0;
++
++ case PF_delete_authorized_task:
++ /* task_id 0 is used internally, reject */
++ if(!param.delete_authorized_task.task)
++ return -RSBAC_EINVALIDVALUE;
++ if(role != PR_security_officer)
++ return -RSBAC_EPERM;
++
++ /* get ticket data, deny, if not found */
++ pm_tid.tkt = tkt;
++ if((error = rsbac_pm_get_all_data(ta_number,
++ PMT_TKT,
++ pm_tid,
++ &all_data)))
++ { /* returns error -RSBAC_EINVALIDTARGET (old ds) or ENOTFOUND, if not found */
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_pm_get_all_data() for ticket returned error %i",
++ error);
++ return -RSBAC_EPERM; /* execution denied */
++ }
++ /* check ticket entries */
++ if( (all_data.tkt.function_type != PTF_delete_authorized_task)
++ || (all_data.tkt.function_param.delete_authorized_task.user
++ != param.delete_authorized_task.user)
++ || (all_data.tkt.function_param.delete_authorized_task.task
++ != param.delete_authorized_task.task) )
++ return -RSBAC_EPERM;
++
++ /* get ticket issuer role */
++ tid.user = all_data.tkt.issuer;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_role,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm(): rsbac_get_attr() for USER/pm_role returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* execution denied */
++ }
++
++ if(attr_val.pm_role != PR_data_protection_officer)
++ {
++ /* illegal issuer -> delete ticket */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, all checks done. Now change data. */
++ /* First remove ticket to prevent repeated calls. */
++ rsbac_pm_remove_target(ta_number,PMT_TKT,pm_tid);
++ /* try to remove task from task_set of user */
++ /* lookup task_set_id for this user */
++ tid.user = param.delete_authorized_task.user;
++ if((error = rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_USER,
++ tid,
++ A_pm_task_set,
++ &attr_val,
++ TRUE)))
++ return -RSBAC_EREADFAILED;
++ /* if pm_task_set is 0, there is no task to be deleted -> error */
++ if(!attr_val.pm_task_set)
++ return -RSBAC_EINVALIDVALUE;
++
++ /* now that we know the set exists, try to remove task from it */
++ pm_set_id.task_set = attr_val.pm_task_set;
++      pm_set_member.task = param.delete_authorized_task.task;
++ if(rsbac_pm_remove_from_set(ta_number,PS_TASK,pm_set_id,pm_set_member))
++ return -RSBAC_EWRITEFAILED;
++ else
++ /* ready */
++ return 0;
++
++
++/************/
++
++ case PF_create_tp:
++ /* tp_id 0 is used internally, reject */
++ if(!param.create_tp.id)
++ return -RSBAC_EINVALIDVALUE;
++ if(role != PR_tp_manager)
++ return -RSBAC_EPERM;
++
++ /* OK, all checks done. Now change data. */
++ /* try to add tp */
++ all_data.tp.id = param.create_tp.id;
++ return(rsbac_pm_add_target(ta_number,PMT_TP,all_data));
++
++ case PF_delete_tp:
++ /* tp_id 0 is used internally, reject */
++ if(!param.delete_tp.id)
++ return -RSBAC_EINVALIDVALUE;
++ if(role != PR_tp_manager)
++ return -RSBAC_EPERM;
++
++ /* OK, all checks done. Now change data. */
++
++ /* try to delete tp */
++ pm_tid.tp = param.delete_tp.id;
++ return(rsbac_pm_remove_target(ta_number,PMT_TP,pm_tid));
++
++ case PF_set_tp:
++ /* tp_id 0 means set to non-tp, do NOT reject here */
++ if(role != PR_tp_manager)
++ return -RSBAC_EPERM;
++
++ /* if tp != 0, check, whether it is valid */
++ if(param.set_tp.tp)
++ {
++ pm_tid.tp = param.set_tp.tp;
++ if(!rsbac_pm_exists(ta_number,PMT_TP,pm_tid))
++ return -RSBAC_EINVALIDVALUE;
++ }
++
++ /* get file id */
++ if ((error = pm_get_file(param.set_tp.filename,
++ &target,
++ &tid)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm(): call to pm_get_file() returned error %i\n",
++ error);
++#endif
++ return -RSBAC_EINVALIDTARGET;
++ }
++ /* target must be file */
++ if(target != T_FILE)
++ return -RSBAC_EINVALIDTARGET;
++ file=tid.file;
++ /* get old object_type */
++ if (rsbac_ta_get_attr(ta_number,
++ SW_PM,
++ T_FILE,
++ tid,
++ A_pm_object_type,
++ &attr_val,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_pm(): rsbac_get_attr() returned error!\n");
++ return -RSBAC_EREADFAILED;
++ }
++ /* if old OT is not to be changed here -> do not allow */
++ if( (attr_val.pm_object_type != PO_TP)
++ && (attr_val.pm_object_type != PO_none)
++ && (attr_val.pm_object_type != PO_non_personal_data))
++ return -RSBAC_EINVALIDTARGET;
++
++ /* OK, all checks done. Now change data. */
++ /* try to set OT*/
++ if(param.set_tp.tp)
++ attr_val.pm_object_type = PO_TP;
++ else
++ attr_val.pm_object_type = PO_none;
++ if(rsbac_ta_set_attr(ta_number,
++ SW_PM,
++ T_FILE,
++ tid,
++ A_pm_object_type,
++ attr_val))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_pm(): rsbac_set_attr() returned error!\n");
++ return -RSBAC_EWRITEFAILED;
++ }
++ /* try to set tp-id*/
++ attr_val.pm_tp = param.set_tp.tp;
++ if (rsbac_ta_set_attr(ta_number,
++ SW_PM,
++ T_FILE,
++ tid,
++ A_pm_tp,
++ attr_val))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_pm(): rsbac_set_attr() returned error!\n");
++ return -RSBAC_EWRITEFAILED;
++ }
++ return 0;
++
++/************/
++
++ default:
++ return -RSBAC_EINVALIDREQUEST;
++ }
++ } /* end of rsbac_pm() */
++
++/***************************************************************************/
++
++int rsbac_pm_change_current_task(rsbac_pm_task_id_t task)
++ {
++ union rsbac_target_id_t tid;
++ union rsbac_attribute_value_t attr_val;
++ int error = 0;
++ rsbac_uid_t owner;
++ union rsbac_pm_set_id_t pm_set_id;
++ union rsbac_pm_set_member_t pm_set_member;
++
++/* No processing possible before init (called at boot time) */
++ if (!rsbac_is_initialized())
++ return -RSBAC_ENOTINITIALIZED;
++
++ if(!task)
++ return -RSBAC_EINVALIDVALUE;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm_change_current_task(): called for task %i!\n",
++ task);
++#endif
++ /* getting current_tp of calling process from rsbac system */
++ tid.process = task_pid(current);
++ if((error = rsbac_get_attr(SW_PM,T_PROCESS,
++ tid,
++ A_pm_tp,
++ &attr_val,
++ FALSE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm_change_current_task(): rsbac_get_attr() for pm_tp returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* something weird happened */
++ }
++ /* changing current_task for a tp is forbidden -> error */
++ if(attr_val.pm_tp)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm_change_current_task(): tried to change current_task for tp-process\n");
++#endif
++ return -RSBAC_EPERM;
++ }
++
++ /* Getting basic information about caller */
++ /* only useful for real process, not idle or init */
++ if (current->pid > 1)
++ owner = current_uid();
++ else /* caller_pid <= 1 -> kernel or init are always owned by root */
++ owner = 0;
++
++ /* getting owner's task_set_id (authorized tasks) from rsbac system */
++ tid.user = owner;
++ if((error = rsbac_get_attr(SW_PM,T_USER,
++ tid,
++ A_pm_task_set,
++ &attr_val,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm_change_current_task(): rsbac_get_attr() for pm_task_set returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* something weird happened */
++ }
++
++ /* if there is no set of authorized tasks for owner: deny */
++ if(!attr_val.pm_task_set)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm_change_current_task(): process owner has no authorized task\n");
++#endif
++ return -RSBAC_EPERM;
++ }
++
++ /* check, whether owner is authorized for this task */
++ pm_set_id.task_set = attr_val.pm_task_set;
++ pm_set_member.task = task;
++ if(!rsbac_pm_set_member(0,PS_TASK,pm_set_id,pm_set_member))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm_change_current_task(): process owner is not authorized for task\n");
++#endif
++ return -RSBAC_EPERM;
++ }
++
++ /* OK, checks are passed. Change current_task for process. */
++ tid.process = task_pid(current);
++ attr_val.pm_current_task = task;
++ if((error = rsbac_set_attr(SW_PM,T_PROCESS,
++ tid,
++ A_pm_current_task,
++ attr_val)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm_change_current_task(): rsbac_set_attr() for pm_current_task returned error %i",
++ error);
++ return -RSBAC_EWRITEFAILED; /* something weird happened */
++ }
++ return 0;
++ }
++
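++/* Create a regular file with a PM object class: the class must be neither
++ * IPC nor DEV and must exist, the NA entry for (current_task, class,
++ * current tp) must include CREATE, and the task's purpose, if set, must be
++ * in the purpose set of the class; the file is then created via sys_open()
++ * and its pm_object_class attribute is stored. */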
++int rsbac_pm_create_file(const char * filename,
++ int mode,
++ rsbac_pm_object_class_id_t object_class)
++ {
++ union rsbac_target_id_t tid;
++ union rsbac_attribute_value_t attr_val;
++ union rsbac_attribute_value_t attr_val2;
++ union rsbac_pm_target_id_t pm_tid;
++ union rsbac_pm_data_value_t data_val;
++ union rsbac_pm_data_value_t data_val2;
++ int error = 0;
++ union rsbac_pm_set_id_t pm_set_id;
++ union rsbac_pm_set_member_t pm_set_member;
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "sys_rsbac_pm_create_file(): called with class %i, mode %o!\n",
++ object_class, mode);
++#endif
++ /* do not allow IPC or DEV class */
++ if( (object_class == RSBAC_PM_IPC_OBJECT_CLASS_ID)
++ || (object_class == RSBAC_PM_DEV_OBJECT_CLASS_ID))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm_create_file(): Class-ID is IPC or DEV\n");
++#endif
++ return -RSBAC_EINVALIDVALUE;
++ }
++
++ /* is mode for regular file? */
++ if(mode & ~S_IRWXUGO)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm_create_file(): illegal creation mode\n");
++#endif
++ return -RSBAC_EINVALIDVALUE;
++ }
++
++ /* does class exist (NIL always exists)? */
++ if(object_class)
++ {
++ pm_tid.object_class = object_class;
++ if(!rsbac_pm_exists(0,
++ PMT_CLASS,
++ pm_tid))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm_create_file(): non-existent class\n");
++#endif
++ return -RSBAC_EINVALIDVALUE;
++ }
++ }
++
++ /* getting current_task of calling process from rsbac system */
++ tid.process = task_pid(current);
++ if((error = rsbac_get_attr(SW_PM,T_PROCESS,
++ tid,
++ A_pm_current_task,
++ &attr_val,
++ FALSE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm_create_file(): rsbac_get_attr() for pm_current_task returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* something weird happened */
++ }
++
++ /* getting current_tp of calling process from rsbac system */
++ if((error = rsbac_get_attr(SW_PM,T_PROCESS,
++ tid,
++ A_pm_tp,
++ &attr_val2,
++ FALSE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm_create_file(): rsbac_get_attr() for pm_tp returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* something weird happened */
++ }
++
++    /* getting necessary accesses for task, class, tp from PM-data */
++ pm_tid.na.task = attr_val.pm_current_task;
++ pm_tid.na.object_class = object_class;
++ pm_tid.na.tp = attr_val2.pm_tp;
++ if((error = rsbac_pm_get_data(0,
++ PMT_NA,
++ pm_tid,
++ PD_accesses,
++ &data_val)))
++ {
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm_create_file(): rsbac_pm_get_data() for NA/accesses returned error %i",
++ error);
++#ifdef CONFIG_RSBAC_DEBUG
++ else if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm_create_file(): NA/accesses (%i,%i,%i) not found\n",
++ pm_tid.na.task, object_class, pm_tid.na.tp);
++#endif
++ return -RSBAC_EPERM; /* deny */
++ }
++
++ /* is create necessary? if not -> error */
++ if(!(data_val.accesses & RSBAC_PM_A_CREATE))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm_create_file(): create is not necessary\n");
++#endif
++ return -RSBAC_EPERM;
++ }
++
++ /* get purpose for current_task */
++ pm_tid.task = attr_val.pm_current_task;
++ if((error = rsbac_pm_get_data(0,
++ PMT_TASK,
++ pm_tid,
++ PD_purpose,
++ &data_val)))
++ {
++ if( (error != -RSBAC_EINVALIDTARGET)
++ && (error != -RSBAC_ENOTFOUND)
++ )
++ rsbac_printk(KERN_WARNING
++               "rsbac_pm_create_file(): rsbac_pm_get_data() for TASK/purpose returned error %i",
++ error);
++ return -RSBAC_EPERM; /* deny */
++ }
++
++ /* further checks only, if there is a purpose defined */
++ if(data_val.purpose)
++ {
++ /* get purpose_set_id for class */
++ pm_tid.object_class = object_class;
++ if((error = rsbac_pm_get_data(0,
++ PMT_CLASS,
++ pm_tid,
++ PD_pp_set,
++ &data_val2)))
++ {
++ if( (error == -RSBAC_EINVALIDTARGET)
++ || (error == -RSBAC_ENOTFOUND)
++ )
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm_create_file(): non-existent class\n");
++#endif
++ return -RSBAC_EINVALIDVALUE;
++ }
++ rsbac_printk(KERN_WARNING
++               "rsbac_pm_create_file(): rsbac_pm_get_data() for CLASS/pp_set returned error %i",
++ error);
++ return -RSBAC_EREADFAILED; /* deny */
++ }
++ /* if there is no purpose set for this class, deny */
++ if(!data_val2.pp_set)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm_create_file(): current_task has purpose, class not\n");
++#endif
++ return -RSBAC_EPERM;
++ }
++
++ /* last check: is our task's purpose in the set of purposes for our class? */
++ pm_set_id.pp_set = data_val2.pp_set;
++ pm_set_member.pp = data_val.purpose;
++ if(!rsbac_pm_set_member(0,PS_PP,pm_set_id,pm_set_member))
++ /* our task's purpose does not match with class purposes -> deny */
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG
++ "rsbac_pm_create_file(): purpose of current_task is not in purpose set of class\n");
++#endif
++ return -RSBAC_EPERM;
++ }
++ }
++
++ /* try to create object using standard syscalls, leading to general rsbac */
++ /* checks via ADF-Request */
++ /* we are not using sys_creat(), because alpha kernels don't know it */
++ error = sys_open(filename, O_CREAT | O_WRONLY | O_TRUNC, mode);
++ if (error < 0)
++ return error;
++
++ /* setting class for new object */
++ rcu_read_lock();
++ tid.file.device = current->files->fdt->fd[error]->f_vfsmnt->mnt_sb->s_dev;
++ tid.file.inode = current->files->fdt->fd[error]->f_dentry->d_inode->i_ino;
++ tid.file.dentry_p = current->files->fdt->fd[error]->f_dentry;
++ rcu_read_unlock();
++ attr_val.pm_object_class = object_class;
++ if(rsbac_set_attr(SW_PM,T_FILE,
++ tid,
++ A_pm_object_class,
++ attr_val))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_pm_create_file(): rsbac_set_attr() for pm_object_class returned error");
++ }
++ return error;
++ }
++
++
++/* end of rsbac/adf/pm/syscalls.c */
+diff --git a/rsbac/adf/rc/Makefile b/rsbac/adf/rc/Makefile
+new file mode 100644
+index 0000000..95e8bdb
+--- /dev/null
++++ b/rsbac/adf/rc/Makefile
+@@ -0,0 +1,13 @@
++#
++# File: rsbac/adf/rc/Makefile
++#
++# Makefile for the Linux rsbac rc decision module.
++#
++# Author and (c) 1999-2012 Amon Ott <ao@rsbac.org>
++#
++
++obj-y := rc_syscalls.o
++# decisions only in non-maint mode
++ifneq ($(CONFIG_RSBAC_MAINT),y)
++obj-y += rc_main.o
++endif
+diff --git a/rsbac/adf/rc/rc_main.c b/rsbac/adf/rc/rc_main.c
+new file mode 100644
+index 0000000..f66b700
+--- /dev/null
++++ b/rsbac/adf/rc/rc_main.c
+@@ -0,0 +1,3157 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - Role Compatibility */
++/* File: rsbac/adf/rc/main.c */
++/* */
++/* Author and (c) 1999-2011: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 29/Nov/2011 */
++/*************************************************** */
++
++#include <linux/string.h>
++#include <rsbac/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/rc.h>
++#include <rsbac/error.h>
++#include <rsbac/debug.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++#include <rsbac/rc_getname.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/network.h>
++#include <rsbac/rc_types.h>
++#include <rsbac/lists.h>
++
++#if defined(CONFIG_RSBAC_RC_LEARN)
++#ifdef CONFIG_RSBAC_RC_LEARN_TA
++rsbac_list_ta_number_t rc_learn_ta = CONFIG_RSBAC_RC_LEARN_TA;
++#else
++rsbac_list_ta_number_t rc_learn_ta = 0;
++#endif
++#endif
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
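++/* Check whether the caller's rc_role is compatible with the rc_type of the
++ * given target for this request: the target class selects both the type
++ * attribute to read and the type_comp_* item of the role. With RC learning
++ * enabled, a missing right is added to the role via the learning
++ * transaction and the request is granted. */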
++static enum rsbac_adf_req_ret_t
++check_comp_rc(enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_adf_request_t request, rsbac_pid_t caller_pid)
++{
++ int err;
++ union rsbac_target_id_t i_tid;
++ enum rsbac_attribute_t i_attr;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_attribute_value_t i_attr_val2;
++
++ union rsbac_rc_target_id_t i_rc_subtid;
++ enum rsbac_rc_item_t i_rc_item;
++
++ /* get rc_role from process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC,
++ T_PROCESS,
++ i_tid,
++ A_rc_role, &i_attr_val1, TRUE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return NOT_GRANTED;
++ }
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ i_rc_item = RI_type_comp_fd;
++ i_attr = A_rc_type_fd;
++ break;
++ case T_DEV:
++ i_rc_item = RI_type_comp_dev;
++ i_attr = A_rc_type;
++ break;
++ case T_USER:
++ i_rc_item = RI_type_comp_user;
++ i_attr = A_rc_type;
++ break;
++ case T_PROCESS:
++ i_rc_item = RI_type_comp_process;
++ i_attr = A_rc_type;
++ break;
++ case T_IPC:
++ i_rc_item = RI_type_comp_ipc;
++ i_attr = A_rc_type;
++ break;
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ case T_GROUP:
++ i_rc_item = RI_type_comp_group;
++ i_attr = A_rc_type;
++ break;
++#endif
++#if defined(CONFIG_RSBAC_RC_NET_DEV_PROT)
++ case T_NETDEV:
++ i_rc_item = RI_type_comp_netdev;
++ i_attr = A_rc_type;
++ break;
++#endif
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_PROT)
++ case T_NETTEMP:
++ i_rc_item = RI_type_comp_nettemp;
++ i_attr = A_rc_type_nt;
++ break;
++ case T_NETOBJ:
++ i_rc_item = RI_type_comp_netobj;
++ if (rsbac_net_remote_request(request))
++ i_attr = A_remote_rc_type;
++ else
++ i_attr = A_local_rc_type;
++ break;
++#endif
++ default:
++ rsbac_printk(KERN_WARNING "check_comp_rc(): invalid target %i!\n",
++ target);
++ return NOT_GRANTED;
++ }
++
++ /* get rc_type[_fd|_nt] from target */
++ if ((err = rsbac_get_attr(SW_RC,
++ target,
++ tid, i_attr, &i_attr_val2, TRUE))) {
++ rsbac_pr_get_error(i_attr);
++ return NOT_GRANTED;
++ }
++
++ /* get type_comp_xxx of role */
++ i_rc_subtid.type = i_attr_val2.rc_type;
++ if (rsbac_rc_check_comp(i_attr_val1.rc_role,
++ i_rc_subtid, i_rc_item, request))
++ return GRANTED;
++ else {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_adf_rc) {
++ char *tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ char *tmp2 =
++ rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp2) {
++#if defined(CONFIG_RSBAC_RC_LEARN)
++ if (rsbac_rc_learn) {
++ union rsbac_rc_target_id_t i_rc_tid;
++ union rsbac_rc_item_value_t i_rc_value;
++
++ i_rc_tid.role = i_attr_val1.rc_role;
++#ifdef CONFIG_RSBAC_RC_LEARN_TA
++ if (!rsbac_list_ta_exist(rc_learn_ta))
++ rsbac_list_ta_begin(CONFIG_RSBAC_LIST_TRANS_MAX_TTL,
++ &rc_learn_ta,
++ RSBAC_ALL_USERS,
++ RSBAC_RC_LEARN_TA_NAME,
++ NULL);
++#endif
++ err = rsbac_rc_get_item (rc_learn_ta,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_subtid,
++ i_rc_item,
++ &i_rc_value,
++ NULL);
++ if (!err) {
++ i_rc_value.rights |= RSBAC_RC_RIGHTS_VECTOR(request);
++ err = rsbac_rc_set_item (rc_learn_ta,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_subtid,
++ i_rc_item,
++ i_rc_value,
++ RSBAC_LIST_TTL_KEEP);
++ if (!err) {
++#ifdef CONFIG_RSBAC_LOG_PSEUDO
++ u_int pseudo = 0;
++ union rsbac_attribute_value_t i_attr_val3;
++
++ /* Get owner's logging pseudo */
++ i_tid.user = current_uid();
++ if (!rsbac_get_attr(SW_GEN,T_USER,i_tid,A_pseudo,&i_attr_val3,FALSE)) {
++ pseudo = i_attr_val3.pseudo;
++ }
++ if (pseudo) {
++ rsbac_printk(KERN_INFO "check_comp_rc(): learning mode: pid %u (%.15s), pseudo %u, rc_role %u, %s rc_type %u, right %s added to transaction %u!\n",
++ pid_nr(caller_pid),
++ current->comm,
++ pseudo,
++ i_attr_val1.rc_role,
++ get_target_name_only
++ (tmp, target),
++ i_attr_val2.rc_type,
++ get_rc_special_right_name
++ (tmp2, request),
++ rc_learn_ta);
++ } else
++#endif
++ rsbac_printk(KERN_INFO "check_comp_rc(): learning mode: pid %u (%.15s), owner %u, rc_role %u, %s rc_type %u, right %s added to transaction %u!\n",
++ pid_nr(caller_pid),
++ current->comm,
++ current_uid(),
++ i_attr_val1.rc_role,
++ get_target_name_only
++ (tmp, target),
++ i_attr_val2.rc_type,
++ get_rc_special_right_name
++ (tmp2, request),
++ rc_learn_ta);
++ rsbac_kfree(tmp2);
++ rsbac_kfree(tmp);
++ return GRANTED;
++ }
++ }
++ }
++#endif
++ {
++#ifdef CONFIG_RSBAC_LOG_PSEUDO
++ u_int pseudo = 0;
++ union rsbac_attribute_value_t i_attr_val3;
++
++ /* Get owner's logging pseudo */
++ i_tid.user = current_uid();
++ if (!rsbac_get_attr(SW_GEN,T_USER,i_tid,A_pseudo,&i_attr_val3,FALSE)) {
++ pseudo = i_attr_val3.pseudo;
++ }
++ if (pseudo) {
++ rsbac_pr_debug(adf_rc, "pid %u (%.15s), pseudo %u, rc_role %u, %s rc_type %u, request %s -> NOT_GRANTED!\n",
++ pid_nr(caller_pid),
++ current->comm,
++ pseudo,
++ i_attr_val1.rc_role,
++ get_target_name_only
++ (tmp, target),
++ i_attr_val2.rc_type,
++ get_rc_special_right_name
++ (tmp2, request));
++ } else
++#endif
++ rsbac_pr_debug(adf_rc, "pid %u (%.15s), owner %u, rc_role %u, %s rc_type %u, request %s -> NOT_GRANTED!\n",
++ pid_nr(caller_pid),
++ current->comm,
++ current_uid(),
++ i_attr_val1.rc_role,
++ get_target_name_only
++ (tmp, target),
++ i_attr_val2.rc_type,
++ get_rc_special_right_name
++ (tmp2, request));
++ }
++ rsbac_kfree(tmp2);
++ }
++ rsbac_kfree(tmp);
++ }
++ }
++#endif
++ return NOT_GRANTED;
++ }
++}
++
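++/* Same compatibility check for SCD targets: the SCD type is looked up in
++ * the role's type_comp_scd item; learning mode may add the missing right. */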
++static enum rsbac_adf_req_ret_t
++check_comp_rc_scd(enum rsbac_rc_scd_type_t scd_type,
++ enum rsbac_adf_request_t request, rsbac_pid_t caller_pid)
++{
++ int err;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ union rsbac_rc_target_id_t i_rc_subtid;
++
++ /* get rc_role from process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC,
++ T_PROCESS,
++ i_tid,
++ A_rc_role, &i_attr_val1, TRUE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return NOT_GRANTED;
++ }
++ /* get type_comp_scd of role */
++ i_rc_subtid.type = scd_type;
++ if (rsbac_rc_check_comp(i_attr_val1.rc_role,
++ i_rc_subtid, RI_type_comp_scd, request)) {
++ return GRANTED;
++ } else {
++#if defined(CONFIG_RSBAC_RC_LEARN) || defined(CONFIG_RSBAC_DEBUG)
++ char tmp[RSBAC_MAXNAMELEN];
++#endif
++
++#if defined(CONFIG_RSBAC_RC_LEARN)
++ if (rsbac_rc_learn) {
++ union rsbac_rc_target_id_t i_rc_tid;
++ union rsbac_rc_item_value_t i_rc_value;
++
++ i_rc_tid.role = i_attr_val1.rc_role;
++#ifdef CONFIG_RSBAC_RC_LEARN_TA
++ if (!rsbac_list_ta_exist(rc_learn_ta))
++ rsbac_list_ta_begin(CONFIG_RSBAC_LIST_TRANS_MAX_TTL,
++ &rc_learn_ta,
++ RSBAC_ALL_USERS,
++ RSBAC_RC_LEARN_TA_NAME,
++ NULL);
++#endif
++ err = rsbac_rc_get_item (rc_learn_ta,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_subtid,
++ RI_type_comp_scd,
++ &i_rc_value,
++ NULL);
++ if (!err) {
++ i_rc_value.rights |= RSBAC_RC_RIGHTS_VECTOR(request);
++ err = rsbac_rc_set_item (rc_learn_ta,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_subtid,
++ RI_type_comp_scd,
++ i_rc_value,
++ RSBAC_LIST_TTL_KEEP);
++ if (!err) {
++#ifdef CONFIG_RSBAC_LOG_PSEUDO
++ u_int pseudo = 0;
++ union rsbac_attribute_value_t i_attr_val3;
++
++ /* Get owner's logging pseudo */
++ i_tid.user = current_uid();
++ if (!rsbac_get_attr(SW_GEN,T_USER,i_tid,A_pseudo,&i_attr_val3,FALSE)) {
++ pseudo = i_attr_val3.pseudo;
++ }
++ if (pseudo) {
++ rsbac_printk(KERN_INFO "check_comp_rc_scd(): learning mode: pid %u (%.15s), pseudo %u, rc_role %i, scd_type %i, right %s added to transaction %u!\n",
++ pid_nr(caller_pid), current->comm, pseudo,
++ i_attr_val1.rc_role, scd_type,
++ get_request_name(tmp, request),
++ rc_learn_ta);
++ } else
++#endif
++ rsbac_printk(KERN_INFO "check_comp_rc_scd(): learning mode: pid %u (%.15s), owner %u, rc_role %i, scd_type %i, right %s added to transaction %u!\n",
++ pid_nr(caller_pid), current->comm, current_uid(),
++ i_attr_val1.rc_role, scd_type,
++ get_request_name(tmp, request),
++ rc_learn_ta);
++ return GRANTED;
++ }
++ }
++ }
++#endif
++ {
++#ifdef CONFIG_RSBAC_LOG_PSEUDO
++ u_int pseudo = 0;
++ union rsbac_attribute_value_t i_attr_val3;
++
++ /* Get owner's logging pseudo */
++ i_tid.user = current_uid();
++ if (!rsbac_get_attr(SW_GEN,T_USER,i_tid,A_pseudo,&i_attr_val3,FALSE)) {
++ pseudo = i_attr_val3.pseudo;
++ }
++ if (pseudo) {
++ rsbac_pr_debug(adf_rc, "pid %u (%.15s), pseudo %u, rc_role %i, scd_type %i, request %s -> NOT_GRANTED!\n",
++ pid_nr(caller_pid), current->comm, pseudo,
++ i_attr_val1.rc_role, scd_type,
++ get_request_name(tmp, request));
++ } else
++#endif
++ rsbac_pr_debug(adf_rc, "pid %u (%.15s), owner %u, rc_role %i, scd_type %i, request %s -> NOT_GRANTED!\n",
++ pid_nr(caller_pid), current->comm, current_uid(),
++ i_attr_val1.rc_role, scd_type,
++ get_request_name(tmp, request));
++ return NOT_GRANTED;
++ }
++ }
++}
++
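++/* Check whether the role in tid has the CREATE right on the default create
++ * type passed in subtid for the given target class; learning mode may add
++ * the right instead of denying. */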
++static enum rsbac_adf_req_ret_t
++rc_check_create(
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_rc_target_id_t tid,
++ union rsbac_rc_target_id_t subtid,
++ enum rsbac_rc_item_t item)
++{
++ if (rsbac_rc_check_comp(tid.role, subtid, item, R_CREATE))
++ return GRANTED;
++ else {
++ char tmp[RSBAC_MAXNAMELEN];
++
++#if defined(CONFIG_RSBAC_RC_LEARN)
++ if (rsbac_rc_learn) {
++ union rsbac_rc_item_value_t i_rc_value;
++ int err;
++
++#ifdef CONFIG_RSBAC_RC_LEARN_TA
++ if (!rsbac_list_ta_exist(rc_learn_ta))
++ rsbac_list_ta_begin(CONFIG_RSBAC_LIST_TRANS_MAX_TTL,
++ &rc_learn_ta,
++ RSBAC_ALL_USERS,
++ RSBAC_RC_LEARN_TA_NAME,
++ NULL);
++#endif
++ err = rsbac_rc_get_item (rc_learn_ta,
++ RT_ROLE,
++ tid,
++ subtid,
++ item,
++ &i_rc_value,
++ NULL);
++ if (!err) {
++ i_rc_value.rights |= RSBAC_RC_RIGHTS_VECTOR(R_CREATE);
++ err = rsbac_rc_set_item (rc_learn_ta,
++ RT_ROLE,
++ tid,
++ subtid,
++ item,
++ i_rc_value,
++ RSBAC_LIST_TTL_KEEP);
++ if (!err) {
++#ifdef CONFIG_RSBAC_LOG_PSEUDO
++ u_int pseudo = 0;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val3;
++
++ /* Get owner's logging pseudo */
++ i_tid.user = current_uid();
++ if (!rsbac_get_attr(SW_GEN,T_USER,i_tid,A_pseudo,&i_attr_val3,FALSE)) {
++ pseudo = i_attr_val3.pseudo;
++ }
++ if (pseudo) {
++ rsbac_printk(KERN_INFO "rc_check_create(): learning mode: pid %u (%.15s), pseudo %u, rc_role %u, %s rc_type %u, right CREATE added to transaction %u!\n",
++ pid_nr(caller_pid),
++ current->comm,
++ pseudo,
++ tid.role,
++ get_target_name_only
++ (tmp, target),
++ subtid.type,
++ rc_learn_ta);
++ } else
++#endif
++ rsbac_printk(KERN_INFO "rc_check_create(): learning mode: pid %u (%.15s), owner %u, rc_role %u, %s rc_type %u, right CREATE added to transaction %u!\n",
++ pid_nr(caller_pid),
++ current->comm,
++ current_uid(),
++ tid.role,
++ get_target_name_only
++ (tmp, target),
++ subtid.type,
++ rc_learn_ta);
++ return GRANTED;
++ }
++ }
++ }
++#endif
++ rsbac_printk(KERN_WARNING "rc_check_create(): rc_role %i has no CREATE right on its %s def_create_type %i -> NOT_GRANTED!\n",
++ tid.role,
++ get_target_name_only (tmp, target),
++ subtid.type);
++ return NOT_GRANTED;
++ }
++}
++
++/* exported for rc_syscalls.c */
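++/* Check whether the caller's role may administrate t_role: read-only
++ * access is already allowed via assign_roles membership, modification
++ * requires admin_roles membership. */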
++int rsbac_rc_test_admin_roles(rsbac_rc_role_id_t t_role,
++ rsbac_boolean_t modify)
++{
++ int err;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_rc_target_id_t i_rc_subtid;
++
++ if (t_role > RC_role_max_value)
++ return -RSBAC_EINVALIDVALUE;
++ /* get rc_role of process */
++ i_tid.process = task_pid(current);
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid, A_rc_role, &i_attr_val1, TRUE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return -RSBAC_EREADFAILED;
++ }
++
++ i_rc_subtid.role = t_role;
++ /* read_only? -> assign_roles membership is enough */
++ if (!modify) {
++ if (rsbac_rc_check_comp(i_attr_val1.rc_role,
++ i_rc_subtid,
++ RI_assign_roles, R_NONE))
++ return 0;
++ /* fall through */
++ }
++ /* check admin_roles of role */
++ if (rsbac_rc_check_comp(i_attr_val1.rc_role,
++ i_rc_subtid, RI_admin_roles, R_NONE))
++ return 0;
++
++ rsbac_pr_debug(adf_rc,
++ "rsbac_rc_test_admin_roles(): role %u not in admin roles of role %u, pid %u, user %u!\n",
++ t_role,
++ i_attr_val1.rc_role,
++ current->pid,
++ current_uid());
++ return -EPERM;
++}
++
++/* exported for rc_syscalls.c */
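++/* Check whether the caller's role may assign roles on the given target:
++ * both the target's current role and the new role t_role must be in the
++ * caller role's assign_roles set. */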
++int rsbac_rc_test_assign_roles(enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ rsbac_rc_role_id_t t_role)
++{
++ int err;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_attribute_value_t i_attr_val2;
++ union rsbac_rc_target_id_t i_rc_subtid;
++
++ if (target >= T_NONE)
++ return -RSBAC_EINVALIDVALUE;
++ /* get rc_role of process */
++ i_tid.process = task_pid(current);
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid, A_rc_role, &i_attr_val1, TRUE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return -RSBAC_EREADFAILED;
++ }
++ /* get old role of target */
++ if ((err = rsbac_get_attr(SW_RC,
++ target,
++ tid, attr, &i_attr_val2, TRUE))) {
++ rsbac_pr_get_error(attr);
++ return -RSBAC_EREADFAILED;
++ }
++
++ i_rc_subtid.role = i_attr_val2.rc_role;
++ if (!rsbac_rc_check_comp(i_attr_val1.rc_role,
++ i_rc_subtid, RI_assign_roles, R_NONE)) {
++ rsbac_pr_debug(adf_rc,
++ "rsbac_rc_test_assign_roles(): old role %u not in assign roles of role %u, pid %u, user %u!\n",
++ i_attr_val2.rc_role,
++ i_attr_val1.rc_role,
++ current->pid,
++ current_uid());
++ return -EPERM;
++ }
++ i_rc_subtid.role = t_role;
++ if (!rsbac_rc_check_comp(i_attr_val1.rc_role,
++ i_rc_subtid,
++ RI_assign_roles, R_NONE)) {
++ rsbac_pr_debug(adf_rc,
++ "rsbac_rc_test_assign_roles(): new role %u not in assign roles of role %u, pid %u, user %u!\n",
++ t_role,
++ i_attr_val1.rc_role,
++ current->pid,
++ current_uid());
++ return -EPERM;
++ }
++ return 0;
++}
++
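++/* Like check_comp_rc(), but for an explicitly given type: special type
++ * values above RC_type_max_value are only accepted for ASSIGN requests. */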
++enum rsbac_adf_req_ret_t
++rsbac_rc_check_type_comp(enum rsbac_target_t target,
++ rsbac_rc_type_id_t type,
++ enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid)
++{
++ int err;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ union rsbac_rc_target_id_t i_rc_subtid;
++ enum rsbac_rc_item_t i_rc_item;
++
++ if (!caller_pid)
++ caller_pid = task_pid(current);
++ /*
++ * we don't care about tried assignments of special type values,
++ * but deny other accesses to those
++ */
++ if (type > RC_type_max_value) {
++ if (request == RCR_ASSIGN)
++ return GRANTED;
++ else
++ return NOT_GRANTED;
++ }
++
++ /* get rc_role from process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC,
++ T_PROCESS,
++ i_tid,
++ A_rc_role, &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return NOT_GRANTED;
++ }
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ case T_FD:
++ i_rc_item = RI_type_comp_fd;
++ break;
++ case T_DEV:
++ i_rc_item = RI_type_comp_dev;
++ break;
++ case T_USER:
++ i_rc_item = RI_type_comp_user;
++ break;
++ case T_PROCESS:
++ i_rc_item = RI_type_comp_process;
++ break;
++ case T_IPC:
++ i_rc_item = RI_type_comp_ipc;
++ break;
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ case T_GROUP:
++ i_rc_item = RI_type_comp_group;
++ break;
++#endif
++#if defined(CONFIG_RSBAC_RC_NET_DEV_PROT)
++ case T_NETDEV:
++ i_rc_item = RI_type_comp_netdev;
++ break;
++#endif
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_PROT)
++ case T_NETTEMP:
++ i_rc_item = RI_type_comp_nettemp;
++ break;
++ case T_NETOBJ:
++ i_rc_item = RI_type_comp_netobj;
++ break;
++#endif
++
++ default:
++ rsbac_printk(KERN_WARNING "rsbac_rc_check_type_comp(): invalid target %i!\n",
++ target);
++ return NOT_GRANTED;
++ }
++ /* check type_comp_xxx of role */
++ i_rc_subtid.type = type;
++ if (rsbac_rc_check_comp(i_attr_val1.rc_role,
++ i_rc_subtid, i_rc_item, request))
++ return GRANTED;
++ else {
++#ifdef CONFIG_RSBAC_DEBUG
++ char tmp[50];
++ char tmp2[RSBAC_MAXNAMELEN];
++
++ rsbac_pr_debug(adf_rc, "rc_role is %i, %s rc_type is %i, request is %s -> NOT_GRANTED!\n",
++ i_attr_val1.rc_role,
++ get_target_name_only(tmp, target),
++ type,
++ get_rc_special_right_name(tmp2, request));
++#endif
++ return NOT_GRANTED;
++ }
++}
++
++/* exported for rc_syscalls.c */
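++/* Check whether the caller's role is a role admin: admin_type must be
++ * RC_role_admin, or RC_system_admin for read-only access. */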
++int rsbac_rc_test_role_admin(rsbac_boolean_t modify)
++{
++ int err;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_rc_target_id_t i_rc_tid;
++ union rsbac_rc_item_value_t i_rc_item_val1;
++
++ /* get rc_role of process */
++ i_tid.process = task_pid(current);
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid, A_rc_role, &i_attr_val1, TRUE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return -RSBAC_EREADFAILED;
++ }
++
++ /* get admin_type of role */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ if ((err = rsbac_rc_get_item(0, RT_ROLE, i_rc_tid, i_rc_tid, /* dummy */
++ RI_admin_type,
++ &i_rc_item_val1, NULL))) {
++ rsbac_rc_pr_get_error(RI_admin_type);
++ return -RSBAC_EREADFAILED;
++ }
++
++ /* allow, if RC_role_admin or (read_only and RC_system_admin) */
++ if ((i_rc_item_val1.admin_type == RC_role_admin)
++ || (!modify && (i_rc_item_val1.admin_type == RC_system_admin)
++ )
++ )
++ return 0;
++ else
++ return -EPERM;
++}
++
++/************************************************* */
++/* Externally visible functions */
++/************************************************* */
++
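++/* Main RC decision function called by the ADF dispatcher: selects the
++ * compatibility check to run based on request type and target. */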
++inline enum rsbac_adf_req_ret_t
++rsbac_adf_request_rc(enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++{
++ int err;
++ enum rsbac_adf_req_ret_t result = DO_NOT_CARE;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_rc_target_id_t i_rc_tid;
++ union rsbac_rc_target_id_t i_rc_subtid;
++ union rsbac_rc_item_value_t i_rc_item_val1;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val2;
++
++ switch (request) {
++ case R_SEARCH:
++ switch (target) {
++ case T_DIR:
++ case T_FILE:
++ case T_SYMLINK:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_DEV:
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_PROT)
++ case T_NETOBJ:
++#endif
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ case T_USER:
++ case T_GROUP:
++#endif
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_CLOSE:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_DEV:
++ case T_IPC:
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_PROT)
++ case T_NETOBJ:
++#endif
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_GET_STATUS_DATA:
++ switch (target) {
++ case T_SCD:
++ return check_comp_rc_scd
++ (tid.scd, request, caller_pid);
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ case T_IPC:
++ case T_PROCESS:
++ case T_DEV:
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ case T_USER:
++ case T_GROUP:
++#endif
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++#if defined(CONFIG_RSBAC_RC_NET_DEV_PROT)
++ case T_NETDEV:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++#endif
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_PROT)
++ case T_NETOBJ:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++#endif
++
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_SEND:
++ switch (target) {
++ case T_DEV:
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_PROT)
++ case T_NETOBJ:
++#endif
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ case T_UNIXSOCK:
++ case T_IPC:
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_UNIX_PROCESS)
++ if (attr == A_process) {
++ enum rsbac_adf_req_ret_t tmp_result;
++
++ i_tid.process = attr_val.process;
++ tmp_result = check_comp_rc(T_PROCESS, i_tid,
++ R_SEND,
++ caller_pid);
++ if ((tmp_result == NOT_GRANTED)
++ || (tmp_result == UNDEFINED)
++ )
++ return tmp_result;
++ }
++#endif /* UNIX_PROCESS */
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++
++ /* all other cases are undefined */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_LISTEN:
++ case R_NET_SHUTDOWN:
++ switch (target) {
++ case T_UNIXSOCK:
++ case T_IPC:
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_PROT)
++ case T_NETOBJ:
++#endif
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are undefined */
++ default:
++ return DO_NOT_CARE;
++ }
++ case R_ACCEPT:
++ case R_CONNECT:
++ case R_RECEIVE:
++ switch (target) {
++ case T_UNIXSOCK:
++ case T_IPC:
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_UNIX_PROCESS)
++ if (attr == A_process) {
++ enum rsbac_adf_req_ret_t tmp_result;
++
++ i_tid.process = attr_val.process;
++ tmp_result = check_comp_rc(T_PROCESS, i_tid,
++ request,
++ caller_pid);
++ if ((tmp_result == NOT_GRANTED)
++ || (tmp_result == UNDEFINED)
++ )
++ return tmp_result;
++ }
++#endif /* UNIX_PROCESS */
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_PROT)
++ case T_NETOBJ:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++#endif
++
++ /* all other cases are undefined */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_READ:
++ case R_WRITE:
++ switch (target) {
++ case T_DIR:
++#ifdef CONFIG_RSBAC_RW
++ case T_FILE:
++ case T_FIFO:
++ case T_DEV:
++#endif
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ case T_USER:
++ case T_GROUP:
++#endif
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_PROT)
++#if defined(CONFIG_RSBAC_NET_OBJ_RW)
++ case T_NETTEMP:
++#endif
++#endif
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++#ifdef CONFIG_RSBAC_RW
++ case T_IPC:
++ case T_UNIXSOCK:
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_UNIX_PROCESS)
++ if (attr == A_process) {
++ enum rsbac_adf_req_ret_t tmp_result;
++
++ i_tid.process = attr_val.process;
++ if (request == R_READ)
++ tmp_result =
++ check_comp_rc(T_PROCESS, i_tid,
++ R_RECEIVE,
++ caller_pid);
++ else
++ tmp_result =
++ check_comp_rc(T_PROCESS, i_tid,
++ R_SEND,
++ caller_pid);
++ if ((tmp_result == NOT_GRANTED)
++ || (tmp_result == UNDEFINED)
++ )
++ return tmp_result;
++ }
++#endif /* UNIX_PROCESS */
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++#endif /* RW */
++
++ case T_SCD:
++ return check_comp_rc_scd
++ (tid.scd, request, caller_pid);
++
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_PROT)
++#if defined(CONFIG_RSBAC_NET_OBJ_RW)
++ case T_NETOBJ:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++#endif
++#endif
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_APPEND_OPEN:
++ case R_READ_WRITE_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_DEV:
++ case T_FIFO:
++ case T_IPC:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_MAP_EXEC:
++ switch (target) {
++ case T_FILE:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++ case T_NONE:
++ /* anonymous mapping */
++ return check_comp_rc_scd
++ (ST_other, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_CHANGE_GROUP:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_IPC:
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ case T_USER:
++#endif
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_CHANGE_OWNER:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_IPC:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++#ifdef CONFIG_RSBAC_USER_CHOWN
++ case T_USER:
++#if defined(CONFIG_RSBAC_AUTH)
++ result = check_comp_rc(target, tid, request, caller_pid);
++ if((result == GRANTED) || (result == DO_NOT_CARE))
++ return result;
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_AUTH, T_PROCESS,
++ i_tid,
++ A_auth_last_auth,
++ &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_auth_last_auth);
++ return NOT_GRANTED;
++ }
++ if(i_attr_val1.auth_last_auth != tid.user)
++ return NOT_GRANTED;
++ else
++ return check_comp_rc(target, tid, RCR_CHANGE_AUTHED_OWNER, caller_pid);
++#else
++ return check_comp_rc(target, tid, request, caller_pid);
++#endif
++#endif
++
++ case T_PROCESS:
++ /* get rc_role from process */
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ tid,
++ A_rc_role,
++ &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return NOT_GRANTED;
++ }
++ /* get def_process_chown_type of role */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ if ((err = rsbac_rc_get_item(0, RT_ROLE, i_rc_tid, i_rc_tid, /* dummy */
++ RI_def_process_chown_type,
++ &i_rc_item_val1,
++ NULL))) {
++ rsbac_rc_pr_get_error
++ (RI_def_process_chown_type);
++ return NOT_GRANTED;
++ }
++ if ((i_rc_item_val1.type_id == RC_type_no_chown)
++ || (i_rc_item_val1.type_id ==
++ RC_type_no_create)
++ )
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_CHDIR:
++ switch (target) {
++ case T_DIR:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_CLONE:
++ if (target == T_PROCESS) {
++ /* check, whether we may create process of def_process_create_type */
++ /* get rc_role from process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return NOT_GRANTED;
++ }
++ /* get def_process_create_type of role */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ if ((err = rsbac_rc_get_item(0,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_tid,
++ RI_def_process_create_type,
++ &i_rc_item_val1,
++ NULL))) {
++ rsbac_rc_pr_get_error
++ (RI_def_process_create_type);
++ return NOT_GRANTED;
++ }
++ switch (i_rc_item_val1.type_id) {
++ case RC_type_no_create:
++ rsbac_pr_debug(adf_rc, "pid %u (%.15s), owner %u, rc_role %u, def_process_create_type no_create, request CLONE -> NOT_GRANTED!\n",
++ pid_nr(caller_pid), current->comm,
++ current_uid(),
++ i_attr_val1.rc_role);
++ return NOT_GRANTED;
++
++ case RC_type_use_new_role_def_create:
++ case RC_type_use_fd:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_rc(): invalid type use_new_role_def_create in def_process_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return NOT_GRANTED;
++
++ case RC_type_inherit_parent:
++ case RC_type_inherit_process:
++ return GRANTED;
++
++ default:
++ /* check, whether role has CREATE right to new type */
++ /* check type_comp_process of role */
++ i_rc_subtid.type = i_rc_item_val1.type_id;
++ return rc_check_create(caller_pid,
++ target,
++ i_rc_tid,
++ i_rc_subtid,
++ RI_type_comp_process);
++ }
++ } else
++ return DO_NOT_CARE;
++
++ /* Creating dir or (pseudo) file IN target dir! */
++ case R_CREATE:
++ switch (target) {
++ case T_DIR:
++ /* check, whether we may create files/dirs in this dir */
++ result =
++ check_comp_rc(target, tid, request,
++ caller_pid);
++ if ((result != GRANTED) && (result != DO_NOT_CARE))
++ return result;
++
++ /* get rc_role from process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return NOT_GRANTED;
++ }
++ /* Check, whether this process has a preselected type */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_select_type,
++ &i_attr_val2, FALSE))) {
++ rsbac_pr_get_error(A_rc_select_type);
++ return NOT_GRANTED;
++ }
++ if (i_attr_val2.rc_select_type == RC_type_use_fd) {
++ /* get def_fd_create_type of role */
++ /* First get target dir's efftype */
++ if ((err = rsbac_get_attr(SW_RC,
++ target,
++ tid,
++ A_rc_type_fd,
++ &i_attr_val2, TRUE))) {
++ rsbac_pr_get_error(A_rc_type_fd);
++ return NOT_GRANTED;
++ }
++ i_rc_tid.role = i_attr_val1.rc_role;
++ i_rc_subtid.type = i_attr_val2.rc_type;
++ if ((err = rsbac_rc_get_item(0, RT_ROLE, i_rc_tid, i_rc_subtid, RI_def_fd_ind_create_type, &i_rc_item_val1, NULL))) { /* No individual create type -> try global */
++ if ((err = rsbac_rc_get_item(0,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_subtid,
++ RI_def_fd_create_type,
++ &i_rc_item_val1,
++ NULL))) {
++ rsbac_rc_pr_get_error
++ (RI_def_fd_create_type);
++ return NOT_GRANTED;
++ }
++ }
++ } else
++ i_rc_item_val1.type_id = i_attr_val2.rc_select_type;
++
++ switch (i_rc_item_val1.type_id) {
++ case RC_type_inherit_parent:
++ return GRANTED;
++ case RC_type_no_create:
++ rsbac_pr_debug(adf_rc, "pid %u (%.15s), owner %u, rc_role %u, def_fd_create_type no_create, request CREATE -> NOT_GRANTED!\n",
++ pid_nr(caller_pid), current->comm,
++ current_uid(),
++ i_attr_val1.rc_role);
++ return NOT_GRANTED;
++ break;
++
++ case RC_type_use_new_role_def_create:
++ case RC_type_inherit_process:
++ case RC_type_use_fd:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_rc(): invalid type use_new_role_def_create in def_fd_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return NOT_GRANTED;
++
++ default:
++ /* check, whether role has CREATE right to new type */
++ /* get type_comp_fd of role */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ i_rc_subtid.type = i_rc_item_val1.type_id;
++ return rc_check_create(caller_pid,
++ target,
++ i_rc_tid,
++ i_rc_subtid,
++ RI_type_comp_fd);
++ }
++
++ case T_IPC:
++ /* check, whether we may create IPC of def_ipc_create_type */
++ /* get rc_role from process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return NOT_GRANTED;
++ }
++ /* get def_ipc_create_type of role */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ if ((err = rsbac_rc_get_item(0,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_tid,
++ RI_def_ipc_create_type,
++ &i_rc_item_val1,
++ NULL))) {
++ rsbac_rc_pr_get_error
++ (RI_def_ipc_create_type);
++ return NOT_GRANTED;
++ }
++ switch (i_rc_item_val1.type_id) {
++ case RC_type_no_create:
++ rsbac_pr_debug(adf_rc, "pid %u (%.15s), owner %u, rc_role %u, def_ipc_create_type no_create, request CREATE -> NOT_GRANTED!\n",
++ pid_nr(caller_pid), current->comm,
++ current_uid(),
++ i_attr_val1.rc_role);
++ return NOT_GRANTED;
++
++ case RC_type_use_new_role_def_create:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_rc(): invalid type use_new_role_def_create in def_ipc_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return NOT_GRANTED;
++
++ case RC_type_inherit_parent:
++ case RC_type_inherit_process:
++ case RC_type_use_fd:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_rc(): invalid type inherit_parent in def_ipc_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return NOT_GRANTED;
++
++ default:
++ /* check, whether role has CREATE right to new type */
++ /* get type_comp_ipc of role */
++ i_rc_subtid.type = i_rc_item_val1.type_id;
++ return rc_check_create(caller_pid,
++ target,
++ i_rc_tid,
++ i_rc_subtid,
++ RI_type_comp_ipc);
++ }
++
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ case T_USER:
++ /* check, whether we may create USER of def_user_create_type */
++ /* get rc_role from process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return NOT_GRANTED;
++ }
++ /* get def_user_create_type of role */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ if ((err = rsbac_rc_get_item(0,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_tid,
++ RI_def_user_create_type,
++ &i_rc_item_val1,
++ NULL))) {
++ rsbac_rc_pr_get_error
++ (RI_def_user_create_type);
++ return NOT_GRANTED;
++ }
++ switch (i_rc_item_val1.type_id) {
++ case RC_type_no_create:
++ rsbac_pr_debug(adf_rc, "pid %u (%.15s), owner %u, rc_role %u, def_user_create_type no_create, request CREATE -> NOT_GRANTED!\n",
++ pid_nr(caller_pid), current->comm,
++ current_uid(),
++ i_attr_val1.rc_role);
++ return NOT_GRANTED;
++
++ case RC_type_use_new_role_def_create:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_rc(): invalid type use_new_role_def_create in def_user_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return NOT_GRANTED;
++
++ case RC_type_inherit_parent:
++ case RC_type_inherit_process:
++ case RC_type_use_fd:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_rc(): invalid type inherit_parent in def_user_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return NOT_GRANTED;
++
++ default:
++ /* check, whether role has CREATE right to new type */
++ /* get type_comp_user of role */
++ i_rc_subtid.type = i_rc_item_val1.type_id;
++ return rc_check_create(caller_pid,
++ target,
++ i_rc_tid,
++ i_rc_subtid,
++ RI_type_comp_user);
++ }
++
++ case T_GROUP:
++ /* check, whether we may create GROUP of def_group_create_type */
++ /* get rc_role from process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return NOT_GRANTED;
++ }
++ /* get def_group_create_type of role */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ if ((err = rsbac_rc_get_item(0,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_tid,
++ RI_def_group_create_type,
++ &i_rc_item_val1,
++ NULL))) {
++ rsbac_rc_pr_get_error
++ (RI_def_group_create_type);
++ return NOT_GRANTED;
++ }
++ switch (i_rc_item_val1.type_id) {
++ case RC_type_no_create:
++ rsbac_pr_debug(adf_rc, "pid %u (%.15s), owner %u, rc_role %u, def_group_create_type no_create, request CREATE -> NOT_GRANTED!\n",
++ pid_nr(caller_pid), current->comm,
++ current_uid(),
++ i_attr_val1.rc_role);
++ return NOT_GRANTED;
++
++ case RC_type_use_new_role_def_create:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_rc(): invalid type use_new_role_def_create in def_group_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return NOT_GRANTED;
++
++ case RC_type_inherit_parent:
++ case RC_type_inherit_process:
++ case RC_type_use_fd:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_rc(): invalid type inherit_parent in def_group_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return NOT_GRANTED;
++
++ default:
++ /* check, whether role has CREATE right to new type */
++ /* get type_comp_group of role */
++ i_rc_subtid.type = i_rc_item_val1.type_id;
++ return rc_check_create(caller_pid,
++ target,
++ i_rc_tid,
++ i_rc_subtid,
++ RI_type_comp_group);
++ }
++#endif /* RSBAC_RC_UM_PROT */
++
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_PROT)
++ case T_NETTEMP:
++ /* get rc_role from process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC,
++ T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return NOT_GRANTED;
++ }
++ /* get type_comp_xxx of role - we always use type GENERAL for CREATE */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ i_rc_subtid.type = RSBAC_RC_GENERAL_TYPE;
++ return rc_check_create(caller_pid,
++ target,
++ i_rc_tid,
++ i_rc_subtid,
++ RI_type_comp_nettemp);
++
++ case T_NETOBJ:
++ /* check, whether we may create NETOBJ of this type */
++ return(check_comp_rc(target, tid, request, caller_pid));
++#endif
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_DELETE:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ case T_IPC:
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_PROT)
++ case T_NETTEMP:
++ case T_NETOBJ:
++#endif
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ case T_USER:
++ case T_GROUP:
++#endif
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_EXECUTE:
++ switch (target) {
++ case T_FILE:
++ /* get rc_role from process */
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ tid,
++ A_rc_role,
++ &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return NOT_GRANTED;
++ }
++ /* get def_process_execute_type of role */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ if ((err = rsbac_rc_get_item(0,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_tid,
++ RI_def_process_execute_type,
++ &i_rc_item_val1,
++ NULL))) {
++ rsbac_rc_pr_get_error
++ (RI_def_process_execute_type);
++ return NOT_GRANTED;
++ }
++ if (i_rc_item_val1.type_id == RC_type_no_execute)
++ return NOT_GRANTED;
++ else
++ return check_comp_rc
++ (target, tid, request,
++ caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_GET_PERMISSIONS_DATA:
++ switch (target) {
++ case T_SCD:
++ return check_comp_rc_scd
++ (tid.scd, request, caller_pid);
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ case T_IPC:
++ case T_DEV:
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_PROT)
++ case T_NETOBJ:
++#endif
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ case T_USER:
++ case T_GROUP:
++#endif
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ default:
++ return DO_NOT_CARE;
++ };
++
++ case R_LINK_HARD:
++ switch (target) {
++ case T_FILE:
++ case T_FIFO:
++ case T_SYMLINK:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_MODIFY_ACCESS_DATA:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_AUTHENTICATE:
++ switch (target) {
++ case T_USER:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_MODIFY_ATTRIBUTE:
++ switch (attr) { /* owner must be changed by another request to prevent inconsistency */
++ case A_owner:
++ return NOT_GRANTED;
++ case A_rc_type:
++ case A_local_rc_type:
++ case A_remote_rc_type:
++ case A_rc_type_fd:
++ case A_rc_type_nt:
++ case A_rc_select_type:
++ /* Granted on target? */
++ result =
++ check_comp_rc(target, tid, request,
++ caller_pid);
++ if ((result == GRANTED)
++ || (result == DO_NOT_CARE)
++ ) {
++ /* Granted on type? */
++ if ( (target == T_NETTEMP)
++ && (attr == A_rc_type)
++ )
++ target = T_NETOBJ;
++ result =
++ rsbac_rc_check_type_comp(target,
++ attr_val.
++ rc_type,
++ RCR_ASSIGN,
++ caller_pid);
++ if ((result == GRANTED)
++ || (result == DO_NOT_CARE)
++ )
++ return result;
++ }
++ /* Classical admin_type check */
++ if ((err = rsbac_rc_test_role_admin(TRUE)))
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++
++ case A_rc_force_role:
++ case A_rc_initial_role:
++ case A_rc_role:
++ case A_rc_def_role:
++ /* Granted on target? */
++ result =
++ check_comp_rc(target, tid, request,
++ caller_pid);
++ if ((result == GRANTED)
++ || (result == DO_NOT_CARE)
++ ) {
++ /* test assign_roles of process / modify */
++ if (!
++ (err =
++ rsbac_rc_test_assign_roles(target,
++ tid, attr,
++ attr_val.
++ rc_role)))
++ return GRANTED;
++ }
++ /* Classical admin_type check */
++ if (rsbac_rc_test_role_admin(TRUE))
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++
++ /* a user's pseudo may only be changed if the caller may also assign that user's role */
++ case A_pseudo:
++ if (target != T_USER)
++ return NOT_GRANTED;
++ /* test assign_roles of process for user's role only */
++ if (rsbac_rc_test_assign_roles
++ (target, tid, A_rc_def_role,
++ RC_role_inherit_user))
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++
++#ifdef CONFIG_RSBAC_RC_GEN_PROT
++ case A_log_array_low:
++ case A_log_array_high:
++ case A_log_program_based:
++ case A_log_user_based:
++ case A_symlink_add_remote_ip:
++ case A_symlink_add_uid:
++ case A_symlink_add_rc_role:
++ case A_linux_dac_disable:
++ case A_fake_root_uid:
++ case A_audit_uid:
++ case A_auid_exempt:
++ case A_remote_ip:
++ case A_vset:
++ case A_program_file:
++ /* Explicitly granted? */
++ result =
++ check_comp_rc(target, tid, request,
++ caller_pid);
++ if ((result == GRANTED)
++ || (result == DO_NOT_CARE)
++ )
++ return result;
++ /* Failed -> Classical admin_type check / modify */
++ if (rsbac_rc_test_role_admin(TRUE))
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++#endif
++
++ /* All attributes (remove target!) */
++ case A_none:
++ switch (target) {
++ case T_USER:
++ /* test assign_roles of process for user's role */
++ if ((err =
++ rsbac_rc_test_assign_roles(target,
++ tid,
++ A_rc_def_role,
++ RC_role_inherit_user)))
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++
++ default:
++ /* Explicitly granted? */
++ return check_comp_rc
++ (target, tid, request,
++ caller_pid);
++ }
++
++#ifdef CONFIG_RSBAC_RC_AUTH_PROT
++ case A_auth_may_setuid:
++ case A_auth_may_set_cap:
++ case A_auth_start_uid:
++ case A_auth_start_euid:
++ case A_auth_start_gid:
++ case A_auth_start_egid:
++ case A_auth_learn:
++ case A_auth_add_f_cap:
++ case A_auth_remove_f_cap:
++ case A_auth_last_auth:
++ /* may manipulate auth capabilities, if allowed in general... */
++ result =
++ check_comp_rc_scd(RST_auth_administration,
++ request, caller_pid);
++ if ((result == GRANTED)
++ || (result == DO_NOT_CARE)
++ ) {
++ /* ...and for this target */
++ result =
++ check_comp_rc(target, tid,
++ RCR_MODIFY_AUTH,
++ caller_pid);
++ if ((result == GRANTED)
++ || (result == DO_NOT_CARE)
++ )
++ return result;
++ }
++ /* Last chance: classical admin_type check */
++ if ((err = rsbac_rc_test_role_admin(TRUE)))
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++#endif
++#if defined(CONFIG_RSBAC_RC_LEARN)
++ case A_rc_learn:
++ /* Only role admin */
++ if ((err = rsbac_rc_test_role_admin(TRUE)))
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++#endif
++
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_MODIFY_PERMISSIONS_DATA:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ case T_IPC:
++ case T_DEV:
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ case T_USER:
++ case T_GROUP:
++#endif
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_PROT)
++ case T_NETOBJ:
++#endif
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ case T_SCD:
++ return check_comp_rc_scd
++ (tid.scd, request, caller_pid);
++
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE
++ case T_NONE:
++ /* may turn off Linux DAC, if compatible */
++ return check_comp_rc_scd
++ (ST_other, request, caller_pid);
++#endif
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_MODIFY_SYSTEM_DATA:
++ switch (target) {
++ case T_SCD:
++ return check_comp_rc_scd
++ (tid.scd, request, caller_pid);
++
++ case T_DEV:
++ case T_PROCESS:
++ case T_IPC:
++#if defined(CONFIG_RSBAC_RC_NET_DEV_PROT)
++ case T_NETDEV:
++#endif
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_PROT)
++ case T_NETOBJ:
++#endif
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_MOUNT:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_DEV:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_READ_ATTRIBUTE:
++ switch (attr) {
++ case A_rc_type:
++ case A_rc_type_fd:
++ case A_rc_type_nt:
++ case A_rc_force_role:
++ case A_rc_initial_role:
++ case A_rc_role:
++ case A_rc_def_role:
++ case A_rc_select_type:
++ case A_pseudo:
++#ifdef CONFIG_RSBAC_RC_GEN_PROT
++ case A_owner:
++ case A_log_array_low:
++ case A_log_array_high:
++ case A_log_program_based:
++ case A_log_user_based:
++ case A_symlink_add_remote_ip:
++ case A_symlink_add_uid:
++ case A_symlink_add_rc_role:
++ case A_linux_dac_disable:
++ case A_fake_root_uid:
++ case A_audit_uid:
++ case A_auid_exempt:
++ case A_remote_ip:
++ case A_vset:
++ case A_program_file:
++#endif
++ /* Explicitly granted? */
++ result =
++ check_comp_rc(target, tid, request,
++ caller_pid);
++ if ((result == GRANTED)
++ || (result == DO_NOT_CARE)
++ )
++ return result;
++ /* Failed -> Classical admin_type check / read-only */
++ if (rsbac_rc_test_role_admin(FALSE))
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++
++#ifdef CONFIG_RSBAC_RC_AUTH_PROT
++ case A_auth_may_setuid:
++ case A_auth_may_set_cap:
++ case A_auth_start_uid:
++ case A_auth_start_euid:
++ case A_auth_start_gid:
++ case A_auth_start_egid:
++ case A_auth_learn:
++ case A_auth_add_f_cap:
++ case A_auth_remove_f_cap:
++ case A_auth_last_auth:
++ /* may read auth capabilities, if compatible */
++ result =
++ check_comp_rc_scd(RST_auth_administration,
++ request, caller_pid);
++ if ((result == GRANTED)
++ || (result == DO_NOT_CARE)
++ )
++ return result;
++ /* Failed -> Classical admin_type check / read-only */
++ if (rsbac_rc_test_role_admin(FALSE))
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++#endif
++
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_READ_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_DEV:
++ case T_IPC:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_ADD_TO_KERNEL:
++ switch (target) {
++ case T_NONE:
++ /* may add to kernel, if compatible */
++ return check_comp_rc_scd
++ (ST_other, request, caller_pid);
++
++ case T_FILE:
++ case T_DEV:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++
++ case R_ALTER:
++ /* only for IPC */
++ switch (target) {
++ case T_IPC:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_REMOVE_FROM_KERNEL:
++ switch (target) {
++ case T_NONE:
++ return check_comp_rc_scd
++ (ST_other, request, caller_pid);
++
++ case T_FILE:
++ case T_DEV:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_RENAME:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ case T_USER:
++ case T_GROUP:
++#endif
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_SEND_SIGNAL:
++ case R_TRACE:
++ if (target == T_PROCESS)
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++ else
++ return DO_NOT_CARE;
++
++ case R_SHUTDOWN:
++ switch (target) {
++ case T_NONE:
++ return check_comp_rc_scd
++ (ST_other, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_SWITCH_LOG:
++ switch (target) {
++ case T_NONE:
++ return check_comp_rc_scd
++ (ST_other, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_SWITCH_MODULE:
++ switch (target) {
++ case T_NONE:
++ /* we need the switch_target */
++ if (attr != A_switch_target)
++ return NOT_GRANTED;
++ /* do not care for other modules */
++ if ((attr_val.switch_target != SW_RC)
++#ifdef CONFIG_RSBAC_SOFTMODE
++ && (attr_val.switch_target != SW_SOFTMODE)
++#endif
++#ifdef CONFIG_RSBAC_FREEZE
++ && (attr_val.switch_target != SW_FREEZE)
++#endif
++#ifdef CONFIG_RSBAC_RC_AUTH_PROT
++ && (attr_val.switch_target != SW_AUTH)
++#endif
++ )
++ return DO_NOT_CARE;
++ return check_comp_rc_scd
++ (ST_other, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_TERMINATE:
++ return DO_NOT_CARE;
++
++ case R_TRUNCATE:
++ switch (target) {
++ case T_FILE:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_WRITE_OPEN:
++ switch (target) {
++ case T_FILE:
++ case T_DEV:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_IPC:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_UMOUNT:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_DEV:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ /* all other cases are unknown */
++ default:
++ return DO_NOT_CARE;
++ }
++
++
++ case R_BIND:
++ switch (target) {
++ case T_IPC:
++ /* check, whether we may create IPC of def_ipc_create_type */
++ /* get rc_role from process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return NOT_GRANTED;
++ }
++ /* get def_ipc_create_type of role */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ if ((err = rsbac_rc_get_item(0,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_tid,
++ RI_def_ipc_create_type,
++ &i_rc_item_val1,
++ NULL))) {
++ rsbac_rc_pr_get_error
++ (RI_def_ipc_create_type);
++ return NOT_GRANTED;
++ }
++ switch (i_rc_item_val1.type_id) {
++ case RC_type_no_create:
++ rsbac_pr_debug(adf_rc, "pid %u (%.15s), owner %u, rc_role %u, def_ipc_create_type no_create, request BIND -> NOT_GRANTED!\n",
++ pid_nr(caller_pid), current->comm,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
++ current_uid(),
++#else
++ current->uid,
++#endif
++ i_attr_val1.rc_role);
++ return NOT_GRANTED;
++
++ case RC_type_use_new_role_def_create:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_rc(): invalid type use_new_role_def_create in def_ipc_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return NOT_GRANTED;
++
++ case RC_type_inherit_parent:
++ case RC_type_inherit_process:
++ case RC_type_use_fd:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_request_rc(): invalid type inherit_parent in def_ipc_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return NOT_GRANTED;
++
++ default:
++ /* check, whether role has CREATE right to new type */
++ /* get type_comp_ipc of role */
++ i_rc_subtid.type = i_rc_item_val1.type_id;
++ return rc_check_create(caller_pid,
++ target,
++ i_rc_tid,
++ i_rc_subtid,
++ RI_type_comp_ipc);
++ }
++#if defined(CONFIG_RSBAC_RC_NET_DEV_PROT)
++ case T_NETDEV:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++#endif
++
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_PROT)
++ case T_NETOBJ:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++#endif
++
++ /* all other cases are undefined */
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_IOCTL:
++ switch (target) {
++ case T_DEV:
++ case T_UNIXSOCK:
++ case T_IPC:
++#if defined(CONFIG_RSBAC_RC_NET_OBJ_PROT)
++ case T_NETOBJ:
++#endif
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ default:
++ return DO_NOT_CARE;
++ }
++
++ case R_LOCK:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++
++ default:
++ return DO_NOT_CARE;
++ }
++ case RCR_SELECT:
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ return check_comp_rc
++ (target, tid, request, caller_pid);
++ default:
++ return DO_NOT_CARE;
++ }
++ default:
++ return DO_NOT_CARE;
++ }
++
++ return result;
++}
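/*
 * Illustrative sketch (not part of the RSBAC patch itself): the calling
 * convention of rsbac_adf_request_rc() as a generic ADF dispatcher might
 * use it. The RC module answers GRANTED, NOT_GRANTED, DO_NOT_CARE or
 * UNDEFINED; DO_NOT_CARE leaves the decision to other active modules.
 * The wrapper name and its simplified handling are hypothetical.
 */
static enum rsbac_adf_req_ret_t
example_dispatch_read_open(rsbac_pid_t caller_pid, rsbac_uid_t owner,
			   union rsbac_target_id_t file_tid)
{
	union rsbac_attribute_value_t attr_val;

	/* no extra attribute accompanies a plain READ_OPEN request */
	memset(&attr_val, 0, sizeof(attr_val));
	return rsbac_adf_request_rc(R_READ_OPEN, caller_pid,
				    T_FILE, file_tid,
				    A_none, attr_val, owner);
}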
++
++/*****************************************************************************/
++/* If the request was granted and the operation has been performed, the */
++/* AEF can call the following function to get all ACI set correctly. For */
++/* write accesses performed entirely within the kernel, this is usually */
++/* skipped to avoid extra calls (including R_CLOSE for cleanup). Because */
++/* of this, the write boundary is not adjusted - there is no user-level */
++/* writing anyway... */
++/* The second instance of target specification is the new target, if one has */
++/* been created, otherwise its values are ignored. */
++/* On success, 0 is returned, and an error from rsbac/error.h otherwise. */
++
++inline int rsbac_adf_set_attr_rc(enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_target_t new_target,
++ union rsbac_target_id_t new_tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++{
++ int err;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_attribute_value_t i_attr_val2;
++ union rsbac_rc_target_id_t i_rc_tid;
++ union rsbac_rc_target_id_t i_rc_subtid;
++ union rsbac_rc_item_value_t i_rc_item_val1;
++
++ switch (request) {
++ case R_CLOSE:
++ case R_ACCEPT:
++ case R_READ:
++ return 0;
++ case R_CHANGE_OWNER:
++ switch (target) {
++ case T_PROCESS:
++ /* setting owner for process is done in main dispatcher */
++ /* Here we have to adjust the rc_type and set the rc_role */
++ /* to the new owner's rc_def_role */
++ if (attr != A_owner)
++ return -RSBAC_EINVALIDATTR;
++
++ /* get old rc_role from process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1, TRUE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return -RSBAC_EREADFAILED;
++ }
++ /* get def_process_chown_type of old role */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ if ((err = rsbac_rc_get_item(0,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_tid,
++ RI_def_process_chown_type,
++ &i_rc_item_val1,
++ NULL))) {
++ rsbac_rc_pr_get_error
++ (RI_def_process_chown_type);
++ return -RSBAC_EREADFAILED;
++ }
++
++ /* get rc_force_role from process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_force_role,
++ &i_attr_val1, TRUE))) {
++ rsbac_pr_get_error(A_rc_force_role);
++ return -RSBAC_EREADFAILED;
++ }
++ /* only set to user's rc_def_role, if indicated by force_role, otherwise keep */
++ if ((i_attr_val1.rc_force_role ==
++ RC_role_inherit_user)
++ || (i_attr_val1.rc_force_role ==
++ RC_role_inherit_up_mixed)
++ ) {
++ /* get rc_def_role from new owner */
++ i_tid.user = attr_val.owner;
++ if ((err = rsbac_get_attr(SW_RC, T_USER,
++ i_tid,
++ A_rc_def_role,
++ &i_attr_val1,
++ TRUE))) {
++ rsbac_pr_get_error(A_rc_def_role);
++ return -RSBAC_EREADFAILED;
++ }
++ /* check rc_def_role, warn, if unusable */
++ if (i_attr_val1.rc_def_role >
++ RC_role_max_value) {
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_rc(): rc_def_role %u of user %u is higher than MAX_ROLE %u, setting role of process %u to GENERAL_ROLE %u!\n",
++ i_attr_val1.
++ rc_def_role,
++ attr_val.owner,
++ RC_role_max_value,
++ pid_nr(caller_pid),
++ RSBAC_RC_GENERAL_ROLE);
++ i_attr_val1.rc_def_role =
++ RSBAC_RC_GENERAL_ROLE;
++ }
++ /* set new rc_role for process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_set_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ i_attr_val1))) {
++ rsbac_pr_set_error(A_rc_role);
++ return -RSBAC_EWRITEFAILED;
++ }
++ } else
++ /* otherwise set it to the force_role, if it is a real role */
++ if ((i_attr_val1.rc_force_role <= RC_role_max_value)
++ ) {
++ /* set new rc_role for process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_set_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ i_attr_val1))) {
++ rsbac_pr_set_error(A_rc_role);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++
++ /* adjust type: switch on def_process_chown_type of old role */
++ switch (i_rc_item_val1.type_id) {
++ case RC_type_inherit_parent:
++ case RC_type_inherit_process:
++ /* keep old type */
++ break;
++ case RC_type_use_new_role_def_create:
++ /* get new rc_role from process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1, TRUE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return -RSBAC_EREADFAILED;
++ }
++ /* Cannot adjust if the new role is not a real role */
++ if (i_attr_val1.rc_role >
++ RC_role_max_value)
++ break;
++ /* get def_process_create_type of new role */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ if ((err = rsbac_rc_get_item(0,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_tid,
++ RI_def_process_create_type,
++ &i_rc_item_val1,
++ NULL))) {
++ rsbac_rc_pr_get_error
++ (RI_def_process_create_type);
++ return -RSBAC_EREADFAILED;
++ }
++ switch (i_rc_item_val1.type_id) {
++ case RC_type_inherit_parent:
++ case RC_type_inherit_process:
++ /* keep old type */
++ break;
++ case RC_type_use_new_role_def_create:
++ /* error - complain, but keep type (inherit) */
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_rc(): invalid type use_new_role_def_create in def_process_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ break;
++ case RC_type_no_create:
++ /* set rc_type for process to general */
++ i_rc_item_val1.type_id =
++ RSBAC_RC_GENERAL_TYPE;
++ /* fall through */
++ default:
++ /* set rc_type for process */
++ i_attr_val1.rc_type =
++ i_rc_item_val1.type_id;
++ if ((err =
++ rsbac_set_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_type,
++ i_attr_val1)))
++ {
++ rsbac_pr_set_error(A_rc_type);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ break;
++ case RC_type_no_create:
++ case RC_type_no_chown:
++ /* set rc_type for process to general */
++ i_rc_item_val1.type_id =
++ RSBAC_RC_GENERAL_TYPE;
++ /* fall through */
++ default:
++ /* set rc_type for process */
++ i_attr_val1.rc_type =
++ i_rc_item_val1.type_id;
++ if ((err =
++ rsbac_set_attr(SW_RC, T_PROCESS, i_tid,
++ A_rc_type,
++ i_attr_val1))) {
++ rsbac_pr_set_error(A_rc_type);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++
++ return 0;
++
++ /* all other cases */
++ default:
++ return 0;
++ }
++
++ case R_CLONE:
++ if (target == T_PROCESS) {
++ /* get rc_role from process */
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ tid,
++ A_rc_role,
++ &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return -RSBAC_EREADFAILED;
++ }
++
++ /* get rc_force_role from process */
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ tid,
++ A_rc_force_role,
++ &i_attr_val2, FALSE))) {
++ rsbac_pr_get_error(A_rc_force_role);
++ return -RSBAC_EREADFAILED;
++ }
++
++ /* set rc_role for new process */
++ if ((err = rsbac_set_attr(SW_RC, T_PROCESS,
++ new_tid,
++ A_rc_role,
++ i_attr_val1))) {
++ rsbac_pr_set_error(A_rc_role);
++ return -RSBAC_EWRITEFAILED;
++ }
++
++ /* set rc_force_role for new process */
++ if ((err = rsbac_set_attr(SW_RC, T_PROCESS,
++ new_tid,
++ A_rc_force_role,
++ i_attr_val2))) {
++ rsbac_pr_set_error(A_rc_force_role);
++ return -RSBAC_EWRITEFAILED;
++ }
++
++ /* get def_process_create_type of role */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ if ((err = rsbac_rc_get_item(0,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_tid,
++ RI_def_process_create_type,
++ &i_rc_item_val1,
++ NULL))) {
++ rsbac_rc_pr_get_error
++ (RI_def_process_create_type);
++ return -RSBAC_EREADFAILED;
++ }
++ switch (i_rc_item_val1.type_id) {
++ case RC_type_inherit_parent:
++ case RC_type_inherit_process:
++ /* copy old type */
++ /* get rc_type from old process */
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ tid,
++ A_rc_type,
++ &i_attr_val1,
++ FALSE))) {
++ rsbac_pr_get_error(A_rc_type);
++ return -RSBAC_EREADFAILED;
++ }
++ /* set rc_type for new process */
++ if ((err = rsbac_set_attr(SW_RC, T_PROCESS,
++ new_tid,
++ A_rc_type,
++ i_attr_val1))) {
++ rsbac_pr_set_error(A_rc_type);
++ return -RSBAC_EWRITEFAILED;
++ }
++ break;
++ case RC_type_no_create:
++ return -RSBAC_EDECISIONMISMATCH;
++ case RC_type_use_new_role_def_create:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_rc(): invalid type use_new_role_def_create in def_process_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return -RSBAC_EINVALIDVALUE;
++ default:
++ /* set rc_type for new process */
++ i_attr_val1.rc_type =
++ i_rc_item_val1.type_id;
++ if ((err =
++ rsbac_set_attr(SW_RC, T_PROCESS, new_tid,
++ A_rc_type,
++ i_attr_val1))) {
++ rsbac_pr_set_error(A_rc_type);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ return 0;
++ } else
++ return 0;
++
++ case R_CREATE:
++ switch (target) {
++ /* Creating dir or (pseudo) file IN target dir! */
++ case T_DIR:
++ /* Mode of created item is ignored! */
++ /* check whether rc_select_type has been set for the
++ * calling process and enforce it if so. */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_select_type,
++ &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_rc_select_type);
++ return -RSBAC_EREADFAILED;
++ }
++ if (i_attr_val1.rc_select_type != RC_type_use_fd) {
++ i_attr_val2.rc_type_fd = i_attr_val1.rc_select_type;
++ /* rc_select_type is single-use, so reset it
++ * to its default value first; the value to
++ * be set has already been saved above. */
++ i_attr_val1.rc_select_type = RC_type_use_fd;
++ if ((err = rsbac_set_attr(SW_RC, T_PROCESS,
++ i_tid, A_rc_select_type,
++ i_attr_val1)))
++ {
++ rsbac_printk("rsbac_adf_set_attr_rc(): unable to reset rc_select_type to default value!\n");
++ }
++ if ((err = rsbac_set_attr(SW_RC, new_target,
++ new_tid, A_rc_type_fd,
++ i_attr_val2)))
++ {
++ rsbac_pr_set_error(A_rc_type_fd);
++ return -RSBAC_EWRITEFAILED;
++ }
++ return 0;
++
++ }
++ /* get rc_role from process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return -RSBAC_EREADFAILED;
++ }
++ /* get def_fd_create_type of role */
++ /* First get target dir's efftype */
++ if ((err = rsbac_get_attr(SW_RC,
++ target,
++ tid,
++ A_rc_type_fd,
++ &i_attr_val2, TRUE))) {
++ rsbac_pr_get_error(A_rc_type_fd);
++ return -RSBAC_EREADFAILED;
++ }
++ i_rc_tid.role = i_attr_val1.rc_role;
++ switch(new_target) {
++ case T_UNIXSOCK:
++ i_rc_subtid.type = i_attr_val2.rc_type;
++ if ((err = rsbac_rc_get_item(0,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_subtid,
++ RI_def_unixsock_create_type,
++ &i_rc_item_val1,
++ NULL))) {
++ rsbac_rc_pr_get_error
++ (RI_def_unixsock_create_type);
++ return -RSBAC_EREADFAILED;
++ }
++ if(i_rc_item_val1.type_id != RC_type_use_fd)
++ break;
++ /* fall through */
++ default:
++ i_rc_subtid.type = i_attr_val2.rc_type;
++ if ((err = rsbac_rc_get_item(0, RT_ROLE, i_rc_tid, i_rc_subtid, RI_def_fd_ind_create_type, &i_rc_item_val1, NULL))) { /* No individual create type -> try global */
++ if ((err = rsbac_rc_get_item(0,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_subtid,
++ RI_def_fd_create_type,
++ &i_rc_item_val1,
++ NULL))) {
++ rsbac_rc_pr_get_error
++ (RI_def_fd_create_type);
++ return -RSBAC_EREADFAILED;
++ }
++ }
++ }
++ switch (i_rc_item_val1.type_id) {
++ case RC_type_no_create:
++ return -RSBAC_EDECISIONMISMATCH;
++ break;
++
++ case RC_type_use_new_role_def_create:
++ case RC_type_inherit_process:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_rc(): invalid type inherit_process or use_new_role_def_create in def_fd_create_type or def_unixsock_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return -RSBAC_EINVALIDVALUE;
++
++ case RC_type_inherit_parent:
++ default:
++ /* get type from new target */
++ if ((err = rsbac_get_attr(SW_RC, new_target,
++ new_tid,
++ A_rc_type_fd,
++ &i_attr_val1,
++ FALSE))) {
++ rsbac_pr_get_error(A_rc_type_fd);
++ return -RSBAC_EREADFAILED;
++ }
++ /* set it for new target, if different */
++ if (i_attr_val1.rc_type_fd !=
++ i_rc_item_val1.type_id) {
++ i_attr_val1.rc_type_fd =
++ i_rc_item_val1.type_id;
++ if ((err =
++ rsbac_set_attr(SW_RC, new_target,
++ new_tid,
++ A_rc_type_fd,
++ i_attr_val1)))
++ {
++ rsbac_pr_set_error(A_rc_type_fd);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ }
++ return 0;
++
++ case T_IPC:
++ /* get rc_role from process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return -RSBAC_EREADFAILED;
++ }
++ /* get def_ipc_create_type of role */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ if ((err = rsbac_rc_get_item(0,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_tid,
++ RI_def_ipc_create_type,
++ &i_rc_item_val1,
++ NULL))) {
++ rsbac_rc_pr_get_error
++ (RI_def_ipc_create_type);
++ return -RSBAC_EREADFAILED;
++ }
++ switch (i_rc_item_val1.type_id) {
++ case RC_type_no_create:
++ return -RSBAC_EDECISIONMISMATCH;
++ break;
++
++ case RC_type_use_new_role_def_create:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_rc(): invalid type use_new_role_def_create in def_ipc_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return -RSBAC_EINVALIDVALUE;
++
++ case RC_type_inherit_parent:
++ case RC_type_inherit_process:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_rc(): invalid type inherit_parent in def_ipc_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return -RSBAC_EINVALIDVALUE;
++
++ default:
++ /* set rc_type for ipc target */
++ i_attr_val1.rc_type =
++ i_rc_item_val1.type_id;
++ /* get type from target */
++ if ((err = rsbac_get_attr(SW_RC,
++ target,
++ tid,
++ A_rc_type,
++ &i_attr_val2,
++ FALSE))) {
++ rsbac_pr_get_error(A_rc_type);
++ return -RSBAC_EREADFAILED;
++ }
++ /* set it for new target, if different */
++ if (i_attr_val1.rc_type !=
++ i_attr_val2.rc_type) {
++ if ((err =
++ rsbac_set_attr(SW_RC, target,
++ tid, A_rc_type,
++ i_attr_val1)))
++ {
++ rsbac_pr_set_error
++ (A_rc_type);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ }
++ return 0;
++
++ case T_USER:
++ /* get rc_role from process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return -RSBAC_EREADFAILED;
++ }
++ /* get def_user_create_type of role */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ if ((err = rsbac_rc_get_item(0,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_tid,
++ RI_def_user_create_type,
++ &i_rc_item_val1,
++ NULL))) {
++ rsbac_rc_pr_get_error
++ (RI_def_user_create_type);
++ return -RSBAC_EREADFAILED;
++ }
++ switch (i_rc_item_val1.type_id) {
++ case RC_type_no_create:
++ rsbac_pr_debug(adf_rc, "pid %u (%.15s), owner %u, rc_role %u, def_user_create_type no_create, request CREATE -> NOT_GRANTED!\n",
++ pid_nr(caller_pid), current->comm,
++ current_uid(),
++ i_attr_val1.rc_role);
++ return -RSBAC_EDECISIONMISMATCH;
++
++ case RC_type_use_new_role_def_create:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_rc(): invalid type use_new_role_def_create in def_user_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return -RSBAC_EINVALIDVALUE;
++
++ case RC_type_inherit_parent:
++ case RC_type_inherit_process:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_rc(): invalid type inherit_parent in def_user_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return -RSBAC_EINVALIDVALUE;
++
++ default:
++ /* set rc_type for user target */
++ i_attr_val1.rc_type =
++ i_rc_item_val1.type_id;
++ /* get type from target */
++ if ((err = rsbac_get_attr(SW_RC,
++ target,
++ tid,
++ A_rc_type,
++ &i_attr_val2,
++ TRUE))) {
++ rsbac_pr_get_error(A_rc_type);
++ return -RSBAC_EREADFAILED;
++ }
++ /* set it for new target, if different */
++ if (i_attr_val1.rc_type !=
++ i_attr_val2.rc_type) {
++ if ((err =
++ rsbac_set_attr(SW_RC, target,
++ tid, A_rc_type,
++ i_attr_val1)))
++ {
++ rsbac_pr_set_error
++ (A_rc_type);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ }
++ return 0;
++
++ case T_GROUP:
++ /* get rc_role from process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1, TRUE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return -RSBAC_EREADFAILED;
++ }
++ /* get def_group_create_type of role */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ if ((err = rsbac_rc_get_item(0,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_tid,
++ RI_def_group_create_type,
++ &i_rc_item_val1,
++ NULL))) {
++ rsbac_rc_pr_get_error
++ (RI_def_group_create_type);
++ return -RSBAC_EREADFAILED;
++ }
++ switch (i_rc_item_val1.type_id) {
++ case RC_type_no_create:
++ rsbac_pr_debug(adf_rc, "pid %u (%.15s), owner %u, rc_role %u, def_group_create_type no_create, request CREATE -> NOT_GRANTED!\n",
++ pid_nr(caller_pid), current->comm,
++ current_uid(),
++ i_attr_val1.rc_role);
++ return -RSBAC_EDECISIONMISMATCH;
++
++ case RC_type_use_new_role_def_create:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_rc(): invalid type use_new_role_def_create in def_group_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return -RSBAC_EINVALIDVALUE;
++
++ case RC_type_inherit_parent:
++ case RC_type_inherit_process:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_rc(): invalid type inherit_parent in def_group_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return -RSBAC_EINVALIDVALUE;
++
++ default:
++ /* set rc_type for group target */
++ i_attr_val1.rc_type =
++ i_rc_item_val1.type_id;
++ /* get type from target */
++ if ((err = rsbac_get_attr(SW_RC,
++ target,
++ tid,
++ A_rc_type,
++ &i_attr_val2,
++ TRUE))) {
++ rsbac_pr_get_error(A_rc_type);
++ return -RSBAC_EREADFAILED;
++ }
++ /* set it for new target, if different */
++ if (i_attr_val1.rc_type !=
++ i_attr_val2.rc_type) {
++ if ((err =
++ rsbac_set_attr(SW_RC, target,
++ tid, A_rc_type,
++ i_attr_val1)))
++ {
++ rsbac_pr_set_error
++ (A_rc_type);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ }
++ return 0;
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++
++ case R_EXECUTE:
++ switch (target) {
++ case T_FILE:
++ /* get rc_force_role from target file */
++ if ((err = rsbac_get_attr(SW_RC, T_FILE,
++ tid,
++ A_rc_force_role,
++ &i_attr_val1, TRUE))) {
++ rsbac_pr_get_error(A_rc_force_role);
++ return -RSBAC_EREADFAILED;
++ }
++ /* check rc_force_role, warn, if unusable */
++ if ((i_attr_val1.rc_force_role > RC_role_max_value)
++ && (i_attr_val1.rc_force_role <
++ RC_role_min_special)
++ ) {
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_rc(): rc_force_role %u of file %u on device %02u:%02u is higher than MAX_ROLE %u, setting forced role of process %u to default value %u!\n",
++ i_attr_val1.rc_force_role,
++ tid.file.inode,
++ MAJOR(tid.file.device),
++ MINOR(tid.file.device),
++ RC_role_max_value, pid_nr(caller_pid),
++ RC_default_root_dir_force_role);
++ i_attr_val1.rc_force_role =
++ RC_default_root_dir_force_role;
++ }
++ /* set rc_force_role for this process to keep track of it later */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_set_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_force_role,
++ i_attr_val1))) {
++ rsbac_pr_set_error(A_rc_force_role);
++ return -RSBAC_EWRITEFAILED;
++ }
++ /* get rc_initial_role from target file */
++ if ((err = rsbac_get_attr(SW_RC, T_FILE,
++ tid,
++ A_rc_initial_role,
++ &i_attr_val2, TRUE))) {
++ rsbac_pr_get_error(A_rc_initial_role);
++ return -RSBAC_EREADFAILED;
++ }
++ /* check rc_initial_role, warn, if unusable */
++ if ((i_attr_val2.rc_initial_role >
++ RC_role_max_value)
++ && (i_attr_val2.rc_initial_role !=
++ RC_role_use_force_role)
++ ) {
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_rc(): rc_initial_role %u of file %u on device %02u:%02u is higher than MAX_ROLE %u, setting initial role of process %u to default value %u!\n",
++ i_attr_val2.rc_initial_role,
++ tid.file.inode,
++ MAJOR(tid.file.device),
++ MINOR(tid.file.device),
++ RC_role_max_value, pid_nr(caller_pid),
++ RC_default_root_dir_initial_role);
++ i_attr_val2.rc_initial_role =
++ RC_default_root_dir_initial_role;
++ }
++ if (i_attr_val2.rc_initial_role ==
++ RC_role_use_force_role) {
++ switch (i_attr_val1.rc_force_role) {
++ case RC_role_inherit_user:
++ /* get rc_def_role from process owner */
++ i_tid.user = owner;
++ if ((err =
++ rsbac_get_attr(SW_RC, T_USER,
++ i_tid,
++ A_rc_def_role,
++ &i_attr_val1,
++ TRUE))) {
++ rsbac_pr_get_error
++ (A_rc_def_role);
++ return -RSBAC_EREADFAILED;
++ }
++ /* set it for this process */
++ i_tid.process = caller_pid;
++ if ((err =
++ rsbac_set_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ i_attr_val1)))
++ {
++ rsbac_pr_set_error
++ (A_rc_role);
++ return -RSBAC_EWRITEFAILED;
++ }
++ break;
++
++ case RC_role_inherit_parent:
++ case RC_role_inherit_process:
++ case RC_role_inherit_up_mixed:
++ /* keep current role */
++ break;
++
++ default:
++ /* set forced role for this process */
++ i_tid.process = caller_pid;
++ if ((err =
++ rsbac_set_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ i_attr_val1)))
++ {
++ rsbac_pr_set_error
++ (A_rc_role);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ } else { /* use initial_role */
++
++ /* set initial role for this process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_set_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ i_attr_val2))) {
++ rsbac_pr_set_error
++ (A_rc_role);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ /* Get role of process. */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return -RSBAC_EREADFAILED;
++ }
++ /* get def_process_execute_type of role */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ if ((err = rsbac_rc_get_item(0,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_tid,
++ RI_def_process_execute_type,
++ &i_rc_item_val1,
++ NULL))) {
++ rsbac_rc_pr_get_error
++ (RI_def_process_execute_type);
++ return -RSBAC_EREADFAILED;
++ }
++ switch (i_rc_item_val1.type_id) {
++ case RC_type_no_create:
++ case RC_type_use_new_role_def_create:
++ /* unusable value - cannot derive a process type: complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_rc(): invalid type in def_process_execute_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return -RSBAC_EINVALIDVALUE;
++ case RC_type_inherit_parent:
++ case RC_type_inherit_process:
++ break;
++ case RC_type_no_execute:
++ return -RSBAC_EDECISIONMISMATCH;
++ default:
++ /* set rc_type for process */
++ i_attr_val1.rc_type =
++ i_rc_item_val1.type_id;
++ i_tid.process = caller_pid;
++ if ((err =
++ rsbac_set_attr(SW_RC, T_PROCESS, i_tid,
++ A_rc_type,
++ i_attr_val1))) {
++ rsbac_pr_set_error(A_rc_type);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ /* type and role are set - ready. */
++ return 0;
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++ case R_BIND:
++ switch (target) {
++ case T_IPC:
++ /* get rc_role from process */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return -RSBAC_EREADFAILED;
++ }
++ /* get def_ipc_create_type of role */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ if ((err = rsbac_rc_get_item(0,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_tid,
++ RI_def_ipc_create_type,
++ &i_rc_item_val1,
++ NULL))) {
++ rsbac_rc_pr_get_error
++ (RI_def_ipc_create_type);
++ return -RSBAC_EREADFAILED;
++ }
++ switch (i_rc_item_val1.type_id) {
++ case RC_type_no_create:
++ return -RSBAC_EDECISIONMISMATCH;
++ break;
++
++ case RC_type_use_new_role_def_create:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_rc(): invalid type use_new_role_def_create in def_ipc_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return -RSBAC_EINVALIDVALUE;
++
++ case RC_type_inherit_parent:
++ case RC_type_inherit_process:
++ /* error - complain and return error */
++ rsbac_printk(KERN_WARNING "rsbac_adf_set_attr_rc(): invalid type inherit_parent in def_ipc_create_type of role %i!\n",
++ i_attr_val1.rc_role);
++ return -RSBAC_EINVALIDVALUE;
++
++ default:
++ /* set rc_type for ipc target */
++ i_attr_val1.rc_type =
++ i_rc_item_val1.type_id;
++ /* get type from target */
++ if ((err = rsbac_get_attr(SW_RC,
++ target,
++ tid,
++ A_rc_type,
++ &i_attr_val2,
++ FALSE))) {
++ rsbac_pr_get_error(A_rc_type);
++ return -RSBAC_EREADFAILED;
++ }
++ /* set it for new target, if different */
++ if (i_attr_val1.rc_type !=
++ i_attr_val2.rc_type) {
++ if ((err =
++ rsbac_set_attr(SW_RC, target,
++ tid, A_rc_type,
++ i_attr_val1)))
++ {
++ rsbac_pr_set_error
++ (A_rc_type);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ }
++ return 0;
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++
++ case R_CONNECT:
++ switch (target) {
++ case T_IPC:
++ if (new_target == T_IPC) {
++ /* get type from old target */
++ i_tid.process = caller_pid;
++ if ((err = rsbac_get_attr(SW_RC, T_IPC,
++ tid,
++ A_rc_type,
++ &i_attr_val1, FALSE))) {
++ rsbac_pr_get_error(A_rc_role);
++ return -RSBAC_EREADFAILED;
++ }
++ /* set rc_type for new ipc target, if not 0 */
++ if (i_attr_val1.rc_type) {
++ if ((err = rsbac_set_attr(SW_RC, T_IPC,
++ new_tid, A_rc_type,
++ i_attr_val1))) {
++ rsbac_pr_set_error(A_rc_type);
++ return -RSBAC_EWRITEFAILED;
++ }
++ }
++ }
++ return 0;
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++
++ default:
++ return 0;
++ }
++
++ return 0;
++}
++
++#ifdef CONFIG_RSBAC_SECDEL
++inline rsbac_boolean_t rsbac_need_overwrite_rc(struct dentry * dentry_p)
++{
++ int err = 0;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_rc_target_id_t i_rc_tid;
++ union rsbac_rc_item_value_t i_rc_item_val1;
++
++ if (!dentry_p || !dentry_p->d_inode)
++ return FALSE;
++
++ i_tid.file.device = dentry_p->d_sb->s_dev;
++ i_tid.file.inode = dentry_p->d_inode->i_ino;
++ i_tid.file.dentry_p = dentry_p;
++ /* get target's rc_type_fd */
++ if (rsbac_get_attr(SW_RC, T_FILE,
++ i_tid, A_rc_type_fd, &i_attr_val1, TRUE)) {
++ rsbac_pr_get_error(A_rc_type_fd);
++ return FALSE;
++ }
++ /* get type_fd_need_secdel of target's rc_type_fd */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ if ((err = rsbac_rc_get_item(0,
++ RT_TYPE,
++ i_rc_tid,
++ i_rc_tid,
++ RI_type_fd_need_secdel,
++ &i_rc_item_val1, NULL))) {
++ rsbac_rc_pr_get_error(RI_type_fd_need_secdel);
++ return FALSE;
++ }
++
++ /* return need_overwrite */
++ return i_rc_item_val1.need_secdel;
++}
++#endif
+diff --git a/rsbac/adf/rc/rc_syscalls.c b/rsbac/adf/rc/rc_syscalls.c
+new file mode 100644
+index 0000000..71d7ab2
+--- /dev/null
++++ b/rsbac/adf/rc/rc_syscalls.c
+@@ -0,0 +1,1734 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - Role Compatibility */
++/* File: rsbac/adf/rc/syscalls.c */
++/* */
++/* Author and (c) 1999-2009: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 29/Jan/2009 */
++/*************************************************** */
++
++#include <linux/string.h>
++#include <rsbac/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/rc.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/error.h>
++#include <rsbac/debug.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++#include <rsbac/rc_getname.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/um.h>
++
++/************************************************* */
++/* Global Variables */
++/************************************************* */
++
++/************************************************* */
++/* Declarations */
++/************************************************* */
++
++#if !defined(CONFIG_RSBAC_MAINT)
++/* from rsbac/adf/rc/main.c */
++int rsbac_rc_test_role_admin(rsbac_boolean_t modify);
++
++int rsbac_rc_test_admin_roles(rsbac_rc_role_id_t t_role, rsbac_boolean_t modify);
++
++enum rsbac_adf_req_ret_t
++ rsbac_rc_check_type_comp(enum rsbac_target_t target,
++ rsbac_rc_type_id_t type,
++ enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid);
++#endif
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++/************************************************* */
++/* Externally visible functions */
++/************************************************* */
++
++/* Here we only check access rights and pass on to rc_data_structures */
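++/* The source role must be in the caller's admin_roles or the caller must be a */
++/* role_admin; overwriting an already existing target role additionally needs */
++/* role_admin. With (individual) softmode enabled, denials are only logged. */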
++int rsbac_rc_sys_copy_role(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_rc_role_id_t from_role,
++ rsbac_rc_role_id_t to_role)
++ {
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_RC
++ if(rsbac_switch_rc)
++#endif
++ {
++ int err;
++ /* source role must be in admin roles or caller must be role_admin */
++ if ( (err=rsbac_rc_test_admin_roles(from_role, TRUE))
++ && rsbac_rc_test_role_admin(TRUE)
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_copy_role(): copying of role %u denied for pid %u, user %u - not in admin_roles!\n",
++ from_role,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ /* only role_admins may copy to existing targets */
++ if ( rsbac_rc_role_exists(ta_number, to_role)
++ && rsbac_rc_test_role_admin(TRUE)
++ )
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_copy_role(): overwriting of existing role %u denied for pid %u, user %u - no role_admin!\n",
++ to_role,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return -EPERM;
++ }
++ }
++#endif /* !MAINT */
++
++ /* pass on */
++ return(rsbac_rc_copy_role(ta_number, from_role, to_role));
++ }
++
++/* Here we only check access rights and pass on to rc_data_structures */
++int rsbac_rc_sys_copy_type (
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ rsbac_rc_type_id_t from_type,
++ rsbac_rc_type_id_t to_type)
++ {
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_RC
++ if(rsbac_switch_rc)
++#endif
++ {
++ int err;
++
++ switch(target)
++ {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ target = T_FD;
++ break;
++ case T_FD:
++ case T_DEV:
++ case T_USER:
++ case T_PROCESS:
++ case T_IPC:
++ case T_GROUP:
++ case T_NETDEV:
++ case T_NETTEMP:
++ case T_NETOBJ:
++ break;
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++ /* need ADMIN right to source type or caller must be role_admin */
++ if( (rsbac_rc_check_type_comp(target, from_type, RCR_ADMIN, 0) != GRANTED)
++ && (err=rsbac_rc_test_role_admin(FALSE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_copy_type(): copying of %s type %u denied for pid %u, user %u - not in admin_roles!\n",
++ get_target_name_only(tmp, target),
++ from_type,
++ current->pid,
++ user);
++ rsbac_kfree(tmp);
++ }
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ /* only role_admins may copy to existing targets */
++ if ( rsbac_rc_type_exists(ta_number, target, to_type)
++ && rsbac_rc_test_role_admin(TRUE)
++ )
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_copy_type(): overwriting of existing %s type %u denied for pid %u, user %u - no role_admin!\n",
++ get_target_name_only(tmp, target),
++ to_type,
++ current->pid,
++ user);
++ rsbac_kfree(tmp);
++ }
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return -EPERM;
++ }
++ }
++#endif /* !MAINT */
++
++ /* pass on */
++ return(rsbac_rc_copy_type(ta_number, target, from_type, to_type));
++ }
++
++/* Getting values */
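++/* Names may always be read. type_fd_need_secdel needs the ADMIN right on the */
++/* FD type, all other items need the role in the caller's admin_roles; the */
++/* role_admin check serves as fallback in both restricted cases. */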
++int rsbac_rc_sys_get_item(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ union rsbac_rc_target_id_t tid,
++ union rsbac_rc_target_id_t subtid,
++ enum rsbac_rc_item_t item,
++ union rsbac_rc_item_value_t * value_p,
++ rsbac_time_t * ttl_p)
++ {
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_RC
++ if(rsbac_switch_rc)
++#endif
++ {
++ int err;
++
++ switch(item)
++ {
++ case RI_name:
++ case RI_type_fd_name:
++ case RI_type_dev_name:
++ case RI_type_ipc_name:
++ case RI_type_user_name:
++ case RI_type_process_name:
++ case RI_type_scd_name:
++ case RI_type_group_name:
++ case RI_type_netdev_name:
++ case RI_type_nettemp_name:
++ case RI_type_netobj_name:
++ /* getting names is always allowed */
++ break;
++
++ case RI_type_fd_need_secdel:
++ if(target != RT_TYPE)
++ return -RSBAC_EINVALIDTARGET;
++ if( (err=rsbac_rc_check_type_comp(T_FILE, tid.type, RCR_ADMIN, 0))
++ && (err=rsbac_rc_test_role_admin(FALSE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_get_item(): reading fd_need_secdel of type %u denied for pid %u, user %u - no ADMIN right!\n",
++ tid.type,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ break;
++
++ default:
++ if(target != RT_ROLE)
++ return -RSBAC_EINVALIDATTR;
++ /* test admin_roles or admin_type of process' role / no modify */
++ if ( (err=rsbac_rc_test_admin_roles(tid.role, FALSE))
++ && (err=rsbac_rc_test_role_admin(FALSE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_get_item(): getting item of role %u denied for pid %u, user %u - not in admin_roles!\n",
++ tid.role,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ }
++ }
++#endif /* !MAINT */
++
++ /* pass on */
++ return(rsbac_rc_get_item(ta_number,target, tid, subtid, item, value_p, ttl_p));
++ }
++
++/* Setting values */
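++/* Type items need the ADMIN right on the matching target type, role items need */
++/* admin_roles membership and/or role_admin depending on the item. Changing a */
++/* def_*_create_type also needs ASSIGN on the new type, and type_comp_* changes */
++/* need ACCESS_CONTROL resp. SUPERVISOR for the rights bits that are modified. */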
++int rsbac_rc_sys_set_item(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ union rsbac_rc_target_id_t tid,
++ union rsbac_rc_target_id_t subtid,
++ enum rsbac_rc_item_t item,
++ union rsbac_rc_item_value_t value,
++ rsbac_time_t ttl)
++ {
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_RC
++ if(rsbac_switch_rc)
++#endif
++ {
++ int err;
++
++ switch(item)
++ {
++ /* type targets */
++ case RI_type_fd_name:
++ case RI_type_fd_need_secdel:
++ case RI_type_fd_remove:
++ if(target != RT_TYPE)
++ return -RSBAC_EINVALIDTARGET;
++ if( (rsbac_rc_check_type_comp(T_FILE, tid.type, RCR_ADMIN, 0) == NOT_GRANTED)
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++ char tmp[80];
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing %s of FD type %u denied for pid %u, user %u - no ADMIN right!\n",
++ get_rc_item_name(tmp, item),
++ tid.type,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ break;
++ case RI_type_dev_name:
++ case RI_type_dev_remove:
++ if(target != RT_TYPE)
++ return -RSBAC_EINVALIDTARGET;
++ if( (rsbac_rc_check_type_comp(T_DEV, tid.type, RCR_ADMIN, 0) == NOT_GRANTED)
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing name or removing of DEV type %u denied for pid %u, user %u - no ADMIN right!\n",
++ tid.type,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ break;
++ case RI_type_ipc_name:
++ case RI_type_ipc_remove:
++ if(target != RT_TYPE)
++ return -RSBAC_EINVALIDTARGET;
++ if( (rsbac_rc_check_type_comp(T_IPC, tid.type, RCR_ADMIN, 0) == NOT_GRANTED)
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing name or removing of IPC type %u denied for pid %u, user %u - no ADMIN right!\n",
++ tid.type,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ break;
++ case RI_type_user_name:
++ case RI_type_user_remove:
++ if(target != RT_TYPE)
++ return -RSBAC_EINVALIDTARGET;
++ if( (rsbac_rc_check_type_comp(T_USER, tid.type, RCR_ADMIN, 0) == NOT_GRANTED)
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing name or removing of USER type %u denied for pid %u, user %u - no ADMIN right!\n",
++ tid.type,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ break;
++ case RI_type_process_name:
++ case RI_type_process_remove:
++ if(target != RT_TYPE)
++ return -RSBAC_EINVALIDTARGET;
++ if( (rsbac_rc_check_type_comp(T_PROCESS, tid.type, RCR_ADMIN, 0) == NOT_GRANTED)
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing name or removing of process type %u denied for pid %u, user %u - no ADMIN right!\n",
++ tid.type,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ break;
++ case RI_type_scd_name:
++ if(target != RT_TYPE)
++ return -RSBAC_EINVALIDTARGET;
++ if( (rsbac_rc_check_type_comp(T_SCD, tid.type, RCR_ADMIN, 0) == NOT_GRANTED)
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing name or removing of SCD type %u denied for pid %u, user %u - no ADMIN right!\n",
++ tid.type,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ break;
++ case RI_type_group_name:
++ case RI_type_group_remove:
++ if(target != RT_TYPE)
++ return -RSBAC_EINVALIDTARGET;
++ if( (rsbac_rc_check_type_comp(T_GROUP, tid.type, RCR_ADMIN, 0) == NOT_GRANTED)
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing name or removing of GROUP type %u denied for pid %u, user %u - no ADMIN right!\n",
++ tid.type,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ break;
++ case RI_type_netdev_name:
++ case RI_type_netdev_remove:
++ if(target != RT_TYPE)
++ return -RSBAC_EINVALIDTARGET;
++ if( (rsbac_rc_check_type_comp(T_NETDEV, tid.type, RCR_ADMIN, 0) == NOT_GRANTED)
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing name or removing of NETDEV type %u denied for pid %u, user %u - no ADMIN right!\n",
++ tid.type,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ break;
++ case RI_type_nettemp_name:
++ case RI_type_nettemp_remove:
++ if(target != RT_TYPE)
++ return -RSBAC_EINVALIDTARGET;
++ if( (rsbac_rc_check_type_comp(T_NETTEMP, tid.type, RCR_ADMIN, 0) == NOT_GRANTED)
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing name or removing of NETTEMP type %u denied for pid %u, user %u - no ADMIN right!\n",
++ tid.type,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ break;
++ case RI_type_netobj_name:
++ case RI_type_netobj_remove:
++ if(target != RT_TYPE)
++ return -RSBAC_EINVALIDTARGET;
++ if( (rsbac_rc_check_type_comp(T_NETOBJ, tid.type, RCR_ADMIN, 0) == NOT_GRANTED)
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing name or removing of NETOBJ type %u denied for pid %u, user %u - no ADMIN right!\n",
++ tid.type,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ break;
++
++ /* roles only from here */
++ case RI_role_comp:
++ /* need admin for this role, assign for changed compatible roles */
++ {
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ if(target != RT_ROLE)
++ return -RSBAC_EINVALIDATTR;
++ if(!rsbac_rc_test_role_admin(TRUE))
++ break;
++ /* test admin_role of process / modify */
++ if((err=rsbac_rc_test_admin_roles(tid.role, TRUE)))
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing role_comp of role %u denied for pid %u, user %u - not in admin_roles!\n",
++ tid.role,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ /* now check assign for changed comp role. */
++ /* get rc_role of process */
++ i_tid.process = task_pid(current);
++ if ((err=rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1,
++ TRUE)))
++ {
++ rsbac_pr_get_error(A_rc_role);
++ return -RSBAC_EREADFAILED;
++ }
++ /* check assign_roles of role */
++ if (!rsbac_rc_check_comp(i_attr_val1.rc_role,
++ tid,
++ RI_assign_roles,
++ R_NONE))
++ {
++ rsbac_uid_t user;
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing role_comp for role %u denied for user %u, role %u - not in assign_roles!\n",
++ tid.role,
++ user,
++ i_attr_val1.rc_role);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return -EPERM;
++ }
++ }
++ break;
++
++ case RI_admin_type:
++ case RI_admin_roles:
++ case RI_assign_roles:
++ case RI_boot_role:
++ case RI_req_reauth:
++ /* admin_type role_admin */
++ if((err=rsbac_rc_test_role_admin(TRUE)))
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++ char tmp[80];
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing %s of role %u denied for pid %u, user %u - no Role Admin!\n",
++ get_rc_item_name(tmp, item),
++ tid.role,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ break;
++ case RI_name:
++ /* admin for this role */
++ /* test admin_role of process / modify */
++ if( (err=rsbac_rc_test_admin_roles(tid.role, TRUE))
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing name of role %u denied for pid %u, user %u - not in admin_roles!\n",
++ tid.role,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ break;
++
++ case RI_remove_role:
++ /* test admin_role of process role / modify */
++ if((err=rsbac_rc_test_role_admin(TRUE)))
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): removing of role %u denied for pid %u, user %u - not in admin_roles!\n",
++ tid.role,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ break;
++
++ case RI_def_fd_create_type:
++ case RI_def_fd_ind_create_type:
++ /* admin for this role and assign for target type */
++ /* test admin_role of process / modify */
++ if( (err=rsbac_rc_test_admin_roles(tid.role, TRUE))
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing def_fd_[ind_]create_type of role %u denied for pid %u, user %u - not in admin_roles!\n",
++ tid.role,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ else
++ {
++ enum rsbac_adf_req_ret_t result;
++
++ result = rsbac_rc_check_type_comp(T_FILE, value.type_id, RCR_ASSIGN, 0);
++ if( ( (result == NOT_GRANTED)
++ || (result == UNDEFINED)
++ )
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing def_fd_[ind_]create_type for role %u to %u denied for user %u - no ASSIGN right for type!\n",
++ tid.role,
++ value.type_id,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return -EPERM;
++ }
++ }
++ break;
++
++ case RI_def_fd_ind_create_type_remove:
++ /* test admin_role of process / modify */
++ if( (err=rsbac_rc_test_admin_roles(tid.role, TRUE))
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing def_fd_[ind_]create_type of role %u denied for pid %u, user %u - not in admin_roles!\n",
++ tid.role,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ break;
++
++ case RI_def_user_create_type:
++ /* admin for this role and assign for target type */
++ /* test admin_role of process / modify */
++ if( (err=rsbac_rc_test_admin_roles(tid.role, TRUE))
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing def_user_create_type of role %u denied for pid %u, user %u - not in admin_roles!\n",
++ tid.role,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ else
++ {
++ enum rsbac_adf_req_ret_t result;
++
++ result = rsbac_rc_check_type_comp(T_USER, value.type_id, RCR_ASSIGN, 0);
++ if( ( (result == NOT_GRANTED)
++ || (result == UNDEFINED)
++ )
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing def_user_create_type for role %u to %u denied for user %u - no ASSIGN right for type!\n",
++ tid.role,
++ value.type_id,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return -EPERM;
++ }
++ }
++ break;
++
++ case RI_def_process_create_type:
++ case RI_def_process_chown_type:
++ case RI_def_process_execute_type:
++ /* admin for this role and assign for target type */
++ /* test admin_role of process / modify */
++ if( (err=rsbac_rc_test_admin_roles(tid.role, TRUE))
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++ char tmp[80];
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing %s of role %u denied for pid %u, user %u - not in admin_roles!\n",
++ get_rc_item_name(tmp, item),
++ tid.role,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ else
++ {
++ enum rsbac_adf_req_ret_t result;
++
++ result = rsbac_rc_check_type_comp(T_PROCESS, value.type_id, RCR_ASSIGN, 0);
++ if( ( (result == NOT_GRANTED)
++ || (result == UNDEFINED)
++ )
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing def_process_*_type for role %u to %u denied for user %u - no ASSIGN right for type!\n",
++ tid.role,
++ value.type_id,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return -EPERM;
++ }
++ }
++ break;
++ case RI_def_ipc_create_type:
++ /* admin for this role and assign for target type */
++ /* test admin_role of process / modify */
++ if( (err=rsbac_rc_test_admin_roles(tid.role, TRUE))
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing def_ipc_create_type of role %u denied for pid %u, user %u - not in admin_roles!\n",
++ tid.role,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ else
++ {
++ enum rsbac_adf_req_ret_t result;
++
++ result = rsbac_rc_check_type_comp(T_IPC, value.type_id, RCR_ASSIGN, 0);
++ if( ( (result == NOT_GRANTED)
++ || (result == UNDEFINED)
++ )
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing def_ipc_create_type for role %u to %u denied for user %u - no ASSIGN right for type!\n",
++ tid.role,
++ value.type_id,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return -EPERM;
++ }
++ }
++ break;
++ case RI_def_group_create_type:
++ /* admin for this role and assign for target type */
++ /* test admin_role of process / modify */
++ if( (err=rsbac_rc_test_admin_roles(tid.role, TRUE))
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing def_group_create_type of role %u denied for pid %u, user %u - not in admin_roles!\n",
++ tid.role,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ else
++ {
++ enum rsbac_adf_req_ret_t result;
++
++ result = rsbac_rc_check_type_comp(T_GROUP, value.type_id, RCR_ASSIGN, 0);
++ if( ( (result == NOT_GRANTED)
++ || (result == UNDEFINED)
++ )
++ && (err=rsbac_rc_test_role_admin(TRUE))
++ )
++ {
++ rsbac_uid_t user;
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing def_group_create_type for role %u to %u denied for user %u - no ASSIGN right for type!\n",
++ tid.role,
++ value.type_id,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return -EPERM;
++ }
++ }
++ break;
++ case RI_def_unixsock_create_type:
++ /* admin for this role and assign for target type */
++ /* test admin_role of process / modify */
++ if ((err =
++ rsbac_rc_test_admin_roles(tid.role, TRUE))
++ && (err = rsbac_rc_test_role_admin(TRUE))
++ ) {
++ if (err == -EPERM) {
++ rsbac_uid_t user;
++
++ if (!rsbac_get_owner(&user)) {
++ rsbac_printk(KERN_INFO "rsbac_rc_sys_set_item(): changing def_unixsock_create_type of role %u denied for pid %u, user %u - not in admin_roles\n",
++ tid.role,
++ current->pid,
++ user);
++ }
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if (!rsbac_softmode
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++#endif
++ )
++#endif
++ return err;
++ } else
++ return err;
++ } else {
++ enum rsbac_adf_req_ret_t result;
++
++ result =
++ rsbac_rc_check_type_comp(T_UNIXSOCK,
++ value.type_id,
++ RCR_ASSIGN,
++ 0);
++ if (((result == NOT_GRANTED)
++ || (result == UNDEFINED)
++ )
++ && (err =
++ rsbac_rc_test_role_admin(TRUE))
++ ) {
++ rsbac_uid_t user;
++
++ if (!rsbac_get_owner(&user)) {
++ rsbac_printk(KERN_INFO "rsbac_rc_sys_set_item(): changing def_unixsock_create_type for role %u to %u denied for user %u - no ASSIGN right for type\n",
++ tid.role,
++ value.type_id,
++ user);
++ }
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if (!rsbac_softmode
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++#endif
++ )
++#endif
++ return -EPERM;
++ }
++ }
++ break;
++
++
++ case RI_type_comp_fd:
++ case RI_type_comp_dev:
++ case RI_type_comp_user:
++ case RI_type_comp_process:
++ case RI_type_comp_ipc:
++ case RI_type_comp_scd:
++ case RI_type_comp_group:
++ case RI_type_comp_netdev:
++ case RI_type_comp_nettemp:
++ case RI_type_comp_netobj:
++ {
++ union rsbac_rc_item_value_t old_value, my_value;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_rc_target_id_t i_rc_tid;
++
++ if(target != RT_ROLE)
++ return -RSBAC_EINVALIDATTR;
++ if(!rsbac_rc_test_role_admin(TRUE))
++ break;
++ /* test admin_role of process / modify */
++ if((err=rsbac_rc_test_admin_roles(tid.role, TRUE)))
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++ char tmp[80];
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing %s of role %u denied for pid %u, user %u - not in admin_roles!\n",
++ get_rc_item_name(tmp, item),
++ tid.role,
++ current->pid,
++ user);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ /* test caller's RCR_ACCESS_CONTROL for the type, if we change normal access */
++ /* and caller's RCR_SUPERVISOR for the type, if we change special rights */
++ /* first get old setting */
++ err = rsbac_rc_get_item(ta_number, target, tid, subtid, item, &old_value, NULL);
++ if(err)
++ return(err);
++
++ /* get rc_role of process */
++ i_tid.process = task_pid(current);
++ if ((err=rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1,
++ TRUE)))
++ {
++ rsbac_pr_get_error(A_rc_role);
++ return err;
++ }
++ /* get item of process role */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ if ((err=rsbac_rc_get_item(ta_number,
++ RT_ROLE,
++ i_rc_tid,
++ subtid,
++ item,
++ &my_value,
++ NULL)))
++ {
++ rsbac_rc_pr_get_error(item);
++ return err;
++ }
++
++ /* check planned changes for type */
++ if( /* Want to change normal rights to this type? Need RCR_ACCESS_CONTROL. */
++ ( ( (old_value.rights & RSBAC_ALL_REQUEST_VECTOR)
++ != (value.rights & RSBAC_ALL_REQUEST_VECTOR)
++ )
++ && (!(my_value.rights & RSBAC_RC_RIGHTS_VECTOR(RCR_ACCESS_CONTROL)))
++ )
++ ||
++ /* Want to change special rights to this type? Need RCR_SUPERVISOR. */
++ ( ( (old_value.rights & RSBAC_RC_SPECIAL_RIGHTS_VECTOR)
++ != (value.rights & RSBAC_RC_SPECIAL_RIGHTS_VECTOR)
++ )
++ && (!(my_value.rights & RSBAC_RC_RIGHTS_VECTOR(RCR_SUPERVISOR)))
++ )
++ )
++ {
++ /* check failed. Last resort: Classical admin_type. */
++ if((err=rsbac_rc_test_role_admin(TRUE)))
++ {
++ if(err == -EPERM)
++ {
++ rsbac_uid_t user;
++ char tmp[80];
++
++ if(!rsbac_get_owner(&user))
++ {
++ rsbac_printk(KERN_INFO
++ "rsbac_rc_sys_set_item(): changing %s of role %u denied for pid %u, user %u, role %u - insufficent rights!\n",
++ get_rc_item_name(tmp, item),
++ tid.role,
++ current->pid,
++ user,
++ i_attr_val1.rc_role);
++ }
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++ #endif
++ )
++ #endif
++ return err;
++ }
++ else
++ return err;
++ }
++ }
++ }
++ break;
++
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++ }
++#endif /* !MAINT */
++
++ /* pass on */
++ return(rsbac_rc_set_item(ta_number, target, tid, subtid, item, value, ttl));
++ }
++
++/* Set own role, if allowed ( = in role_comp vector of current role) */
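++/* The new role must be listed in role_comp of the caller's current role. If */
++/* the target role has req_reauth set (and UM is compiled in), the password is */
++/* copied from userspace and verified before A_rc_role is updated. */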
++int rsbac_rc_sys_change_role(rsbac_rc_role_id_t role, char __user * pass)
++{
++ int err;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_UM
++ union rsbac_rc_item_value_t i_rc_item_val1;
++ char *k_pass;
++#endif
++#endif
++
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SWITCH_RC
++ if (rsbac_switch_rc)
++#endif
++ {
++ union rsbac_rc_target_id_t i_rc_subtid;
++
++ i_tid.process = task_pid(current);
++ /* get rc_role of process */
++ if ((err = rsbac_get_attr(SW_RC,
++ T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1, TRUE))) {
++ rsbac_printk(KERN_WARNING "rsbac_rc_sys_change_role(): rsbac_get_attr() returned error %i\n",
++ err);
++ goto out;
++ }
++
++ /* check role_comp of role */
++ i_rc_subtid.role = role;
++ if (!rsbac_rc_check_comp(i_attr_val1.rc_role,
++ i_rc_subtid, RI_role_comp, 0)) {
++ rsbac_uid_t user;
++
++ if (!rsbac_get_owner(&user)) {
++ rsbac_printk(KERN_INFO "rsbac_rc_sys_change_role(): changing from role %u to %u denied for pid %u, user %u, role %u - roles not compatible\n",
++ i_attr_val1.rc_role,
++ role,
++ pid_nr(i_tid.process),
++ user, i_attr_val1.rc_role);
++ }
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if (!rsbac_softmode
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++#endif
++ )
++#endif
++ {
++ err = -EPERM;
++ goto out;
++ }
++ }
++#ifdef CONFIG_RSBAC_UM
++ /* need to make sure UM is compiled in and active
++ * XXX what to do about softmode here
++ */
++ if ((err = rsbac_rc_get_item(0, RT_ROLE, i_rc_subtid, i_rc_subtid,
++ RI_req_reauth,
++ &i_rc_item_val1, NULL))) {
++ rsbac_printk(KERN_WARNING "rsbac_rc_sys_change_role(): rsbac_rc_get_item() returned error %i\n",
++ err);
++ err = -EPERM;
++ goto out;
++ }
++ if (i_rc_item_val1.req_reauth) {
++ rsbac_uid_t user;
++
++ if (!pass) {
++ rsbac_printk(KERN_WARNING "rsbac_rc_sys_change_role(): password required for switching to role %u\n",
++ role);
++ err = -EPERM;
++ goto out;
++ }
++ k_pass = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (!k_pass) {
++ err = -RSBAC_ENOMEM;
++ goto out;
++ }
++ err =
++ rsbac_get_user(k_pass, pass, RSBAC_MAXNAMELEN);
++ if (err)
++ goto out_free;
++ k_pass[RSBAC_MAXNAMELEN - 1] = 0;
++ err = rsbac_get_owner(&user);
++ if (err) {
++ rsbac_printk(KERN_WARNING "rsbac_rc_sys_change_role(): rsbac_rc_get_item() returned error %i\n",
++ err);
++ goto out_free;
++ }
++ err = rsbac_um_check_pass(user, k_pass);
++ if (err) {
++ goto out_free;
++ }
++ }
++#endif
++
++ }
++#endif
++
++ /* OK, check passed. Set role. */
++ i_tid.process = task_pid(current);
++ i_attr_val1.rc_role = role;
++ if (rsbac_set_attr(SW_RC, T_PROCESS, i_tid, A_rc_role, i_attr_val1)) { /* failed! */
++ rsbac_printk(KERN_WARNING "rsbac_rc_sys_change_role(): rsbac_set_attr() returned error\n");
++ err = -RSBAC_EWRITEFAILED;
++ }
++ else
++ err = 0;
++
++#if !defined(CONFIG_RSBAC_MAINT)
++out:
++#endif
++ return err;
++
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_UM
++out_free:
++ memset(k_pass, 0, RSBAC_MAXNAMELEN);
++ rsbac_kfree(k_pass);
++ goto out;
++#endif
++#endif
++}
++
++/* Getting own effective rights */
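++/* Maps the target to its RC type attribute and type_comp_* item, then returns */
++/* the rights vector of the caller's current role for the target's RC type. */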
++int rsbac_rc_sys_get_eff_rights(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ rsbac_rc_request_vector_t * request_vector,
++ rsbac_time_t * ttl_p)
++ {
++ union rsbac_target_id_t i_tid;
++ enum rsbac_attribute_t i_attr = A_none;
++ union rsbac_attribute_value_t i_attr_val1;
++ union rsbac_attribute_value_t i_attr_val2;
++ int err;
++ enum rsbac_rc_item_t i_rc_item;
++ union rsbac_rc_target_id_t i_rc_tid;
++ union rsbac_rc_target_id_t i_rc_subtid;
++ union rsbac_rc_item_value_t i_rc_item_val1;
++
++ i_tid.process = task_pid(current);
++ /* get rc_role of process */
++ if ((err=rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_rc_sys_get_eff_rights(): rsbac_get_attr() returned error %i!\n",err);
++ return -RSBAC_EREADFAILED;
++ }
++
++ switch(target)
++ {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ i_attr = A_rc_type_fd;
++ i_rc_item = RI_type_comp_fd;
++ break;
++ case T_DEV:
++ i_attr = A_rc_type;
++ i_rc_item = RI_type_comp_dev;
++ break;
++ case T_IPC:
++ i_attr = A_rc_type;
++ i_rc_item = RI_type_comp_ipc;
++ break;
++ case T_PROCESS:
++ i_attr = A_rc_type;
++ i_rc_item = RI_type_comp_process;
++ break;
++ case T_SCD: /* special case! */
++ if(tid.scd >= RST_none)
++ return -RSBAC_EINVALIDTARGET;
++ i_rc_item = RI_type_comp_scd;
++ break;
++ case T_GROUP:
++ i_attr = A_rc_type;
++ i_rc_item = RI_type_comp_group;
++ break;
++ case T_NETDEV:
++ i_attr = A_rc_type;
++ i_rc_item = RI_type_comp_netdev;
++ break;
++ case T_NETTEMP:
++ i_attr = A_rc_type_nt;
++ i_rc_item = RI_type_comp_nettemp;
++ break;
++ case T_NETOBJ:
++ i_attr = A_rc_type;
++ i_rc_item = RI_type_comp_netobj;
++ break;
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++ /* get rc_type of target */
++ if(target == T_SCD)
++ {
++ i_attr_val2.rc_type = tid.scd;
++ }
++ else
++ {
++ if ((err=rsbac_get_attr(SW_RC,
++ target,
++ tid,
++ i_attr,
++ &i_attr_val2,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_rc_sys_get_eff_rights(): rsbac_get_attr() returned error %i!\n",err);
++ return -RSBAC_EREADFAILED;
++ }
++ }
++ /* get type_comp_xxx of role for type and target */
++ i_rc_tid.role = i_attr_val1.rc_role;
++ i_rc_subtid.type = i_attr_val2.rc_type;
++ if ((err=rsbac_rc_get_item(ta_number,
++ RT_ROLE,
++ i_rc_tid,
++ i_rc_subtid,
++ i_rc_item,
++ &i_rc_item_val1,
++ ttl_p)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_rc_sys_get_eff_rights(): rsbac_rc_get_item() returned error %i!\n",err);
++ return -RSBAC_EREADFAILED;
++ }
++ /* extract value */
++ *request_vector = i_rc_item_val1.rights;
++ /* Ready. */
++ return 0;
++ }
++
++int rsbac_rc_sys_get_current_role(rsbac_rc_role_id_t * role_p)
++ {
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++ int err;
++
++ /* get rc_role of process */
++ i_tid.process = task_pid(current);
++ if ((err=rsbac_get_attr(SW_RC, T_PROCESS,
++ i_tid,
++ A_rc_role,
++ &i_attr_val1,
++ TRUE)))
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_rc_sys_get_current_role(): rsbac_get_attr() returned error %i!\n",err);
++ return -RSBAC_EREADFAILED;
++ }
++ *role_p = i_attr_val1.rc_role;
++ /* Ready. */
++ return 0;
++ }
++
++int rsbac_rc_select_fd_create_type(rsbac_rc_type_id_t type)
++{
++
++ int res;
++
++ union rsbac_target_id_t tid;
++ union rsbac_attribute_value_t attr_val;
++
++ /* sanity checks */
++ if (type != RC_type_use_fd) {
++ if (!rsbac_rc_type_exists(0, T_FILE, type))
++ return -RSBAC_EINVALIDVALUE;
++#ifndef CONFIG_RSBAC_MAINT
++ if (!rsbac_rc_check_type_comp(T_FILE, type, RCR_SELECT, task_pid(current))) {
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RC]
++#endif
++ )
++#endif
++ return -EPERM;
++ }
++#endif
++ }
++
++ tid.process = task_pid(current);
++ attr_val.rc_select_type = type;
++ if ((res = rsbac_set_attr(SW_RC,
++ T_PROCESS,
++ tid,
++ A_rc_select_type,
++ attr_val))) {
++ rsbac_printk(KERN_WARNING "rsbac_rc_select_fd_create_type(): rsbac_set_attr() returned error %i\n", res);
++ return -EPERM;
++ }
++
++ return 0;
++}
++
++/* end of rsbac/adf/rc/rc_syscalls.c */
+diff --git a/rsbac/adf/reg/Makefile b/rsbac/adf/reg/Makefile
+new file mode 100644
+index 0000000..2e6fac0
+--- /dev/null
++++ b/rsbac/adf/reg/Makefile
+@@ -0,0 +1,13 @@
++#
++# File: rsbac/adf/reg/Makefile
++#
++# Makefile for the Linux rsbac REG / registration of decision modules
++#
++# Author and (c) 1999-2012 Amon Ott <ao@rsbac.org>
++#
++
++obj-y := reg_main.o
++ifeq ($(CONFIG_RSBAC_REG_SAMPLES),y)
++obj-m += reg_sample1.o reg_sample3.o kproc_hide.o modules_off.o
++endif
++
+diff --git a/rsbac/adf/reg/kproc_hide.c b/rsbac/adf/reg/kproc_hide.c
+new file mode 100644
+index 0000000..93d0328
+--- /dev/null
++++ b/rsbac/adf/reg/kproc_hide.c
+@@ -0,0 +1,121 @@
++/*
++ * RSBAC REG decision module kproc_hide. Hiding kernel processes.
++ *
++ * Author and (c) 2004 Michal Purzynski <albeiro@rsbac.org>
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/fs.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <rsbac/types.h>
++#include <rsbac/reg.h>
++#include <rsbac/adf.h>
++#include <rsbac/aci.h>
++#include <rsbac/getname.h>
++#include <rsbac/error.h>
++#include <rsbac/proc_fs.h>
++
++MODULE_AUTHOR("Michal Purzynski");
++MODULE_DESCRIPTION("RSBAC REG kproc_hide decision module");
++MODULE_LICENSE("GPL");
++
++static long handle = 9999992;
++
++/**** Helper Functions ****/
++
++/**********************************************************************
++Description: Checks whether a process is a kernel process.
++Parameters: PID of the process to check.
++Return value: 1 if it is a kernel process, 0 otherwise.
++**********************************************************************/
++
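++/* Kernel threads have no userspace address space, so a NULL mm identifies them. */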
++int is_kproc(struct pid *pid)
++{
++ struct task_struct *tid_task;
++
++ tid_task = pid_task(pid, PIDTYPE_PID);
++
++ /* treat a vanished task like a kernel thread to stay on the safe side */
++ if (tid_task == NULL || tid_task->mm == NULL)
++ return 1;
++ else
++ return 0;
++}
++
++/**** Decision Functions ****/
++
++static int request_func(enum rsbac_adf_request_t request,
++ rsbac_pid_t owner_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++{
++
++ switch (request) {
++ case R_GET_STATUS_DATA:
++ switch (target) {
++ case T_PROCESS:
++ if (is_kproc(tid.process))
++ return NOT_GRANTED;
++ default:
++ return DO_NOT_CARE;
++ }
++ default:
++ return DO_NOT_CARE;
++ }
++
++/*
++ if (request == R_GET_STATUS_DATA && target == T_PROCESS && is_kproc(tid.process))
++ return NOT_GRANTED;
++ else
++ return GRANTED;
++*/
++}
++
++/**** Init ****/
++
++int init_module(void)
++{
++ struct rsbac_reg_entry_t entry;
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module kproc_hide: Initializing.\n");
++
++ /* clearing registration entries */
++ memset(&entry, 0, sizeof(entry));
++
++ strcpy(entry.name, "RSBAC REG kproc_hide ADF module");
++ rsbac_printk(KERN_INFO "RSBAC REG decision module kproc_hide: REG Version: %u, Name: %s, Handle: %li\n",
++ RSBAC_REG_VERSION, entry.name, handle);
++
++ entry.handle = handle;
++ entry.request_func = request_func;
++ entry.switch_on = TRUE;
++ rsbac_printk(KERN_INFO "RSBAC REG decision module kproc_hide: Registering to ADF.\n");
++
++ if(rsbac_reg_register(RSBAC_REG_VERSION, entry) < 0) {
++ rsbac_printk(KERN_WARNING "RSBAC REG decision module sample 1: Registering failed. Unloading.\n");
++ return -ENOEXEC;
++ }
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module kproc_hide: Loaded.\n");
++
++ return 0;
++}
++
++void cleanup_module(void)
++{
++ rsbac_printk(KERN_INFO "RSBAC REG decision module kproc_hide: Unregistering.\n");
++
++ if(rsbac_reg_unregister(handle))
++ {
++ rsbac_printk(KERN_ERR "RSBAC REG decision module kproc_hide: Unregistering failed - beware of possible system failure!\n");
++ }
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module kproc_hide: Unloaded.\n");
++}
++
+diff --git a/rsbac/adf/reg/modules_off.c b/rsbac/adf/reg/modules_off.c
+new file mode 100644
+index 0000000..e19a488
+--- /dev/null
++++ b/rsbac/adf/reg/modules_off.c
+@@ -0,0 +1,90 @@
++/*
++ * RSBAC REG decision module modules_off. Disabling kernel module support.
++ *
++ * Author and (c) 2004 Michal Purzynski <michal@rsbac.org>
++ * Adjusted 2011 Amon Ott <ao@rsbac.org>
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/fs.h>
++#include <rsbac/types.h>
++#include <rsbac/reg.h>
++#include <rsbac/adf.h>
++#include <rsbac/aci.h>
++#include <rsbac/getname.h>
++#include <rsbac/error.h>
++#include <rsbac/proc_fs.h>
++#include <linux/namei.h>
++
++MODULE_AUTHOR("Michal Purzynski");
++MODULE_DESCRIPTION("RSBAC REG modules_off decision module");
++MODULE_LICENSE("GPL");
++
++static long handle = 9999991;
++
++/**** Decision Functions ****/
++
++static int request_func (enum rsbac_adf_request_t request,
++ rsbac_pid_t owner_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++{
++ switch (request) {
++ case R_ADD_TO_KERNEL:
++ case R_REMOVE_FROM_KERNEL:
++ return NOT_GRANTED;
++ default:
++ return DO_NOT_CARE;
++ }
++}
++
++/**** Init ****/
++
++int init_module(void)
++{
++
++ struct rsbac_reg_entry_t entry;
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module modules_off: Initializing.\n");
++
++ /* clearing registration entries */
++ memset(&entry, 0, sizeof(entry));
++
++ strcpy(entry.name, "RSBAC REG modules_off ADF module");
++ rsbac_printk(KERN_INFO "RSBAC REG decision module modules_off: REG Version: %u, Name: %s, Handle: %li\n",RSBAC_REG_VERSION, entry.name, handle);
++
++ entry.handle = handle;
++ entry.request_func = request_func;
++ entry.switch_on = TRUE;
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module modules_off: Registering to ADF.\n");
++
++ if(rsbac_reg_register(RSBAC_REG_VERSION, entry) < 0)
++ {
++ rsbac_printk(KERN_WARNING "RSBAC REG decision module sample 1: Registering failed. Unloading.\n");
++ return -ENOEXEC;
++ }
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module modules_off: Loaded.\n");
++
++ return 0;
++}
++
++void cleanup_module(void)
++{
++ rsbac_printk(KERN_INFO "RSBAC REG decision module modules_off: Unregistering.\n");
++
++ if(rsbac_reg_unregister(handle))
++ {
++ rsbac_printk(KERN_ERR "RSBAC REG decision module modules_off: Unregistering failed - beware of possible system failure!\n");
++ }
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module modules_off: Unloaded.\n");
++}
++
+diff --git a/rsbac/adf/reg/reg_main.c b/rsbac/adf/reg/reg_main.c
+new file mode 100644
+index 0000000..de803a4
+--- /dev/null
++++ b/rsbac/adf/reg/reg_main.c
+@@ -0,0 +1,928 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - REG / Decision Module Registration */
++/* File: rsbac/adf/reg/main.c */
++/* */
++/* Author and (c) 1999-2011: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 12/Jul/2011 */
++/*************************************************** */
++
++#include <linux/types.h>
++#include <linux/string.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <asm/uaccess.h>
++#include <linux/seq_file.h>
++#include <rsbac/types.h>
++#include <rsbac/reg.h>
++#include <rsbac/reg_main.h>
++#include <rsbac/aci.h>
++#include <rsbac/aci_data_structures.h>
++#include <rsbac/adf.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++#include <rsbac/proc_fs.h>
++#include <rsbac/rkmem.h>
++
++/************************************************* */
++/* Global Variables */
++/************************************************* */
++
++static struct rsbac_reg_list_head_t list_head;
++static struct rsbac_reg_sc_list_head_t sc_list_head;
++
++/************************************************* */
++/* Internal functions */
++/************************************************* */
++
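++/* Simple reader/writer scheme on top of a spinlock: readers counts active */
++/* readers, -1 marks an active writer; contenders poll by dropping and */
++/* re-taking the spinlock until the list becomes free for them. */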
++static void reg_read_lock(void)
++ {
++ spin_lock(&list_head.lock);
++ while(list_head.readers < 0)
++ {
++ spin_unlock(&list_head.lock);
++ spin_lock(&list_head.lock);
++ }
++ list_head.readers++;
++ spin_unlock(&list_head.lock);
++ }
++
++static void reg_read_unlock(void)
++ {
++ spin_lock(&list_head.lock);
++ list_head.readers--;
++ spin_unlock(&list_head.lock);
++ }
++
++static void reg_write_lock(void)
++ {
++ spin_lock(&list_head.lock);
++ while(list_head.readers != 0)
++ {
++ spin_unlock(&list_head.lock);
++ spin_lock(&list_head.lock);
++ }
++ list_head.readers = -1;
++ spin_unlock(&list_head.lock);
++ }
++
++static void reg_write_unlock(void)
++ {
++ spin_lock(&list_head.lock);
++ list_head.readers = 0;
++ spin_unlock(&list_head.lock);
++ }
++
++static void reg_sc_read_lock(void)
++ {
++ spin_lock(&sc_list_head.lock);
++ while(sc_list_head.readers < 0)
++ {
++ spin_unlock(&sc_list_head.lock);
++ spin_lock(&sc_list_head.lock);
++ }
++ sc_list_head.readers++;
++ spin_unlock(&sc_list_head.lock);
++ }
++
++static void reg_sc_read_unlock(void)
++ {
++ spin_lock(&sc_list_head.lock);
++ sc_list_head.readers--;
++ spin_unlock(&sc_list_head.lock);
++ }
++
++static void reg_sc_write_lock(void)
++ {
++ spin_lock(&sc_list_head.lock);
++ while(sc_list_head.readers != 0)
++ {
++ spin_unlock(&sc_list_head.lock);
++ spin_lock(&sc_list_head.lock);
++ }
++ sc_list_head.readers = -1;
++ spin_unlock(&sc_list_head.lock);
++ }
++
++static void reg_sc_write_unlock(void)
++ {
++ spin_lock(&sc_list_head.lock);
++ sc_list_head.readers = 0;
++ spin_unlock(&sc_list_head.lock);
++ }
++
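++/* The lookup helpers below scan the doubly linked lists linearly and cache the */
++/* last match in list_head.curr / sc_list_head.curr for repeated lookups. */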
++/* lookup_item() */
++static struct rsbac_reg_list_item_t * lookup_item(rsbac_reg_handle_t handle)
++ {
++ struct rsbac_reg_list_item_t * curr = list_head.curr;
++
++ /* is the current item the one we look for? yes -> return, else search */
++ if (curr && (curr->entry.handle == handle))
++ return (curr);
++
++ curr = list_head.head;
++ while (curr && (curr->entry.handle != handle))
++ curr = curr->next;
++ if (curr)
++ list_head.curr=curr;
++ return (curr);
++ };
++
++/* lookup_sc_item_reg() */
++static struct rsbac_reg_sc_list_item_t * lookup_sc_item_reg(rsbac_reg_handle_t handle)
++ {
++ struct rsbac_reg_sc_list_item_t * curr = sc_list_head.curr;
++
++ /* is the current item the one we look for? yes -> return, else search */
++ if (curr && (curr->entry.registration_handle == handle))
++ return (curr);
++
++ curr = sc_list_head.head;
++ while (curr && (curr->entry.registration_handle != handle))
++ curr = curr->next;
++ if (curr)
++ sc_list_head.curr=curr;
++ return (curr);
++ };
++
++/* lookup_sc_item_dis() */
++static struct rsbac_reg_sc_list_item_t * lookup_sc_item_dis(rsbac_reg_handle_t handle)
++ {
++ struct rsbac_reg_sc_list_item_t * curr = sc_list_head.curr;
++
++ /* is the current item the one we look for? yes -> return, else search */
++ if (curr && (curr->entry.dispatcher_handle == handle))
++ return (curr);
++
++ curr = sc_list_head.head;
++ while (curr && (curr->entry.dispatcher_handle != handle))
++ curr = curr->next;
++ if (curr)
++ sc_list_head.curr=curr;
++ return (curr);
++ };
++
++static struct rsbac_reg_list_item_t*
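++/* add_item()/add_sc_item() append the new entry at the list tail, copy the */
++/* entry data and force NUL termination of the name. */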
++ add_item(struct rsbac_reg_entry_t entry)
++ {
++ struct rsbac_reg_list_item_t * new_item_p = NULL;
++
++ if ( !(new_item_p = (struct rsbac_reg_list_item_t *)
++ rsbac_kmalloc(sizeof(*new_item_p))) )
++ return(NULL);
++ new_item_p->entry.handle = entry.handle;
++ strncpy(new_item_p->entry.name, entry.name, RSBAC_REG_NAME_LEN);
++ new_item_p->entry.name[RSBAC_REG_NAME_LEN] = 0;
++ new_item_p->entry.request_func = entry.request_func;
++ new_item_p->entry.set_attr_func = entry.set_attr_func;
++ new_item_p->entry.need_overwrite_func = entry.need_overwrite_func;
++ new_item_p->entry.write_func = entry.write_func;
++ new_item_p->entry.mount_func = entry.mount_func;
++ new_item_p->entry.umount_func = entry.umount_func;
++ new_item_p->entry.check_func = entry.check_func;
++ new_item_p->entry.switch_on = entry.switch_on;
++
++ if (!list_head.head)
++ {
++ list_head.head=new_item_p;
++ list_head.tail=new_item_p;
++ list_head.curr=new_item_p;
++ list_head.count = 1;
++ new_item_p->prev=NULL;
++ new_item_p->next=NULL;
++ }
++ else
++ {
++ new_item_p->prev=list_head.tail;
++ new_item_p->next=NULL;
++ list_head.tail->next=new_item_p;
++ list_head.tail=new_item_p;
++ list_head.curr=new_item_p;
++ list_head.count++;
++ };
++ return(new_item_p);
++ };
++
++static struct rsbac_reg_sc_list_item_t*
++ add_sc_item(struct rsbac_reg_syscall_entry_t entry)
++ {
++ struct rsbac_reg_sc_list_item_t * new_item_p = NULL;
++
++ if ( !(new_item_p = (struct rsbac_reg_sc_list_item_t *)
++ rsbac_kmalloc(sizeof(*new_item_p))) )
++ return(NULL);
++ new_item_p->entry.registration_handle = entry.registration_handle;
++ new_item_p->entry.dispatcher_handle = entry.dispatcher_handle;
++ strncpy(new_item_p->entry.name, entry.name, RSBAC_REG_NAME_LEN);
++ new_item_p->entry.name[RSBAC_REG_NAME_LEN] = 0;
++ new_item_p->entry.syscall_func = entry.syscall_func;
++
++ if (!sc_list_head.head)
++ {
++ sc_list_head.head=new_item_p;
++ sc_list_head.tail=new_item_p;
++ sc_list_head.curr=new_item_p;
++ sc_list_head.count = 1;
++ new_item_p->prev=NULL;
++ new_item_p->next=NULL;
++ }
++ else
++ {
++ new_item_p->prev=sc_list_head.tail;
++ new_item_p->next=NULL;
++ sc_list_head.tail->next=new_item_p;
++ sc_list_head.tail=new_item_p;
++ sc_list_head.curr=new_item_p;
++ sc_list_head.count++;
++ };
++ return(new_item_p);
++ };
++
++static void remove_item(rsbac_reg_handle_t handle)
++ {
++ struct rsbac_reg_list_item_t * item_p;
++
++ /* first we must locate the item. */
++ if ( (item_p = lookup_item(handle)) )
++ { /* ok, item was found */
++ if ( (list_head.head == item_p) )
++ { /* item is head */
++ if ( (list_head.tail == item_p) )
++ { /* item is head and tail = only item -> list will be empty*/
++ list_head.head = NULL;
++ list_head.tail = NULL;
++ }
++ else
++ { /* item is head, but not tail -> next item becomes head */
++ item_p->next->prev = NULL;
++ list_head.head = item_p->next;
++ };
++ }
++ else
++ { /* item is not head */
++ if ( (list_head.tail == item_p) )
++ { /*item is not head, but tail -> previous item becomes tail*/
++ item_p->prev->next = NULL;
++ list_head.tail = item_p->prev;
++ }
++ else
++ { /* item is neither head nor tail -> item is cut out */
++ item_p->prev->next = item_p->next;
++ item_p->next->prev = item_p->prev;
++ };
++ };
++
++ /* curr is no longer valid -> reset */
++ list_head.curr=NULL;
++ /* adjust counter */
++ list_head.count--;
++ /* now we can remove the item from memory */
++ rsbac_kfree(item_p);
++ }; /* end of if: item was found */
++ }; /* end of remove_item() */
++
++static void remove_sc_item(rsbac_reg_handle_t handle)
++ {
++ struct rsbac_reg_sc_list_item_t * item_p;
++
++ /* first we must locate the item. */
++ if ( (item_p = lookup_sc_item_reg(handle)) )
++ { /* ok, item was found */
++ if ( (sc_list_head.head == item_p) )
++ { /* item is head */
++ if ( (sc_list_head.tail == item_p) )
++ { /* item is head and tail = only item -> sc_list will be empty*/
++ sc_list_head.head = NULL;
++ sc_list_head.tail = NULL;
++ }
++ else
++ { /* item is head, but not tail -> next item becomes head */
++ item_p->next->prev = NULL;
++ sc_list_head.head = item_p->next;
++ };
++ }
++ else
++ { /* item is not head */
++ if ( (sc_list_head.tail == item_p) )
++ { /*item is not head, but tail -> previous item becomes tail*/
++ item_p->prev->next = NULL;
++ sc_list_head.tail = item_p->prev;
++ }
++ else
++ { /* item is neither head nor tail -> item is cut out */
++ item_p->prev->next = item_p->next;
++ item_p->next->prev = item_p->prev;
++ };
++ };
++
++ /* curr is no longer valid -> reset */
++ sc_list_head.curr=NULL;
++ /* adjust counter */
++ sc_list_head.count--;
++ /* now we can remove the item from memory */
++ rsbac_kfree(item_p);
++ }; /* end of if: item was found */
++  }; /* end of remove_sc_item() */
++
++
++/************************************************* */
++/* PROC support */
++/************************************************* */
++
++#if defined(CONFIG_RSBAC_PROC) && defined(CONFIG_PROC_FS)
++static int
++reg_modules_proc_show(struct seq_file *m, void *v)
++{
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ struct rsbac_reg_list_item_t * item_p;
++ struct rsbac_reg_sc_list_item_t * sc_item_p;
++
++ if (!rsbac_is_initialized())
++ return (-ENOSYS);
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++      rsbac_printk(KERN_DEBUG "reg_modules_proc_show(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++
++ seq_printf(m, "RSBAC REG registered decision modules\n-------------------------------------\n");
++
++ reg_read_lock();
++ item_p=list_head.head;
++ while(item_p)
++ {
++ if(item_p->entry.name[0] == 0)
++ seq_printf(m, "(no name)\n");
++ else
++ seq_printf(m, "%s\n",
++ item_p->entry.name);
++ item_p = item_p->next;
++ }
++ reg_read_unlock();
++
++ seq_printf(m, "\n %i module entries used.\n",
++ list_head.count);
++ seq_printf(m, "\nRSBAC REG registered system calls\n---------------------------------\n");
++
++ reg_sc_read_lock();
++ sc_item_p=sc_list_head.head;
++ while(sc_item_p)
++ {
++ if(sc_item_p->entry.name[0] == 0)
++ seq_printf(m, "%u: (no name)\n",
++ sc_item_p->entry.dispatcher_handle);
++ else
++ seq_printf(m, "%u: %s\n",
++ sc_item_p->entry.dispatcher_handle,
++ sc_item_p->entry.name);
++ sc_item_p = sc_item_p->next;
++ }
++ reg_sc_read_unlock();
++
++ seq_printf(m, "\n %i syscall entries used.\n",
++ sc_list_head.count);
++ return 0;
++}
++
++static int reg_modules_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, reg_modules_proc_show, NULL);
++}
++
++static const struct file_operations reg_modules_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = reg_modules_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *reg_modules;
++
++#endif /* PROC */
++
++/************************************************* */
++/* Externally visible functions */
++/************************************************* */
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++void rsbac_reg_init(void)
++#else
++void __init rsbac_reg_init(void)
++#endif
++ {
++ if (rsbac_is_initialized())
++ {
++ rsbac_printk(KERN_WARNING "rsbac_reg_init(): RSBAC already initialized\n");
++ return;
++ }
++ /* init data structures */
++ rsbac_printk(KERN_INFO "rsbac_reg_init(): Initializing RSBAC: REG module and syscall registration\n");
++
++ spin_lock_init(&list_head.lock);
++ list_head.readers = 0;
++ list_head.head = NULL;
++ list_head.tail = NULL;
++ list_head.curr = NULL;
++ list_head.count = 0;
++ spin_lock_init(&sc_list_head.lock);
++ sc_list_head.readers = 0;
++ sc_list_head.head = NULL;
++ sc_list_head.tail = NULL;
++ sc_list_head.curr = NULL;
++ sc_list_head.count = 0;
++
++ /* init proc entry */
++ #if defined(CONFIG_RSBAC_PROC) && defined(CONFIG_PROC_FS)
++ {
++ reg_modules = proc_create(RSBAC_REG_PROC_NAME,
++ S_IFREG | S_IRUGO,
++ proc_rsbac_root_p, &reg_modules_proc_fops);
++ }
++ #endif
++ }
++
++
++inline enum rsbac_adf_req_ret_t
++ rsbac_adf_request_reg (enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ enum rsbac_adf_req_ret_t result = DO_NOT_CARE;
++ struct rsbac_reg_list_item_t * item_p;
++
++ reg_read_lock();
++ item_p=list_head.head;
++ while(item_p)
++ {
++ if( item_p->entry.request_func
++ #ifdef CONFIG_RSBAC_SWITCH_REG
++ && item_p->entry.switch_on
++ #endif
++ )
++ result = adf_and_plus(result,
++ item_p->entry.request_func (request,
++ caller_pid,
++ target,
++ tid,
++ attr,
++ attr_val,
++ owner) );
++ item_p=item_p->next;
++ }
++ reg_read_unlock();
++ return result;
++ }
++
++inline int rsbac_adf_set_attr_reg(
++ enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_target_t new_target,
++ union rsbac_target_id_t new_tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ int error = 0;
++ int suberror;
++ struct rsbac_reg_list_item_t * item_p;
++
++ reg_read_lock();
++ item_p=list_head.head;
++ while(item_p)
++ {
++ if( item_p->entry.set_attr_func
++ #ifdef CONFIG_RSBAC_SWITCH_REG
++ && item_p->entry.switch_on
++ #endif
++ )
++ {
++ suberror = item_p->entry.set_attr_func (request,
++ caller_pid,
++ target,
++ tid,
++ new_target,
++ new_tid,
++ attr,
++ attr_val,
++ owner);
++ if(suberror)
++ error = suberror;
++ }
++ item_p = item_p->next;
++ }
++ reg_read_unlock();
++ return error;
++ }
++
++
++#ifdef CONFIG_RSBAC_SECDEL
++inline rsbac_boolean_t rsbac_need_overwrite_reg(struct dentry * dentry_p)
++ {
++ rsbac_boolean_t need_overwrite = FALSE;
++ struct rsbac_reg_list_item_t * item_p;
++
++ reg_read_lock();
++ item_p=list_head.head;
++ while(item_p)
++ {
++ if( item_p->entry.need_overwrite_func
++ #ifdef CONFIG_RSBAC_SWITCH_REG
++ && item_p->entry.switch_on
++ #endif
++ )
++ if(!need_overwrite)
++ need_overwrite = item_p->entry.need_overwrite_func(dentry_p);
++ item_p=item_p->next;
++ }
++ reg_read_unlock();
++ return need_overwrite;
++ }
++#endif
++
++/* mounting and umounting */
++inline int rsbac_mount_reg(kdev_t kdev)
++ {
++ int error = 0;
++ int suberror;
++ struct rsbac_reg_list_item_t * item_p;
++
++ reg_read_lock();
++ item_p=list_head.head;
++ while(item_p)
++ {
++ if( item_p->entry.mount_func
++ )
++ {
++ suberror = item_p->entry.mount_func(kdev);
++ if(suberror < 0)
++ error = suberror;
++ }
++ item_p=item_p->next;
++ }
++ reg_read_unlock();
++ return error;
++ }
++
++inline int rsbac_umount_reg(kdev_t kdev)
++ {
++ int error = 0;
++ int suberror;
++ struct rsbac_reg_list_item_t * item_p;
++
++ reg_read_lock();
++ item_p=list_head.head;
++ while(item_p)
++ {
++ if( item_p->entry.umount_func
++ )
++ {
++ suberror = item_p->entry.umount_func(kdev);
++ if(suberror < 0)
++ error = suberror;
++ }
++ item_p=item_p->next;
++ }
++ reg_read_unlock();
++ return error;
++ }
++
++#if defined(CONFIG_RSBAC_AUTO_WRITE)
++inline int rsbac_write_reg(void)
++ {
++ int count = 0;
++ int subcount = 0;
++ struct rsbac_reg_list_item_t * item_p;
++
++ reg_read_lock();
++ item_p=list_head.head;
++ while(item_p)
++ {
++ if(item_p->entry.write_func)
++ {
++ subcount = item_p->entry.write_func(FALSE);
++ if(subcount > 0)
++ {
++ count += subcount;
++ }
++ else
++ if(subcount < 0)
++ {
++ if(subcount != -RSBAC_ENOTWRITABLE)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_write_reg(): write_func() for REG module %s returned error %i\n",
++ item_p->entry.name, subcount);
++ }
++ }
++ }
++ item_p=item_p->next;
++ }
++ reg_read_unlock();
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_write)
++ {
++ rsbac_printk(KERN_DEBUG "rsbac_write_reg(): %u lists written.\n",
++ count);
++ }
++#endif
++ return count;
++ }
++#endif /* CONFIG_RSBAC_AUTO_WRITE */
++
++/* Status checking */
++inline int rsbac_check_reg(int correct, int check_inode)
++ {
++ int error = 0;
++ int suberror;
++ struct rsbac_reg_list_item_t * item_p;
++
++ reg_read_lock();
++ item_p=list_head.head;
++ while(item_p)
++ {
++ if( item_p->entry.check_func
++ )
++ {
++ suberror = item_p->entry.check_func(correct, check_inode);
++ if(suberror < 0)
++ error = suberror;
++ }
++ item_p=item_p->next;
++ }
++ reg_read_unlock();
++ return error;
++ }
++
++
++/*
++ * Register an ADF decision module
++ * Returns given positive handle or negative error code
++ */
++
++EXPORT_SYMBOL(rsbac_reg_register);
++
++rsbac_reg_handle_t rsbac_reg_register( rsbac_version_t version,
++ struct rsbac_reg_entry_t entry)
++ {
++ if(version != RSBAC_REG_VERSION)
++ return(-RSBAC_EINVALIDVERSION);
++
++ /* check entry */
++ if( ( !entry.request_func
++ && !entry.set_attr_func
++ && !entry.need_overwrite_func
++ && !entry.write_func
++ && !entry.mount_func
++ && !entry.umount_func
++ )
++ || (entry.handle <= 0)
++ )
++ return -RSBAC_EINVALIDVALUE;
++
++ reg_write_lock();
++ if(lookup_item(entry.handle))
++ {
++ rsbac_printk(KERN_INFO "rsbac_reg_register: Handle in use, registering failed: %s.\n",
++ entry.name);
++ entry.handle = -RSBAC_EEXISTS;
++ }
++ else
++ {
++ if(!add_item(entry))
++ {
++ entry.name[RSBAC_REG_NAME_LEN] = 0;
++ rsbac_printk(KERN_INFO "rsbac_reg_register: registering failed for %s.\n",
++ entry.name);
++ entry.handle = -RSBAC_ECOULDNOTADDITEM;
++ }
++#ifdef CONFIG_RSBAC_DEBUG
++ else
++ if(rsbac_debug_reg)
++ {
++ rsbac_printk(KERN_DEBUG "rsbac_reg_register: module %s registered.\n",
++ entry.name);
++ }
++#endif
++ }
++ reg_write_unlock();
++ return entry.handle;
++ }
++
++/*
++ * Switch a module on or off - for 'normal' modules this is done by the
++ * general switch function. If module switching is disabled, this is a dummy.
++ */
++
++EXPORT_SYMBOL(rsbac_reg_switch);
++
++int rsbac_reg_switch (rsbac_reg_handle_t handle, rsbac_boolean_t value)
++ {
++#ifdef CONFIG_RSBAC_SWITCH_REG
++ struct rsbac_reg_list_item_t * item_p;
++ int err=0;
++
++ if((value != FALSE) && (value != TRUE))
++ return -RSBAC_EINVALIDVALUE;
++ reg_read_lock();
++ item_p = lookup_item(handle);
++ if(item_p)
++ {
++ item_p->entry.switch_on = value;
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_reg)
++ {
++ rsbac_printk(KERN_DEBUG "rsbac_reg_switch: module %s switched to %i.\n",
++ item_p->entry.name,
++ value);
++ }
++#endif
++ }
++ else
++ err = -RSBAC_EINVALIDTARGET;
++ reg_read_unlock();
++ return err;
++#else
++ return(-RSBAC_EINVALIDTARGET);
++#endif
++ };
++
++/*
++ * Unregister an ADF decision module
++ * Returns 0 on success or negative error code. Be careful not to unregister
++ * modules you did not register yourself.
++ */
++
++EXPORT_SYMBOL(rsbac_reg_unregister);
++
++int rsbac_reg_unregister(rsbac_reg_handle_t handle)
++ {
++ int err=0;
++
++ if(handle <= 0)
++ return -RSBAC_EINVALIDVALUE;
++
++ reg_write_lock();
++ if(lookup_item(handle))
++ {
++ remove_item(handle);
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_reg)
++ {
++ rsbac_printk(KERN_DEBUG "rsbac_reg_unregister: module unregistered.\n");
++ }
++#endif
++ }
++ else
++ {
++ err = -RSBAC_EINVALIDTARGET;
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_reg)
++ {
++ rsbac_printk(KERN_DEBUG "rsbac_reg_unregister: module unregistering failed.\n");
++ }
++#endif
++ }
++ reg_write_unlock();
++ return err;
++ }
++
++
++/*
++ * Register a system call
++ * Returns given positive handle or negative error code
++ */
++
++EXPORT_SYMBOL(rsbac_reg_register_syscall);
++
++rsbac_reg_handle_t rsbac_reg_register_syscall( rsbac_version_t version,
++ struct rsbac_reg_syscall_entry_t entry)
++ {
++ if(version != RSBAC_REG_VERSION)
++ return(-RSBAC_EINVALIDVERSION);
++
++ /* check entry */
++ if( !entry.syscall_func
++ || (entry.registration_handle <= 0)
++ || (entry.dispatcher_handle <= 0)
++ )
++ return -RSBAC_EINVALIDVALUE;
++
++ reg_sc_write_lock();
++ if(lookup_sc_item_reg(entry.registration_handle))
++ {
++ rsbac_printk(KERN_INFO "rsbac_reg_register_syscall: Registration handle in use, registering failed: %s.\n",
++ entry.name);
++ entry.registration_handle = -RSBAC_EEXISTS;
++ }
++ else
++ if(lookup_sc_item_dis(entry.dispatcher_handle))
++ {
++ rsbac_printk(KERN_INFO "rsbac_reg_register_syscall: Dispatcher handle in use, registering failed: %s.\n",
++ entry.name);
++ entry.registration_handle = -RSBAC_EEXISTS;
++ }
++ else
++ {
++ entry.name[RSBAC_REG_NAME_LEN] = 0;
++ if(!add_sc_item(entry))
++ {
++ rsbac_printk(KERN_INFO "rsbac_reg_register_syscall: registering failed for %s.\n",
++ entry.name);
++ entry.registration_handle = -RSBAC_ECOULDNOTADDITEM;
++ }
++#ifdef CONFIG_RSBAC_DEBUG
++ else
++ if(rsbac_debug_reg)
++ {
++ rsbac_printk(KERN_DEBUG "rsbac_reg_register_syscall: syscall %s registered.\n",
++ entry.name);
++ }
++#endif
++ }
++ reg_sc_write_unlock();
++ return entry.registration_handle;
++ }
++
++/*
++ * Unregister a system call
++ * Returns 0 on success or negative error code. Be careful not to unregister
++ * syscalls you did not register yourself.
++ */
++
++EXPORT_SYMBOL(rsbac_reg_unregister_syscall);
++
++int rsbac_reg_unregister_syscall(rsbac_reg_handle_t handle)
++ {
++ int err=0;
++
++ if(handle <= 0)
++ return -RSBAC_EINVALIDVALUE;
++
++ reg_sc_write_lock();
++ if(lookup_sc_item_reg(handle))
++ {
++ remove_sc_item(handle);
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_reg)
++ {
++ rsbac_printk(KERN_DEBUG "rsbac_reg_unregister_syscall: syscall unregistered.\n");
++ }
++#endif
++ }
++ else
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ rsbac_printk(KERN_INFO "rsbac_reg_unregister_syscall: syscall unregistering failed for invalid handle!\n");
++ }
++ reg_sc_write_unlock();
++ return err;
++ }
++
++int rsbac_reg_syscall(rsbac_reg_handle_t handle,
++ void __user * arg)
++ {
++ int err = 0;
++ struct rsbac_reg_sc_list_item_t * item_p;
++
++ reg_sc_read_lock();
++ item_p=lookup_sc_item_dis(handle);
++ if(item_p && item_p->entry.syscall_func)
++ {
++ err = item_p->entry.syscall_func(arg);
++ }
++ else
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ }
++ reg_sc_read_unlock();
++ return err;
++ }
++
++/* end of rsbac/adf/reg/reg_main.c */
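For orientation before the sample modules in the following hunks: stripped of logging and error handling, the registration flow implemented above reduces to the skeleton below. This is only a condensed sketch using the entry fields visible in reg_main.c (handle, name, request_func, switch_on); the complete, buildable versions are reg_sample1.c to reg_sample3.c.

/* Minimal REG module skeleton - a sketch only, condensed from the sample
 * modules below; all identifiers are taken from the REG code above. */
#include <linux/module.h>
#include <linux/string.h>
#include <rsbac/types.h>
#include <rsbac/reg.h>
#include <rsbac/adf.h>
#include <rsbac/error.h>

static long my_handle = 424242;   /* any unused positive handle */

static int my_request_func(enum rsbac_adf_request_t request,
                           rsbac_pid_t caller_pid,
                           enum rsbac_target_t target,
                           union rsbac_target_id_t tid,
                           enum rsbac_attribute_t attr,
                           union rsbac_attribute_value_t attr_val,
                           rsbac_uid_t owner)
{
        return GRANTED;           /* the actual decision logic goes here */
}

static int __init my_reg_init(void)
{
        struct rsbac_reg_entry_t entry;

        memset(&entry, 0, sizeof(entry));
        strcpy(entry.name, "minimal REG sketch");
        entry.handle = my_handle;
        entry.request_func = my_request_func;
        entry.switch_on = TRUE;

        /* rsbac_reg_register() returns a negative error, e.g. if the handle
         * is already in use; the syscall side works the same way through
         * rsbac_reg_register_syscall(). */
        if (rsbac_reg_register(RSBAC_REG_VERSION, entry) < 0)
                return -ENOEXEC;
        return 0;
}

static void __exit my_reg_exit(void)
{
        /* only unregister handles registered by this module */
        rsbac_reg_unregister(my_handle);
}

module_init(my_reg_init);
module_exit(my_reg_exit);
MODULE_LICENSE("GPL");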
+diff --git a/rsbac/adf/reg/reg_sample1.c b/rsbac/adf/reg/reg_sample1.c
+new file mode 100644
+index 0000000..e005603
+--- /dev/null
++++ b/rsbac/adf/reg/reg_sample1.c
+@@ -0,0 +1,254 @@
++/*
++ * RSBAC REG decision module sample 1
++ *
++ * Author and (c) 1999-2009 Amon Ott <ao@rsbac.org>
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/fs.h>
++#include <linux/seq_file.h>
++#include <rsbac/types.h>
++#include <rsbac/reg.h>
++#include <rsbac/adf.h>
++#include <rsbac/aci.h>
++#include <rsbac/getname.h>
++#include <rsbac/error.h>
++#include <rsbac/proc_fs.h>
++
++static u_long nr_request_calls = 0;
++static u_long nr_set_attr_calls = 0;
++static u_long nr_need_overwrite_calls = 0;
++static u_long nr_system_calls = 0;
++static void * system_call_arg = NULL;
++
++MODULE_AUTHOR("Amon Ott");
++MODULE_DESCRIPTION("RSBAC REG sample decision module 1");
++MODULE_LICENSE("GPL");
++
++static char * name = NULL;
++static char * syscall_name = NULL;
++static long handle = 123456;
++static long syscall_registration_handle = 654321;
++static long syscall_dispatcher_handle = 1;
++
++module_param(name, charp, 0000);
++MODULE_PARM_DESC(name, "Name");
++module_param(syscall_name, charp, 0000);
++MODULE_PARM_DESC(syscall_name, "Syscall name");
++module_param(handle, long, S_IRUSR);
++MODULE_PARM_DESC(handle, "Handle");
++module_param(syscall_registration_handle, long, S_IRUSR);
++MODULE_PARM_DESC(syscall_registration_handle, "Syscall registration handle");
++module_param(syscall_dispatcher_handle, long, S_IRUSR);
++MODULE_PARM_DESC(syscall_dispatcher_handle, "Syscall dispatcher");
++
++
++/* PROC functions */
++
++#if defined(CONFIG_RSBAC_PROC)
++#define PROC_NAME "reg_sample1"
++
++static struct proc_dir_entry * reg_sample_proc_p;
++
++static int
++reg_sample_proc_show(struct seq_file *m, void *v)
++{
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_is_initialized())
++ return -ENOSYS;
++
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++ seq_puts(m, "RSBAC REG decision module sample 1\n----------------------------------\n");
++ seq_printf(m, "%lu calls to request function.\n",
++ nr_request_calls);
++ seq_printf(m, "%lu calls to set_attr function.\n",
++ nr_set_attr_calls);
++ seq_printf(m, "%lu calls to need_overwrite function.\n",
++ nr_need_overwrite_calls);
++ seq_printf(m, "%lu calls to system_call function %lu, last arg was %p.\n",
++ nr_system_calls,
++ syscall_dispatcher_handle,
++ system_call_arg);
++ return 0;
++}
++
++static int reg_sample_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, reg_sample_proc_show, NULL);
++}
++
++static const struct file_operations reg_sample_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = reg_sample_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++#endif /* CONFIG_RSBAC_PROC */
++
++/**** Decision Functions ****/
++
++static int request_func ( enum rsbac_adf_request_t request,
++ rsbac_pid_t owner_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ /* count call, but not for SEARCH request */
++ if(request != R_SEARCH)
++ nr_request_calls++;
++ return GRANTED;
++ }
++
++static int set_attr_func ( enum rsbac_adf_request_t request,
++ rsbac_pid_t owner_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_target_t new_target,
++ union rsbac_target_id_t new_tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ /* count call, but not for SEARCH request */
++ if(request != R_SEARCH)
++ nr_set_attr_calls++;
++ return 0;
++ }
++
++static rsbac_boolean_t need_overwrite_func (struct dentry * dentry_p)
++ {
++ nr_need_overwrite_calls++;
++ return FALSE;
++ }
++
++static int syscall_func (void * arg)
++ {
++ nr_system_calls++;
++ system_call_arg = arg;
++ return nr_system_calls;
++ }
++
++/**** Init ****/
++
++int init_module(void)
++{
++ struct rsbac_reg_entry_t entry;
++ struct rsbac_reg_syscall_entry_t syscall_entry;
++
++ if(!handle)
++ handle = 123456;
++ if(!syscall_registration_handle)
++ syscall_registration_handle = 654321;
++ if(!syscall_dispatcher_handle)
++ syscall_dispatcher_handle = 1;
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 1: Initializing.\n");
++
++ /* clearing registration entries */
++ memset(&entry, 0, sizeof(entry));
++ memset(&syscall_entry, 0, sizeof(syscall_entry));
++
++ if(name)
++ {
++ strncpy(entry.name, name, RSBAC_REG_NAME_LEN);
++ entry.name[RSBAC_REG_NAME_LEN] = 0;
++ }
++ else
++ strcpy(entry.name, "RSBAC REG sample 1 ADF module");
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 1: REG Version: %u, Name: %s, Handle: %li\n",
++ RSBAC_REG_VERSION, entry.name, handle);
++
++ entry.handle = handle;
++ entry.request_func = request_func;
++ entry.set_attr_func = set_attr_func;
++ entry.need_overwrite_func = need_overwrite_func;
++ entry.switch_on = TRUE;
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 1: Registering to ADF.\n");
++ if(rsbac_reg_register(RSBAC_REG_VERSION, entry) < 0)
++ {
++ rsbac_printk(KERN_WARNING "RSBAC REG decision module sample 1: Registering failed. Unloading.\n");
++ return -ENOEXEC;
++ }
++
++ if(syscall_name)
++ {
++ strncpy(syscall_entry.name, syscall_name, RSBAC_REG_NAME_LEN);
++ syscall_entry.name[RSBAC_REG_NAME_LEN] = 0;
++ }
++ else
++ strcpy(syscall_entry.name, "RSBAC REG sample 1 syscall");
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 1: REG Version: %u, Name: %s, Dispatcher Handle: %li\n",
++ RSBAC_REG_VERSION, syscall_entry.name, syscall_dispatcher_handle);
++
++ syscall_entry.registration_handle = syscall_registration_handle;
++ syscall_entry.dispatcher_handle = syscall_dispatcher_handle;
++ syscall_entry.syscall_func = syscall_func;
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 1: Registering syscall.\n");
++ syscall_registration_handle = rsbac_reg_register_syscall(RSBAC_REG_VERSION, syscall_entry);
++ if(syscall_registration_handle < 0)
++ {
++ rsbac_printk(KERN_WARNING "RSBAC REG decision module sample 1: Registering syscall failed. Unloading.\n");
++ if(rsbac_reg_unregister(handle))
++ {
++ rsbac_printk(KERN_ERR "RSBAC REG decision module sample 1: Unregistering failed - beware of possible system failure!\n");
++ }
++ return -ENOEXEC;
++ }
++
++ #if defined(CONFIG_RSBAC_PROC)
++ reg_sample_proc_p = proc_create(PROC_NAME, S_IFREG | S_IRUGO, proc_rsbac_root_p, &reg_sample_proc_fops);
++ if(!reg_sample_proc_p)
++ {
++ rsbac_printk(KERN_WARNING "%s: Not loaded due to failed proc entry registering.\n", name);
++ if(rsbac_reg_unregister(handle))
++ {
++ rsbac_printk(KERN_ERR "RSBAC REG decision module sample 1: Unregistering failed - beware of possible system failure!\n");
++ }
++ if(rsbac_reg_unregister_syscall(syscall_registration_handle))
++ {
++ rsbac_printk(KERN_ERR "RSBAC REG decision module sample 1: Unregistering syscall failed - beware of possible system failure!\n");
++ }
++ return -ENOEXEC;
++ }
++ #endif
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 1: Loaded.\n");
++
++ return 0;
++}
++
++void cleanup_module(void)
++{
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 1: Unregistering.\n");
++ #if defined(CONFIG_RSBAC_PROC)
++ remove_proc_entry(PROC_NAME, proc_rsbac_root_p);
++ #endif
++ if(rsbac_reg_unregister_syscall(syscall_registration_handle))
++ {
++ rsbac_printk(KERN_ERR "RSBAC REG decision module sample 1: Unregistering syscall failed - beware of possible system failure!\n");
++ }
++ if(rsbac_reg_unregister(handle))
++ {
++ rsbac_printk(KERN_ERR "RSBAC REG decision module sample 1: Unregistering failed - beware of possible system failure!\n");
++ }
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 1: Unloaded.\n");
++}
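One detail worth keeping apart when reading init_module() above: a registered syscall carries two handles. The registration handle identifies the entry for later unregistering (reg_main.c looks it up with lookup_sc_item_reg()), while the dispatcher handle is what callers pass to rsbac_reg_syscall() to reach syscall_func (lookup_sc_item_dis()). As a reading aid only, the relevant step reduces to the sketch below, with the two handles labelled; the values are the sample defaults.

/* Sketch only: the syscall-registration step of the module above, reduced to
 * the two-handle detail. Assumes the same RSBAC headers as used above. */
#include <linux/string.h>
#include <rsbac/types.h>
#include <rsbac/reg.h>
#include <rsbac/error.h>

static int example_syscall_func(void *arg)
{
        return 0;                 /* real work would happen here */
}

static long example_register_syscall(void)
{
        struct rsbac_reg_syscall_entry_t syscall_entry;

        memset(&syscall_entry, 0, sizeof(syscall_entry));
        strcpy(syscall_entry.name, "example syscall");
        syscall_entry.registration_handle = 654321; /* used to unregister the entry */
        syscall_entry.dispatcher_handle = 1;        /* used by rsbac_reg_syscall()  */
        syscall_entry.syscall_func = example_syscall_func;

        /* returns the registration handle on success, a negative error otherwise */
        return rsbac_reg_register_syscall(RSBAC_REG_VERSION, syscall_entry);
}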
+diff --git a/rsbac/adf/reg/reg_sample2.c b/rsbac/adf/reg/reg_sample2.c
+new file mode 100644
+index 0000000..e28ba10
+--- /dev/null
++++ b/rsbac/adf/reg/reg_sample2.c
+@@ -0,0 +1,548 @@
++/*
++ * RSBAC REG decision module sample2
++ * (not working any more, kept for reference)
++ *
++ * Author and (c) 1999-2005 Amon Ott <ao@rsbac.org>
++ */
++
++/* general stuff */
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++/* for file access */
++#include <linux/fs.h>
++#include <asm/uaccess.h>
++/* rsbac */
++#include <rsbac/types.h>
++#include <rsbac/reg.h>
++#include <rsbac/adf.h>
++#include <rsbac/aci.h>
++#include <rsbac/aci_data_structures.h>
++#include <rsbac/getname.h>
++#include <rsbac/error.h>
++#include <rsbac/proc_fs.h>
++
++static u_long nr_request_calls = 0;
++static u_long nr_set_attr_calls = 0;
++static u_long nr_need_overwrite_calls = 0;
++static rsbac_boolean_t no_write = FALSE;
++static u_long nr_system_calls = 0;
++static void * system_call_arg = 0;
++
++MODULE_AUTHOR("Amon Ott");
++MODULE_DESCRIPTION("RSBAC REG sample decision module 2");
++
++MODULE_PARM(name, "s");
++static char * name = NULL;
++static char dummy_buf[70]="To protect against wrong insmod params";
++
++MODULE_PARM(syscall_name, "s");
++static char * syscall_name = NULL;
++static char dummy_buf2[70]="To protect against wrong insmod params";
++
++MODULE_PARM(handle, "l");
++static long handle = 123457;
++
++MODULE_PARM(syscall_registration_handle, "l");
++static long syscall_registration_handle = 754321;
++MODULE_PARM(syscall_dispatcher_handle, "l");
++static long syscall_dispatcher_handle = 2;
++
++/* Filename for persistent data in /rsbac dir of ROOT_DEV (max 7 chars) */
++#define FILENAME "regsmp2"
++
++/* Version number for on disk data structures */
++#define FILE_VERSION 1
++
++/* PROC functions */
++
++#if defined(CONFIG_RSBAC_PROC)
++#define PROC_NAME "reg_sample2"
++static struct proc_dir_entry * proc_reg_sample_p;
++
++static int
++adf_sample_proc_info(char *buffer, char **start, off_t offset, int length)
++{
++ int len = 0;
++ off_t pos = 0;
++ off_t begin = 0;
++
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_is_initialized())
++ return (-ENOSYS);
++
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++ len += sprintf(buffer, "RSBAC REG decision module sample 2\n----------------------------------\n");
++ pos = begin + len;
++ if (pos < offset)
++ {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset+length)
++ goto out;
++
++ len += sprintf(buffer + len, "%lu calls to request function.\n",
++ nr_request_calls);
++ pos = begin + len;
++ if (pos < offset)
++ {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset+length)
++ goto out;
++
++ len += sprintf(buffer + len, "%lu calls to set_attr function.\n",
++ nr_set_attr_calls);
++ pos = begin + len;
++ if (pos < offset)
++ {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset+length)
++ goto out;
++
++ len += sprintf(buffer + len, "%lu calls to need_overwrite function.\n",
++ nr_need_overwrite_calls);
++ pos = begin + len;
++ if (pos < offset)
++ {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset+length)
++ goto out;
++
++ len += sprintf(buffer + len, "%lu calls to system_call function %lu, last arg was %p.\n",
++ nr_system_calls,
++ syscall_dispatcher_handle,
++ system_call_arg);
++ pos = begin + len;
++ if (pos < offset)
++ {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset+length)
++ goto out;
++
++out:
++ *start = buffer + (offset - begin);
++ len -= (offset - begin);
++
++ if (len > length)
++ len = length;
++ return len;
++}
++#endif /* CONFIG_RSBAC_PROC */
++
++
++/**** Read/Write Functions ****/
++
++/* read_info() */
++/* reading the system wide adf_sample2 data */
++
++static int read_info(void)
++ {
++ struct file file;
++ char name[RSBAC_MAXNAMELEN];
++ int err = 0;
++ int tmperr;
++ mm_segment_t oldfs;
++ u_int version;
++ u_long tmpval;
++
++ /* copy name from base name */
++ strcpy(name, FILENAME);
++
++ /* open file */
++ if ((err = rsbac_read_open(name,
++ &file,
++ rsbac_root_dev) ))
++ return(err);
++
++ /* OK, now we can start reading */
++
++ /* There is a read function for this file, so read data from
++ * previous module load.
++     * A positive return value from read() means success,
++     * 0 means end of file, and a negative value means an error.
++ */
++
++    /* Switch the address limit to kernel space, because read() writes */
++    /* to what it treats as user space */
++ oldfs = get_fs();
++ set_fs(KERNEL_DS);
++
++ tmperr = file.f_op->read(&file,
++ (char *) &version,
++ sizeof(version),
++ &file.f_pos);
++ /* error? */
++ if (tmperr < sizeof(version))
++ {
++ rsbac_printk(KERN_WARNING
++ "read_info(): read error from file!\n");
++ err = -RSBAC_EREADFAILED;
++ goto end_read;
++ }
++ /* if wrong version, warn and skip */
++ if (version != FILE_VERSION)
++ {
++ rsbac_printk(KERN_WARNING
++ "read_info(): wrong version %u, expected %u - skipping file and setting no_write!\n",
++ version, FILE_VERSION);
++ no_write = TRUE;
++ err = -RSBAC_EREADFAILED;
++ goto end_read;
++ }
++
++ /* read nr_request_calls */
++ tmperr = file.f_op->read(&file,
++ (char *) &tmpval,
++ sizeof(tmpval),
++ &file.f_pos);
++ if (tmperr < sizeof(tmpval))
++ {
++ rsbac_printk(KERN_WARNING "%s\n",
++ "read_info(): read error from file!");
++ err = -RSBAC_EREADFAILED;
++ goto end_read;
++ }
++ nr_request_calls = tmpval;
++
++ /* read nr_set_attr_calls */
++ tmperr = file.f_op->read(&file,
++ (char *) &tmpval,
++ sizeof(tmpval),
++ &file.f_pos);
++ if (tmperr < sizeof(tmpval))
++ {
++ rsbac_printk(KERN_WARNING "%s\n",
++ "read_info(): read error from file!");
++ err = -RSBAC_EREADFAILED;
++ goto end_read;
++ }
++ nr_set_attr_calls = tmpval;
++
++ /* read nr_need_overwrite_calls */
++ tmperr = file.f_op->read(&file,
++ (char *) &tmpval,
++ sizeof(tmpval),
++ &file.f_pos);
++ if (tmperr < sizeof(tmpval))
++ {
++ rsbac_printk(KERN_WARNING "%s\n",
++ "read_info(): read error from file!");
++ err = -RSBAC_EREADFAILED;
++ goto end_read;
++ }
++ nr_need_overwrite_calls = tmpval;
++
++end_read:
++    /* Restore the previous address limit (user space), */
++    /* now that read() is done writing to our kernel buffer */
++ set_fs(oldfs);
++
++ /* We do not need this file dentry any more */
++ rsbac_read_close(&file);
++
++ /* ready */
++ return(err);
++ }; /* end of read_info() */
++
++static int write_info(void)
++ {
++ struct file file;
++ char name[RSBAC_MAXNAMELEN];
++ int err = 0;
++ int tmperr;
++ mm_segment_t oldfs;
++ u_int version = FILE_VERSION;
++
++ /* copy name from base name */
++ strcpy(name, FILENAME);
++
++ /* get rsbac write-to-disk semaphore */
++ down(&rsbac_write_sem);
++
++ /* open file */
++ if ((err = rsbac_write_open(name,
++ &file,
++ rsbac_root_dev) ))
++ {
++ up(&rsbac_write_sem);
++ return(err);
++ }
++
++ /* OK, now we can start writing all sample items.
++ * A positive return value means a write success,
++ * 0 end of file and a negative value an error.
++ */
++
++    /* Switch the address limit to kernel space, because write() reads
++     * from what it treats as user space */
++ oldfs = get_fs();
++ set_fs(KERNEL_DS);
++
++ tmperr = file.f_op->write(&file,
++ (char *) &version,
++ sizeof(version),
++ &file.f_pos);
++ if (tmperr < sizeof(version))
++ {
++ rsbac_printk(KERN_WARNING
++ "write_info(): write error %i on file!\n",
++ tmperr);
++ err = -RSBAC_EWRITEFAILED;
++ goto end_write;
++ }
++
++ tmperr = file.f_op->write(&file,
++ (char *) &nr_request_calls,
++ sizeof(nr_request_calls),
++ &file.f_pos);
++ if (tmperr < sizeof(nr_request_calls))
++ {
++ rsbac_printk(KERN_WARNING
++ "write_info(): write error %i on file!\n",
++ tmperr);
++ err = -RSBAC_EWRITEFAILED;
++ goto end_write;
++ }
++
++ tmperr = file.f_op->write(&file,
++ (char *) &nr_set_attr_calls,
++ sizeof(nr_set_attr_calls),
++ &file.f_pos);
++ if (tmperr < sizeof(nr_set_attr_calls))
++ {
++ rsbac_printk(KERN_WARNING
++ "write_info(): write error %i on file!\n",
++ tmperr);
++ err = -RSBAC_EWRITEFAILED;
++ goto end_write;
++ }
++
++ tmperr = file.f_op->write(&file,
++ (char *) &nr_need_overwrite_calls,
++ sizeof(nr_need_overwrite_calls),
++ &file.f_pos);
++ if (tmperr < sizeof(nr_need_overwrite_calls))
++ {
++ rsbac_printk(KERN_WARNING
++ "write_info(): write error %i on file!\n",
++ tmperr);
++ err = -RSBAC_EWRITEFAILED;
++ goto end_write;
++ }
++
++end_write:
++    /* Restore the previous address limit (user space), */
++    /* now that write() is done reading from our kernel buffer */
++ set_fs(oldfs);
++
++ /* End of write access */
++ rsbac_write_close(&file);
++ up(&rsbac_write_sem);
++ return(err);
++ }; /* end of write_info() */
++
++
++/**** Decision Functions ****/
++
++static int request_func ( enum rsbac_adf_request_t request,
++ rsbac_pid_t owner_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ /* count call, but not for SEARCH request */
++ if(request != R_SEARCH)
++ nr_request_calls++;
++ return GRANTED;
++ }
++
++static int set_attr_func ( enum rsbac_adf_request_t request,
++ rsbac_pid_t owner_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_target_t new_target,
++ union rsbac_target_id_t new_tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ /* count call, but not for SEARCH request */
++ if(request != R_SEARCH)
++ nr_set_attr_calls++;
++ return 0;
++ }
++
++static rsbac_boolean_t need_overwrite_func (struct dentry * dentry_p)
++ {
++ nr_need_overwrite_calls++;
++ return FALSE;
++ }
++
++static int write_func(rsbac_boolean_t need_lock)
++ {
++ int res=0;
++
++ if(!write_info())
++ res = 1;
++
++ return(res);
++ }
++
++static int syscall_func (void * arg)
++ {
++ nr_system_calls++;
++ system_call_arg = arg;
++ return nr_system_calls;
++ }
++
++/**** Init ****/
++
++int init_module(void)
++{
++ struct rsbac_reg_entry_t entry;
++ struct rsbac_reg_syscall_entry_t syscall_entry;
++
++ if(!handle)
++ handle = 123457;
++ if(!syscall_registration_handle)
++ syscall_registration_handle = 754321;
++ if(!syscall_dispatcher_handle)
++ syscall_dispatcher_handle = 2;
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 2: Initializing.\n");
++
++ /* clearing registration entries */
++ memset(&entry, 0, sizeof(entry));
++ memset(&syscall_entry, 0, sizeof(syscall_entry));
++
++ if((dummy_buf[0] != 'T') || (dummy_buf2[0] != 'T'))
++ {
++ rsbac_printk(KERN_WARNING "RSBAC REG decision module sample 2: Not loaded due to invalid param string.\n");
++ return -ENOEXEC;
++ }
++ if(name)
++ {
++ strncpy(entry.name, name, RSBAC_REG_NAME_LEN);
++ entry.name[RSBAC_REG_NAME_LEN] = 0;
++ }
++ else
++ strcpy(entry.name, "RSBAC REG sample 2 ADF module");
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 2: REG Version: %u, Name: %s, Handle: %li\n",
++ RSBAC_REG_VERSION, entry.name, handle);
++
++ entry.handle = handle;
++ entry.request_func = request_func;
++ entry.set_attr_func = set_attr_func;
++ entry.need_overwrite_func = need_overwrite_func;
++ entry.write_func = write_func;
++ entry.switch_on = TRUE;
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 2: Registering to ADF.\n");
++ if(rsbac_reg_register(RSBAC_REG_VERSION, entry) < 0)
++ {
++ rsbac_printk(KERN_WARNING "RSBAC REG decision module sample 2: Registering failed. Unloading.\n");
++ return -ENOEXEC;
++ }
++
++ if(syscall_name)
++ {
++ strncpy(syscall_entry.name, syscall_name, RSBAC_REG_NAME_LEN);
++ syscall_entry.name[RSBAC_REG_NAME_LEN] = 0;
++ }
++ else
++ strcpy(syscall_entry.name, "RSBAC REG sample 2 syscall");
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 2: REG Version: %u, Name: %s, Dispatcher Handle: %li\n",
++ RSBAC_REG_VERSION, syscall_entry.name, syscall_dispatcher_handle);
++
++ syscall_entry.registration_handle = syscall_registration_handle;
++ syscall_entry.dispatcher_handle = syscall_dispatcher_handle;
++ syscall_entry.syscall_func = syscall_func;
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 2: Registering syscall.\n");
++ syscall_registration_handle = rsbac_reg_register_syscall(RSBAC_REG_VERSION, syscall_entry);
++ if(syscall_registration_handle < 0)
++ {
++ rsbac_printk(KERN_WARNING "RSBAC REG decision module sample 2: Registering syscall failed. Unloading.\n");
++ if(rsbac_reg_unregister(handle))
++ {
++ rsbac_printk(KERN_ERR "RSBAC REG decision module sample 2: Unregistering failed - beware of possible system failure!\n");
++ }
++ return -ENOEXEC;
++ }
++
++ if(read_info())
++ {
++ rsbac_printk(KERN_WARNING
++ "RSBAC REG decision module sample 2: Could not read info from previous session.\n");
++ }
++
++ #if defined(CONFIG_RSBAC_PROC)
++ proc_reg_sample_p = create_proc_entry(PROC_NAME,
++ S_IFREG | S_IRUGO,
++ proc_rsbac_root_p);
++ if(!proc_reg_sample_p)
++ {
++ rsbac_printk(KERN_WARNING "%s: Not loaded due to failed proc entry registering.\n", name);
++ if(rsbac_reg_unregister_syscall(syscall_registration_handle))
++ {
++ rsbac_printk(KERN_ERR "RSBAC REG decision module sample 2: Unregistering syscall failed - beware of possible system failure!\n");
++ }
++ if(rsbac_reg_unregister(handle))
++ {
++ rsbac_printk(KERN_ERR "RSBAC REG decision module sample 2: Unregistering from ADF failed - beware of possible system failure!\n");
++ }
++ return -ENOEXEC;
++ }
++ proc_reg_sample_p->get_info = adf_sample_proc_info;
++ #endif
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 2: Loaded.\n");
++
++ return 0;
++}
++
++void cleanup_module(void)
++{
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 2: Unregistering.\n");
++ #if defined(CONFIG_RSBAC_PROC)
++ remove_proc_entry(PROC_NAME, proc_rsbac_root_p);
++ #endif
++ if(write_info())
++ {
++ rsbac_printk(KERN_WARNING
++ "RSBAC REG decision module sample 2: Could not save info for next session.\n");
++ }
++ if(rsbac_reg_unregister_syscall(syscall_registration_handle))
++ {
++ rsbac_printk(KERN_ERR "RSBAC REG decision module sample 2: Unregistering syscall failed - beware of possible system failure!\n");
++ }
++ if(rsbac_reg_unregister(handle))
++ {
++ rsbac_printk(KERN_ERR "RSBAC REG decision module sample 2: Unregistering module failed - beware of possible system failure!\n");
++ }
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 2: Unloaded.\n");
++}
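The on-disk format produced by write_info() above (and expected by read_info()) is nothing more than a version word followed by the three counters. As a reading aid, the same layout can be written down as a single record; note that the code above deliberately reads and writes each field separately, which sidesteps any structure padding between the u_int version and the u_long counters. The struct below is a sketch for documentation only, not a drop-in replacement.

/* Sketch only: the persistent record that read_info()/write_info() above
 * serialize field by field (FILE_VERSION first, then the three counters). */
struct regsmp2_record {
        u_int version;                   /* must equal FILE_VERSION (1) */
        u_long nr_request_calls;
        u_long nr_set_attr_calls;
        u_long nr_need_overwrite_calls;
};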
+diff --git a/rsbac/adf/reg/reg_sample3.c b/rsbac/adf/reg/reg_sample3.c
+new file mode 100644
+index 0000000..2fef23d
+--- /dev/null
++++ b/rsbac/adf/reg/reg_sample3.c
+@@ -0,0 +1,369 @@
++/*
++ * RSBAC REG decision module sample
++ *
++ * Author and (c) 1999-2009 Amon Ott <ao@rsbac.org>
++ */
++
++/* general stuff */
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++/* for file access */
++#include <linux/fs.h>
++#include <linux/seq_file.h>
++#include <asm/uaccess.h>
++/* rsbac */
++#include <rsbac/types.h>
++#include <rsbac/reg.h>
++#include <rsbac/adf.h>
++#include <rsbac/aci.h>
++#include <rsbac/lists.h>
++#include <rsbac/getname.h>
++#include <rsbac/error.h>
++#include <rsbac/proc_fs.h>
++
++static u_long nr_request_calls = 0;
++#define ORD_request 1
++static u_long nr_set_attr_calls = 0;
++#define ORD_set_attr 2
++static u_long nr_need_overwrite_calls = 0;
++#define ORD_overwrite 3
++static u_long nr_write_calls = 0;
++#define ORD_write 4
++static u_long nr_system_calls = 0;
++#define ORD_syscall 5
++static void * system_call_arg = 0;
++
++MODULE_AUTHOR("Amon Ott");
++MODULE_DESCRIPTION("RSBAC REG sample decision module 3");
++MODULE_LICENSE("GPL");
++
++static char * name = NULL;
++static char * syscall_name = NULL;
++static u_int listkey = 133457;
++static long handle = 133457;
++static long syscall_registration_handle = 754331;
++static long syscall_dispatcher_handle = 3;
++
++module_param(name, charp, 0000);
++MODULE_PARM_DESC(name, "Name");
++module_param(syscall_name, charp, 0000);
++MODULE_PARM_DESC(syscall_name, "Syscall name");
++module_param(listkey, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
++MODULE_PARM_DESC(listkey, "List key");
++module_param(handle, long, S_IRUSR);
++MODULE_PARM_DESC(handle, "Handle");
++module_param(syscall_registration_handle, long, S_IRUSR);
++MODULE_PARM_DESC(syscall_registration_handle, "Syscall registration handle");
++module_param(syscall_dispatcher_handle, long, S_IRUSR);
++MODULE_PARM_DESC(syscall_dispatcher_handle, "Syscall dispatcher handle");
++
++/* Filename for persistent data in /rsbac dir of ROOT_DEV (max 7 chars) */
++#define FILENAME "regsmp3"
++
++/* Version number for on disk data structures */
++#define LIST_VERSION 1
++
++static rsbac_list_handle_t list_handle;
++
++/* PROC functions */
++
++#if defined(CONFIG_RSBAC_PROC)
++#define PROC_NAME "reg_sample3"
++static struct proc_dir_entry * reg_sample_proc_p;
++
++static int
++reg_sample_proc_show(struct seq_file *m, void *v)
++{
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_is_initialized())
++ return -ENOSYS;
++
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++ seq_puts(m, "RSBAC REG decision module sample 3\n----------------------------------\n");
++ seq_printf(m, "%lu calls to request function.\n",
++ nr_request_calls);
++ seq_printf(m, "%lu calls to set_attr function.\n",
++ nr_set_attr_calls);
++ seq_printf(m, "%lu calls to need_overwrite function.\n",
++ nr_need_overwrite_calls);
++ seq_printf(m, "%lu calls to write function.\n",
++ nr_write_calls);
++ seq_printf(m, "%lu calls to system_call function %lu, last arg was %p.\n",
++ nr_system_calls,
++ syscall_dispatcher_handle,
++ system_call_arg);
++ seq_printf(m, "%li list items.\n",
++ rsbac_list_count(list_handle));
++ return 0;
++}
++
++static int reg_sample_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, reg_sample_proc_show, NULL);
++}
++
++static const struct file_operations reg_sample_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = reg_sample_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++#endif /* CONFIG_RSBAC_PROC */
++
++/**** List helper functions ****/
++
++static int compare(void * desc1, void * desc2)
++ {
++ return memcmp((u_int *) desc1, (u_int *) desc2, sizeof(u_int) );
++ }
++
++/*
++static rsbac_list_conv_function_t * get_conv(rsbac_version_t version)
++ {
++ return compare;
++ }
++*/
++
++/**** Decision Functions ****/
++
++static int request_func ( enum rsbac_adf_request_t request,
++ rsbac_pid_t owner_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ /* count call, but not for SEARCH request */
++ if(request != R_SEARCH)
++ {
++ __u32 ord = ORD_request;
++
++ nr_request_calls++;
++ rsbac_list_add(list_handle, &ord, &nr_request_calls);
++ }
++ return GRANTED;
++ }
++
++static int set_attr_func ( enum rsbac_adf_request_t request,
++ rsbac_pid_t owner_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_target_t new_target,
++ union rsbac_target_id_t new_tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ __u32 ord = ORD_set_attr;
++
++ /* count call, but not for SEARCH request */
++ if(request != R_SEARCH)
++ {
++ nr_set_attr_calls++;
++ rsbac_list_add(list_handle, &ord, &nr_set_attr_calls);
++ }
++ return 0;
++ }
++
++static rsbac_boolean_t need_overwrite_func (struct dentry * dentry_p)
++ {
++ __u32 ord = ORD_overwrite;
++
++ nr_need_overwrite_calls++;
++ rsbac_list_add(list_handle, &ord, &nr_need_overwrite_calls);
++ return FALSE;
++ }
++
++static int write_func(rsbac_boolean_t need_lock)
++ {
++ __u32 ord = ORD_write;
++
++ nr_write_calls++;
++ rsbac_list_add(list_handle, &ord, &nr_write_calls);
++ return(0);
++ }
++
++static int syscall_func (void * arg)
++ {
++ __u32 ord = ORD_syscall;
++
++ nr_system_calls++;
++ system_call_arg = arg;
++ rsbac_list_add(list_handle, &ord, &nr_system_calls);
++ return nr_system_calls;
++ }
++
++/**** Init ****/
++
++int init_module(void)
++{
++ struct rsbac_reg_entry_t entry;
++ struct rsbac_reg_syscall_entry_t syscall_entry;
++ struct rsbac_list_info_t list_info;
++ __u32 ord;
++
++ if(!listkey)
++ listkey = 133457;
++ if(!handle)
++ handle = 133457;
++ if(!syscall_registration_handle)
++ syscall_registration_handle = 754331;
++ if(!syscall_dispatcher_handle)
++ syscall_dispatcher_handle = 3;
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 3: Initializing.\n");
++
++ /* clearing registration entries */
++ memset(&entry, 0, sizeof(entry));
++ memset(&syscall_entry, 0, sizeof(syscall_entry));
++ /* Register a generic list */
++ list_info.version = LIST_VERSION;
++ list_info.key = listkey;
++ list_info.desc_size = sizeof(__u32);
++ list_info.data_size = sizeof(nr_request_calls);
++ list_info.max_age = 3600; /* 1h */
++ if(rsbac_list_register(RSBAC_LIST_VERSION,
++ &list_handle,
++ &list_info,
++ RSBAC_LIST_PERSIST | RSBAC_LIST_BACKUP,
++ compare,
++ NULL,
++ NULL,
++ FILENAME,
++ 0))
++ {
++ rsbac_printk(KERN_WARNING "RSBAC REG decision module sample 3: Registering list failed. Unloading.\n");
++ return -ENOEXEC;
++ }
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 3: List Version: %u, Name: %s, Handle: %p, Key: %u\n",
++ RSBAC_LIST_VERSION, FILENAME, list_handle, listkey);
++ ord = ORD_request;
++ if(rsbac_list_exist(list_handle, &ord))
++ rsbac_list_get_data(list_handle, &ord, &nr_request_calls);
++ ord = ORD_set_attr;
++ if(rsbac_list_exist(list_handle, &ord))
++ rsbac_list_get_data(list_handle, &ord, &nr_set_attr_calls);
++ ord = ORD_overwrite;
++ if(rsbac_list_exist(list_handle, &ord))
++ rsbac_list_get_data(list_handle, &ord, &nr_need_overwrite_calls);
++ ord = ORD_write;
++ if(rsbac_list_exist(list_handle, &ord))
++ rsbac_list_get_data(list_handle, &ord, &nr_write_calls);
++ ord = ORD_syscall;
++ if(rsbac_list_exist(list_handle, &ord))
++ rsbac_list_get_data(list_handle, &ord, &nr_system_calls);
++
++ /* Register to ADF */
++ if(name)
++ {
++ strncpy(entry.name, name, RSBAC_REG_NAME_LEN);
++ entry.name[RSBAC_REG_NAME_LEN] = 0;
++ }
++ else
++ strcpy(entry.name, "RSBAC REG sample 3 ADF module");
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 3: REG Version: %u, Name: %s, Handle: %li\n",
++ RSBAC_REG_VERSION, entry.name, handle);
++
++ entry.handle = handle;
++ entry.request_func = request_func;
++ entry.set_attr_func = set_attr_func;
++ entry.need_overwrite_func = need_overwrite_func;
++ entry.write_func = write_func;
++ entry.switch_on = TRUE;
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 3: Registering to ADF.\n");
++ if(rsbac_reg_register(RSBAC_REG_VERSION, entry) < 0)
++ {
++ rsbac_printk(KERN_WARNING "RSBAC REG decision module sample 3: Registering failed. Unloading.\n");
++ if(rsbac_list_detach(&list_handle, listkey))
++ rsbac_printk(KERN_WARNING "RSBAC REG decision module sample 3: Unregistering list failed - beware!\n");
++ return -ENOEXEC;
++ }
++
++ if(syscall_name)
++ {
++ strncpy(syscall_entry.name, syscall_name, RSBAC_REG_NAME_LEN);
++ syscall_entry.name[RSBAC_REG_NAME_LEN] = 0;
++ }
++ else
++ strcpy(syscall_entry.name, "RSBAC REG sample 3 syscall");
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 3: REG Version: %u, Name: %s, Dispatcher Handle: %li\n",
++ RSBAC_REG_VERSION, syscall_entry.name, syscall_dispatcher_handle);
++
++ syscall_entry.registration_handle = syscall_registration_handle;
++ syscall_entry.dispatcher_handle = syscall_dispatcher_handle;
++ syscall_entry.syscall_func = syscall_func;
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 3: Registering syscall.\n");
++ syscall_registration_handle = rsbac_reg_register_syscall(RSBAC_REG_VERSION, syscall_entry);
++ if(syscall_registration_handle < 0)
++ {
++ rsbac_printk(KERN_WARNING "RSBAC REG decision module sample 3: Registering syscall failed. Unloading.\n");
++ if(rsbac_reg_unregister(handle))
++ {
++ rsbac_printk(KERN_ERR "RSBAC REG decision module sample 3: Unregistering failed - beware of possible system failure!\n");
++ }
++ if(rsbac_list_detach(&list_handle, listkey))
++ rsbac_printk(KERN_WARNING "RSBAC REG decision module sample 3: Unregistering list failed - beware!\n");
++ return -ENOEXEC;
++ }
++
++ #if defined(CONFIG_RSBAC_PROC)
++ reg_sample_proc_p = proc_create(PROC_NAME, S_IFREG | S_IRUGO, proc_rsbac_root_p, &reg_sample_proc_fops);
++ if(!reg_sample_proc_p)
++ {
++ rsbac_printk(KERN_WARNING "%s: Not loaded due to failed proc entry registering.\n", name);
++ if(rsbac_reg_unregister_syscall(syscall_registration_handle))
++ {
++ rsbac_printk(KERN_ERR "RSBAC REG decision module sample 3: Unregistering syscall failed - beware of possible system failure!\n");
++ }
++ if(rsbac_reg_unregister(handle))
++ {
++ rsbac_printk(KERN_ERR "RSBAC REG decision module sample 3: Unregistering from ADF failed - beware of possible system failure!\n");
++ }
++ if(rsbac_list_detach(&list_handle, listkey))
++ rsbac_printk(KERN_WARNING "RSBAC REG decision module sample 3: Unregistering list failed - beware!\n");
++ return -ENOEXEC;
++ }
++ #endif
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 3: Loaded.\n");
++
++ return 0;
++}
++
++void cleanup_module(void)
++{
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 3: Unregistering.\n");
++ #if defined(CONFIG_RSBAC_PROC)
++ remove_proc_entry(PROC_NAME, proc_rsbac_root_p);
++ #endif
++ if(rsbac_reg_unregister_syscall(syscall_registration_handle))
++ {
++ rsbac_printk(KERN_ERR "RSBAC REG decision module sample 3: Unregistering syscall failed - beware of possible system failure!\n");
++ }
++ if(rsbac_reg_unregister(handle))
++ {
++ rsbac_printk(KERN_ERR "RSBAC REG decision module sample 3: Unregistering module failed - beware of possible system failure!\n");
++ }
++ if(rsbac_list_detach(&list_handle, listkey))
++ rsbac_printk(KERN_WARNING "RSBAC REG decision module sample 3: Unregistering list failed - beware!\n");
++ rsbac_printk(KERN_INFO "RSBAC REG decision module sample 3: Unloaded.\n");
++}
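A note on the compare() helper registered above: it orders the __u32 ORD_* descriptors with memcmp(), which gives a consistent total order (sufficient for equality lookups) but not numeric order on little-endian machines. Whether numeric ordering matters depends on the rsbac_list internals; if it does, an explicit comparator along the following lines would be the safer choice. This is a sketch, not part of the patch.

/* Sketch of an explicitly numeric comparator for the __u32 descriptors used
 * above; same signature as the compare() passed to rsbac_list_register(). */
static int compare_numeric(void *desc1, void *desc2)
{
        __u32 a = *(__u32 *) desc1;
        __u32 b = *(__u32 *) desc2;

        if (a < b)
                return -1;
        if (a > b)
                return 1;
        return 0;
}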
+diff --git a/rsbac/adf/reg/root_plug.c b/rsbac/adf/reg/root_plug.c
+new file mode 100644
+index 0000000..4679d5f
+--- /dev/null
++++ b/rsbac/adf/reg/root_plug.c
+@@ -0,0 +1,138 @@
++/*
++ * RSBAC REG decision module root_plug.
++ *
++ * Originally written for a Linux Journal as LSM sample module.
++ * Rewritten for RSBAC by Michal Purzynski <albeiro@rsbac.org>
++ *
++ * Copyright (C) 2002 Greg Kroah-Hartman <greg@kroah.com>
++ *
++ * Prevents any program from running with egid == 0 if a specific USB device
++ * is not present in the system. Yes, it can be worked around, but it is a
++ * nice starting point for people to play with and to learn the LSM interface.
++ *
++ * See http://www.linuxjournal.com/article.php?sid=6279 for more information about this code.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the License.
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/fs.h>
++#include <linux/usb.h>
++#include <rsbac/types.h>
++#include <rsbac/reg.h>
++#include <rsbac/adf.h>
++#include <rsbac/aci.h>
++#include <rsbac/getname.h>
++#include <rsbac/error.h>
++#include <rsbac/proc_fs.h>
++#include <linux/usb.h>
++#include <linux/moduleparam.h>
++
++MODULE_AUTHOR("Michal Purzynski");
++MODULE_DESCRIPTION("RSBAC REG root_plug decision module");
++MODULE_LICENSE("GPL");
++
++#ifdef CONFIG_USB
++/* default is a generic type of usb to serial converter */
++static int vendor_id = 0x0557;
++static int product_id = 0x2008;
++
++module_param(vendor_id, uint, 0400);
++module_param(product_id, uint, 0400);
++#endif
++
++static long handle = 999999;
++
++/**** Decision Functions ****/
++
++static int request_func (enum rsbac_adf_request_t request,
++ rsbac_pid_t owner_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++{
++ struct usb_device *dev = NULL;
++#ifdef CONFIG_USB
++ dev = usb_find_device(vendor_id, product_id);
++#endif
++
++ if (!dev) {
++
++ switch (request) {
++ case R_CHANGE_OWNER:
++ case R_CHANGE_GROUP:
++ case R_CLONE:
++ switch (target) {
++ case T_PROCESS:
++ switch (attr) {
++ case A_owner:
++ switch (attr_val.owner) {
++ case 0:
++ return NOT_GRANTED;
++ default:
++ return DO_NOT_CARE;
++ }
++ default:
++ return DO_NOT_CARE;
++ }
++ default:
++ return DO_NOT_CARE;
++ }
++ default:
++ return DO_NOT_CARE;
++ }
++ }
++
++ return DO_NOT_CARE;
++}
++
++/**** Init ****/
++
++int init_module(void)
++{
++ struct rsbac_reg_entry_t entry;
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module root_plug: Initializing.\n");
++
++ /* clearing registration entries */
++ memset(&entry, 0, sizeof(entry));
++
++ strcpy(entry.name, "RSBAC REG root_plug ADF module");
++ rsbac_printk(KERN_INFO "RSBAC REG decision module root_plug: REG Version: %u, Name: %s, Handle: %li\n",
++ RSBAC_REG_VERSION, entry.name, handle);
++
++ entry.handle = handle;
++ entry.request_func = request_func;
++ entry.switch_on = TRUE;
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module root_plug: Registering to ADF.\n");
++
++ if(rsbac_reg_register(RSBAC_REG_VERSION, entry) < 0) {
++		rsbac_printk(KERN_WARNING "RSBAC REG decision module root_plug: Registering failed. Unloading.\n");
++ return -ENOEXEC;
++ }
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module root_plug: Loaded.\n");
++
++ return 0;
++}
++
++void cleanup_module(void)
++{
++ rsbac_printk(KERN_INFO "RSBAC REG decision module root_plug: Unregistering.\n");
++
++ if(rsbac_reg_unregister(handle))
++ {
++ rsbac_printk(KERN_ERR "RSBAC REG decision module root_plug: Unregistering failed - beware of possible system failure!\n");
++ }
++
++ rsbac_printk(KERN_INFO "RSBAC REG decision module root_plug: Unloaded.\n");
++}
++
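The nested switch in root_plug's request_func() above implements a single rule: when the configured USB device cannot be found, refuse CHANGE_OWNER, CHANGE_GROUP and CLONE on a process whenever the new owner attribute is root (uid 0); in every other case the module does not care. Purely as an illustration of that logic, not as a replacement for the code above, the same decision can be written as one guard:

/* Sketch: the decision logic of request_func() above, flattened. 'dev' is
 * the result of the usb_find_device() probe shown in the original code. */
static enum rsbac_adf_req_ret_t root_plug_decide(struct usb_device *dev,
                                      enum rsbac_adf_request_t request,
                                      enum rsbac_target_t target,
                                      enum rsbac_attribute_t attr,
                                      union rsbac_attribute_value_t attr_val)
{
        if (dev)
                return DO_NOT_CARE;     /* authorization device is present */

        if ((request == R_CHANGE_OWNER ||
             request == R_CHANGE_GROUP ||
             request == R_CLONE)
            && target == T_PROCESS
            && attr == A_owner
            && attr_val.owner == 0)
                return NOT_GRANTED;     /* no device: block transitions to root */

        return DO_NOT_CARE;
}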
+diff --git a/rsbac/adf/res/Makefile b/rsbac/adf/res/Makefile
+new file mode 100644
+index 0000000..bcf8e21
+--- /dev/null
++++ b/rsbac/adf/res/Makefile
+@@ -0,0 +1,10 @@
++#
++# File: rsbac/adf/res/Makefile
++#
++# Makefile for the Linux rsbac RES decision module.
++#
++# Author and (c) 1999-2012 Amon Ott <ao@rsbac.org>
++#
++
++# 2.6.x
++obj-y := res_main.o
+diff --git a/rsbac/adf/res/res_main.c b/rsbac/adf/res/res_main.c
+new file mode 100644
+index 0000000..61beada
+--- /dev/null
++++ b/rsbac/adf/res/res_main.c
+@@ -0,0 +1,430 @@
++/**************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of the Access Control Decision */
++/* Facility (ADF) - System Resources (RES) */
++/* File: rsbac/adf/res/main.c */
++/* */
++/* Author and (c) 2002-2009: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 14/Jan/2009 */
++/**************************************************** */
++
++#include <linux/string.h>
++#include <linux/version.h>
++#include <rsbac/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++#include <rsbac/debug.h>
++
++/************************************************* */
++/* Global Variables */
++/************************************************* */
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++/************************************************* */
++/* Externally visible functions */
++/************************************************* */
++
++enum rsbac_adf_req_ret_t
++ rsbac_adf_request_res (enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ switch (request)
++ {
++ case R_MODIFY_ATTRIBUTE:
++ switch(attr)
++ {
++ case A_system_role:
++ case A_res_role:
++ case A_res_min:
++ case A_res_max:
++ /* All attributes (remove target!) */
++ case A_none:
++ /* Security Officer? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_RES,
++ T_USER,
++ i_tid,
++ A_res_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_res()", A_res_role);
++ return(NOT_GRANTED);
++ }
++ /* if sec_officer, then grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ case R_READ_ATTRIBUTE:
++ switch(attr)
++ {
++ case A_system_role:
++ case A_res_role:
++ case A_res_min:
++ case A_res_max:
++ /* All attributes (remove target!) */
++ case A_none:
++ /* Security Officer or Admin? */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_RES,
++ T_USER,
++ i_tid,
++ A_res_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_res()", A_res_role);
++ return(NOT_GRANTED);
++ }
++ /* if sec_officer, then grant */
++ if( (i_attr_val1.system_role == SR_security_officer)
++ || (i_attr_val1.system_role == SR_administrator)
++ )
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ default:
++ return(DO_NOT_CARE);
++ }
++
++ case R_SWITCH_LOG:
++ switch(target)
++ {
++ case T_NONE:
++ /* test owner's res_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_RES,
++ T_USER,
++ i_tid,
++ A_res_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_res()", A_res_role);
++ return(NOT_GRANTED);
++ }
++ /* security officer? -> grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ /* all other cases are unknown */
++ default: return(DO_NOT_CARE);
++ }
++
++ case R_SWITCH_MODULE:
++ switch(target)
++ {
++ case T_NONE:
++ /* we need the switch_target */
++ if(attr != A_switch_target)
++ return NOT_GRANTED;
++ /* do not care for other modules */
++ if( (attr_val.switch_target != SW_RES)
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ && (attr_val.switch_target != SW_SOFTMODE)
++ #endif
++ #ifdef CONFIG_RSBAC_FREEZE
++ && (attr_val.switch_target != SW_FREEZE)
++ #endif
++ )
++ return(DO_NOT_CARE);
++ /* test owner's res_role */
++ i_tid.user = owner;
++ if (rsbac_get_attr(SW_RES,
++ T_USER,
++ i_tid,
++ A_res_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_request_res()", A_res_role);
++ return(NOT_GRANTED);
++ }
++ /* security officer? -> grant */
++ if (i_attr_val1.system_role == SR_security_officer)
++ return(GRANTED);
++ else
++ return(NOT_GRANTED);
++
++ /* all other cases are unknown */
++ default: return(DO_NOT_CARE);
++ }
++
++
++/*********************/
++ default: return DO_NOT_CARE;
++ }
++
++ return DO_NOT_CARE;
++ } /* end of rsbac_adf_request_res() */
++
++
++/*****************************************************************************/
++/* If the request returned granted and the operation is performed, */
++/* the following function can be called by the AEF to get all aci set */
++/* correctly. For write accesses that are performed fully within the kernel, */
++/* this is usually not done to prevent extra calls, including R_CLOSE for */
++/* cleaning up. */
++/* The second instance of target specification is the new target, if one has */
++/* been created, otherwise its values are ignored. */
++/* On success, 0 is returned, and an error from rsbac/error.h otherwise. */
++
++int rsbac_adf_set_attr_res(
++ enum rsbac_adf_request_t request,
++ rsbac_pid_t caller_pid,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_target_t new_target,
++ union rsbac_target_id_t new_tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t attr_val,
++ rsbac_uid_t owner)
++ {
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ switch (request)
++ {
++ case R_CHANGE_OWNER:
++ switch(target)
++ {
++ case T_PROCESS:
++ if(attr != A_owner)
++ return(-RSBAC_EINVALIDATTR);
++ /* Adjust Linux resources */
++ i_tid.user = attr_val.owner;
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RES]
++#endif
++ )
++#endif
++ {
++ int maxval = rsbac_min(RLIM_NLIMITS - 1, RSBAC_RES_MAX);
++ int i;
++
++ if (rsbac_get_attr(SW_RES,
++ T_USER,
++ i_tid,
++ A_res_max,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr_res()", A_res_max);
++ return -RSBAC_EREADFAILED;
++ }
++ for(i = 0; i <= maxval ; i++)
++ {
++ if(i_attr_val1.res_array[i])
++ {
++ task_lock(current->group_leader);
++ if(current->signal->rlim[i].rlim_max > i_attr_val1.res_array[i])
++ current->signal->rlim[i].rlim_max = i_attr_val1.res_array[i];
++ if(current->signal->rlim[i].rlim_cur > i_attr_val1.res_array[i])
++ current->signal->rlim[i].rlim_cur = i_attr_val1.res_array[i];
++ task_unlock(current->group_leader);
++ }
++ }
++ if (rsbac_get_attr(SW_RES,
++ T_USER,
++ i_tid,
++ A_res_min,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr_res()", A_res_min);
++ return -RSBAC_EREADFAILED;
++ }
++ if(i_attr_val1.res_array[RLIMIT_NOFILE] > sysctl_nr_open)
++ i_attr_val1.res_array[RLIMIT_NOFILE] = sysctl_nr_open;
++ for(i = 0; i <= maxval ; i++)
++ {
++ if(i_attr_val1.res_array[i])
++ {
++ task_lock(current->group_leader);
++ if(current->signal->rlim[i].rlim_max < i_attr_val1.res_array[i])
++ current->signal->rlim[i].rlim_max = i_attr_val1.res_array[i];
++ if(current->signal->rlim[i].rlim_cur < i_attr_val1.res_array[i])
++ current->signal->rlim[i].rlim_cur = i_attr_val1.res_array[i];
++ task_unlock(current->group_leader);
++ }
++ }
++ }
++ return 0;
++
++ /* all other cases are unknown */
++ default:
++ return(0);
++ }
++ break;
++
++ case R_EXECUTE:
++ switch(target)
++ {
++ case T_FILE:
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RES]
++#endif
++ )
++#endif
++ {
++ int maxval = rsbac_min(RLIM_NLIMITS - 1, RSBAC_RES_MAX);
++ int i;
++
++ if (rsbac_get_attr(SW_RES,
++ target,
++ tid,
++ A_res_max,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr_res()", A_res_max);
++ return -RSBAC_EREADFAILED;
++ }
++ for(i = 0; i <= maxval ; i++)
++ {
++ if(i_attr_val1.res_array[i])
++ {
++ task_lock(current->group_leader);
++ if(current->signal->rlim[i].rlim_max > i_attr_val1.res_array[i])
++ current->signal->rlim[i].rlim_max = i_attr_val1.res_array[i];
++ if(current->signal->rlim[i].rlim_cur > i_attr_val1.res_array[i])
++ current->signal->rlim[i].rlim_cur = i_attr_val1.res_array[i];
++ task_unlock(current->group_leader);
++ }
++ }
++ if (rsbac_get_attr(SW_RES,
++ target,
++ tid,
++ A_res_min,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr_res()", A_res_min);
++ return -RSBAC_EREADFAILED;
++ }
++ for(i = 0; i <= maxval ; i++)
++ {
++ if(i_attr_val1.res_array[i])
++ {
++ task_lock(current->group_leader);
++ if(current->signal->rlim[i].rlim_max < i_attr_val1.res_array[i])
++ current->signal->rlim[i].rlim_max = i_attr_val1.res_array[i];
++ if(current->signal->rlim[i].rlim_cur < i_attr_val1.res_array[i])
++ current->signal->rlim[i].rlim_cur = i_attr_val1.res_array[i];
++ task_unlock(current->group_leader);
++ }
++ }
++ }
++ return 0;
++
++ /* all other cases are unknown */
++ default:
++ return 0;
++ }
++ break;
++
++ case R_MODIFY_SYSTEM_DATA:
++ if ( (target == T_SCD)
++ && (tid.scd == ST_rlimit)
++ )
++ {
++ /* Adjust Linux resources */
++ i_tid.user = owner;
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_RES]
++#endif
++ )
++#endif
++ {
++ int maxval = rsbac_min(RLIM_NLIMITS - 1, RSBAC_RES_MAX);
++ int i;
++
++ if (rsbac_get_attr(SW_RES,
++ T_USER,
++ i_tid,
++ A_res_max,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr_res()", A_res_max);
++ return -RSBAC_EREADFAILED;
++ }
++ for(i = 0; i <= maxval ; i++)
++ {
++ if(i_attr_val1.res_array[i])
++ {
++ task_lock(current->group_leader);
++ if(current->signal->rlim[i].rlim_max > i_attr_val1.res_array[i])
++ current->signal->rlim[i].rlim_max = i_attr_val1.res_array[i];
++ if(current->signal->rlim[i].rlim_cur > i_attr_val1.res_array[i])
++ current->signal->rlim[i].rlim_cur = i_attr_val1.res_array[i];
++ task_unlock(current->group_leader);
++ }
++ }
++ if (rsbac_get_attr(SW_RES,
++ T_USER,
++ i_tid,
++ A_res_min,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_ds_get_error("rsbac_adf_set_attr_res()", A_res_min);
++ return -RSBAC_EREADFAILED;
++ }
++ if(i_attr_val1.res_array[RLIMIT_NOFILE] > sysctl_nr_open)
++ i_attr_val1.res_array[RLIMIT_NOFILE] = sysctl_nr_open;
++ for(i = 0; i <= maxval ; i++)
++ {
++ if(i_attr_val1.res_array[i])
++ {
++ task_lock(current->group_leader);
++ if(current->signal->rlim[i].rlim_max < i_attr_val1.res_array[i])
++ current->signal->rlim[i].rlim_max = i_attr_val1.res_array[i];
++ if(current->signal->rlim[i].rlim_cur < i_attr_val1.res_array[i])
++ current->signal->rlim[i].rlim_cur = i_attr_val1.res_array[i];
++ task_unlock(current->group_leader);
++ }
++ }
++ }
++ return 0;
++ }
++ break;
++
++/*********************/
++ default: return 0;
++ }
++
++ return 0;
++ } /* end of rsbac_adf_set_attr_res() */
++
++/* end of rsbac/adf/res/res_main.c */
+diff --git a/rsbac/data_structures/Makefile b/rsbac/data_structures/Makefile
+new file mode 100644
+index 0000000..d1c2076
+--- /dev/null
++++ b/rsbac/data_structures/Makefile
+@@ -0,0 +1,19 @@
++#
++# File: rsbac/data_structures/Makefile
++#
++# Makefile for the RSBAC data structures.
++#
++# Author and (c) 1999-2012 Amon Ott <ao@rsbac.org>
++#
++
++obj-y += aci_data_structures.o gen_lists.o
++
++# Adding policy dependent data structures
++
++obj-$(CONFIG_RSBAC_MAC) += mac_data_structures.o
++obj-$(CONFIG_RSBAC_PM) += pm_data_structures.o
++obj-$(CONFIG_RSBAC_RC) += rc_data_structures.o
++obj-$(CONFIG_RSBAC_AUTH) += auth_data_structures.o
++obj-$(CONFIG_RSBAC_ACL) += acl_data_structures.o
++obj-$(CONFIG_RSBAC_UM) += um_data_structures.o
++
+diff --git a/rsbac/data_structures/aci_data_structures.c b/rsbac/data_structures/aci_data_structures.c
+new file mode 100644
+index 0000000..f8117fa
+--- /dev/null
++++ b/rsbac/data_structures/aci_data_structures.c
+@@ -0,0 +1,14459 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of ACI data structures */
++/* Author and (c) 1999-2012: Amon Ott <ao@rsbac.org> */
++/* (some smaller parts copied from fs/namei.c */
++/* and others) */
++/* */
++/* Last modified: 19/Apr/2012 */
++/*************************************************** */
++
++#include <linux/types.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/fs_struct.h>
++#include <linux/mount.h>
++#include <linux/sched.h>
++#include <linux/quotaops.h>
++#include <linux/proc_fs.h>
++#include <linux/msdos_fs.h>
++#include <linux/iso_fs.h>
++#include <linux/nfs_fs.h>
++#include <linux/ext2_fs.h>
++#include <linux/kthread.h>
++#include <linux/coda.h>
++#include <linux/initrd.h>
++#include <linux/security.h>
++#include <linux/syscalls.h>
++#include <linux/srcu.h>
++#include <linux/seq_file.h>
++#include <linux/magic.h>
++#include <linux/dnotify.h>
++#include <linux/fsnotify.h>
++#include <linux/mm.h>
++#include <linux/blkdev.h>
++#include <linux/freezer.h>
++#include <net/net_namespace.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/file.h>
++#include <linux/spinlock.h>
++#include <asm/uaccess.h>
++#include <asm/atomic.h>
++#include <rsbac/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/aci_data_structures.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/fs.h>
++#include <rsbac/getname.h>
++#include <rsbac/net_getname.h>
++#include <rsbac/adf.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/reg.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/gen_lists.h>
++#include <rsbac/jail.h>
++#include <linux/string.h>
++#include <linux/kdev_t.h>
++#include "../../fs/mount.h"
++
++#define FUSE_SUPER_MAGIC 0x65735546
++#define CEPH_SUPER_MAGIC 0x00c36400
++
++#ifdef CONFIG_RSBAC_MAC
++#include <rsbac/mac.h>
++#endif
++
++#ifdef CONFIG_RSBAC_PM
++#include <rsbac/pm.h>
++#endif
++
++#ifdef CONFIG_RSBAC_DAZ
++#include <rsbac/daz.h>
++#endif
++
++#if defined(CONFIG_RSBAC_RC)
++#include <rsbac/rc.h>
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH)
++#include <rsbac/auth.h>
++#endif
++
++#if defined(CONFIG_RSBAC_ACL)
++#include <rsbac/acl.h>
++#endif
++
++#if defined(CONFIG_RSBAC_JAIL)
++rsbac_jail_id_t rsbac_jail_syslog_jail_id = 0;
++#endif
++
++#if defined(CONFIG_RSBAC_PAX) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
++#include <rsbac/pax.h>
++#endif
++
++#ifdef CONFIG_RSBAC_UM
++#include <rsbac/um.h>
++#endif
++
++#if defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)
++#include <linux/unistd.h>
++#include <linux/timer.h>
++static u_int auto_interval = CONFIG_RSBAC_AUTO_WRITE * HZ;
++#endif /* CONFIG_RSBAC_AUTO_WRITE */
++
++#if (defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)) \
++ || defined(CONFIG_RSBAC_INIT_THREAD)
++static DECLARE_WAIT_QUEUE_HEAD(rsbacd_wait);
++static struct timer_list rsbac_timer;
++#endif
++
++#if defined(CONFIG_RSBAC_NET_OBJ)
++#include <rsbac/network.h>
++#endif
++
++/************************************************************************** */
++/* Global Variables */
++/************************************************************************** */
++
++/* The following global variables are needed for access to ACI data. */
++
++#if defined(CONFIG_RSBAC_REG)
++EXPORT_SYMBOL(rsbac_initialized);
++#endif
++rsbac_boolean_t rsbac_initialized = FALSE;
++
++static rsbac_boolean_t rsbac_allow_mounts = FALSE;
++
++static char compiled_modules[80];
++
++kdev_t rsbac_root_dev;
++#ifdef CONFIG_RSBAC_INIT_DELAY
++struct vfsmount * rsbac_root_vfsmount_p = NULL;
++#endif
++#if defined(CONFIG_RSBAC_REG)
++EXPORT_SYMBOL(rsbac_root_dev);
++#endif
++DEFINE_SEMAPHORE(rsbac_write_sem);
++
++static struct rsbac_device_list_head_t * device_head_p[RSBAC_NR_DEVICE_LISTS];
++static spinlock_t device_list_locks[RSBAC_NR_DEVICE_LISTS];
++static struct srcu_struct device_list_srcu[RSBAC_NR_DEVICE_LISTS];
++static struct lock_class_key device_list_lock_class;
++
++#ifdef CONFIG_RSBAC_FD_CACHE
++static rsbac_list_handle_t fd_cache_handle[SW_NONE];
++#ifdef CONFIG_RSBAC_XSTATS
++static __u64 fd_cache_hits[SW_NONE];
++static __u64 fd_cache_misses[SW_NONE];
++static u_int fd_cache_invalidates;
++static u_int fd_cache_invalidate_alls;
++#endif
++#endif
++
++#ifdef CONFIG_RSBAC_XSTATS
++__u64 syscall_count[RSYS_none];
++#endif
++
++static struct rsbac_dev_handles_t dev_handles;
++static struct rsbac_dev_handles_t dev_major_handles;
++static struct rsbac_ipc_handles_t ipc_handles;
++static struct rsbac_user_handles_t user_handles;
++#ifdef CONFIG_RSBAC_RC_UM_PROT
++static struct rsbac_group_handles_t group_handles;
++#endif
++static struct rsbac_process_handles_t process_handles;
++
++#ifdef CONFIG_RSBAC_NET_DEV
++static struct rsbac_netdev_handles_t netdev_handles;
++#endif
++#ifdef CONFIG_RSBAC_NET_OBJ
++static rsbac_list_handle_t net_temp_handle;
++static struct rsbac_nettemp_handles_t nettemp_handles;
++#if defined(CONFIG_RSBAC_MAC) || defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_RC)
++static struct rsbac_lnetobj_handles_t lnetobj_handles;
++static struct rsbac_rnetobj_handles_t rnetobj_handles;
++#endif
++#if defined(CONFIG_RSBAC_IND_NETOBJ_LOG)
++static struct rsbac_gen_netobj_aci_t def_gen_netobj_aci =
++ DEFAULT_GEN_NETOBJ_ACI;
++#endif
++#endif
++
++/* Default ACIs: implemented as variables, might be changeable some time */
++
++/* rsbac root dir items, end of recursive inherit */
++static struct rsbac_gen_fd_aci_t def_gen_root_dir_aci =
++ DEFAULT_GEN_ROOT_DIR_ACI;
++static struct rsbac_gen_fd_aci_t def_gen_fd_aci = DEFAULT_GEN_FD_ACI;
++
++#if defined(CONFIG_RSBAC_MAC)
++static struct rsbac_mac_fd_aci_t def_mac_root_dir_aci =
++ DEFAULT_MAC_ROOT_DIR_ACI;
++static struct rsbac_mac_fd_aci_t def_mac_fd_aci = DEFAULT_MAC_FD_ACI;
++#endif
++#if defined(CONFIG_RSBAC_DAZ)
++static struct rsbac_daz_fd_aci_t def_daz_root_dir_aci = DEFAULT_DAZ_ROOT_DIR_ACI;
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++static rsbac_time_t rsbac_daz_ttl = CONFIG_RSBAC_DAZ_TTL;
++#endif
++#endif
++#if defined(CONFIG_RSBAC_PM)
++static struct rsbac_pm_fd_aci_t def_pm_fd_aci = DEFAULT_PM_FD_ACI;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++static struct rsbac_rc_fd_aci_t def_rc_root_dir_aci =
++ DEFAULT_RC_ROOT_DIR_ACI;
++static struct rsbac_rc_fd_aci_t def_rc_fd_aci = DEFAULT_RC_FD_ACI;
++#endif
++#if defined(CONFIG_RSBAC_RES)
++static struct rsbac_res_fd_aci_t def_res_fd_aci = DEFAULT_RES_FD_ACI;
++#endif
++
++#if defined(CONFIG_RSBAC_PROC)
++#include <rsbac/proc_fs.h>
++
++#ifdef CONFIG_RSBAC_XSTATS
++static __u64 get_attr_count[T_NONE] = { 0, 0, 0, 0, 0, 0, 0 };
++static __u64 set_attr_count[T_NONE] = { 0, 0, 0, 0, 0, 0, 0 };
++static __u64 remove_count[T_NONE] = { 0, 0, 0, 0, 0, 0, 0 };
++static __u64 get_parent_count = 0;
++#endif
++
++#if defined(CONFIG_RSBAC_REG)
++EXPORT_SYMBOL(proc_rsbac_root_p);
++#endif
++struct proc_dir_entry *proc_rsbac_root_p = NULL;
++
++#if defined(CONFIG_RSBAC_REG)
++EXPORT_SYMBOL(proc_rsbac_backup_p);
++#endif
++struct proc_dir_entry *proc_rsbac_backup_p = NULL;
++
++#endif /* PROC */
++
++#ifdef CONFIG_DEVFS_MOUNT
++#include <linux/devfs_fs_kernel.h>
++#endif
++
++static struct rsbac_mount_list_t * rsbac_mount_list = NULL;
++
++#ifdef CONFIG_RSBAC_MAC
++static struct rsbac_mac_process_aci_t mac_init_p_aci =
++ DEFAULT_MAC_P_INIT_ACI;
++#endif
++#ifdef CONFIG_RSBAC_RC
++static struct rsbac_rc_process_aci_t rc_kernel_p_aci =
++ DEFAULT_RC_P_KERNEL_ACI;
++#endif
++
++static kdev_t umount_device_in_progress = RSBAC_AUTO_DEV;
++
++static struct kmem_cache * device_item_slab = NULL;
++
++/**************************************************/
++/* Declarations of internal functions */
++/**************************************************/
++
++static struct rsbac_device_list_item_t *lookup_device(kdev_t kdev, u_int hash);
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++static u_int gen_nr_fd_hashes = RSBAC_LIST_MIN_MAX_HASHES;
++static u_int gen_nr_p_hashes = 1;
++
++#if defined(CONFIG_RSBAC_MAC)
++static u_int mac_nr_fd_hashes = RSBAC_LIST_MIN_MAX_HASHES;
++static u_int mac_nr_p_hashes = 1;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++static u_int pm_nr_fd_hashes = RSBAC_LIST_MIN_MAX_HASHES;
++#endif
++#if defined(CONFIG_RSBAC_DAZ)
++static u_int daz_nr_fd_hashes = RSBAC_LIST_MIN_MAX_HASHES;
++
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++static u_int daz_scanned_nr_fd_hashes = RSBAC_LIST_MIN_MAX_HASHES;
++#endif
++#endif
++#if defined(CONFIG_RSBAC_FF)
++static u_int ff_nr_fd_hashes = RSBAC_LIST_MIN_MAX_HASHES;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++static u_int rc_nr_fd_hashes = RSBAC_LIST_MIN_MAX_HASHES;
++static u_int rc_nr_p_hashes = 1;
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++static u_int auth_nr_fd_hashes = RSBAC_LIST_MIN_MAX_HASHES;
++#endif
++#if defined(CONFIG_RSBAC_CAP)
++static u_int cap_nr_fd_hashes = RSBAC_LIST_MIN_MAX_HASHES;
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++static u_int jail_nr_p_hashes = 1;
++#endif
++#if defined(CONFIG_RSBAC_PAX)
++static u_int pax_nr_fd_hashes = RSBAC_LIST_MIN_MAX_HASHES;
++#endif
++#if defined(CONFIG_RSBAC_RES)
++static u_int res_nr_fd_hashes = RSBAC_LIST_MIN_MAX_HASHES;
++#endif
++
++static inline u_int device_hash(kdev_t id)
++{
++ return id & (RSBAC_NR_DEVICE_LISTS - 1);
++}
++
++/* These helper functions do NOT handle data consistency protection via    */
++/* rw-spinlocks! That is done exclusively by the non-internal functions!   */
++
++/************************************************************************** */
++/* Read/Write functions */
++
++/* This helper function protects some filesystems from being written to    */
++/* and disables writing in certain situations, e.g. in interrupt context   */
++
++rsbac_boolean_t rsbac_writable(struct super_block * sb_p)
++{
++#ifdef CONFIG_RSBAC_NO_WRITE
++ return FALSE;
++#else
++ if (!sb_p || !sb_p->s_dev)
++ return FALSE;
++ if (rsbac_debug_no_write || (sb_p->s_flags & MS_RDONLY)
++ || in_interrupt())
++ return FALSE;
++ if (!MAJOR(sb_p->s_dev)
++#ifndef CONFIG_RSBAC_MSDOS_WRITE
++ || (sb_p->s_magic == MSDOS_SUPER_MAGIC)
++#endif
++ || (sb_p->s_magic == SOCKFS_MAGIC)
++ || (sb_p->s_magic == PIPEFS_MAGIC)
++ || (sb_p->s_magic == SYSFS_MAGIC)
++ || (sb_p->s_magic == NFS_SUPER_MAGIC)
++ || (sb_p->s_magic == CODA_SUPER_MAGIC)
++ || (sb_p->s_magic == NCP_SUPER_MAGIC)
++ || (sb_p->s_magic == SMB_SUPER_MAGIC)
++ || (sb_p->s_magic == ISOFS_SUPER_MAGIC)
++ || (sb_p->s_magic == OCFS2_SUPER_MAGIC)
++ || (sb_p->s_magic == FUSE_SUPER_MAGIC)
++ || (sb_p->s_magic == CEPH_SUPER_MAGIC))
++ return FALSE;
++ else
++ return TRUE;
++#endif
++}
++
++/* This lookup function ensures correct access to the file system. It      */
++/* passes back (via dir_dentry_pp) the dentry of the rsbac directory on the */
++/* mounted device specified by kdev and returns 0 or an error code. If the */
++/* directory does not exist, it is created, provided create_dir == TRUE    */
++/* and the filesystem is writable.                                         */
++
++static int lookup_aci_path_dentry(struct vfsmount *vfsmount_p,
++ struct dentry **dir_dentry_pp,
++ rsbac_boolean_t create_dir, kdev_t kdev)
++{
++ struct dentry *dir_dentry_p = NULL;
++ struct dentry *root_dentry_p = NULL;
++ int err = 0;
++ struct rsbac_device_list_item_t *device_p;
++ u_int hash;
++ int srcu_idx;
++
++ if (!dir_dentry_pp)
++ return -RSBAC_EINVALIDPOINTER;
++
++ if (!vfsmount_p) {
++ vfsmount_p = rsbac_get_vfsmount(kdev);
++ if (!vfsmount_p) {
++ rsbac_printk(KERN_WARNING "lookup_aci_path_dentry(): invalid device %02u:%02u\n",
++ RSBAC_MAJOR(kdev), RSBAC_MINOR(kdev));
++ return -RSBAC_EINVALIDDEV;
++ }
++ }
++
++ /* pipefs and sockfs must not be read from */
++ if ((vfsmount_p->mnt_sb->s_magic == PIPEFS_MAGIC)
++ || (vfsmount_p->mnt_sb->s_magic == SOCKFS_MAGIC)
++ ) {
++ return -RSBAC_ENOTFOUND;
++ }
++ hash = device_hash(kdev);
++
++ srcu_idx = srcu_read_lock(&device_list_srcu[hash]);
++ device_p = lookup_device(kdev, hash);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "lookup_aci_path_dentry(): No entry for device %02u:%02u\n",
++ MAJOR(kdev), MINOR(kdev));
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ /* already looked up earlier? */
++ if (device_p->rsbac_dir_dentry_p) {
++ *dir_dentry_pp = device_p->rsbac_dir_dentry_p;
++ spin_lock(&device_p->rsbac_dir_dentry_p->d_lock);
++ rsbac_pr_debug(ds, "device_p->rsbac_dir_dentry_p->d_count "
++ "for device %02u:%02u is %i!\n",
++ MAJOR(vfsmount_p->mnt_sb->s_dev), MINOR(vfsmount_p->mnt_sb->s_dev),
++ device_p->rsbac_dir_dentry_p->d_count);
++ spin_unlock(&device_p->rsbac_dir_dentry_p->d_lock);
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ return 0;
++ }
++ /* Must unlock here for the lookup */
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ rsbac_pr_debug(ds, "first time lookup for or non-existing %s on device "
++ "%02u:%02u!\n", RSBAC_ACI_PATH,
++ MAJOR(vfsmount_p->mnt_sb->s_dev), MINOR(vfsmount_p->mnt_sb->s_dev));
++ if (!vfsmount_p->mnt_sb->s_root) {
++ rsbac_printk(KERN_WARNING "lookup_aci_path_dentry(): Super_block for device %02u:%02u has no root dentry!\n",
++ MAJOR(vfsmount_p->mnt_sb->s_dev), MINOR(vfsmount_p->mnt_sb->s_dev));
++ err = -RSBAC_EINVALIDDEV;
++ goto out;
++ }
++
++ if (!vfsmount_p->mnt_sb->s_root->d_inode) {
++ rsbac_printk(KERN_WARNING "lookup_aci_path_dentry(): Super_block for device %02u:%02u has no root dentry->d_inode!\n",
++ MAJOR(vfsmount_p->mnt_sb->s_dev), MINOR(vfsmount_p->mnt_sb->s_dev));
++ err = -RSBAC_EINVALIDDEV;
++ goto out;
++ }
++
++ /* lookup dentry of ACI_PATH on this device */
++ spin_lock(&vfsmount_p->mnt_sb->s_root->d_lock);
++ rsbac_pr_debug(ds, "lookup rsbac path %s for device %02u:%02u, "
++ "sb_p->s_root->d_count is %i!\n",
++ RSBAC_ACI_PATH, MAJOR(vfsmount_p->mnt_sb->s_dev),
++ MINOR(vfsmount_p->mnt_sb->s_dev),
++ vfsmount_p->mnt_sb->s_root->d_count);
++ spin_unlock(&vfsmount_p->mnt_sb->s_root->d_lock);
++
++ mutex_lock_nested(&vfsmount_p->mnt_sb->s_root->d_inode->i_mutex, I_MUTEX_XATTR);
++ dir_dentry_p =
++ rsbac_lookup_one_len(RSBAC_ACI_PATH, vfsmount_p->mnt_sb->s_root,
++ strlen(RSBAC_ACI_PATH));
++ mutex_unlock(&vfsmount_p->mnt_sb->s_root->d_inode->i_mutex);
++
++ if (IS_ERR(dir_dentry_p))
++ switch (PTR_ERR(dir_dentry_p)) {
++ case -ENOENT:
++ case -ENOTDIR:
++ err = -RSBAC_ENOTFOUND;
++ goto out;
++ case -ENOMEM:
++ rsbac_printk(KERN_WARNING "lookup_aci_path_dentry(): memory allocation error!\n");
++ err = -RSBAC_ENOROOTDIR;
++ goto out;
++ case -ENAMETOOLONG:
++ rsbac_printk(KERN_WARNING "lookup_aci_path_dentry(): ACI_PATH too long on fs!\n");
++ err = -RSBAC_EPATHTOOLONG;
++ goto out;
++ case -EACCES:
++ rsbac_printk(KERN_WARNING "lookup_aci_path_dentry(): No access to ACI_PATH!\n");
++ err = -RSBAC_EACCESS;
++ goto out;
++ default:
++ rsbac_printk(KERN_WARNING "lookup_aci_path_dentry(): Error on root dir: %li!\n",
++ PTR_ERR(dir_dentry_p));
++ err = -RSBAC_ENOROOTDIR;
++ goto out;
++ }
++
++ if (!dir_dentry_p) {
++ rsbac_printk(KERN_WARNING "lookup_aci_path_dentry(): rsbac_lookup_(dentry|one) returned null pointer!\n");
++ err = -RSBAC_EINVALIDPOINTER;
++ goto out;
++ }
++ if (!dir_dentry_p->d_inode) { /* dir could not be found -> try to create it */
++ /* but only, if allowed... */
++ if (!create_dir) {
++ err = -RSBAC_ENOTFOUND;
++ goto out_dir_dput;
++ }
++ rsbac_pr_debug(ds, "try to create dir, first test writable!\n");
++ /* ... and writable. */
++ if (!rsbac_writable(vfsmount_p->mnt_sb)) { /* mounted read only or special case */
++ err = -RSBAC_ENOTWRITABLE;
++ goto out_dir_dput;
++ }
++ root_dentry_p = lock_parent(dir_dentry_p);
++ err = PTR_ERR(root_dentry_p);
++ if (IS_ERR(root_dentry_p)) {
++ err = -RSBAC_ECOULDNOTCREATEPATH;
++ goto out_dir_dput;
++ }
++ if (!root_dentry_p->d_inode
++ || !root_dentry_p->d_inode->i_op
++ || !root_dentry_p->d_inode->i_op->mkdir) {
++ unlock_dir(root_dentry_p);
++ err = -RSBAC_ECOULDNOTCREATEPATH;
++ goto out_dir_dput;
++ }
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)
++ dquot_initialize(root_dentry_p->d_inode);
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
++ vfs_dq_init(root_dentry_p->d_inode);
++#else
++ DQUOT_INIT(root_dentry_p->d_inode);
++#endif
++ err =
++ root_dentry_p->d_inode->i_op->mkdir(root_dentry_p->
++ d_inode,
++ dir_dentry_p,
++ RSBAC_ACI_DIR_MODE);
++ unlock_dir(root_dentry_p);
++ if (err) {
++ err = -RSBAC_ECOULDNOTCREATEPATH;
++ goto out_dir_dput;
++ }
++ } else { /* was found */
++ /* check, whether this is a dir */
++ if (!S_ISDIR(dir_dentry_p->d_inode->i_mode)) { /* no dir! We have a real prob here! */
++ rsbac_printk(KERN_WARNING "lookup_aci_path_dentry(): supposed /%s dir on dev %02u:%02u is no dir!\n",
++ RSBAC_ACI_PATH,
++ MAJOR(vfsmount_p->mnt_sb->s_dev),
++ MINOR(vfsmount_p->mnt_sb->s_dev));
++ err = -RSBAC_EACCESS;
++ goto out_dir_dput;
++ }
++ }
++ spin_lock(&dir_dentry_p->d_lock);
++ rsbac_pr_debug(ds, "dir_dentry_p->d_count is %i!\n",
++ dir_dentry_p->d_count);
++ spin_unlock(&dir_dentry_p->d_lock);
++ spin_lock(&vfsmount_p->mnt_sb->s_root->d_lock);
++ rsbac_pr_debug(ds, "vfsmount_p->mnt_sb->s_root->d_count is now %i!\n",
++ vfsmount_p->mnt_sb->s_root->d_count);
++ spin_unlock(&vfsmount_p->mnt_sb->s_root->d_lock);
++ /* we want to keep dir_dentry_p in device_item */
++ /* dput must be done in remove_device_item! */
++ *dir_dentry_pp = dir_dentry_p;
++
++ /* Must lock and relookup device_p to cache result */
++ srcu_idx = srcu_read_lock(&device_list_srcu[hash]);
++ device_p = lookup_device(kdev, hash);
++ if (device_p && !device_p->rsbac_dir_dentry_p) {
++ device_p->rsbac_dir_dentry_p = dir_dentry_p;
++ device_p->rsbac_dir_inode = dir_dentry_p->d_inode->i_ino;
++ }
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++
++ out:
++ return err;
++
++ out_dir_dput:
++ dput(dir_dentry_p);
++ goto out;
++}
++
++/************************************************************************** */
++/* The lookup functions return NULL if the item is not found, and a        */
++/* pointer to the item otherwise. */
++
++/* First, a lookup for the device list item */
++
++static struct rsbac_device_list_item_t *lookup_device(kdev_t kdev, u_int hash)
++{
++ struct rsbac_device_list_item_t *curr = rcu_dereference(device_head_p[hash])->curr;
++
++ /* if there is no current item or it is not the right one, search... */
++ if (!(curr && (MAJOR(curr->id) == MAJOR(kdev))
++ && (MINOR(curr->id) == MINOR(kdev))))
++ {
++ curr = rcu_dereference(device_head_p[hash])->head;
++ while (curr
++ && ((RSBAC_MAJOR(curr->id) != RSBAC_MAJOR(kdev))
++ || (RSBAC_MINOR(curr->id) != RSBAC_MINOR(kdev))
++ )
++ ) {
++ curr = curr->next;
++ }
++ if (curr)
++ rcu_dereference(device_head_p[hash])->curr = curr;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++#ifdef CONFIG_RSBAC_FD_CACHE
++u_int hash_fd_cache(void * desc, __u32 nr_hashes)
++{
++ return ( ((struct rsbac_fd_cache_desc_t *) desc)->inode & (nr_hashes - 1) );
++}
++#endif
++
++static int dev_compare(void *desc1, void *desc2)
++{
++ int result;
++ struct rsbac_dev_desc_t *i_desc1 = desc1;
++ struct rsbac_dev_desc_t *i_desc2 = desc2;
++
++ result = memcmp(&i_desc1->type,
++ &i_desc2->type, sizeof(i_desc1->type));
++ if (result)
++ return result;
++ result = memcmp(&i_desc1->major,
++ &i_desc2->major, sizeof(i_desc1->major));
++ if (result)
++ return result;
++ return memcmp(&i_desc1->minor,
++ &i_desc2->minor, sizeof(i_desc1->minor));
++}
++
++#ifdef CONFIG_RSBAC_RC
++static int dev_major_compare(void *desc1, void *desc2)
++{
++ int result;
++ struct rsbac_dev_desc_t *i_desc1 = desc1;
++ struct rsbac_dev_desc_t *i_desc2 = desc2;
++
++ result = memcmp(&i_desc1->type,
++ &i_desc2->type, sizeof(i_desc1->type));
++ if (result)
++ return result;
++ return memcmp(&i_desc1->major,
++ &i_desc2->major, sizeof(i_desc1->major));
++}
++#endif
++
++static int ipc_compare(void *desc1, void *desc2)
++{
++ int result;
++ struct rsbac_ipc_t *i_desc1 = desc1;
++ struct rsbac_ipc_t *i_desc2 = desc2;
++
++ result = memcmp(&i_desc1->type,
++ &i_desc2->type, sizeof(i_desc1->type));
++ if (result)
++ return result;
++ else
++ return memcmp(&i_desc1->id.id_nr,
++ &i_desc2->id.id_nr,
++ sizeof(i_desc1->id.id_nr));
++}
++
++#ifdef CONFIG_RSBAC_NET_DEV
++#if defined(CONFIG_RSBAC_IND_NETDEV_LOG) || defined(CONFIG_RSBAC_RC)
++static int netdev_compare(void *desc1, void *desc2)
++{
++ return strncmp(desc1, desc2, RSBAC_IFNAMSIZ);
++}
++#endif
++#endif
++
++/************************************************************************** */
++/* Convert functions */
++
++static int gen_fd_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_gen_fd_aci_t *new_aci = new_data;
++ struct rsbac_gen_fd_old_aci_t *old_aci = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_inode_nr_t));
++ new_aci->log_array_low = old_aci->log_array_low;
++ new_aci->log_array_high = old_aci->log_array_high;
++ new_aci->log_program_based = old_aci->log_program_based;
++ new_aci->symlink_add_remote_ip = old_aci->symlink_add_remote_ip;
++ new_aci->symlink_add_uid = old_aci->symlink_add_uid;
++ new_aci->symlink_add_mac_level = old_aci->symlink_add_mac_level;
++ new_aci->symlink_add_rc_role = old_aci->symlink_add_rc_role;
++ new_aci->linux_dac_disable = old_aci->linux_dac_disable;
++ new_aci->fake_root_uid = old_aci->fake_root_uid;
++ new_aci->auid_exempt = old_aci->auid_exempt;
++ new_aci->vset = RSBAC_UM_VIRTUAL_KEEP;
++ return 0;
++}
++
++static int gen_fd_old_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_gen_fd_aci_t *new_aci = new_data;
++ struct rsbac_gen_fd_old_old_aci_t *old_aci = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_inode_nr_t));
++ new_aci->log_array_low = old_aci->log_array_low;
++ new_aci->log_array_high = old_aci->log_array_high;
++ new_aci->log_program_based = old_aci->log_program_based;
++ new_aci->symlink_add_remote_ip = 0;
++ new_aci->symlink_add_uid = old_aci->symlink_add_uid;
++ new_aci->symlink_add_mac_level = old_aci->symlink_add_mac_level;
++ new_aci->symlink_add_rc_role = old_aci->symlink_add_rc_role;
++ new_aci->linux_dac_disable = old_aci->linux_dac_disable;
++ new_aci->fake_root_uid = old_aci->fake_root_uid;
++ new_aci->auid_exempt = old_aci->auid_exempt;
++ new_aci->vset = RSBAC_UM_VIRTUAL_KEEP;
++ return 0;
++}
++
++static rsbac_list_conv_function_t *gen_fd_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_GEN_FD_OLD_ACI_VERSION:
++ return gen_fd_conv;
++ case RSBAC_GEN_FD_OLD_OLD_ACI_VERSION:
++ return gen_fd_old_conv;
++ default:
++ return NULL;
++ }
++}
++
++static int gen_dev_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_dev_desc_t *new = new_desc;
++ struct rsbac_dev_t *old = old_desc;
++
++ memcpy(new_data, old_data, sizeof(struct rsbac_gen_dev_aci_t));
++ new->type = old->type;
++ new->major = RSBAC_MAJOR(old->id);
++ new->minor = RSBAC_MINOR(old->id);
++ return 0;
++}
++
++static rsbac_list_conv_function_t *gen_dev_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_GEN_DEV_OLD_ACI_VERSION:
++ return gen_dev_conv;
++ default:
++ return NULL;
++ }
++}
++
++static int gen_user_conv(void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ *((rsbac_uid_t *)new_desc) = *((rsbac_old_uid_t *)old_desc);
++ memcpy(new_data, old_data, sizeof(struct rsbac_gen_user_aci_t));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *gen_user_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_GEN_USER_OLD_ACI_VERSION:
++ return gen_user_conv;
++ default:
++ return NULL;
++ }
++}
++
++#ifdef CONFIG_RSBAC_MAC
++static int mac_old_fd_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_mac_fd_aci_t *new_aci = new_data;
++ struct rsbac_mac_fd_old_aci_t *old_aci = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_inode_nr_t));
++ new_aci->sec_level = old_aci->sec_level;
++ new_aci->mac_categories = old_aci->mac_categories;
++ new_aci->mac_auto = old_aci->mac_auto;
++ new_aci->mac_prop_trusted = old_aci->mac_prop_trusted;
++ new_aci->mac_file_flags = old_aci->mac_file_flags;
++ return 0;
++}
++
++static int mac_old_old_fd_conv(void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ struct rsbac_mac_fd_aci_t *new_aci = new_data;
++ struct rsbac_mac_fd_old_old_aci_t *old_aci = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_inode_nr_t));
++ new_aci->sec_level = old_aci->sec_level;
++ new_aci->mac_categories = old_aci->mac_categories;
++ new_aci->mac_auto = old_aci->mac_auto;
++ new_aci->mac_prop_trusted = FALSE;
++ if (old_aci->mac_shared)
++ new_aci->mac_file_flags = MAC_write_up;
++ else
++ new_aci->mac_file_flags = 0;
++ return 0;
++}
++
++static int mac_old_old_old_fd_conv(void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ struct rsbac_mac_fd_aci_t *new_aci = new_data;
++ struct rsbac_mac_fd_old_old_old_aci_t *old_aci = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_inode_nr_t));
++ new_aci->sec_level = old_aci->sec_level;
++ new_aci->mac_categories = old_aci->mac_categories;
++ new_aci->mac_auto = old_aci->mac_auto;
++ new_aci->mac_prop_trusted = FALSE;
++ new_aci->mac_file_flags = 0;
++ return 0;
++}
++
++static rsbac_list_conv_function_t *mac_fd_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_MAC_FD_OLD_ACI_VERSION:
++ return mac_old_fd_conv;
++ case RSBAC_MAC_FD_OLD_OLD_ACI_VERSION:
++ return mac_old_old_fd_conv;
++ case RSBAC_MAC_FD_OLD_OLD_OLD_ACI_VERSION:
++ return mac_old_old_old_fd_conv;
++ default:
++ return NULL;
++ }
++}
++
++static int mac_dev_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_dev_desc_t *new = new_desc;
++ struct rsbac_dev_t *old = old_desc;
++
++ memcpy(new_data, old_data, sizeof(struct rsbac_mac_dev_aci_t));
++ new->type = old->type;
++ new->major = RSBAC_MAJOR(old->id);
++ new->minor = RSBAC_MINOR(old->id);
++ return 0;
++}
++
++static rsbac_list_conv_function_t *mac_dev_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_MAC_DEV_OLD_ACI_VERSION:
++ return mac_dev_conv;
++ default:
++ return NULL;
++ }
++}
++
++static int mac_user_conv(void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ *((rsbac_uid_t *)new_desc) = *((rsbac_old_uid_t *)old_desc);
++ memcpy(new_data, old_data, sizeof(struct rsbac_mac_user_aci_t));
++ return 0;
++}
++
++static int mac_old_user_conv(void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ struct rsbac_mac_user_aci_t *new_aci = new_data;
++ struct rsbac_mac_user_old_aci_t *old_aci = old_data;
++
++ *((rsbac_uid_t *)new_desc) = *((rsbac_old_uid_t *)old_desc);
++ new_aci->security_level = old_aci->access_appr;
++ new_aci->initial_security_level = old_aci->access_appr;
++ new_aci->min_security_level = old_aci->min_access_appr;
++ new_aci->mac_categories = old_aci->mac_categories;
++ new_aci->mac_initial_categories = old_aci->mac_categories;
++ new_aci->mac_min_categories = old_aci->mac_min_categories;
++ new_aci->system_role = old_aci->system_role;
++ new_aci->mac_user_flags = RSBAC_MAC_DEF_U_FLAGS;
++ if (old_aci->mac_allow_auto)
++ new_aci->mac_user_flags |= MAC_allow_auto;
++ return 0;
++}
++
++static int mac_old_old_user_conv(void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ struct rsbac_mac_user_aci_t *new_aci = new_data;
++ struct rsbac_mac_user_old_old_aci_t *old_aci = old_data;
++
++ *((rsbac_uid_t *)new_desc) = *((rsbac_old_uid_t *)old_desc);
++ new_aci->security_level = old_aci->access_appr;
++ new_aci->initial_security_level = old_aci->access_appr;
++ new_aci->min_security_level = old_aci->min_access_appr;
++ new_aci->mac_categories = old_aci->mac_categories;
++ new_aci->mac_initial_categories = old_aci->mac_categories;
++ new_aci->mac_min_categories = old_aci->mac_min_categories;
++ new_aci->system_role = old_aci->system_role;
++ new_aci->mac_user_flags = RSBAC_MAC_DEF_U_FLAGS;
++ return 0;
++}
++
++static int mac_old_old_old_user_conv(void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ struct rsbac_mac_user_aci_t *new_aci = new_data;
++ struct rsbac_mac_user_old_old_old_aci_t *old_aci = old_data;
++
++ *((rsbac_uid_t *)new_desc) = *((rsbac_old_uid_t *)old_desc);
++ new_aci->security_level = old_aci->access_appr;
++ new_aci->initial_security_level = old_aci->access_appr;
++ new_aci->min_security_level = SL_unclassified;
++ new_aci->mac_categories = old_aci->mac_categories;
++ new_aci->mac_initial_categories = old_aci->mac_categories;
++ new_aci->mac_min_categories = RSBAC_MAC_MIN_CAT_VECTOR;
++ new_aci->system_role = old_aci->system_role;
++ new_aci->mac_user_flags = RSBAC_MAC_DEF_U_FLAGS;
++ return 0;
++}
++
++static rsbac_list_conv_function_t *mac_user_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_MAC_USER_OLD_ACI_VERSION:
++ return mac_user_conv;
++ case RSBAC_MAC_USER_OLD_OLD_ACI_VERSION:
++ return mac_old_user_conv;
++ case RSBAC_MAC_USER_OLD_OLD_OLD_ACI_VERSION:
++ return mac_old_old_user_conv;
++ case RSBAC_MAC_USER_OLD_OLD_OLD_OLD_ACI_VERSION:
++ return mac_old_old_old_user_conv;
++ default:
++ return NULL;
++ }
++}
++#endif
++
++#ifdef CONFIG_RSBAC_PM
++static int pm_dev_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_dev_desc_t *new = new_desc;
++ struct rsbac_dev_t *old = old_desc;
++
++ memcpy(new_data, old_data, sizeof(struct rsbac_pm_dev_aci_t));
++ new->type = old->type;
++ new->major = RSBAC_MAJOR(old->id);
++ new->minor = RSBAC_MINOR(old->id);
++ return 0;
++}
++
++static rsbac_list_conv_function_t *pm_dev_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_PM_DEV_OLD_ACI_VERSION:
++ return pm_dev_conv;
++ default:
++ return NULL;
++ }
++}
++static int pm_user_conv(void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ *((rsbac_uid_t *)new_desc) = *((rsbac_old_uid_t *)old_desc);
++ memcpy(new_data, old_data, sizeof(struct rsbac_pm_user_aci_t));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *pm_user_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_PM_USER_OLD_ACI_VERSION:
++ return pm_user_conv;
++ default:
++ return NULL;
++ }
++}
++#endif
++
++#ifdef CONFIG_RSBAC_DAZ
++static int daz_old_fd_conv(
++ void * old_desc,
++ void * old_data,
++ void * new_desc,
++ void * new_data)
++ {
++ struct rsbac_daz_fd_aci_t * new_aci = new_data;
++ struct rsbac_daz_fd_old_aci_t * old_aci = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_inode_nr_t));
++ new_aci->daz_scanner = old_aci->daz_scanner;
++ new_aci->daz_do_scan = DEFAULT_DAZ_FD_DO_SCAN;
++ return 0;
++ }
++
++static rsbac_list_conv_function_t * daz_fd_get_conv(rsbac_version_t old_version)
++ {
++ switch(old_version)
++ {
++ case RSBAC_DAZ_FD_OLD_ACI_VERSION:
++ return daz_old_fd_conv;
++ default:
++ return NULL;
++ }
++ }
++
++static int daz_user_conv(void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ *((rsbac_uid_t *)new_desc) = *((rsbac_old_uid_t *)old_desc);
++ memcpy(new_data, old_data, sizeof(rsbac_system_role_int_t));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *daz_user_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_DAZ_USER_OLD_ACI_VERSION:
++ return daz_user_conv;
++ default:
++ return NULL;
++ }
++}
++#endif
++
++#ifdef CONFIG_RSBAC_FF
++static int ff_user_conv(void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ *((rsbac_uid_t *)new_desc) = *((rsbac_old_uid_t *)old_desc);
++ memcpy(new_data, old_data, sizeof(rsbac_system_role_int_t));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *ff_user_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_FF_USER_OLD_ACI_VERSION:
++ return ff_user_conv;
++ default:
++ return NULL;
++ }
++}
++#endif
++
++#ifdef CONFIG_RSBAC_RC
++static int rc_dev_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_dev_desc_t *new = new_desc;
++ struct rsbac_dev_t *old = old_desc;
++
++ memcpy(new_data, old_data, sizeof(rsbac_rc_type_id_t));
++ new->type = old->type;
++ new->major = RSBAC_MAJOR(old->id);
++ new->minor = RSBAC_MINOR(old->id);
++ return 0;
++}
++
++static rsbac_list_conv_function_t *rc_dev_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_RC_DEV_OLD_ACI_VERSION:
++ return rc_dev_conv;
++ default:
++ return NULL;
++ }
++}
++
++static int rc_user_conv(void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ *((rsbac_uid_t *)new_desc) = *((rsbac_old_uid_t *)old_desc);
++ memcpy(new_data, old_data, sizeof(struct rsbac_rc_user_aci_t));
++ return 0;
++}
++
++static int rc_user_old_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_rc_user_aci_t *new_aci = new_data;
++ rsbac_rc_role_id_t *old_aci = old_data;
++
++ *((rsbac_uid_t *)new_desc) = *((rsbac_old_uid_t *)old_desc);
++ new_aci->rc_role = *old_aci;
++ new_aci->rc_type = RSBAC_RC_GENERAL_TYPE;
++ return 0;
++}
++
++static rsbac_list_conv_function_t *rc_user_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_RC_USER_OLD_ACI_VERSION:
++ return rc_user_conv;
++ case RSBAC_RC_USER_OLD_OLD_ACI_VERSION:
++ return rc_user_old_conv;
++ default:
++ return NULL;
++ }
++}
++#endif
++
++#ifdef CONFIG_RSBAC_AUTH
++static int auth_old_fd_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_auth_fd_aci_t *new_aci = new_data;
++ struct rsbac_auth_fd_old_aci_t *old_aci = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_inode_nr_t));
++ new_aci->auth_may_setuid = old_aci->auth_may_setuid;
++ new_aci->auth_may_set_cap = old_aci->auth_may_set_cap;
++ new_aci->auth_learn = FALSE;
++ return 0;
++}
++
++static rsbac_list_conv_function_t *auth_fd_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_AUTH_FD_OLD_ACI_VERSION:
++ return auth_old_fd_conv;
++ default:
++ return NULL;
++ }
++}
++
++static int auth_user_conv(void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ *((rsbac_uid_t *)new_desc) = *((rsbac_old_uid_t *)old_desc);
++ memcpy(new_data, old_data, sizeof(rsbac_system_role_int_t));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *auth_user_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_AUTH_USER_OLD_ACI_VERSION:
++ return auth_user_conv;
++ default:
++ return NULL;
++ }
++}
++#endif
++
++#ifdef CONFIG_RSBAC_CAP
++static int cap_old_fd_conv(void *old_desc, void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_cap_fd_aci_t *new_aci = new_data;
++ struct rsbac_cap_fd_old_aci_t *old_aci = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_inode_nr_t));
++ new_aci->min_caps.cap[0] = old_aci->min_caps;
++ new_aci->max_caps.cap[0] = old_aci->max_caps;
++ new_aci->min_caps.cap[1] = (__u32) 0;
++ new_aci->max_caps.cap[1] = (__u32) -1;
++ new_aci->cap_ld_env = old_aci->cap_ld_env;
++ return 0;
++}
++
++static int cap_old_old_fd_conv(void *old_desc, void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_cap_fd_aci_t *new_aci = new_data;
++ struct rsbac_cap_fd_old_old_aci_t *old_aci = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_inode_nr_t));
++ new_aci->min_caps.cap[0] = old_aci->min_caps;
++ new_aci->max_caps.cap[0] = old_aci->max_caps;
++ new_aci->min_caps.cap[1] = (__u32) 0;
++ new_aci->max_caps.cap[1] = (__u32) -1;
++ new_aci->cap_ld_env = LD_inherit;
++ return 0;
++}
++
++static rsbac_list_conv_function_t *cap_fd_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_CAP_FD_OLD_OLD_ACI_VERSION:
++ return cap_old_old_fd_conv;
++ case RSBAC_CAP_FD_OLD_ACI_VERSION:
++ return cap_old_fd_conv;
++ default:
++ return NULL;
++ }
++}
++
++static int cap_old_user_conv(void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ struct rsbac_cap_user_aci_t *new_aci = new_data;
++ struct rsbac_cap_user_old_aci_t *old_aci = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_uid_t));
++ new_aci->cap_role = old_aci->cap_role;
++ new_aci->min_caps.cap[0] = old_aci->min_caps;
++ new_aci->max_caps.cap[0] = old_aci->max_caps;
++ new_aci->min_caps.cap[1] = (__u32) 0;
++ new_aci->max_caps.cap[1] = (__u32) -1;
++ new_aci->cap_ld_env = old_aci->cap_ld_env;
++ return 0;
++}
++
++static int cap_old_old_user_conv(void *old_desc, void *old_data, void *new_desc, void *new_data)
++{
++ rsbac_uid_t *new_user = new_desc;
++ rsbac_old_uid_t *old_user = old_desc;
++ struct rsbac_cap_user_aci_t *new_aci = new_data;
++ struct rsbac_cap_user_old_old_aci_t *old_aci = old_data;
++
++ *new_user = RSBAC_GEN_UID(0,*old_user);
++ new_aci->cap_role = old_aci->cap_role;
++ new_aci->min_caps.cap[0] = old_aci->min_caps;
++ new_aci->max_caps.cap[0] = old_aci->max_caps;
++ new_aci->min_caps.cap[1] = (__u32) 0;
++ new_aci->max_caps.cap[1] = (__u32) -1;
++ new_aci->cap_ld_env = old_aci->cap_ld_env;
++ return 0;
++}
++
++static int cap_old_old_old_user_conv(void *old_desc, void *old_data, void *new_desc, void *new_data)
++{
++ rsbac_uid_t *new_user = new_desc;
++ rsbac_old_uid_t *old_user = old_desc;
++ struct rsbac_cap_user_aci_t *new_aci = new_data;
++ struct rsbac_cap_user_old_old_aci_t *old_aci = old_data;
++
++ *new_user = RSBAC_GEN_UID(0,*old_user);
++ new_aci->cap_role = old_aci->cap_role;
++ new_aci->min_caps.cap[0] = old_aci->min_caps;
++ new_aci->max_caps.cap[0] = old_aci->max_caps;
++ new_aci->min_caps.cap[1] = (__u32) 0;
++ new_aci->max_caps.cap[1] = (__u32) -1;
++ new_aci->cap_ld_env = LD_allow;
++ return 0;
++}
++
++static rsbac_list_conv_function_t *cap_user_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_CAP_USER_OLD_ACI_VERSION:
++ return cap_old_user_conv;
++ case RSBAC_CAP_USER_OLD_OLD_ACI_VERSION:
++ return cap_old_old_user_conv;
++ case RSBAC_CAP_USER_OLD_OLD_OLD_ACI_VERSION:
++ return cap_old_old_old_user_conv;
++ default:
++ return NULL;
++ }
++}
++#endif
++
++#ifdef CONFIG_RSBAC_JAIL
++static int jail_user_conv(void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ *((rsbac_uid_t *)new_desc) = *((rsbac_old_uid_t *)old_desc);
++ memcpy(new_data, old_data, sizeof(rsbac_system_role_int_t));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *jail_user_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_JAIL_USER_OLD_ACI_VERSION:
++ return jail_user_conv;
++ default:
++ return NULL;
++ }
++}
++#endif
++
++#ifdef CONFIG_RSBAC_PAX
++static int pax_user_conv(void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ *((rsbac_uid_t *)new_desc) = *((rsbac_old_uid_t *)old_desc);
++ memcpy(new_data, old_data, sizeof(rsbac_system_role_int_t));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *pax_user_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_PAX_USER_OLD_ACI_VERSION:
++ return pax_user_conv;
++ default:
++ return NULL;
++ }
++}
++#endif
++
++#ifdef CONFIG_RSBAC_RES
++static int res_user_conv(void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ *((rsbac_uid_t *)new_desc) = *((rsbac_old_uid_t *)old_desc);
++ memcpy(new_data, old_data, sizeof(struct rsbac_res_user_aci_t));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *res_user_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_RES_USER_OLD_ACI_VERSION:
++ return res_user_conv;
++ default:
++ return NULL;
++ }
++}
++#endif
++
++
++#ifdef CONFIG_RSBAC_NET_OBJ
++static int net_temp_old_conv(void *old_desc, void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_net_temp_data_t *new_aci = new_data;
++ struct rsbac_net_temp_old_data_t *old_aci = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_net_temp_id_t));
++ new_aci->address_family = old_aci->address_family;
++ new_aci->type = old_aci->type;
++ new_aci->protocol = old_aci->protocol;
++ memcpy(new_aci->netdev, old_aci->netdev, sizeof(rsbac_netdev_id_t));
++ memcpy(new_aci->name, old_aci->name, sizeof(new_aci->name));
++ switch(new_aci->address_family) {
++ case AF_INET:
++ new_aci->address.inet.nr_addr = 1;
++ new_aci->address.inet.addr[0] = *((__u32 *) old_aci->address);
++ new_aci->address.inet.valid_bits[0] = old_aci->valid_len;
++ if((old_aci->min_port == 0) && (old_aci->max_port == RSBAC_NET_MAX_PORT))
++ new_aci->ports.nr_ports = 0;
++ else {
++ new_aci->ports.nr_ports = 1;
++ new_aci->ports.ports[0].min = old_aci->min_port;
++ new_aci->ports.ports[0].max = old_aci->max_port;
++ }
++ break;
++ default:
++ memcpy(new_aci->address.other.addr, old_aci->address, sizeof(old_aci->address));
++ new_aci->address.other.valid_len = old_aci->valid_len;
++ new_aci->ports.nr_ports = 0;
++ break;
++ }
++ return 0;
++}
++
++
++static rsbac_list_conv_function_t *net_temp_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_NET_TEMP_OLD_VERSION:
++ return net_temp_old_conv;
++ default:
++ return NULL;
++ }
++}
++#endif
++
++/************************************************************************** */
++/* The add_item() functions add an item to the list, set head.curr to it, */
++/* and return a pointer to the item. */
++/* These functions will NOT check whether there is already an item under the */
++/* same ID! If this happens, the lookup functions will return the old item! */
++/* All list manipulation must be protected by rw-spinlocks to prevent */
++/* inconsistency and undefined behaviour in other concurrent functions. */
++
++/* register_fd_lists() */
++/* register fd lists for device */
++
++static int register_fd_lists(struct rsbac_device_list_item_t *device_p,
++ kdev_t kdev)
++{
++ char *name;
++ int err = 0;
++ int tmperr;
++ struct rsbac_list_info_t *info_p;
++ if (!device_p)
++ return -RSBAC_EINVALIDPOINTER;
++ name = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (!name)
++ return -RSBAC_ENOMEM;
++ info_p = rsbac_kmalloc(sizeof(*info_p));
++ if (!info_p) {
++ rsbac_kfree(name);
++ return -RSBAC_ENOMEM;
++ }
++
++ /* register general lists */
++ {
++ info_p->version = RSBAC_GEN_FD_ACI_VERSION;
++ info_p->key = RSBAC_GEN_FD_ACI_KEY;
++ info_p->desc_size = sizeof(rsbac_inode_nr_t);
++ info_p->data_size =
++ sizeof(struct rsbac_gen_fd_aci_t);
++ info_p->max_age = 0;
++ gen_nr_fd_hashes = RSBAC_GEN_NR_FD_LISTS;
++ tmperr = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->handles.gen,
++ info_p,
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ gen_fd_get_conv,
++ &def_gen_fd_aci,
++ RSBAC_GEN_FD_NAME,
++ kdev,
++ gen_nr_fd_hashes,
++ rsbac_list_hash_fd,
++ RSBAC_GEN_OLD_FD_NAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "register_fd_lists(): registering general list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_GEN_FD_NAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp,
++ tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++ }
++
++#if defined(CONFIG_RSBAC_MAC)
++ {
++ /* register MAC lists */
++ info_p->version = RSBAC_MAC_FD_ACI_VERSION;
++ info_p->key = RSBAC_MAC_FD_ACI_KEY;
++ info_p->desc_size = sizeof(rsbac_inode_nr_t);
++ info_p->data_size =
++ sizeof(struct rsbac_mac_fd_aci_t);
++ info_p->max_age = 0;
++ mac_nr_fd_hashes = RSBAC_MAC_NR_FD_LISTS;
++ tmperr = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->handles.mac,
++ info_p,
++ RSBAC_LIST_PERSIST | (RSBAC_MAJOR(kdev) ? RSBAC_LIST_OWN_SLAB : 0) |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ mac_fd_get_conv,
++ &def_mac_fd_aci,
++ RSBAC_MAC_FD_NAME,
++ kdev,
++ mac_nr_fd_hashes,
++ rsbac_list_hash_fd,
++ RSBAC_MAC_OLD_FD_NAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "register_fd_lists(): registering MAC list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_MAC_FD_NAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp,
++ tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_PM)
++ {
++ /* register PM lists */
++ info_p->version = RSBAC_PM_FD_ACI_VERSION;
++ info_p->key = RSBAC_PM_FD_ACI_KEY;
++ info_p->desc_size = sizeof(rsbac_inode_nr_t);
++ info_p->data_size =
++ sizeof(struct rsbac_pm_fd_aci_t);
++ info_p->max_age = 0;
++ pm_nr_fd_hashes = RSBAC_PM_NR_FD_LISTS;
++ tmperr = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->handles.pm,
++ info_p,
++ RSBAC_LIST_PERSIST | (RSBAC_MAJOR(kdev) ? RSBAC_LIST_OWN_SLAB : 0) |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL, &def_pm_fd_aci,
++ RSBAC_PM_FD_NAME, kdev,
++ pm_nr_fd_hashes,
++ rsbac_list_hash_fd,
++ RSBAC_PM_OLD_FD_NAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "register_fd_lists(): registering PM list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_PM_FD_NAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp,
++ tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_DAZ)
++ {
++ struct rsbac_daz_fd_aci_t def_daz_fd_aci =
++ DEFAULT_DAZ_FD_ACI;
++ /* register DAZ lists */
++ info_p->version = RSBAC_DAZ_FD_ACI_VERSION;
++ info_p->key = RSBAC_DAZ_FD_ACI_KEY;
++ info_p->desc_size = sizeof(rsbac_inode_nr_t);
++ info_p->data_size =
++ sizeof(struct rsbac_daz_fd_aci_t);
++ info_p->max_age = 0;
++ daz_nr_fd_hashes = RSBAC_DAZ_NR_FD_LISTS;
++ tmperr = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->handles.daz,
++ info_p,
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ daz_fd_get_conv,
++ &def_daz_fd_aci,
++ RSBAC_DAZ_FD_NAME, kdev,
++ daz_nr_fd_hashes,
++ rsbac_list_hash_fd,
++ RSBAC_DAZ_OLD_FD_NAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "register_fd_lists(): registering DAZ list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_DAZ_FD_NAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp,
++ tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++ }
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++ {
++ rsbac_daz_scanned_t def_daz_scanned_fd_aci =
++ DEFAULT_DAZ_FD_SCANNED;
++
++ info_p->version = RSBAC_DAZ_SCANNED_FD_ACI_VERSION;
++ info_p->key = RSBAC_DAZ_FD_ACI_KEY;
++ info_p->desc_size = sizeof(rsbac_inode_nr_t);
++ info_p->data_size = sizeof(rsbac_daz_scanned_t);
++ info_p->max_age = 0;
++ daz_scanned_nr_fd_hashes = RSBAC_DAZ_SCANNED_NR_FD_LISTS;
++ tmperr = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->handles.dazs,
++ info_p,
++#ifdef CONFIG_RSBAC_DAZ_PERSIST
++ RSBAC_LIST_PERSIST |
++#endif
++ RSBAC_LIST_DEF_DATA | (RSBAC_MAJOR(kdev) ? RSBAC_LIST_OWN_SLAB : 0) |
++ RSBAC_LIST_AUTO_HASH_RESIZE |
++ RSBAC_LIST_NO_MAX,
++ NULL,
++ NULL,
++ &def_daz_scanned_fd_aci,
++ RSBAC_DAZ_SCANNED_FD_NAME, kdev,
++ daz_scanned_nr_fd_hashes,
++ rsbac_list_hash_fd,
++ RSBAC_DAZ_SCANNED_OLD_FD_NAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "register_fd_lists(): registering DAZ scanned list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_DAZ_SCANNED_FD_NAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp,
++ tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++ }
++#endif
++#endif
++
++#if defined(CONFIG_RSBAC_FF)
++ {
++ rsbac_ff_flags_t def_ff_fd_aci = RSBAC_FF_DEF;
++
++ info_p->version = RSBAC_FF_FD_ACI_VERSION;
++ info_p->key = RSBAC_FF_FD_ACI_KEY;
++ info_p->desc_size = sizeof(rsbac_inode_nr_t);
++ info_p->data_size = sizeof(rsbac_ff_flags_t);
++ info_p->max_age = 0;
++ ff_nr_fd_hashes = RSBAC_FF_NR_FD_LISTS;
++ tmperr = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->handles.ff,
++ info_p,
++ RSBAC_LIST_PERSIST | (RSBAC_MAJOR(kdev) ? RSBAC_LIST_OWN_SLAB : 0) |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL, &def_ff_fd_aci,
++ RSBAC_FF_FD_NAME, kdev,
++ ff_nr_fd_hashes,
++ rsbac_list_hash_fd,
++ RSBAC_FF_OLD_FD_NAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "register_fd_lists(): registering FF list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_FF_FD_NAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp,
++ tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_RC)
++ {
++ info_p->version = RSBAC_RC_FD_ACI_VERSION;
++ info_p->key = RSBAC_RC_FD_ACI_KEY;
++ info_p->desc_size = sizeof(rsbac_inode_nr_t);
++ info_p->data_size =
++ sizeof(struct rsbac_rc_fd_aci_t);
++ info_p->max_age = 0;
++ rc_nr_fd_hashes = RSBAC_RC_NR_FD_LISTS;
++ tmperr = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->handles.rc,
++ info_p,
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL, &def_rc_fd_aci,
++ RSBAC_RC_FD_NAME, kdev,
++ rc_nr_fd_hashes,
++ rsbac_list_hash_fd,
++ RSBAC_RC_OLD_FD_NAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "register_fd_lists(): registering RC list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_RC_FD_NAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp,
++ tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH)
++ {
++ struct rsbac_auth_fd_aci_t def_auth_fd_aci =
++ DEFAULT_AUTH_FD_ACI;
++
++ info_p->version = RSBAC_AUTH_FD_ACI_VERSION;
++ info_p->key = RSBAC_AUTH_FD_ACI_KEY;
++ info_p->desc_size = sizeof(rsbac_inode_nr_t);
++ info_p->data_size =
++ sizeof(struct rsbac_auth_fd_aci_t);
++ info_p->max_age = 0;
++ auth_nr_fd_hashes = RSBAC_AUTH_NR_FD_LISTS;
++ tmperr = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->handles.auth,
++ info_p,
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ auth_fd_get_conv,
++ &def_auth_fd_aci,
++ RSBAC_AUTH_FD_NAME, kdev,
++ auth_nr_fd_hashes,
++ rsbac_list_hash_fd,
++ RSBAC_AUTH_OLD_FD_NAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "register_fd_lists(): registering AUTH list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_AUTH_FD_NAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp,
++ tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_CAP)
++ {
++ struct rsbac_cap_fd_aci_t def_cap_fd_aci = DEFAULT_CAP_FD_ACI;
++
++ info_p->version = RSBAC_CAP_FD_ACI_VERSION;
++ info_p->key = RSBAC_CAP_FD_ACI_KEY;
++ info_p->desc_size = sizeof(rsbac_inode_nr_t);
++ info_p->data_size =
++ sizeof(struct rsbac_cap_fd_aci_t);
++ info_p->max_age = 0;
++ cap_nr_fd_hashes = RSBAC_CAP_NR_FD_LISTS;
++ tmperr = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->handles.cap,
++ info_p,
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ cap_fd_get_conv,
++ &def_cap_fd_aci,
++ RSBAC_CAP_FD_NAME, kdev,
++ cap_nr_fd_hashes,
++ rsbac_list_hash_fd,
++ RSBAC_CAP_OLD_FD_NAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "register_fd_lists(): registering CAP list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_CAP_FD_NAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp,
++ tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_PAX)
++ {
++ rsbac_pax_flags_t def_pax_fd_aci;
++
++#ifdef CONFIG_RSBAC_PAX_DEFAULT
++ def_pax_fd_aci = 0;
++#ifdef CONFIG_RSBAC_PAX_PAGEEXEC
++ def_pax_fd_aci |= PF_PAX_PAGEEXEC;
++#endif
++#ifdef CONFIG_RSBAC_PAX_EMUTRAMP
++ def_pax_fd_aci |= PF_PAX_EMUTRAMP;
++#endif
++#ifdef CONFIG_RSBAC_PAX_MPROTECT
++ def_pax_fd_aci |= PF_PAX_MPROTECT;
++#endif
++#ifdef CONFIG_RSBAC_PAX_RANDMMAP
++ def_pax_fd_aci |= PF_PAX_RANDMMAP;
++#endif
++#ifdef CONFIG_RSBAC_PAX_RANDEXEC
++ def_pax_fd_aci |= PF_PAX_RANDEXEC;
++#endif
++#ifdef CONFIG_RSBAC_PAX_SEGMEXEC
++ def_pax_fd_aci |= PF_PAX_SEGMEXEC;
++#endif
++
++#else
++ def_pax_fd_aci = RSBAC_PAX_DEF_FLAGS;
++#endif
++
++ info_p->version = RSBAC_PAX_FD_ACI_VERSION;
++ info_p->key = RSBAC_PAX_FD_ACI_KEY;
++ info_p->desc_size = sizeof(rsbac_inode_nr_t);
++ info_p->data_size = sizeof(rsbac_pax_flags_t);
++ info_p->max_age = 0;
++ pax_nr_fd_hashes = RSBAC_PAX_NR_FD_LISTS;
++ tmperr = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->handles.pax,
++ info_p,
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL, &def_pax_fd_aci,
++ RSBAC_PAX_FD_NAME, kdev,
++ pax_nr_fd_hashes,
++ rsbac_list_hash_fd,
++ RSBAC_PAX_OLD_FD_NAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "register_fd_lists(): registering PAX list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_PAX_FD_NAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp,
++ tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_RES)
++ {
++ info_p->version = RSBAC_RES_FD_ACI_VERSION;
++ info_p->key = RSBAC_RES_FD_ACI_KEY;
++ info_p->desc_size = sizeof(rsbac_inode_nr_t);
++ info_p->data_size =
++ sizeof(struct rsbac_res_fd_aci_t);
++ info_p->max_age = 0;
++ res_nr_fd_hashes = RSBAC_RES_NR_FD_LISTS;
++ tmperr = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->handles.res,
++ info_p,
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL, &def_res_fd_aci,
++ RSBAC_RES_FD_NAME, kdev,
++ res_nr_fd_hashes,
++ rsbac_list_hash_fd,
++ RSBAC_RES_OLD_FD_NAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "register_fd_lists(): registering RES list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_RES_FD_NAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp,
++ tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++ }
++#endif
++
++ rsbac_kfree(name);
++ rsbac_kfree(info_p);
++ return err;
++}
++
++/* aci_detach_fd_lists() */
++/* detach from fd lists for device */
++
++static int aci_detach_fd_lists(struct rsbac_device_list_item_t *device_p)
++{
++ int err = 0;
++ int tmperr;
++
++ if (!device_p)
++ return -RSBAC_EINVALIDPOINTER;
++
++ /* detach all general lists */
++ tmperr = rsbac_list_detach(&device_p->handles.gen,
++ RSBAC_GEN_FD_ACI_KEY);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "detach_fd_lists(): detaching from general list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_GEN_FD_NAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++
++#if defined(CONFIG_RSBAC_MAC)
++ /* detach all MAC lists */
++ tmperr = rsbac_list_detach(&device_p->handles.mac,
++ RSBAC_MAC_FD_ACI_KEY);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "detach_fd_lists(): detaching from MAC list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_MAC_FD_NAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_PM)
++ /* detach all PM lists */
++ tmperr = rsbac_list_detach(&device_p->handles.pm,
++ RSBAC_PM_FD_ACI_KEY);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "detach_fd_lists(): detaching from PM list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_PM_FD_NAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_DAZ)
++ /* detach all DAZ lists */
++ tmperr = rsbac_list_detach(&device_p->handles.daz,
++ RSBAC_DAZ_FD_ACI_KEY);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "detach_fd_lists(): detaching from DAZ list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_DAZ_FD_NAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++ /* detach all DAZ scanned lists */
++ tmperr = rsbac_list_detach(&device_p->handles.dazs,
++ RSBAC_DAZ_FD_ACI_KEY);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "detach_fd_lists(): detaching from DAZ scanned list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_DAZ_SCANNED_FD_NAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++#endif
++#endif
++
++#if defined(CONFIG_RSBAC_FF)
++ /* detach all FF lists */
++ tmperr = rsbac_list_detach(&device_p->handles.ff,
++ RSBAC_FF_FD_ACI_KEY);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "detach_fd_lists(): detaching from FF list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_FF_FD_NAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_RC)
++ /* detach all RC lists */
++ tmperr = rsbac_list_detach(&device_p->handles.rc,
++ RSBAC_RC_FD_ACI_KEY);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "detach_fd_lists(): detaching from RC list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_RC_FD_NAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH)
++ /* detach all AUTH lists */
++ tmperr = rsbac_list_detach(&device_p->handles.auth,
++ RSBAC_AUTH_FD_ACI_KEY);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "detach_fd_lists(): detaching from AUTH list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_AUTH_FD_NAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_CAP)
++ /* detach all CAP lists */
++ tmperr = rsbac_list_detach(&device_p->handles.cap,
++ RSBAC_CAP_FD_ACI_KEY);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "detach_fd_lists(): detaching from CAP list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_CAP_FD_NAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_PAX)
++ /* detach all PAX lists */
++ tmperr = rsbac_list_detach(&device_p->handles.pax,
++ RSBAC_PAX_FD_ACI_KEY);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "detach_fd_lists(): detaching from PAX list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_PAX_FD_NAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_RES)
++ /* detach all RES lists */
++ tmperr = rsbac_list_detach(&device_p->handles.res,
++ RSBAC_RES_FD_ACI_KEY);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "detach_fd_lists(): detaching from RES list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_RES_FD_NAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++#endif
++
++ return err;
++}
++
++
++/* Create a device item without adding to list. No locking needed. */
++static struct rsbac_device_list_item_t
++*create_device_item(struct vfsmount *vfsmount_p)
++{
++ struct rsbac_device_list_item_t *new_item_p;
++
++ if (!vfsmount_p)
++ return NULL;
++ /* allocate memory for new device, return NULL, if failed */
++ if (!(new_item_p = rsbac_smalloc_clear_unlocked(device_item_slab)))
++ return NULL;
++
++ new_item_p->id = vfsmount_p->mnt_sb->s_dev;
++ new_item_p->vfsmount_p = vfsmount_p;
++ new_item_p->mount_count = 1;
++ return new_item_p;
++}
++
++/* Add an existing device item to list. Locking needed. */
++static struct rsbac_device_list_item_t
++*add_device_item(struct rsbac_device_list_item_t *device_p)
++{
++ struct rsbac_device_list_head_t * new_p;
++ struct rsbac_device_list_head_t * old_p;
++ u_int hash;
++
++ if (!device_p)
++ return NULL;
++
++ hash = device_hash(device_p->id);
++ spin_lock(&device_list_locks[hash]);
++ old_p = device_head_p[hash];
++ new_p = rsbac_kmalloc(sizeof(*new_p));
++ if (!new_p) {
++ spin_unlock(&device_list_locks[hash]);
++ return NULL;
++ }
++ *new_p = *old_p;
++ /* add new device to device list */
++ if (!new_p->head) { /* first device */
++ new_p->head = device_p;
++ new_p->tail = device_p;
++ new_p->curr = device_p;
++ new_p->count = 1;
++ device_p->prev = NULL;
++ device_p->next = NULL;
++ } else { /* there is another device -> hang to tail */
++ device_p->prev = new_p->tail;
++ device_p->next = NULL;
++ new_p->tail->next = device_p;
++ new_p->tail = device_p;
++ new_p->curr = device_p;
++ new_p->count++;
++ }
++ rcu_assign_pointer(device_head_p[hash], new_p);
++ spin_unlock(&device_list_locks[hash]);
++ synchronize_srcu(&device_list_srcu[hash]);
++ rsbac_kfree(old_p);
++ return device_p;
++}
++
++/************************************************************************** */
++/* The remove_item() functions remove an item from the list. If this item */
++/* is head, tail or curr, these pointers are set accordingly. */
++/* To speed up removing several subsequent items, curr is set to the next */
++/* item, if possible. */
++/* If the item is not found, nothing is done. */
++
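++/* Free a device item, dropping its rsbac_dir_dentry_p reference if set. */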
++static void clear_device_item(struct rsbac_device_list_item_t *item_p)
++{
++ if (!item_p)
++ return;
++
++ /* dput() rsbac_dir_dentry_p, if set */
++ if (item_p->rsbac_dir_dentry_p) {
++ dput(item_p->rsbac_dir_dentry_p);
++ }
++ /* OK, let's remove the device item itself */
++ rsbac_sfree(device_item_slab, item_p);
++}
++
++/* remove_device_item unlocks device_list_locks[hash]! */
++static void remove_device_item(kdev_t kdev)
++{
++ struct rsbac_device_list_item_t *item_p;
++ u_int hash;
++
++ hash = device_hash(kdev);
++ /* first we must locate the item. */
++ if ((item_p = lookup_device(kdev, hash))) { /* ok, item was found */
++ struct rsbac_device_list_head_t * new_p;
++ struct rsbac_device_list_head_t * old_p;
++
++ old_p = device_head_p[hash];
++ new_p = rsbac_kmalloc(sizeof(*new_p));
++ if (!new_p) {
++ /* Ouch! */
++ spin_unlock(&device_list_locks[hash]);
++ return;
++ }
++ *new_p = *old_p;
++ if (new_p->head == item_p) { /* item is head */
++ if (new_p->tail == item_p) { /* item is head and tail = only item -> list will be empty */
++ new_p->head = NULL;
++ new_p->tail = NULL;
++ } else { /* item is head, but not tail -> next item becomes head */
++ item_p->next->prev = NULL;
++ new_p->head = item_p->next;
++ }
++ } else { /* item is not head */
++ if (new_p->tail == item_p) { /*item is not head, but tail -> previous item becomes tail */
++ item_p->prev->next = NULL;
++ new_p->tail = item_p->prev;
++ } else { /* item is neither head nor tail -> item is cut out */
++ item_p->prev->next = item_p->next;
++ item_p->next->prev = item_p->prev;
++ }
++ }
++
++ /* curr is no longer valid -> reset. */
++ new_p->curr = NULL;
++ /* adjust counter */
++ new_p->count--;
++ rcu_assign_pointer(device_head_p[hash], new_p);
++ spin_unlock(&device_list_locks[hash]);
++ synchronize_srcu(&device_list_srcu[hash]);
++ rsbac_kfree(old_p);
++ } else {
++ spin_unlock(&device_list_locks[hash]);
++ }
++}
++
++/**************************************************/
++/* Externally visible help functions */
++/**************************************************/
++
++/* helper, copied from fs/open.c:do_truncate() */
++static int rsbac_clear_file(struct dentry *dentry)
++{
++ struct inode *inode = dentry->d_inode;
++ int error;
++ struct iattr newattrs;
++
++ mutex_lock(&inode->i_mutex);
++ newattrs.ia_size = 0;
++ newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
++ error = notify_change(dentry, &newattrs);
++ mutex_unlock(&inode->i_mutex);
++ return error;
++}
++
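++/* Wake up the wait queue whose head is passed in 'dummy'. */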
++static void wakeup_auto(u_long dummy)
++{
++ wake_up((void *) dummy);
++}
++
++#if defined(CONFIG_RSBAC_REG)
++EXPORT_SYMBOL(rsbac_check_device);
++#endif
++
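++/* Check whether a device is registered with RSBAC: returns 0 if found, -RSBAC_ENOTFOUND otherwise. */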
++int rsbac_check_device(kdev_t kdev)
++{
++ struct rsbac_device_list_item_t *device_p;
++ u_int hash;
++ int srcu_idx;
++
++ hash = device_hash(kdev);
++ srcu_idx = srcu_read_lock(&device_list_srcu[hash]);
++ device_p = lookup_device(kdev, hash);
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ if (device_p)
++ return 0;
++ else
++ return -RSBAC_ENOTFOUND;
++}
++
++#if defined(CONFIG_RSBAC_REG)
++EXPORT_SYMBOL(rsbac_get_vfsmount);
++#endif
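++/* Return the vfsmount recorded for a registered device, or NULL if the device is unknown or an auto device. */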
++struct vfsmount *rsbac_get_vfsmount(kdev_t kdev)
++{
++ struct rsbac_device_list_item_t *device_p;
++ struct vfsmount *vfsmount_p;
++ u_int hash;
++ int srcu_idx;
++
++ if (RSBAC_IS_AUTO_DEV(kdev))
++ return NULL;
++
++ hash = device_hash(kdev);
++ /* get super_block-pointer */
++ srcu_idx = srcu_read_lock(&device_list_srcu[hash]);
++ device_p = lookup_device(kdev, hash);
++ if (!device_p) {
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_get_vfsmount(): unknown device %02u:%02u\n",
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev));
++ return NULL;
++ }
++ vfsmount_p = device_p->vfsmount_p;
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ return vfsmount_p;
++}
++
++#if defined(CONFIG_RSBAC_REG)
++EXPORT_SYMBOL(rsbac_read_open);
++#endif
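++/* Open the RSBAC data file 'name' on device kdev for reading. Falls back to the backup file (name + 'b') if the file is missing or empty. */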
++int rsbac_read_open(char *name, struct file **file_pi, kdev_t kdev)
++{
++ struct dentry *dir_dentry_p;
++ struct dentry *file_dentry_p;
++ struct file *file_p;
++ struct path path;
++ int err = 0;
++ struct vfsmount *vfsmount_p;
++
++ if (!name || !file_pi) {
++ rsbac_pr_debug(ds, "called with NULL pointer!");
++ return -RSBAC_EINVALIDPOINTER;
++ }
++
++ vfsmount_p = rsbac_get_vfsmount(kdev);
++ if (!vfsmount_p) {
++ rsbac_printk(KERN_WARNING "rsbac_read_open(): invalid device %02u:%02u\n",
++ RSBAC_MAJOR(kdev), RSBAC_MINOR(kdev));
++ return -RSBAC_EINVALIDDEV;
++ }
++
++ /* lookup dentry of ACI_PATH on root device, lock is released there */
++ if ((err =
++ lookup_aci_path_dentry(vfsmount_p, &dir_dentry_p, FALSE, kdev))) {
++ goto out;
++ }
++
++ mutex_lock(&dir_dentry_p->d_inode->i_mutex);
++ /* open file for reading - this must be done 'by hand', because */
++ /* standard system calls are now extended by rsbac decision calls. */
++ file_dentry_p =
++ rsbac_lookup_one_len(name, dir_dentry_p, strlen(name));
++ mutex_unlock(&dir_dentry_p->d_inode->i_mutex);
++
++ if (!file_dentry_p || IS_ERR(file_dentry_p)) { /* error in lookup */
++ err = -RSBAC_EREADFAILED;
++ goto out;
++ }
++ if (!file_dentry_p->d_inode || !file_dentry_p->d_inode->i_size) {
++ /* file not found or empty: trying backup */
++ char *bname;
++ int name_len = strlen(name);
++
++ dput(file_dentry_p);
++ bname = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (!bname) {
++ err = -RSBAC_ENOMEM;
++ goto out;
++ }
++
++ strcpy(bname, name);
++ bname[name_len] = 'b';
++ name_len++;
++ bname[name_len] = (char) 0;
++ rsbac_pr_debug(ds, "could not lookup file %s, trying backup %s\n",
++ name, bname);
++
++ mutex_lock(&dir_dentry_p->d_inode->i_mutex);
++ file_dentry_p =
++ rsbac_lookup_one_len(bname, dir_dentry_p,
++ strlen(bname));
++ mutex_unlock(&dir_dentry_p->d_inode->i_mutex);
++ rsbac_kfree(bname);
++ if (!file_dentry_p || IS_ERR(file_dentry_p)) { /* error in lookup */
++ return -RSBAC_EREADFAILED;
++ }
++ if (!file_dentry_p->d_inode || !file_dentry_p->d_inode->i_size) {
++ /* backup file also not found: return error */
++ rsbac_pr_debug(ds, "backup file %sb not found or empty\n",
++ name);
++ dput(file_dentry_p);
++ err = -RSBAC_ENOTFOUND;
++ goto out;
++ }
++ }
++ if (!(S_ISREG(file_dentry_p->d_inode->i_mode))) { /* this is not a file! -> error! */
++ rsbac_printk(KERN_WARNING "rsbac_read_open(): expected file is not a file!\n");
++ dput(file_dentry_p);
++ err = -RSBAC_EREADFAILED;
++ goto out;
++ }
++
++ /* Now we fill the file structure and */
++ /* if there is an open func for this file, use it, otherwise ignore */
++ path.dentry = file_dentry_p;
++ path.mnt = mntget(vfsmount_p);
++ file_p = alloc_file(&path, FMODE_READ, path.dentry->d_inode->i_fop);
++
++ if (!file_p) {
++ path_put(&path);
++ rsbac_printk(KERN_WARNING "rsbac_read_open(): could not open file '%s'!\n",
++ name);
++ err = -RSBAC_EREADFAILED;
++ goto out;
++ }
++
++ /* if there is no read func, we get a problem -> error */
++ if ((!file_p->f_op) || (!file_p->f_op->read)) {
++ if (!file_p->f_op) {
++ rsbac_printk(KERN_WARNING "rsbac_read_open(): no f_op for file '%s'!\n",
++ name);
++ } else {
++ rsbac_printk(KERN_WARNING "rsbac_read_open(): no file read func for file '%s'!\n",
++ name);
++ if (file_p->f_op->release)
++ file_p->f_op->release(path.dentry->
++ d_inode, file_p);
++ }
++ path_put(&path);
++ err = -RSBAC_EREADFAILED;
++ goto out;
++ }
++
++ *file_pi = file_p;
++
++ if (file_p->f_op->open)
++ err = file_p->f_path.dentry->d_inode->i_fop->open(path.dentry->d_inode, file_p);
++
++out:
++ return err;
++}
++
++#ifndef check_parent
++#define check_parent(dir, dentry) \
++ ((dir) == (dentry)->d_parent && !list_empty(&dentry->d_bucket))
++#endif
++
++#if defined(CONFIG_RSBAC_REG)
++EXPORT_SYMBOL(rsbac_write_open);
++#endif
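++/* Open the RSBAC data file 'name' on device kdev for writing: an existing file is renamed to its backup (name + 'b'), then a fresh file is created and truncated. */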
++int rsbac_write_open(char *name, struct file **file_pi, kdev_t kdev)
++{
++ struct dentry *dir_dentry_p = NULL;
++ struct dentry *ldir_dentry_p = NULL;
++ struct dentry *file_dentry_p = NULL;
++ struct file * file_p;
++ struct path path;
++ int err = 0;
++ int tmperr = 0;
++ struct vfsmount *vfsmount_p;
++
++ if (!file_pi || !name) {
++ rsbac_pr_debug(write, "called with NULL pointer!\n");
++ return -RSBAC_EINVALIDPOINTER;
++ }
++
++ /* get super_block-pointer */
++ vfsmount_p = rsbac_get_vfsmount(kdev);
++ if (!vfsmount_p) {
++ rsbac_printk(KERN_WARNING "rsbac_write_open(): invalid device %02u:%02u\n",
++ RSBAC_MAJOR(kdev), RSBAC_MINOR(kdev));
++ return -RSBAC_EINVALIDDEV;
++ }
++ if (!rsbac_writable(vfsmount_p->mnt_sb)) {
++ rsbac_pr_debug(write, "called for non-writable device\n");
++ return -RSBAC_ENOTWRITABLE;
++ }
++
++ err = mnt_want_write(vfsmount_p);
++ if (err)
++ return err;
++
++ /* lookup dentry of ACI_PATH on this device (create, if needed and possible),
++ * returns errorcode, if failed */
++ if ((tmperr = lookup_aci_path_dentry(vfsmount_p, &dir_dentry_p, TRUE,
++ kdev))) {
++ err = tmperr;
++ goto out;
++ }
++
++ mutex_lock(&dir_dentry_p->d_inode->i_mutex);
++ /* look up the file 'by hand', because standard system calls */
++ /* are now extended by rsbac decision calls. */
++ file_dentry_p =
++ rsbac_lookup_one_len(name, dir_dentry_p, strlen(name));
++ mutex_unlock(&dir_dentry_p->d_inode->i_mutex);
++ if (!file_dentry_p || IS_ERR(file_dentry_p)) {
++ rsbac_pr_debug(write, "lookup of %s returned error %li\n",
++ name, PTR_ERR(file_dentry_p));
++ err = -RSBAC_EWRITEFAILED;
++ goto out;
++ }
++#if 1
++ if (file_dentry_p->d_inode) { /* file was found: try to rename it as backup file */
++ if (!dir_dentry_p->d_inode->i_op
++ || !dir_dentry_p->d_inode->i_op->rename) {
++ rsbac_printk(KERN_WARNING "rsbac_write_open(): File system supports no rename - no backup of %s made!",
++ name);
++ } else {
++ char *bname;
++ int name_len = strlen(name);
++ struct dentry *new_file_dentry_p = NULL;
++ struct dentry *old_dir_p, *new_dir_p;
++
++ bname = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (!bname) {
++ err = -RSBAC_ENOMEM;
++ goto out_dput;
++ }
++ strcpy(bname, name);
++ bname[name_len] = 'b';
++ bname[name_len + 1] = (char) 0;
++ mutex_lock(&dir_dentry_p->d_inode->i_mutex);
++ new_file_dentry_p =
++ rsbac_lookup_one_len(bname, dir_dentry_p,
++ strlen(bname));
++ mutex_unlock(&dir_dentry_p->d_inode->i_mutex);
++ if (new_file_dentry_p
++ && !IS_ERR(new_file_dentry_p)) {
++ /* lock parent == rsbac-dir for rest of rename */
++ old_dir_p = dget(file_dentry_p->d_parent);
++ new_dir_p =
++ dget(new_file_dentry_p->d_parent);
++ double_lock(new_dir_p, old_dir_p);
++ dquot_initialize(old_dir_p->d_inode);
++ dquot_initialize(new_dir_p->d_inode);
++ /* try to rename file in rsbac dir */
++ /* rsbac_pr_debug(write, "calling rename function\n"); */
++ err = dir_dentry_p->d_inode->i_op->rename(old_dir_p->d_inode,
++ file_dentry_p,
++ new_dir_p->d_inode,
++ new_file_dentry_p);
++ /* unlock dir (dputs both dentries) */
++ double_unlock(new_dir_p, old_dir_p);
++ if (err) {
++ rsbac_printk(KERN_WARNING "rsbac_write_open(): could not rename %s to %s on dev %02u:%02u, error %i - no backup!\n",
++ name, bname,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ err);
++ } else {
++ /* The following d_move() should become unconditional */
++ if (!(vfsmount_p->mnt_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
++ d_move(file_dentry_p, new_file_dentry_p);
++ fsnotify_create(old_dir_p->d_inode,
++ new_file_dentry_p);
++ }
++ dput(new_file_dentry_p);
++ dput(file_dentry_p);
++ /* re-init dentry structure */
++ mutex_lock(&dir_dentry_p->d_inode->i_mutex);
++ file_dentry_p =
++ rsbac_lookup_one_len(name,
++ dir_dentry_p,
++ strlen(name));
++ mutex_unlock(&dir_dentry_p->d_inode->i_mutex);
++ if (!file_dentry_p
++ || IS_ERR(file_dentry_p)) {
++ rsbac_pr_debug(write, "relookup of %s "
++ "returned error %li\n",
++ name,
++ PTR_ERR(file_dentry_p));
++ err = -RSBAC_EWRITEFAILED;
++ goto out;
++ }
++ if (file_dentry_p->d_inode) {
++ rsbac_printk(KERN_WARNING "rsbac_write_open(): relookup of %s returned dentry with existing inode %li, trying unlink\n",
++ name,
++ file_dentry_p->d_inode->i_ino);
++ /* file was found: try to delete it */
++ if (!dir_dentry_p->d_inode->i_op
++ || !dir_dentry_p->d_inode->i_op->unlink) {
++ rsbac_printk(KERN_WARNING "rsbac_write_open(): File system supports no unlink - %s not deleted!",
++ name);
++ rsbac_kfree(bname);
++ err = -RSBAC_EWRITEFAILED;
++ goto out_dput;
++ } else {
++ old_dir_p = lock_parent(file_dentry_p);
++ dquot_initialize(old_dir_p->d_inode);
++ err = dir_dentry_p->d_inode->i_op->unlink(old_dir_p->d_inode,
++ file_dentry_p);
++ /* unlock parent dir */
++ unlock_dir(old_dir_p);
++ /* free file dentry */
++ dput(file_dentry_p);
++ if (err) {
++ rsbac_printk(KERN_WARNING "rsbac_write_open(): could not unlink %s on dev %02u:%02u, error %i!\n",
++ name,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ err);
++ }
++ /* re-init dentry structure */
++ mutex_lock(&dir_dentry_p->d_inode->i_mutex);
++ file_dentry_p = rsbac_lookup_one_len(name, dir_dentry_p,
++ strlen(name));
++ mutex_unlock(&dir_dentry_p->d_inode->i_mutex);
++ if (!file_dentry_p || IS_ERR(file_dentry_p)) {
++ rsbac_pr_debug(write, "relookup of %s returned error %li\n",
++ name,
++ PTR_ERR(file_dentry_p));
++ rsbac_kfree(bname);
++ err = -RSBAC_EWRITEFAILED;
++ goto out;
++ }
++ if (file_dentry_p->d_inode) {
++ rsbac_printk(KERN_WARNING "rsbac_write_open(): relookup of %s returned dentry with existing inode %li\n",
++ name,
++ file_dentry_p->d_inode->i_ino);
++ rsbac_kfree(bname);
++ err = -RSBAC_EWRITEFAILED;
++ goto out_dput;
++ }
++ }
++ }
++ } else {
++ rsbac_printk(KERN_WARNING "rsbac_write_open(): rsbac_lookup_(dentry|one) for backup file %s on dev %02u:%02u failed with error %li - no backup!\n",
++ bname, RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ PTR_ERR(new_file_dentry_p));
++ }
++ rsbac_kfree(bname);
++ }
++ }
++#endif /* backup part */
++
++ if (!file_dentry_p->d_inode) {
++ /* file not found or renamed away: try to create a new one */
++ if (!dir_dentry_p->d_inode->i_op
++ || !dir_dentry_p->d_inode->i_op->create) {
++ rsbac_printk(KERN_WARNING "%s\n",
++ "rsbac_write_open(): File system supports no create!");
++ err = -RSBAC_EWRITEFAILED;
++ goto out_dput;
++ }
++
++ /* lock parent == rsbac-dir for create */
++ ldir_dentry_p = lock_parent(file_dentry_p);
++ if (IS_ERR(ldir_dentry_p)) {
++ rsbac_pr_debug(write, "lock_parent of %s returned "
++ "error %li\n", name,
++ PTR_ERR(ldir_dentry_p));
++ err = -RSBAC_EWRITEFAILED;
++ goto out_dput;
++ }
++ /* try to create file in rsbac dir */
++ /* rsbac_pr_debug(write, "calling create function\n"); */
++ dquot_initialize(ldir_dentry_p->d_inode);
++ err = dir_dentry_p->d_inode->i_op->create(ldir_dentry_p->d_inode,
++ file_dentry_p,
++ RSBAC_ACI_FILE_MODE,
++ NULL);
++ unlock_dir(ldir_dentry_p);
++
++ if (err) {
++ goto out_dput;
++ }
++ /* create was successful */
++ }
++
++ if (!(S_ISREG(file_dentry_p->d_inode->i_mode))) { /* this is not a file! -> error! */
++ rsbac_printk(KERN_WARNING "rsbac_write_open(): expected file is not a file, mode is %o!\n",
++ file_dentry_p->d_inode->i_mode);
++ err = -RSBAC_EWRITEFAILED;
++ goto out_dput;
++ }
++ /* Without a write function we get into trouble -> error */
++ if ((!file_dentry_p->d_inode->i_fop) || (!file_dentry_p->d_inode->i_fop->write)) {
++ rsbac_printk(KERN_WARNING "rsbac_write_open(): file write function missing!\n");
++ err = -RSBAC_EWRITEFAILED;
++ goto out_dput;
++ }
++
++ /* file alloc will call mnt_want_write */
++ mnt_drop_write(vfsmount_p);
++
++ if ((tmperr = get_write_access(file_dentry_p->d_inode))) {
++ rsbac_printk(KERN_WARNING "rsbac_write_open(): could not get write access on file!\n");
++ dput(file_dentry_p);
++ return -RSBAC_EWRITEFAILED;
++ }
++
++ /* Now we fill the file structure, file_take_write, mnt_want_write */
++ path.dentry = file_dentry_p;
++ path.mnt = mntget(vfsmount_p);
++ file_p = alloc_file(&path, FMODE_WRITE, path.dentry->d_inode->i_fop);
++ if (!file_p) {
++ rsbac_printk(KERN_WARNING "rsbac_write_open(): could not init file!\n");
++ put_write_access(file_dentry_p->d_inode);
++ path_put(&path);
++ return -RSBAC_EWRITEFAILED;
++ }
++
++ /* truncating */
++ if (rsbac_clear_file(file_dentry_p)) {
++ if (file_p->f_op->release)
++ file_p->f_op->release(file_dentry_p->d_inode,
++ file_p);
++ rsbac_printk(KERN_WARNING "rsbac_write_open(): could not truncate!\n");
++ err = -RSBAC_EWRITEFAILED;
++ put_write_access(file_p->f_dentry->d_inode);
++ file_release_write(file_p);
++ goto out_dput;
++ }
++ /* set synchronous mode for this file */
++ file_p->f_flags |= O_SYNC;
++ *file_pi = file_p;
++
++out:
++ if (err)
++ mnt_drop_write(vfsmount_p);
++ return err;
++
++out_dput:
++ dput(file_dentry_p);
++ goto out;
++}
++
++
++#if defined(CONFIG_RSBAC_REG)
++EXPORT_SYMBOL(rsbac_read_close);
++#endif
++void rsbac_read_close(struct file *file_p)
++{
++ /* cleanup copied from __fput */
++ if (file_p->f_op && file_p->f_op->release)
++ file_p->f_op->release(file_p->f_dentry->d_inode, file_p);
++ path_put(&file_p->f_path);
++ put_filp(file_p);
++}
++
++#if defined(CONFIG_RSBAC_REG)
++EXPORT_SYMBOL(rsbac_write_close);
++#endif
++void rsbac_write_close(struct file *file_p)
++{
++ put_write_access(file_p->f_dentry->d_inode);
++ mnt_drop_write(file_p->f_path.mnt);
++ file_release_write(file_p);
++ rsbac_read_close(file_p);
++}
++
++#if defined(CONFIG_RSBAC_REG)
++EXPORT_SYMBOL(rsbac_lookup_full_path);
++#endif
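++/* Build the full path of dentry_p into path[] (at most maxlen bytes), walking up parents and across mount points. With pseudonymize set, name components are replaced by the owner's pseudo where one is configured (CONFIG_RSBAC_LOG_PSEUDO_FS). */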
++int rsbac_lookup_full_path(struct dentry *dentry_p, char path[], int maxlen, int pseudonymize)
++{
++ int len = 0;
++ char *i_path;
++ int tmplen = 0;
++#ifdef CONFIG_RSBAC_LOG_PSEUDO_FS
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val;
++#endif
++ int srcu_idx;
++
++ if (!dentry_p || !path)
++ return -RSBAC_EINVALIDPOINTER;
++ if (maxlen <= 0)
++ return -RSBAC_EINVALIDVALUE;
++ i_path = rsbac_kmalloc(maxlen + RSBAC_MAXNAMELEN);
++ if (!i_path)
++ return -RSBAC_ENOMEM;
++
++ path[0] = 0;
++
++ while (dentry_p && (len < maxlen) && dentry_p->d_name.len
++ && dentry_p->d_name.name) {
++#ifdef CONFIG_RSBAC_LOG_PSEUDO_FS
++ if ( pseudonymize
++ && dentry_p->d_inode
++ && dentry_p->d_parent
++ && dentry_p->d_parent->d_inode
++ && (i_tid.user = dentry_p->d_inode->i_uid)
++ && (dentry_p->d_inode->i_uid !=
++ dentry_p->d_parent->d_inode->i_uid)
++ && !rsbac_get_attr(SW_GEN, T_USER, i_tid, A_pseudo,
++ &i_attr_val, FALSE)
++ && i_attr_val.pseudo) { /* Max len of 32 Bit value in decimal print is 11 */
++ if ((maxlen - len) < 12) {
++ rsbac_kfree(i_path);
++ return len;
++ }
++ tmplen =
++ snprintf(i_path, 11, "%u", i_attr_val.pseudo);
++ } else
++#endif
++ {
++ tmplen = dentry_p->d_name.len;
++ if ((tmplen + 1) > (maxlen - len)) {
++ rsbac_kfree(i_path);
++ return len;
++ }
++ strncpy(i_path, dentry_p->d_name.name, tmplen);
++ }
++ /* Skip double / on multi mounts.
++ * Last / is appended at the end of the function */
++ if((i_path[tmplen-1] != '/') && (tmplen != 1)) {
++ if(len && (i_path[tmplen-1] != '/')) {
++ i_path[tmplen] = '/';
++ tmplen++;
++ }
++ i_path[tmplen]=0;
++ strcat(i_path, path);
++ strcpy(path, i_path);
++ len += tmplen;
++ }
++ if (dentry_p->d_parent && (dentry_p->d_parent != dentry_p)
++ && (dentry_p->d_sb->s_root != dentry_p)
++ )
++ dentry_p = dentry_p->d_parent;
++ else {
++ struct rsbac_device_list_item_t *device_p;
++ u_int hash;
++
++ if (dentry_p->d_sb->s_dev == rsbac_root_dev) {
++ break;
++ }
++ hash = device_hash(dentry_p->d_sb->s_dev);
++ srcu_idx = srcu_read_lock(&device_list_srcu[hash]);
++ device_p = lookup_device(dentry_p->d_sb->s_dev, hash);
++ if ( device_p
++ && device_p->vfsmount_p
++ && real_mount(device_p->vfsmount_p)->mnt_mountpoint
++ && real_mount(device_p->vfsmount_p)->mnt_mountpoint->d_sb
++ && (real_mount(device_p->vfsmount_p)->mnt_mountpoint->d_sb->s_dev != dentry_p->d_sb->s_dev)
++ ) {
++ dentry_p = real_mount(device_p->vfsmount_p)->mnt_mountpoint;
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ } else {
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ break;
++ }
++ }
++ }
++
++ i_path[tmplen]=0;
++ strcat(i_path, path);
++ strcpy(path, i_path);
++
++ rsbac_kfree(i_path);
++ return len;
++}
++
++/************************************************* */
++/* proc fs functions */
++/************************************************* */
++
++#if defined(CONFIG_RSBAC_PROC)
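++/* /proc: list all registered devices with mount count, fs type and mountpoint. */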
++static int
++devices_proc_show(struct seq_file *m, void *v)
++{
++ struct rsbac_device_list_item_t *device_p;
++ u_int count = 0;
++ u_int i;
++ int srcu_idx;
++
++ if (!rsbac_initialized)
++ return -ENOSYS;
++
++ for (i = 0; i < RSBAC_NR_DEVICE_LISTS; i++)
++ count += rcu_dereference(device_head_p[i])->count;
++ seq_printf(m, "%u RSBAC Devices\n---------------\nHash size is %u\n",
++ count, RSBAC_NR_DEVICE_LISTS);
++
++ for (i = 0; i < RSBAC_NR_DEVICE_LISTS; i++) {
++ srcu_idx = srcu_read_lock(&device_list_srcu[i]);
++ for (device_p = rcu_dereference(device_head_p[i])->head; device_p;
++ device_p = device_p->next) {
++ if (device_p->vfsmount_p && device_p->vfsmount_p->mnt_sb
++ && device_p->vfsmount_p->mnt_sb->s_type
++ && device_p->vfsmount_p->mnt_sb->s_type->name
++ && real_mount(device_p->vfsmount_p)->mnt_mountpoint) {
++ seq_printf(m,
++ "%02u:%02u with mount_count %u, fs_type %s (%lx), mountpoint %s, parent %02u:%02u\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ device_p->mount_count,
++ device_p->vfsmount_p->mnt_sb->s_type->name,
++ device_p->vfsmount_p->mnt_sb->s_magic,
++ real_mount(device_p->vfsmount_p)->mnt_mountpoint->d_name.name,
++ RSBAC_MAJOR(real_mount(device_p->vfsmount_p)->mnt_mountpoint->d_sb->s_dev),
++ RSBAC_MINOR(real_mount(device_p->vfsmount_p)->mnt_mountpoint->d_sb->s_dev));
++ } else
++ seq_printf(m,
++ "%02u:%02u with mount_count %u, no vfsmount_p\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ device_p->mount_count);
++ }
++ srcu_read_unlock(&device_list_srcu[i], srcu_idx);
++ }
++ return 0;
++}
++
++static int devices_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, devices_proc_show, NULL);
++}
++
++static const struct file_operations devices_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = devices_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *devices;
++
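++/* /proc: show RSBAC version, active modules, per-target item counts and ADF call statistics. */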
++static int
++stats_proc_show(struct seq_file *m, void *v)
++{
++ struct rsbac_device_list_item_t *device_p;
++ long fd_count, fd_dev_count;
++ u_long fd_sum = 0;
++ u_long sum = 0;
++ u_long total_sum = 0;
++ long tmp_count;
++ int i;
++ int srcu_idx;
++
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_initialized)
++ return -ENOSYS;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++#ifdef CONFIG_RSBAC_MAINT
++ seq_printf(m,
++ "RSBAC Status\n------------\nRSBAC Version: %s (Maintenance Mode)\nSupported Modules:%s\n",
++ RSBAC_VERSION, compiled_modules);
++#else
++ seq_printf(m,
++ "RSBAC Status\n------------\nRSBAC Version: %s\nCompiled Modules:%s\n",
++ RSBAC_VERSION, compiled_modules);
++#endif
++#ifdef CONFIG_RSBAC_SWITCH
++ {
++ char *active_modules;
++
++ active_modules = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (active_modules) {
++ active_modules[0] = (char) 0;
++#ifdef CONFIG_RSBAC_REG
++ strcat(active_modules, " REG");
++#endif
++#ifdef CONFIG_RSBAC_MAC
++#ifdef CONFIG_RSBAC_SWITCH_MAC
++ if (rsbac_switch_mac)
++#endif
++#ifdef CONFIG_RSBAC_MAC_LIGHT
++ strcat(active_modules, " MAC-L");
++#else
++ strcat(active_modules, " MAC");
++#endif
++#endif
++#ifdef CONFIG_RSBAC_PM
++#ifdef CONFIG_RSBAC_SWITCH_PM
++ if (rsbac_switch_pm)
++#endif
++ strcat(active_modules, " PM");
++#endif
++#ifdef CONFIG_RSBAC_DAZ
++#ifdef CONFIG_RSBAC_SWITCH_DAZ
++ if (rsbac_switch_daz)
++#endif
++ strcat(active_modules, " DAZ");
++#endif
++#ifdef CONFIG_RSBAC_FF
++#ifdef CONFIG_RSBAC_SWITCH_FF
++ if (rsbac_switch_ff)
++#endif
++ strcat(active_modules, " FF");
++#endif
++#ifdef CONFIG_RSBAC_RC
++#ifdef CONFIG_RSBAC_SWITCH_RC
++ if (rsbac_switch_rc)
++#endif
++ strcat(active_modules, " RC");
++#endif
++#ifdef CONFIG_RSBAC_AUTH
++#ifdef CONFIG_RSBAC_SWITCH_AUTH
++ if (rsbac_switch_auth)
++#endif
++ strcat(active_modules, " AUTH");
++#endif
++#ifdef CONFIG_RSBAC_ACL
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++ if (rsbac_switch_acl)
++#endif
++ strcat(active_modules, " ACL");
++#endif
++#ifdef CONFIG_RSBAC_CAP
++#ifdef CONFIG_RSBAC_SWITCH_CAP
++ if (rsbac_switch_cap)
++#endif
++ strcat(active_modules, " CAP");
++#endif
++#ifdef CONFIG_RSBAC_JAIL
++#ifdef CONFIG_RSBAC_SWITCH_JAIL
++ if (rsbac_switch_jail)
++#endif
++ strcat(active_modules, " JAIL");
++#endif
++#ifdef CONFIG_RSBAC_RES
++#ifdef CONFIG_RSBAC_SWITCH_RES
++ if (rsbac_switch_res)
++#endif
++ strcat(active_modules, " RES");
++#endif
++#ifdef CONFIG_RSBAC_PAX
++#ifdef CONFIG_RSBAC_SWITCH_PAX
++ if (rsbac_switch_pax)
++#endif
++ strcat(active_modules, " PAX");
++#endif
++ seq_printf(m, "Active Modules: %s\n",
++ active_modules);
++ rsbac_kfree(active_modules);
++ }
++ }
++#else
++ seq_printf(m, "All modules active (no switching)\n");
++#endif
++
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if (rsbac_softmode) {
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ seq_printf(m, "Global softmode is enabled\n");
++#else
++ seq_printf(m, "Softmode is enabled\n");
++#endif
++ } else {
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ seq_printf(m, "Global softmode is disabled\n");
++#else
++ seq_printf(m, "Softmode is disabled\n");
++#endif
++ }
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ seq_printf(m,
++ "Individual softmode enabled for:");
++ for (i = 0; i <= RSBAC_MAX_MOD; i++)
++ if (rsbac_ind_softmode[i])
++ seq_printf(m, " %s",
++ get_switch_target_name
++ (tmp, i));
++ rsbac_kfree(tmp);
++ seq_printf(m, "\n");
++ }
++ }
++#endif
++#endif
++
++ seq_printf(m, "\n");
++
++ tmp_count = 0;
++ for (i = 0; i < RSBAC_NR_DEVICE_LISTS; i++) {
++ srcu_idx = srcu_read_lock(&device_list_srcu[i]);
++ device_p = rcu_dereference(device_head_p[i])->head;
++ if (device_p)
++ seq_printf(m, "FD items:\n");
++ while (device_p) {
++ fd_dev_count = 0;
++ fd_count = rsbac_list_count(device_p->handles.gen);
++ if (fd_count >= 0) {
++ seq_printf(m, "Dev %02u:%02u: %lu GEN",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ fd_count);
++ fd_dev_count += fd_count;
++ }
++
++#if defined(CONFIG_RSBAC_MAC)
++ fd_count = rsbac_list_count(device_p->handles.mac);
++ if (fd_count >= 0) {
++ seq_printf(m, ", %lu MAC", fd_count);
++ fd_dev_count += fd_count;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_PM)
++ fd_count = rsbac_list_count(device_p->handles.pm);
++ if (fd_count >= 0) {
++ seq_printf(m, ", %lu PM", fd_count);
++ fd_dev_count += fd_count;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_DAZ)
++ fd_count = rsbac_list_count(device_p->handles.daz);
++ if (fd_count >= 0) {
++ seq_printf(m, ", %lu DAZ", fd_count);
++ fd_dev_count += fd_count;
++ }
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++ fd_count = rsbac_list_count(device_p->handles.dazs);
++ if (fd_count >= 0) {
++ seq_printf(m, ", %lu DAZ SCANNED", fd_count);
++ fd_dev_count += fd_count;
++ }
++#endif
++#endif
++
++#if defined(CONFIG_RSBAC_FF)
++ fd_count = rsbac_list_count(device_p->handles.ff);
++ if (fd_count >= 0) {
++ seq_printf(m, ", %lu FF", fd_count);
++ fd_dev_count += fd_count;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_RC)
++ fd_count = rsbac_list_count(device_p->handles.rc);
++ if (fd_count >= 0) {
++ seq_printf(m, ", %lu RC", fd_count);
++ fd_dev_count += fd_count;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH)
++ fd_count = rsbac_list_count(device_p->handles.auth);
++ if (fd_count >= 0) {
++ seq_printf(m, ", %lu AUTH", fd_count);
++ fd_dev_count += fd_count;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_CAP)
++ fd_count = rsbac_list_count(device_p->handles.cap);
++ if (fd_count >= 0) {
++ seq_printf(m, ", %lu CAP", fd_count);
++ fd_dev_count += fd_count;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_RES)
++ fd_count = rsbac_list_count(device_p->handles.res);
++ if (fd_count >= 0) {
++ seq_printf(m, ", %lu RES", fd_count);
++ fd_dev_count += fd_count;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_PAX)
++ fd_count = rsbac_list_count(device_p->handles.pax);
++ if (fd_count >= 0) {
++ seq_printf(m, ", %lu PAX", fd_count);
++ fd_dev_count += fd_count;
++ }
++#endif
++
++ seq_printf(m, ", %lu total\n",
++ fd_dev_count);
++ fd_sum += fd_dev_count;
++ device_p = device_p->next;
++ }
++ tmp_count += rcu_dereference(device_head_p[i])->count;
++ srcu_read_unlock(&device_list_srcu[i], srcu_idx);
++ }
++ seq_printf(m,
++ "Sum of %lu Devices with %lu fd-items\n\n",
++ tmp_count, fd_sum);
++ total_sum += fd_sum;
++ /* dev lists */
++ sum = 0;
++ tmp_count = rsbac_list_count(dev_handles.gen);
++ seq_printf(m, "DEV: %lu GEN", tmp_count);
++ sum += tmp_count;
++#if defined(CONFIG_RSBAC_MAC)
++ tmp_count = rsbac_list_count(dev_handles.mac);
++ seq_printf(m, ", %lu MAC", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ tmp_count = rsbac_list_count(dev_handles.pm);
++ seq_printf(m, ", %lu PM", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_list_count(dev_major_handles.rc);
++ seq_printf(m, ", %lu major RC", tmp_count);
++ sum += tmp_count;
++ tmp_count = rsbac_list_count(dev_handles.rc);
++ seq_printf(m, ", %lu RC", tmp_count);
++ sum += tmp_count;
++#endif
++ seq_printf(m, ", %lu total\n", sum);
++ total_sum += sum;
++ /* ipc lists */
++ sum = 0;
++ seq_printf(m, "IPC: 0 GEN");
++#if defined(CONFIG_RSBAC_MAC)
++ tmp_count = rsbac_list_count(ipc_handles.mac);
++ seq_printf(m, ", %lu MAC", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ tmp_count = rsbac_list_count(ipc_handles.pm);
++ seq_printf(m, ", %lu PM", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_list_count(ipc_handles.rc);
++ seq_printf(m, ", %lu RC", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ tmp_count = rsbac_list_count(ipc_handles.jail);
++ seq_printf(m, ", %lu JAIL", tmp_count);
++ sum += tmp_count;
++#endif
++ seq_printf(m, ", %lu total\n", sum);
++ total_sum += sum;
++ /* user lists */
++ sum = 0;
++ tmp_count = rsbac_list_count(user_handles.gen);
++ seq_printf(m, "USER: %lu GEN", tmp_count);
++ sum += tmp_count;
++#if defined(CONFIG_RSBAC_MAC)
++ tmp_count = rsbac_list_count(user_handles.mac);
++ seq_printf(m, ", %lu MAC", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ tmp_count = rsbac_list_count(user_handles.pm);
++ seq_printf(m, ", %lu PM", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_DAZ)
++ tmp_count = rsbac_list_count(user_handles.daz);
++ seq_printf(m, ", %lu DAZ", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_FF)
++ tmp_count = rsbac_list_count(user_handles.ff);
++ seq_printf(m, ", %lu FF", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_list_count(user_handles.rc);
++ seq_printf(m, ", %lu RC", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ tmp_count = rsbac_list_count(user_handles.auth);
++ seq_printf(m, ", %lu AUTH", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_CAP)
++ tmp_count = rsbac_list_count(user_handles.cap);
++ seq_printf(m, ", %lu CAP", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ tmp_count = rsbac_list_count(user_handles.jail);
++ seq_printf(m, ", %lu JAIL", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RES)
++ tmp_count = rsbac_list_count(user_handles.res);
++ seq_printf(m, ", %lu RES", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PAX)
++ tmp_count = rsbac_list_count(user_handles.pax);
++ seq_printf(m, ", %lu PAX", tmp_count);
++ sum += tmp_count;
++#endif
++ seq_printf(m, ", %lu total\n", sum);
++ total_sum += sum;
++ /* process lists */
++ sum = 0;
++ tmp_count = rsbac_list_count(process_handles.gen);
++ seq_printf(m, "PROCESS: %lu GEN", tmp_count);
++ sum += tmp_count;
++#if defined(CONFIG_RSBAC_MAC)
++ tmp_count = rsbac_list_count(process_handles.mac);
++ seq_printf(m, ", %lu MAC", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ tmp_count = rsbac_list_count(process_handles.pm);
++ seq_printf(m, ", %lu PM", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_DAZ)
++ tmp_count = rsbac_list_count(process_handles.daz);
++ seq_printf(m, ", %lu DAZ", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_list_count(process_handles.rc);
++ seq_printf(m, ", %lu RC", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ tmp_count = rsbac_list_count(process_handles.auth);
++ seq_printf(m, ", %lu AUTH", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_CAP)
++ tmp_count = rsbac_list_count(process_handles.cap);
++ seq_printf(m, ", %lu CAP", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ tmp_count = rsbac_list_count(process_handles.jail);
++ seq_printf(m, ", %lu JAIL", tmp_count);
++ sum += tmp_count;
++#endif
++ seq_printf(m, ", %lu total\n", sum);
++ total_sum += sum;
++#if defined(CONFIG_RSBAC_UM)
++ /* group lists */
++ sum = 0;
++ seq_printf(m, "GROUP:");
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ tmp_count = rsbac_list_count(group_handles.rc);
++ seq_printf(m, " %lu RC,", tmp_count);
++ sum += tmp_count;
++#endif
++ seq_printf(m, " %lu total\n", sum);
++ total_sum += sum;
++#endif
++
++#if defined(CONFIG_RSBAC_NET_DEV)
++ /* netdev lists */
++ sum = 0;
++#if defined(CONFIG_RSBAC_IND_NETDEV_LOG)
++ tmp_count = rsbac_list_count(netdev_handles.gen);
++ seq_printf(m, "NETDEV: %lu GEN, ", tmp_count);
++ sum += tmp_count;
++#else
++ seq_printf(m, "NETDEV: ");
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_list_count(netdev_handles.rc);
++ seq_printf(m, "%lu RC, ", tmp_count);
++ sum += tmp_count;
++#endif
++ seq_printf(m, "%lu total\n", sum);
++ total_sum += sum;
++#endif
++
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ /* net template list */
++ tmp_count = rsbac_list_count(net_temp_handle);
++ seq_printf(m, "%lu Network Templates\n", tmp_count);
++ /* nettemp lists */
++ sum = 0;
++#if defined(CONFIG_RSBAC_IND_NETOBJ_LOG)
++ tmp_count = rsbac_list_count(nettemp_handles.gen);
++ seq_printf(m, "NETTEMP: %lu GEN, ", tmp_count);
++ sum += tmp_count;
++#else
++ seq_printf(m, "NETTEMP: ");
++#endif
++#if defined(CONFIG_RSBAC_MAC)
++ tmp_count = rsbac_list_count(nettemp_handles.mac);
++ seq_printf(m, "%lu MAC, ", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ tmp_count = rsbac_list_count(nettemp_handles.pm);
++ seq_printf(m, "%lu PM, ", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_list_count(nettemp_handles.rc);
++ seq_printf(m, "%lu RC, ", tmp_count);
++ sum += tmp_count;
++#endif
++ seq_printf(m, "%lu total\n", sum);
++ total_sum += sum;
++ /* local netobj lists */
++ sum = 0;
++ seq_printf(m, "LNETOBJ: ");
++#if defined(CONFIG_RSBAC_MAC)
++ tmp_count = rsbac_list_count(lnetobj_handles.mac);
++ seq_printf(m, "%lu MAC, ", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ tmp_count = rsbac_list_count(lnetobj_handles.pm);
++ seq_printf(m, "%lu PM, ", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_list_count(lnetobj_handles.rc);
++ seq_printf(m, "%lu RC, ", tmp_count);
++ sum += tmp_count;
++#endif
++ seq_printf(m, "%lu total\n", sum);
++ total_sum += sum;
++ /* remote netobj lists */
++ sum = 0;
++ seq_printf(m, "RNETOBJ: ");
++#if defined(CONFIG_RSBAC_MAC)
++ tmp_count = rsbac_list_count(rnetobj_handles.mac);
++ seq_printf(m, "%lu MAC, ", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ tmp_count = rsbac_list_count(rnetobj_handles.pm);
++ seq_printf(m, "%lu PM, ", tmp_count);
++ sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_list_count(rnetobj_handles.rc);
++ seq_printf(m, "%lu RC, ", tmp_count);
++ sum += tmp_count;
++#endif
++ seq_printf(m, "%lu total\n", sum);
++ total_sum += sum;
++#endif /* NET_OBJ */
++
++ seq_printf(m,
++ "Total sum of %lu registered rsbac-items\n",
++ total_sum);
++ seq_printf(m,
++ "\nadf_request calls:\nfile: %llu, dir: %llu, fifo: %llu, symlink: %llu, dev: %llu, ipc: %llu, scd: %llu, user: %llu, process: %llu, netdev: %llu, nettemp: %llu, netobj: %llu, group: %llu, unixsock: %llu\n",
++ rsbac_adf_request_count[T_FILE],
++ rsbac_adf_request_count[T_DIR],
++ rsbac_adf_request_count[T_FIFO],
++ rsbac_adf_request_count[T_SYMLINK],
++ rsbac_adf_request_count[T_DEV],
++ rsbac_adf_request_count[T_IPC],
++ rsbac_adf_request_count[T_SCD],
++ rsbac_adf_request_count[T_USER],
++ rsbac_adf_request_count[T_PROCESS],
++ rsbac_adf_request_count[T_NETDEV],
++ rsbac_adf_request_count[T_NETTEMP],
++ rsbac_adf_request_count[T_NETOBJ],
++ rsbac_adf_request_count[T_GROUP],
++ rsbac_adf_request_count[T_UNIXSOCK]);
++ seq_printf(m,
++ "adf_set_attr calls:\nfile: %llu, dir: %llu, fifo: %llu, symlink: %llu, dev: %llu, ipc: %llu, scd: %llu, user: %llu, process: %llu, netdev: %llu, nettemp: %llu, netobj: %llu, group: %llu, unixsock: %llu\n",
++ rsbac_adf_set_attr_count[T_FILE],
++ rsbac_adf_set_attr_count[T_DIR],
++ rsbac_adf_set_attr_count[T_FIFO],
++ rsbac_adf_set_attr_count[T_SYMLINK],
++ rsbac_adf_set_attr_count[T_DEV],
++ rsbac_adf_set_attr_count[T_IPC],
++ rsbac_adf_set_attr_count[T_SCD],
++ rsbac_adf_set_attr_count[T_USER],
++ rsbac_adf_set_attr_count[T_PROCESS],
++ rsbac_adf_set_attr_count[T_NETDEV],
++ rsbac_adf_set_attr_count[T_NETTEMP],
++ rsbac_adf_set_attr_count[T_NETOBJ],
++ rsbac_adf_set_attr_count[T_GROUP],
++ rsbac_adf_set_attr_count[T_UNIXSOCK]);
++ return 0;
++}
++
++static int stats_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, stats_proc_show, NULL);
++}
++
++static const struct file_operations stats_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = stats_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *stats;
++
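++/* /proc: report mode (secure/softmode/maintenance), switching capabilities and per-module on/off/softmode state. */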
++static int
++active_proc_show(struct seq_file *m, void *v)
++{
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_initialized)
++ return -ENOSYS;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ seq_printf(m, "Version: %s\n", RSBAC_VERSION);
++#ifdef CONFIG_RSBAC_MAINT
++ seq_printf(m, "Mode: Maintenance\n");
++ seq_printf(m, "Softmode: unavailable\n");
++#else
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if (rsbac_softmode)
++ seq_printf(m, "Mode: SOFTMODE\n");
++ else
++#endif
++ seq_printf(m, "Mode: Secure\n");
++#ifdef CONFIG_RSBAC_SOFTMODE
++ seq_printf(m, "Softmode: available\n");
++#else
++ seq_printf(m, "Softmode: unavailable\n");
++#endif
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ seq_printf(m, "Ind-Soft: available\n");
++#else
++ seq_printf(m, "Ind-Soft: unavailable\n");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH
++ seq_printf(m, "Switching off: available for");
++#ifdef CONFIG_RSBAC_SWITCH_MAC
++#ifndef CONFIG_RSBAC_SWITCH_ON
++ if (rsbac_switch_mac)
++#endif
++ seq_printf(m, " MAC");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_PM
++#ifndef CONFIG_RSBAC_SWITCH_ON
++ if (rsbac_switch_pm)
++#endif
++ seq_printf(m, " PM");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_DAZ
++ seq_printf(m, " DAZ");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_FF
++ seq_printf(m, " FF");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_RC
++#ifndef CONFIG_RSBAC_SWITCH_ON
++ if (rsbac_switch_rc)
++#endif
++ seq_printf(m, " RC");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_AUTH
++ seq_printf(m, " AUTH");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++ seq_printf(m, " ACL");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_CAP
++ seq_printf(m, " CAP");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_JAIL
++ seq_printf(m, " JAIL");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_RES
++ seq_printf(m, " RES");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_PAX
++ seq_printf(m, " PAX");
++#endif
++ seq_printf(m, "\n");
++ seq_printf(m, "Switching on: available for");
++#ifdef CONFIG_RSBAC_SWITCH_ON
++#ifdef CONFIG_RSBAC_SWITCH_MAC
++ seq_printf(m, " MAC");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_PM
++ seq_printf(m, " PM");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_RC
++ seq_printf(m, " RC");
++#endif
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_DAZ
++ seq_printf(m, " DAZ");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_FF
++ seq_printf(m, " FF");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_AUTH
++ seq_printf(m, " AUTH");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++ seq_printf(m, " ACL");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_CAP
++ seq_printf(m, " CAP");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_JAIL
++ seq_printf(m, " JAIL");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_RES
++ seq_printf(m, " RES");
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_PAX
++ seq_printf(m, " PAX");
++#endif
++ seq_printf(m, "\n");
++#else
++ seq_printf(m, "Switching off: unavailable\n");
++ seq_printf(m, "Switching on: unavailable\n");
++#endif
++#endif
++#ifdef CONFIG_RSBAC_REG
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if (rsbac_ind_softmode[SW_REG])
++ seq_printf(m, "Module: REG SOFTMODE\n");
++ else
++#endif
++ seq_printf(m, "Module: REG on\n");
++#endif
++
++#ifdef CONFIG_RSBAC_MAC
++#ifdef CONFIG_RSBAC_SWITCH_MAC
++ if (!rsbac_switch_mac)
++ seq_printf(m, "Module: MAC OFF\n");
++ else
++#endif
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if (rsbac_ind_softmode[SW_MAC])
++ seq_printf(m, "Module: MAC SOFTMODE\n");
++ else
++#endif
++ seq_printf(m, "Module: MAC on\n");
++#endif
++
++#ifdef CONFIG_RSBAC_PM
++#ifdef CONFIG_RSBAC_SWITCH_PM
++ if (!rsbac_switch_pm)
++ seq_printf(m, "Module: PM OFF\n");
++ else
++#endif
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if (rsbac_ind_softmode[SW_PM])
++ seq_printf(m, "Module: PM SOFTMODE\n");
++ else
++#endif
++ seq_printf(m, "Module: PM on\n");
++#endif
++
++#ifdef CONFIG_RSBAC_DAZ
++#ifdef CONFIG_RSBAC_SWITCH_DAZ
++ if (!rsbac_switch_daz)
++ seq_printf(m, "Module: DAZ OFF\n");
++ else
++#endif
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if (rsbac_ind_softmode[SW_DAZ])
++ seq_printf(m, "Module: DAZ SOFTMODE\n");
++ else
++#endif
++ seq_printf(m, "Module: DAZ on\n");
++#endif
++
++#ifdef CONFIG_RSBAC_FF
++#ifdef CONFIG_RSBAC_SWITCH_FF
++ if (!rsbac_switch_ff)
++ seq_printf(m, "Module: FF OFF\n");
++ else
++#endif
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if (rsbac_ind_softmode[SW_FF])
++ seq_printf(m, "Module: FF SOFTMODE\n");
++ else
++#endif
++ seq_printf(m, "Module: FF on\n");
++#endif
++
++#ifdef CONFIG_RSBAC_RC
++#ifdef CONFIG_RSBAC_SWITCH_RC
++ if (!rsbac_switch_rc)
++ seq_printf(m, "Module: RC OFF\n");
++ else
++#endif
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if (rsbac_ind_softmode[SW_RC])
++ seq_printf(m, "Module: RC SOFTMODE\n");
++ else
++#endif
++ seq_printf(m, "Module: RC on\n");
++#endif
++
++#ifdef CONFIG_RSBAC_AUTH
++#ifdef CONFIG_RSBAC_SWITCH_AUTH
++ if (!rsbac_switch_auth)
++ seq_printf(m, "Module: AUTH OFF\n");
++ else
++#endif
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if (rsbac_ind_softmode[SW_AUTH])
++ seq_printf(m, "Module: AUTH SOFTMODE\n");
++ else
++#endif
++ seq_printf(m, "Module: AUTH on\n");
++#endif
++
++#ifdef CONFIG_RSBAC_ACL
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++ if (!rsbac_switch_acl)
++ seq_printf(m, "Module: ACL OFF\n");
++ else
++#endif
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if (rsbac_ind_softmode[SW_ACL])
++ seq_printf(m, "Module: ACL SOFTMODE\n");
++ else
++#endif
++ seq_printf(m, "Module: ACL on\n");
++#endif
++
++#ifdef CONFIG_RSBAC_CAP
++#ifdef CONFIG_RSBAC_SWITCH_CAP
++ if (!rsbac_switch_cap)
++ seq_printf(m, "Module: CAP OFF\n");
++ else
++#endif
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if (rsbac_ind_softmode[SW_CAP])
++ seq_printf(m, "Module: CAP SOFTMODE\n");
++ else
++#endif
++ seq_printf(m, "Module: CAP on\n");
++#endif
++
++#ifdef CONFIG_RSBAC_JAIL
++#ifdef CONFIG_RSBAC_SWITCH_JAIL
++ if (!rsbac_switch_jail)
++ seq_printf(m, "Module: JAIL OFF\n");
++ else
++#endif
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if (rsbac_ind_softmode[SW_JAIL])
++ seq_printf(m, "Module: JAIL SOFTMODE\n");
++ else
++#endif
++ seq_printf(m, "Module: JAIL on\n");
++#endif
++
++#ifdef CONFIG_RSBAC_RES
++#ifdef CONFIG_RSBAC_SWITCH_RES
++ if (!rsbac_switch_res)
++ seq_printf(m, "Module: RES OFF\n");
++ else
++#endif
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if (rsbac_ind_softmode[SW_RES])
++ seq_printf(m, "Module: RES SOFTMODE\n");
++ else
++#endif
++ seq_printf(m, "Module: RES on\n");
++#endif
++
++#ifdef CONFIG_RSBAC_PAX
++#ifdef CONFIG_RSBAC_SWITCH_PAX
++ if (!rsbac_switch_pax)
++ seq_printf(m, "Module: PAX OFF\n");
++ else
++#endif
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if (rsbac_ind_softmode[SW_PAX])
++ seq_printf(m, "Module: PAX SOFTMODE\n");
++ else
++#endif
++ seq_printf(m, "Module: PAX on\n");
++#endif
++ return 0;
++}
++
++static int active_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, active_proc_show, NULL);
++}
++
++static const struct file_operations active_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = active_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *active;
++
++#ifdef CONFIG_RSBAC_XSTATS
++static int
++xstats_proc_show(struct seq_file *m, void *v)
++{
++ int i, j;
++ char name[80];
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_initialized)
++ return -ENOSYS;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ seq_printf(m,
++ "RSBAC ADF call Statistics\n-------------------------\nadf_request table:\n");
++ seq_printf(m,
++ "Request /\tFILE\tDIR\tFIFO\tSYMLINK\tDEV\tIPC\tSCD\tUSER\tPROCESS\tNETDEV\tNETTEMP\tNETOBJ\tGROUP\tUNIXSOCK NONE");
++
++ for (i = 0; i < R_NONE; i++) {
++ get_request_name(name, i);
++ name[15] = 0;
++ seq_printf(m, "\n%-14s\t", name);
++ for (j = 0; j <= T_NONE; j++) {
++ if ((j == T_NETTEMP_NT)
++ || (j == T_FD)
++ )
++ continue;
++ seq_printf(m, "%llu\t",
++ rsbac_adf_request_xcount[j][i]);
++ }
++ }
++
++ seq_printf(m,
++ "\n\nadf_request calls:\nfile: %llu, dir: %llu, fifo: %llu, symlink: %llu, dev: %llu, ipc: %llu, scd: %llu, user: %llu, process: %llu, netdev: %llu, nettemp: %llu, netobj: %llu, group: %llu, unixsock: %llu, none: %llu\n",
++ rsbac_adf_request_count[T_FILE],
++ rsbac_adf_request_count[T_DIR],
++ rsbac_adf_request_count[T_FIFO],
++ rsbac_adf_request_count[T_SYMLINK],
++ rsbac_adf_request_count[T_DEV],
++ rsbac_adf_request_count[T_IPC],
++ rsbac_adf_request_count[T_SCD],
++ rsbac_adf_request_count[T_USER],
++ rsbac_adf_request_count[T_PROCESS],
++ rsbac_adf_request_count[T_NETDEV],
++ rsbac_adf_request_count[T_NETTEMP],
++ rsbac_adf_request_count[T_NETOBJ],
++ rsbac_adf_request_count[T_GROUP],
++ rsbac_adf_request_count[T_UNIXSOCK],
++ rsbac_adf_request_count[T_NONE]);
++ seq_printf(m,
++ "\n\nadf_set_attr table:\nRequest /\tFILE\tDIR\tFIFO\tSYMLINK\tDEV\tIPC\tSCD\tUSER\tPROCESS\tNETDEV\tNETTEMP\tNETOBJ\tGROUP\tUNIXSOCK NONE");
++ for (i = 0; i < R_NONE; i++) {
++ get_request_name(name, i);
++ name[15] = 0;
++ seq_printf(m, "\n%-14s\t", name);
++ for (j = 0; j <= T_NONE; j++) {
++ if ((j == T_NETTEMP_NT)
++ || (j == T_FD)
++ )
++ continue;
++ seq_printf(m, "%llu\t",
++ rsbac_adf_set_attr_xcount[j][i]);
++ }
++ }
++
++ seq_printf(m,
++ "\n\nadf_set_attr calls:\nfile: %llu, dir: %llu, fifo: %llu, symlink: %llu, dev: %llu, ipc: %llu, scd: %llu, user: %llu, process: %llu, netdev: %llu, nettemp: %llu, netobj: %llu, group: %llu, unixsock: %llu, none: %llu\n",
++ rsbac_adf_set_attr_count[T_FILE],
++ rsbac_adf_set_attr_count[T_DIR],
++ rsbac_adf_set_attr_count[T_FIFO],
++ rsbac_adf_set_attr_count[T_SYMLINK],
++ rsbac_adf_set_attr_count[T_DEV],
++ rsbac_adf_set_attr_count[T_IPC],
++ rsbac_adf_set_attr_count[T_SCD],
++ rsbac_adf_set_attr_count[T_USER],
++ rsbac_adf_set_attr_count[T_PROCESS],
++ rsbac_adf_set_attr_count[T_NETDEV],
++ rsbac_adf_set_attr_count[T_NETTEMP],
++ rsbac_adf_set_attr_count[T_NETOBJ],
++ rsbac_adf_set_attr_count[T_GROUP],
++ rsbac_adf_set_attr_count[T_UNIXSOCK],
++ rsbac_adf_set_attr_count[T_NONE]);
++ seq_printf(m,
++ "\nSyscall counts\n-------------\n");
++
++ for (i = 0; i < RSYS_none; i++) {
++ get_syscall_name(name, i);
++ name[30] = 0;
++ seq_printf(m, "%-26s %llu\n",
++ name, syscall_count[i]);
++ }
++
++ seq_printf(m,
++ "\n\nData Structures:\nrsbac_get_attr calls:\nfile: %llu, dir: %llu, fifo: %llu, symlink: %llu, dev: %llu, ipc: %llu, scd: %llu, user: %llu, process: %llu, netdev: %llu, nettemp: %llu, netobj: %llu, group: %llu, unixsock: %llu\n",
++ get_attr_count[T_FILE],
++ get_attr_count[T_DIR],
++ get_attr_count[T_FIFO],
++ get_attr_count[T_SYMLINK],
++ get_attr_count[T_DEV],
++ get_attr_count[T_IPC],
++ get_attr_count[T_SCD],
++ get_attr_count[T_USER],
++ get_attr_count[T_PROCESS],
++ get_attr_count[T_NETDEV],
++ get_attr_count[T_NETTEMP],
++ get_attr_count[T_NETOBJ],
++ get_attr_count[T_GROUP],
++ get_attr_count[T_UNIXSOCK]);
++
++ seq_printf(m,
++ "\nrsbac_set_attr calls:\nfile: %llu, dir: %llu, fifo: %llu, symlink: %llu, dev: %llu, ipc: %llu, scd: %llu, user: %llu, process: %llu, netdev: %llu, nettemp: %llu, netobj: %llu, group: %llu, unixsock: %llu\n",
++ set_attr_count[T_FILE],
++ set_attr_count[T_DIR],
++ set_attr_count[T_FIFO],
++ set_attr_count[T_SYMLINK],
++ set_attr_count[T_DEV],
++ set_attr_count[T_IPC],
++ set_attr_count[T_SCD],
++ set_attr_count[T_USER],
++ set_attr_count[T_PROCESS],
++ set_attr_count[T_NETDEV],
++ set_attr_count[T_NETTEMP],
++ set_attr_count[T_NETOBJ],
++ set_attr_count[T_GROUP],
++ set_attr_count[T_UNIXSOCK]);
++
++ seq_printf(m,
++ "\nrsbac_remove_target calls:\nfile: %llu, dir: %llu, fifo: %llu, symlink: %llu, dev: %llu, ipc: %llu, scd: %llu, user: %llu, process: %llu, netdev: %llu, nettemp: %llu, netobj: %llu, group: %llu, unixsock: %llu\n",
++ remove_count[T_FILE],
++ remove_count[T_DIR],
++ remove_count[T_FIFO],
++ remove_count[T_SYMLINK],
++ remove_count[T_DEV],
++ remove_count[T_IPC],
++ remove_count[T_SCD],
++ remove_count[T_USER],
++ remove_count[T_PROCESS],
++ remove_count[T_NETDEV],
++ remove_count[T_NETTEMP],
++ remove_count[T_NETOBJ],
++ remove_count[T_GROUP],
++ remove_count[T_UNIXSOCK]);
++
++ seq_printf(m,
++ "\nrsbac_get_parent calls: %llu\n",
++ get_parent_count);
++
++#ifdef CONFIG_RSBAC_FD_CACHE
++ seq_printf(m,
++ "\nFD Cache hits misses items subitem hm-ratio\n");
++ for (i = 0; i < SW_NONE; i++) {
++ if (fd_cache_handle[i]) {
++ __u64 tmp_hits = fd_cache_hits[i];
++ __u64 tmp_misses = fd_cache_misses[i];
++
++ while ((tmp_hits > (__u32) -1) || (tmp_misses > (__u32) -1)) {
++ tmp_hits >>= 1;
++ tmp_misses >>= 1;
++ }
++ if (!tmp_misses)
++ tmp_misses = 1;
++ seq_printf(m,
++ "%-8s %-20llu %-20llu %-7lu %-7lu %u\n",
++ get_switch_target_name(name, i),
++ fd_cache_hits[i], fd_cache_misses[i],
++ rsbac_list_lol_count(fd_cache_handle[i]),
++ rsbac_list_lol_all_subcount(fd_cache_handle[i]),
++ ((__u32) tmp_hits)/((__u32) tmp_misses));
++ }
++ }
++ seq_printf(m, "\n%u fd_cache_invalidates, %u fd_cache_invalidate_alls\n",
++ fd_cache_invalidates, fd_cache_invalidate_alls);
++#endif
++ return 0;
++}
++
++static int xstats_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, xstats_proc_show, NULL);
++}
++
++static const struct file_operations xstats_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = xstats_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *xstats;
++#endif
++
++#if defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)
++static int
++auto_write_proc_show(struct seq_file *m, void *v)
++{
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_initialized)
++ return -ENOSYS;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ seq_printf(m,
++ "RSBAC auto write settings\n-------------------------\n");
++ seq_printf(m,
++ "auto interval %u jiffies (%i jiffies = 1 second)\n",
++ auto_interval, HZ);
++
++#ifdef CONFIG_RSBAC_DEBUG
++ seq_printf(m, "debug level is %i\n",
++ rsbac_debug_auto);
++#endif
++
++ return 0;
++}
++
++static int auto_write_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, auto_write_proc_show, NULL);
++}
++
++static ssize_t auto_write_proc_write(struct file *file,
++ const char __user * buf, size_t count,
++ loff_t *data)
++{
++ ssize_t err;
++ char *k_buf;
++ char *p;
++
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (count > PROC_BLOCK_SIZE) {
++ return -EOVERFLOW;
++ }
++
++ if (!(k_buf = (char *) __get_free_page(GFP_KERNEL)))
++ return -ENOMEM;
++	if (copy_from_user(k_buf, buf, count)) {
++		free_page((ulong) k_buf);
++		return -EFAULT;
++	}
++
++ err = count;
++ if (count < 13 || strncmp("auto", k_buf, 4)) {
++ goto out;
++ }
++ if (!rsbac_initialized) {
++ err = -ENOSYS;
++ goto out;
++ }
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ err = -EPERM;
++ goto out;
++ }
++
++ /*
++ * Usage: echo "auto interval #N" > /proc/rsbac_info/auto_write
++ * to set auto_interval to given value
++ */
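++	/*
++	 * Illustrative only (values assumed, not from a real setup):
++	 * with HZ=100, writing "auto interval 500" to this file requests
++	 * an auto write every 5 seconds; values below HZ are rejected by
++	 * the minimum check below.
++	 */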
++ if (!strncmp("interval", k_buf + 5, 8)) {
++ unsigned int interval;
++
++ p = k_buf + 5 + 9;
++
++ if (*p == '\0')
++ goto out;
++
++ interval = simple_strtoul(p, NULL, 0);
++ /* only accept minimum of 1 second */
++ if (interval >= HZ) {
++ rsbac_printk(KERN_INFO "auto_write_proc_write(): setting auto write interval to %u\n",
++ interval);
++ auto_interval = interval;
++ err = count;
++ goto out;
++ } else {
++ rsbac_printk(KERN_INFO "auto_write_proc_write(): rejecting too short auto write interval %u (min. %i)\n",
++ interval, HZ);
++ goto out;
++ }
++ }
++#ifdef CONFIG_RSBAC_DEBUG
++ /*
++ * Usage: echo "auto debug #N" > /proc/rsbac_info/auto_write
++ * to set rsbac_debug_auto to given value
++ */
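++	/*
++	 * Illustrative only (assumed values): writing "auto debug 1" to
++	 * this file enables auto write debug output, "auto debug 0"
++	 * disables it again; any other value is rejected below.
++	 */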
++ if (!strncmp("debug", k_buf + 5, 5)) {
++ unsigned int debug_level;
++
++ p = k_buf + 5 + 6;
++
++ if (*p == '\0')
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if (!debug_level || (debug_level == 1)) {
++ rsbac_printk(KERN_INFO "auto_write_proc_write(): setting rsbac_debug_auto to %u\n",
++ debug_level);
++ rsbac_debug_auto = debug_level;
++ err = count;
++ } else {
++ rsbac_printk(KERN_INFO "auto_write_proc_write(): rejecting invalid debug level (should be 0 or 1)\n");
++ }
++ }
++#endif
++
++ out:
++ free_page((ulong) k_buf);
++ return err;
++}
++
++static const struct file_operations auto_write_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = auto_write_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++ .write = auto_write_proc_write,
++};
++
++static struct proc_dir_entry *auto_write;
++#endif /* CONFIG_RSBAC_AUTO_WRITE > 0 */
++
++static int
++versions_proc_show(struct seq_file *m, void *v)
++{
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_initialized)
++ return -ENOSYS;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ seq_printf(m,
++ "RSBAC version settings (%s)\n----------------------\n",
++ RSBAC_VERSION);
++ seq_printf(m,
++ "Device list head size is %u, hash size is %u\n",
++ (int) sizeof(struct rsbac_device_list_item_t),
++ RSBAC_NR_DEVICE_LISTS);
++ seq_printf(m,
++ "FD lists:\nGEN aci version is %u, aci entry size is %Zd, %u lists per device\n",
++ RSBAC_GEN_FD_ACI_VERSION,
++ sizeof(struct rsbac_gen_fd_aci_t),
++ gen_nr_fd_hashes);
++#if defined(CONFIG_RSBAC_MAC)
++ seq_printf(m,
++ "MAC aci version is %u, aci entry size is %Zd, %u lists per device\n",
++ RSBAC_MAC_FD_ACI_VERSION,
++ sizeof(struct rsbac_mac_fd_aci_t),
++ mac_nr_fd_hashes);
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ seq_printf(m,
++ "PM aci version is %u, aci entry size is %Zd, %u lists per device\n",
++ RSBAC_PM_FD_ACI_VERSION,
++ sizeof(struct rsbac_pm_fd_aci_t),
++ pm_nr_fd_hashes);
++#endif
++#if defined(CONFIG_RSBAC_DAZ)
++ seq_printf(m,
++ "DAZ aci version is %u, aci entry size is %Zd, %u lists per device\n",
++ RSBAC_DAZ_FD_ACI_VERSION,
++ sizeof(struct rsbac_daz_fd_aci_t),
++ daz_nr_fd_hashes);
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++ seq_printf(m,
++ "DAZS aci version is %u, aci entry size is %Zd, %u lists per device\n",
++ RSBAC_DAZ_SCANNED_FD_ACI_VERSION,
++ sizeof(rsbac_daz_scanned_t),
++ daz_scanned_nr_fd_hashes);
++#endif
++#endif
++#if defined(CONFIG_RSBAC_FF)
++ seq_printf(m,
++ "FF aci version is %u, aci entry size is %Zd, %u lists per device\n",
++ RSBAC_FF_FD_ACI_VERSION, sizeof(rsbac_ff_flags_t),
++ ff_nr_fd_hashes);
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ seq_printf(m,
++ "RC aci version is %u, aci entry size is %Zd, %u lists per device\n",
++ RSBAC_RC_FD_ACI_VERSION,
++ sizeof(struct rsbac_rc_fd_aci_t),
++ rc_nr_fd_hashes);
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ seq_printf(m,
++ "AUTH aci version is %u, aci entry size is %Zd, %u lists per device\n",
++ RSBAC_AUTH_FD_ACI_VERSION,
++ sizeof(struct rsbac_auth_fd_aci_t),
++ auth_nr_fd_hashes);
++#endif
++#if defined(CONFIG_RSBAC_CAP)
++ seq_printf(m,
++ "CAP aci version is %u, aci entry size is %Zd, %u lists per device\n",
++ RSBAC_CAP_FD_ACI_VERSION,
++ sizeof(struct rsbac_cap_fd_aci_t),
++ cap_nr_fd_hashes);
++#endif
++#if defined(CONFIG_RSBAC_PAX)
++ seq_printf(m,
++ "PAX aci version is %u, aci entry size is %Zd, %u lists per device\n",
++ RSBAC_PAX_FD_ACI_VERSION, sizeof(rsbac_pax_flags_t),
++ pax_nr_fd_hashes);
++#endif
++#if defined(CONFIG_RSBAC_RES)
++ seq_printf(m,
++ "RES aci version is %u, aci entry size is %Zd, %u lists per device\n",
++ RSBAC_RES_FD_ACI_VERSION,
++ sizeof(struct rsbac_res_fd_aci_t),
++ res_nr_fd_hashes);
++#endif
++ seq_printf(m,
++ "\nDEV lists:\nGEN aci version is %u, aci entry size is %Zd\n",
++ RSBAC_GEN_DEV_ACI_VERSION,
++ sizeof(struct rsbac_gen_dev_aci_t));
++#if defined(CONFIG_RSBAC_MAC)
++ seq_printf(m,
++ "MAC aci version is %u, aci entry size is %Zd\n",
++ RSBAC_MAC_DEV_ACI_VERSION,
++ sizeof(struct rsbac_mac_dev_aci_t));
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ seq_printf(m,
++ "PM aci version is %u, aci entry size is %Zd\n",
++ RSBAC_PM_DEV_ACI_VERSION,
++ sizeof(struct rsbac_pm_dev_aci_t));
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ seq_printf(m,
++ "RC aci version is %u, aci entry size is %Zd\n",
++ RSBAC_RC_DEV_ACI_VERSION, sizeof(rsbac_rc_type_id_t));
++#endif
++ seq_printf(m, "\nIPC lists:\n");
++#if defined(CONFIG_RSBAC_MAC)
++ seq_printf(m,
++ "MAC aci version is %u, aci entry size is %Zd\n",
++ RSBAC_MAC_IPC_ACI_VERSION,
++ sizeof(struct rsbac_mac_ipc_aci_t));
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ seq_printf(m,
++ "PM aci version is %u, aci entry size is %Zd\n",
++ RSBAC_PM_IPC_ACI_VERSION,
++ sizeof(struct rsbac_pm_ipc_aci_t));
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ seq_printf(m,
++ "RC aci version is %u, aci entry size is %Zd\n",
++ RSBAC_RC_IPC_ACI_VERSION, sizeof(rsbac_rc_type_id_t));
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ seq_printf(m,
++ "JAIL aci version is %u, aci entry size is %Zd\n",
++ RSBAC_JAIL_IPC_ACI_VERSION, sizeof(rsbac_jail_id_t));
++#endif
++ seq_printf(m,
++ "\nUSER lists:\nGEN aci version is %u, aci entry size is %Zd\n",
++ RSBAC_GEN_USER_ACI_VERSION,
++ sizeof(struct rsbac_gen_user_aci_t));
++#if defined(CONFIG_RSBAC_MAC)
++ seq_printf(m,
++ "MAC aci version is %u, aci entry size is %Zd\n",
++ RSBAC_MAC_USER_ACI_VERSION,
++ sizeof(struct rsbac_mac_user_aci_t));
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ seq_printf(m,
++ "PM aci version is %u, aci entry size is %Zd\n",
++ RSBAC_PM_USER_ACI_VERSION,
++ sizeof(struct rsbac_pm_user_aci_t));
++#endif
++#if defined(CONFIG_RSBAC_DAZ)
++ seq_printf(m,
++ "DAZ aci version is %u, aci entry size is %Zd\n",
++ RSBAC_DAZ_USER_ACI_VERSION,
++ sizeof(rsbac_system_role_int_t));
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ seq_printf(m,
++ "RC aci version is %u, aci entry size is %Zd\n",
++ RSBAC_RC_USER_ACI_VERSION, sizeof(rsbac_rc_role_id_t));
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ seq_printf(m,
++ "AUTH aci version is %u, aci entry size is %Zd\n",
++ RSBAC_AUTH_USER_ACI_VERSION,
++ sizeof(rsbac_system_role_int_t));
++#endif
++#if defined(CONFIG_RSBAC_CAP)
++ seq_printf(m,
++ "CAP aci version is %u, aci entry size is %Zd\n",
++ RSBAC_CAP_USER_ACI_VERSION,
++ sizeof(struct rsbac_cap_user_aci_t));
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ seq_printf(m,
++ "JAIL aci version is %u, aci entry size is %Zd\n",
++ RSBAC_JAIL_USER_ACI_VERSION,
++ sizeof(rsbac_system_role_int_t));
++#endif
++#if defined(CONFIG_RSBAC_PAX)
++ seq_printf(m,
++ "PAX aci version is %u, aci entry size is %Zd\n",
++ RSBAC_PAX_USER_ACI_VERSION,
++ sizeof(rsbac_system_role_int_t));
++#endif
++#if defined(CONFIG_RSBAC_RES)
++ seq_printf(m,
++ "RES aci version is %u, aci entry size is %Zd\n",
++ RSBAC_RES_USER_ACI_VERSION,
++ sizeof(struct rsbac_res_user_aci_t));
++#endif
++ seq_printf(m,
++ "\nPROCESS lists:\nGEN aci version is %i, aci entry size is %Zd, number of lists is %u\n",
++ RSBAC_GEN_PROCESS_ACI_VERSION,
++ sizeof(rsbac_request_vector_t),
++ CONFIG_RSBAC_GEN_NR_P_LISTS);
++#if defined(CONFIG_RSBAC_MAC)
++ seq_printf(m,
++ "MAC aci version is %u, aci entry size is %Zd, number of lists is %u\n",
++ RSBAC_MAC_PROCESS_ACI_VERSION,
++ sizeof(struct rsbac_mac_process_aci_t),
++ CONFIG_RSBAC_MAC_NR_P_LISTS);
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ seq_printf(m,
++ "PM aci version is %u, aci entry size is %Zd\n",
++ RSBAC_PM_PROCESS_ACI_VERSION,
++ sizeof(struct rsbac_pm_process_aci_t));
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ seq_printf(m,
++ "RC aci version is %u, aci entry size is %Zd, number of lists is %u\n",
++ RSBAC_RC_PROCESS_ACI_VERSION,
++ sizeof(struct rsbac_rc_process_aci_t),
++ CONFIG_RSBAC_RC_NR_P_LISTS);
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ seq_printf(m,
++ "AUTH aci version is %u, aci entry size is %Zd\n",
++ RSBAC_AUTH_PROCESS_ACI_VERSION,
++ sizeof(struct rsbac_auth_process_aci_t));
++#endif
++#if defined(CONFIG_RSBAC_CAP)
++ seq_printf(m,
++ "CAP aci version is %u, aci entry size is %Zd\n",
++ RSBAC_CAP_PROCESS_ACI_VERSION,
++ sizeof(struct rsbac_cap_process_aci_t));
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ seq_printf(m,
++ "JAIL aci version is %u, aci entry size is %Zd, number of lists is %u\n",
++ RSBAC_JAIL_PROCESS_ACI_VERSION,
++ sizeof(struct rsbac_jail_process_aci_t),
++ CONFIG_RSBAC_JAIL_NR_P_LISTS);
++#endif
++
++#if defined(CONFIG_RSBAC_NET_DEV)
++ seq_printf(m, "\nNETDEV lists:\n");
++#if defined(CONFIG_RSBAC_IND_NETDEV_LOG)
++ seq_printf(m,
++ "GEN aci version is %u, aci entry size is %Zd\n",
++ RSBAC_GEN_NETDEV_ACI_VERSION,
++ sizeof(struct rsbac_gen_netdev_aci_t));
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ seq_printf(m,
++ "RC aci version is %u, aci entry size is %Zd\n",
++ RSBAC_RC_NETDEV_ACI_VERSION,
++ sizeof(rsbac_rc_type_id_t));
++#endif
++#endif
++
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ seq_printf(m,
++ "\nNetwork Template list: version is %u, data size is %Zd\n",
++ RSBAC_NET_TEMP_VERSION,
++ sizeof(struct rsbac_net_temp_data_t));
++ seq_printf(m,
++ "\nNETOBJ lists:\nGEN aci version is %u, aci entry size is %Zd\n",
++ RSBAC_GEN_NETOBJ_ACI_VERSION,
++ sizeof(struct rsbac_gen_netobj_aci_t));
++#if defined(CONFIG_RSBAC_MAC)
++ seq_printf(m,
++ "MAC aci version is %u, aci entry size is %Zd\n",
++ RSBAC_MAC_NETOBJ_ACI_VERSION,
++ sizeof(struct rsbac_mac_netobj_aci_t));
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ seq_printf(m,
++ "PM aci version is %u, aci entry size is %Zd\n",
++ RSBAC_PM_NETOBJ_ACI_VERSION,
++ sizeof(struct rsbac_pm_netobj_aci_t));
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ seq_printf(m,
++ "RC aci version is %u, aci entry size is %Zd\n",
++ RSBAC_RC_NETOBJ_ACI_VERSION,
++ sizeof(rsbac_rc_type_id_t));
++#endif
++#endif
++ seq_printf(m,
++ "\nlog_levels array: version is %u, array size is %Zd\n",
++ RSBAC_LOG_LEVEL_VERSION,
++ R_NONE * (T_NONE + 1) * sizeof(rsbac_enum_t));
++ seq_printf(m,
++ "\nattribute value union size is %u\n",
++ (int) sizeof(union rsbac_attribute_value_t));
++#ifdef CONFIG_RSBAC_FD_CACHE
++ seq_printf(m,
++ "fd cache attribute value union size is %u\n",
++ (int) sizeof(union rsbac_attribute_value_cache_t));
++#endif
++ return 0;
++}
++
++static int versions_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, versions_proc_show, NULL);
++}
++
++static const struct file_operations versions_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = versions_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *versions;
++
++#ifdef CONFIG_RSBAC_NET_OBJ
++static int
++net_temp_proc_show(struct seq_file *m, void *v)
++{
++ rsbac_net_temp_id_t *temp_array;
++ long count;
++
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_initialized)
++ return -ENOSYS;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ seq_printf(m, "Network Templates\n-----------------\n");
++ count =
++ rsbac_list_get_all_desc(net_temp_handle,
++ (void **) &temp_array);
++ if (count > 0) {
++ __u32 i;
++ struct rsbac_net_temp_data_t data;
++
++ for (i = 0; i < count; i++) {
++ if (!rsbac_list_get_data
++ (net_temp_handle, &temp_array[i], &data)) {
++ seq_printf(m, "%10u %s\n",
++ temp_array[i], data.name);
++ }
++ }
++ rsbac_kfree(temp_array);
++ }
++ seq_printf(m, "%lu templates\n", count);
++ return 0;
++}
++
++static int net_temp_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, net_temp_proc_show, NULL);
++}
++
++static const struct file_operations net_temp_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = net_temp_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *net_temp;
++#endif /* NET_OBJ */
++
++#ifdef CONFIG_RSBAC_JAIL
++static int
++jails_proc_show(struct seq_file *m, void *v)
++{
++ rsbac_pid_t *pid_array;
++ struct rsbac_ipc_t *ipc_array;
++ u_long count = 0;
++ u_int i;
++ struct rsbac_jail_process_aci_t data;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_initialized)
++ return -ENOSYS;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ seq_printf(m,
++ "Syslog-Jail is %u\n\nJAILed Processes\n----------------\nPID Jail-ID Flags Max Caps SCD get SCD modify IP\n",
++ rsbac_jail_syslog_jail_id);
++
++ count = rsbac_list_get_all_desc(process_handles.jail,
++ (void **) &pid_array);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ if (!rsbac_list_get_data
++ (process_handles.jail,
++ &pid_array[i], &data)) {
++ seq_printf(m,
++ "%-5u %-10u %-7u %-10i%-10u %-10u %-10u %u.%u.%u.%u\n",
++ pid_nr(pid_array[i]), data.id,
++ data.flags,
++ data.max_caps.cap[1],
++ data.max_caps.cap[0],
++ data.scd_get,
++ data.scd_modify,
++ NIPQUAD(data.ip));
++ }
++ }
++ rsbac_kfree(pid_array);
++ }
++ seq_printf(m, "%lu jailed processes\n", count);
++ seq_printf(m,
++ "\nJAIL IPCs\n---------\nType IPC-ID Jail-ID\n");
++
++ count =
++ rsbac_list_get_all_desc(ipc_handles.jail,
++ (void **) &ipc_array);
++ if (count > 0) {
++ __u32 i;
++ rsbac_jail_id_t data;
++ char tmp[RSBAC_MAXNAMELEN];
++
++ for (i = 0; i < count; i++) {
++ if (!rsbac_list_get_data
++ (ipc_handles.jail, &ipc_array[i], &data)) {
++ seq_printf(m,
++ "%-10s %-10lu %-10u\n",
++ get_ipc_target_name(tmp,
++ ipc_array
++ [i].type),
++ ipc_array[i].id.id_nr, data);
++ }
++ }
++ rsbac_kfree(ipc_array);
++ }
++ seq_printf(m, "%lu JAIL IPCs\n", count);
++ return 0;
++}
++
++static int jails_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, jails_proc_show, NULL);
++}
++
++static const struct file_operations jails_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = jails_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *jails;
++
++#endif /* JAIL */
++
++#ifdef CONFIG_RSBAC_PAX
++static int
++pax_proc_show(struct seq_file *m, void *v)
++{
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_initialized)
++ return -ENOSYS;
++
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++ seq_puts(m, "RSBAC PaX module\n----------------\n");
++ seq_printf(m, "%li user list items.\n", rsbac_list_count(user_handles.pax));
++ return 0;
++}
++
++static int pax_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, pax_proc_show, NULL);
++}
++
++static const struct file_operations pax_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = pax_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *pax;
++#endif
++
++static int register_all_rsbac_proc(void)
++{
++ proc_rsbac_root_p = create_proc_entry("rsbac-info",
++ S_IFDIR | S_IRUGO | S_IXUGO,
++ NULL);
++ if (!proc_rsbac_root_p)
++ return -RSBAC_ECOULDNOTADDITEM;
++
++ proc_rsbac_backup_p = create_proc_entry("backup",
++ S_IFDIR | S_IRUGO |
++ S_IXUGO,
++ proc_rsbac_root_p);
++ if (!proc_rsbac_backup_p)
++ return -RSBAC_ECOULDNOTADDITEM;
++
++ devices = proc_create("devices", S_IFREG | S_IRUGO, proc_rsbac_root_p, &devices_proc_fops);
++ stats = proc_create("stats", S_IFREG | S_IRUGO, proc_rsbac_root_p, &stats_proc_fops);
++ active = proc_create("active", S_IFREG | S_IRUGO, proc_rsbac_root_p, &active_proc_fops);
++#ifdef CONFIG_RSBAC_XSTATS
++ xstats = proc_create("xstats", S_IFREG | S_IRUGO, proc_rsbac_root_p, &xstats_proc_fops);
++#endif
++#if defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)
++ auto_write = proc_create("auto_write", S_IFREG | S_IRUGO | S_IWUGO, proc_rsbac_root_p, &auto_write_proc_fops);
++#endif
++ versions = proc_create("versions", S_IFREG | S_IRUGO, proc_rsbac_root_p, &versions_proc_fops);
++#ifdef CONFIG_RSBAC_NET_OBJ
++ net_temp = proc_create("net_temp", S_IFREG | S_IRUGO, proc_rsbac_root_p, &net_temp_proc_fops);
++#endif
++#ifdef CONFIG_RSBAC_JAIL
++ jails = proc_create("jails", S_IFREG | S_IRUGO, proc_rsbac_root_p, &jails_proc_fops);
++#endif
++#ifdef CONFIG_RSBAC_PAX
++ pax = proc_create("pax", S_IFREG | S_IRUGO, proc_rsbac_root_p, &pax_proc_fops);
++#endif
++
++ return 0;
++}
++
++/*
++static int unregister_all_rsbac_proc(void)
++ {
++#ifdef CONFIG_RSBAC_PAX
++ remove_proc_entry("pax", proc_rsbac_root_p);
++#endif
++#ifdef CONFIG_RSBAC_JAIL
++ remove_proc_entry("jails", proc_rsbac_root_p);
++#endif
++#ifdef CONFIG_RSBAC_NET_OBJ
++ remove_proc_entry("net_temp", proc_rsbac_root_p);
++#endif
++ remove_proc_entry("versions", proc_rsbac_root_p);
++ remove_proc_entry("devices", proc_rsbac_root_p);
++ remove_proc_entry("stats", proc_rsbac_root_p);
++ remove_proc_entry("active", proc_rsbac_root_p);
++ remove_proc_entry("auto_write", proc_rsbac_root_p);
++ remove_proc_entry("backup", proc_rsbac_root_p);
++ remove_proc_entry("rsbac-info", NULL);
++ return 0;
++ }
++*/
++#endif
++
++
++/************************************************* */
++/* RSBAC daemon */
++/************************************************* */
++
++/************************************************************************** */
++/* Initialization, including ACI restoration for root device from disk. */
++/* After this call, all ACI is kept in memory for performance reasons, */
++/* but user and file/dir object ACI are written to disk on every change. */
++
++/* Since there can be no access to aci data structures before init, */
++/* rsbac_do_init() will initialize all rw-spinlocks to unlocked. */
++
++/* DAZ init prototype */
++#if defined(CONFIG_RSBAC_DAZ) && !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_INIT_DELAY
++int rsbac_init_daz(void);
++#else
++int __init rsbac_init_daz(void);
++#endif
++#endif
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static void registration_error(int err, char *listname)
++#else
++static void __init registration_error(int err, char *listname)
++#endif
++{
++ if (err < 0) {
++ char *tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): Registering %s list failed with error %s\n",
++ listname, get_error_name(tmp, err));
++ rsbac_kfree(tmp);
++ }
++ }
++}
++
++#ifdef CONFIG_RSBAC_FD_CACHE
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int register_fd_cache_lists(void)
++#else
++static int __init register_fd_cache_lists(void)
++#endif
++{
++ int err = 0;
++ struct rsbac_list_lol_info_t *list_info_p;
++ char * tmp;
++ u_int i;
++
++ for (i = 0; i < SW_NONE; i++) {
++ fd_cache_handle[i] = NULL;
++#ifdef CONFIG_RSBAC_XSTATS
++ fd_cache_hits[i] = 0;
++ fd_cache_misses[i] = 0;
++#endif
++ }
++ list_info_p = rsbac_kmalloc_unlocked(sizeof(*list_info_p));
++ if (!list_info_p) {
++ return -ENOMEM;
++ }
++ tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if (!tmp) {
++ rsbac_kfree(list_info_p);
++ return -ENOMEM;
++ }
++ rsbac_pr_debug(ds, "registering FD Cache lists\n");
++ list_info_p->version = RSBAC_FD_CACHE_VERSION;
++ list_info_p->key = RSBAC_FD_CACHE_KEY;
++ list_info_p->desc_size = sizeof(struct rsbac_fd_cache_desc_t);
++ list_info_p->data_size = 0;
++ list_info_p->subdesc_size = sizeof(rsbac_enum_t);
++ list_info_p->subdata_size =
++ sizeof(union rsbac_attribute_value_cache_t);
++ list_info_p->max_age = 0;
++ sprintf(tmp, "%sGEN", RSBAC_FD_CACHE_NAME);
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &fd_cache_handle[SW_GEN], list_info_p,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_OWN_SLAB | \
++ RSBAC_LIST_AUTO_HASH_RESIZE | \
++ RSBAC_LIST_NO_MAX_WARN,
++ NULL,
++ NULL, /* subcompare */
++ NULL, NULL, /* get_conv */
++ NULL, NULL, /* def data */
++ tmp,
++ RSBAC_AUTO_DEV,
++ RSBAC_LIST_MIN_MAX_HASHES,
++ hash_fd_cache,
++ NULL);
++ if (err)
++ registration_error(err, "FD Cache GEN");
++ else
++ rsbac_list_lol_max_items(fd_cache_handle[SW_GEN],
++ RSBAC_FD_CACHE_KEY,
++ CONFIG_RSBAC_FD_CACHE_MAX_ITEMS, A_none);
++
++#if defined(CONFIG_RSBAC_MAC) && defined(CONFIG_RSBAC_MAC_DEF_INHERIT)
++ sprintf(tmp, "%sMAC", RSBAC_FD_CACHE_NAME);
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &fd_cache_handle[SW_MAC], list_info_p,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_OWN_SLAB | \
++ RSBAC_LIST_AUTO_HASH_RESIZE | \
++ RSBAC_LIST_NO_MAX_WARN,
++ NULL,
++ NULL, /* subcompare */
++ NULL, NULL, /* get_conv */
++ NULL, NULL, /* def data */
++ tmp,
++ RSBAC_AUTO_DEV,
++ RSBAC_LIST_MIN_MAX_HASHES,
++ hash_fd_cache,
++ NULL);
++ if (err)
++ registration_error(err, "FD Cache MAC");
++ else
++ rsbac_list_lol_max_items(fd_cache_handle[SW_MAC],
++ RSBAC_FD_CACHE_KEY,
++ CONFIG_RSBAC_FD_CACHE_MAX_ITEMS, A_none);
++#endif
++#if defined(CONFIG_RSBAC_FF)
++ sprintf(tmp, "%sFF", RSBAC_FD_CACHE_NAME);
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &fd_cache_handle[SW_FF], list_info_p,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_OWN_SLAB | \
++ RSBAC_LIST_AUTO_HASH_RESIZE | \
++ RSBAC_LIST_NO_MAX_WARN,
++ NULL,
++ NULL, /* subcompare */
++ NULL, NULL, /* get_conv */
++ NULL, NULL, /* def data */
++ tmp,
++ RSBAC_AUTO_DEV,
++ RSBAC_LIST_MIN_MAX_HASHES,
++ hash_fd_cache,
++ NULL);
++ if (err)
++ registration_error(err, "FD Cache FF");
++ else
++ rsbac_list_lol_max_items(fd_cache_handle[SW_FF],
++ RSBAC_FD_CACHE_KEY,
++ CONFIG_RSBAC_FD_CACHE_MAX_ITEMS, A_none);
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ sprintf(tmp, "%sRC", RSBAC_FD_CACHE_NAME);
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &fd_cache_handle[SW_RC], list_info_p,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_OWN_SLAB | \
++ RSBAC_LIST_AUTO_HASH_RESIZE | \
++ RSBAC_LIST_NO_MAX_WARN,
++ NULL,
++ NULL, /* subcompare */
++ NULL, NULL, /* get_conv */
++ NULL, NULL, /* def data */
++ tmp,
++ RSBAC_AUTO_DEV,
++ RSBAC_LIST_MIN_MAX_HASHES,
++ hash_fd_cache,
++ NULL);
++ if (err)
++ registration_error(err, "FD Cache RC");
++ else
++ rsbac_list_lol_max_items(fd_cache_handle[SW_RC],
++ RSBAC_FD_CACHE_KEY,
++ CONFIG_RSBAC_FD_CACHE_MAX_ITEMS, A_none);
++#endif
++#if defined(CONFIG_RSBAC_DAZ) && !defined(CONFIG_RSBAC_DAZ_CACHE)
++ sprintf(tmp, "%sDAZ", RSBAC_FD_CACHE_NAME);
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &fd_cache_handle[SW_DAZ], list_info_p,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_OWN_SLAB | \
++ RSBAC_LIST_AUTO_HASH_RESIZE | \
++ RSBAC_LIST_NO_MAX_WARN,
++ NULL,
++ NULL, /* subcompare */
++ NULL, NULL, /* get_conv */
++ NULL, NULL, /* def data */
++ tmp,
++ RSBAC_AUTO_DEV,
++ RSBAC_LIST_MIN_MAX_HASHES,
++ hash_fd_cache,
++ NULL);
++ if (err)
++ registration_error(err, "FD Cache DAZ");
++ else
++ rsbac_list_lol_max_items(fd_cache_handle[SW_DAZ],
++ RSBAC_FD_CACHE_KEY,
++ CONFIG_RSBAC_FD_CACHE_MAX_ITEMS,
++ A_none);
++#endif
++
++ rsbac_kfree(list_info_p);
++ rsbac_kfree(tmp);
++ return err;
++}
++#endif
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int register_dev_lists(void)
++#else
++static int __init register_dev_lists(void)
++#endif
++{
++ int err = 0;
++ struct rsbac_list_info_t *list_info_p;
++
++ list_info_p = rsbac_kmalloc_unlocked(sizeof(*list_info_p));
++ if (!list_info_p) {
++ return -ENOMEM;
++ }
++ rsbac_pr_debug(ds, "registering DEV lists\n");
++ {
++ struct rsbac_gen_dev_aci_t def_aci = DEFAULT_GEN_DEV_ACI;
++
++ list_info_p->version = RSBAC_GEN_DEV_ACI_VERSION;
++ list_info_p->key = RSBAC_GEN_DEV_ACI_KEY;
++ list_info_p->desc_size = sizeof(struct rsbac_dev_desc_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_gen_dev_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &dev_handles.gen, list_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ dev_compare,
++ gen_dev_get_conv, &def_aci,
++ RSBAC_GEN_ACI_DEV_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_dev,
++ NULL);
++ if (err) {
++ registration_error(err, "DEV General");
++ }
++ }
++#if defined(CONFIG_RSBAC_MAC)
++ {
++ struct rsbac_mac_dev_aci_t def_aci = DEFAULT_MAC_DEV_ACI;
++
++ list_info_p->version = RSBAC_MAC_DEV_ACI_VERSION;
++ list_info_p->key = RSBAC_MAC_DEV_ACI_KEY;
++ list_info_p->desc_size = sizeof(struct rsbac_dev_desc_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_mac_dev_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &dev_handles.mac, list_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ dev_compare,
++ mac_dev_get_conv, &def_aci,
++ RSBAC_MAC_ACI_DEV_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_dev,
++ NULL);
++ if (err) {
++ registration_error(err, "DEV MAC");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ {
++ struct rsbac_pm_dev_aci_t def_aci = DEFAULT_PM_DEV_ACI;
++
++ list_info_p->version = RSBAC_PM_DEV_ACI_VERSION;
++ list_info_p->key = RSBAC_PM_DEV_ACI_KEY;
++ list_info_p->desc_size = sizeof(struct rsbac_dev_desc_t);
++ list_info_p->data_size = sizeof(struct rsbac_pm_dev_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &dev_handles.pm, list_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ dev_compare,
++ pm_dev_get_conv, &def_aci,
++ RSBAC_PM_ACI_DEV_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_dev,
++ NULL);
++ if (err) {
++ registration_error(err, "DEV PM");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ {
++ rsbac_rc_type_id_t def_major_aci = RSBAC_RC_GENERAL_TYPE;
++ rsbac_rc_type_id_t def_aci = RC_type_inherit_parent;
++
++ list_info_p->version = RSBAC_RC_DEV_ACI_VERSION;
++ list_info_p->key = RSBAC_RC_DEV_ACI_KEY;
++ list_info_p->desc_size = sizeof(struct rsbac_dev_desc_t);
++ list_info_p->data_size = sizeof(rsbac_rc_type_id_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &dev_major_handles.rc,
++ list_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ dev_major_compare,
++ rc_dev_get_conv, &def_major_aci,
++ RSBAC_RC_ACI_DEV_MAJOR_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_dev,
++ NULL);
++ if (err) {
++ registration_error(err, "DEV major RC");
++ }
++ list_info_p->version = RSBAC_RC_DEV_ACI_VERSION;
++ list_info_p->key = RSBAC_RC_DEV_ACI_KEY;
++ list_info_p->desc_size = sizeof(struct rsbac_dev_desc_t);
++ list_info_p->data_size = sizeof(rsbac_rc_type_id_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &dev_handles.rc, list_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ dev_compare,
++ rc_dev_get_conv, &def_aci,
++ RSBAC_RC_ACI_DEV_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_dev,
++ NULL);
++ if (err) {
++ registration_error(err, "DEV RC");
++ }
++ }
++#endif
++
++ rsbac_kfree(list_info_p);
++ return err;
++}
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int register_ipc_lists(void)
++#else
++static int __init register_ipc_lists(void)
++#endif
++{
++ int err = 0;
++ struct rsbac_list_info_t *list_info_p;
++
++ list_info_p = rsbac_kmalloc_unlocked(sizeof(*list_info_p));
++ if (!list_info_p) {
++ return -ENOMEM;
++ }
++ rsbac_pr_debug(ds, "registering IPC lists\n");
++#if defined(CONFIG_RSBAC_MAC)
++ {
++ struct rsbac_mac_ipc_aci_t def_aci = DEFAULT_MAC_IPC_ACI;
++
++ list_info_p->version = RSBAC_MAC_IPC_ACI_VERSION;
++ list_info_p->key = RSBAC_MAC_IPC_ACI_KEY;
++ list_info_p->desc_size = sizeof(struct rsbac_ipc_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_mac_ipc_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &ipc_handles.mac,
++ list_info_p,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_OWN_SLAB | RSBAC_LIST_AUTO_HASH_RESIZE,
++ ipc_compare,
++ NULL,
++ &def_aci,
++ RSBAC_MAC_ACI_IPC_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_ipc,
++ NULL);
++ if (err) {
++ registration_error(err, "IPC MAC");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ {
++ struct rsbac_pm_ipc_aci_t def_aci = DEFAULT_PM_IPC_ACI;
++
++ list_info_p->version = RSBAC_PM_IPC_ACI_VERSION;
++ list_info_p->key = RSBAC_PM_IPC_ACI_KEY;
++ list_info_p->desc_size = sizeof(struct rsbac_ipc_t);
++ list_info_p->data_size = sizeof(struct rsbac_pm_ipc_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &ipc_handles.pm,
++ list_info_p,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_OWN_SLAB | RSBAC_LIST_AUTO_HASH_RESIZE,
++ ipc_compare,
++ NULL,
++ &def_aci,
++ RSBAC_PM_ACI_IPC_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_ipc,
++ NULL);
++ if (err) {
++ registration_error(err, "IPC PM");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ {
++ rsbac_rc_type_id_t def_aci = RSBAC_RC_GENERAL_TYPE;
++
++ list_info_p->version = RSBAC_RC_IPC_ACI_VERSION;
++ list_info_p->key = RSBAC_RC_IPC_ACI_KEY;
++ list_info_p->desc_size = sizeof(struct rsbac_ipc_t);
++ list_info_p->data_size = sizeof(rsbac_rc_type_id_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &ipc_handles.rc,
++ list_info_p,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_OWN_SLAB | RSBAC_LIST_AUTO_HASH_RESIZE,
++ ipc_compare,
++ NULL,
++ &def_aci,
++ RSBAC_RC_ACI_IPC_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_ipc,
++ NULL);
++ if (err) {
++ registration_error(err, "IPC RC");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ {
++ rsbac_jail_id_t def_aci = RSBAC_JAIL_DEF_ID;
++
++ list_info_p->version = RSBAC_JAIL_IPC_ACI_VERSION;
++ list_info_p->key = RSBAC_JAIL_IPC_ACI_KEY;
++ list_info_p->desc_size = sizeof(struct rsbac_ipc_t);
++ list_info_p->data_size = sizeof(rsbac_jail_id_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &ipc_handles.jail,
++ list_info_p,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_OWN_SLAB | RSBAC_LIST_AUTO_HASH_RESIZE,
++ ipc_compare,
++ NULL,
++ &def_aci,
++ RSBAC_JAIL_ACI_IPC_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_ipc,
++ NULL);
++ if (err) {
++ registration_error(err, "IPC JAIL");
++ }
++ }
++#endif
++
++ rsbac_kfree(list_info_p);
++ return err;
++}
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int register_user_lists1(void)
++#else
++static int __init register_user_lists1(void)
++#endif
++{
++ int err = 0;
++ struct rsbac_list_info_t *list_info_p;
++
++ list_info_p = rsbac_kmalloc_unlocked(sizeof(*list_info_p));
++ if (!list_info_p) {
++ return -ENOMEM;
++ }
++ rsbac_pr_debug(ds, "registering USER lists\n");
++ {
++ struct rsbac_gen_user_aci_t def_aci = DEFAULT_GEN_U_ACI;
++
++ list_info_p->version = RSBAC_GEN_USER_ACI_VERSION;
++ list_info_p->key = RSBAC_GEN_USER_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_uid_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_gen_user_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &user_handles.gen, list_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++#ifndef CONFIG_RSBAC_UM_VIRTUAL
++ RSBAC_LIST_DEF_DATA |
++#endif
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ gen_user_get_conv,
++ &def_aci,
++ RSBAC_GEN_ACI_USER_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_uid,
++ NULL);
++ if (err) {
++ registration_error(err, "USER General");
++ }
++ }
++#if defined(CONFIG_RSBAC_MAC)
++ {
++ struct rsbac_mac_user_aci_t def_aci = DEFAULT_MAC_U_ACI;
++
++ list_info_p->version = RSBAC_MAC_USER_ACI_VERSION;
++ list_info_p->key = RSBAC_MAC_USER_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_uid_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_mac_user_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &user_handles.mac, list_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++#ifndef CONFIG_RSBAC_UM_VIRTUAL
++ RSBAC_LIST_DEF_DATA |
++#endif
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ mac_user_get_conv, &def_aci,
++ RSBAC_MAC_ACI_USER_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_uid,
++ NULL);
++ if (err) {
++ registration_error(err, "USER MAC");
++ } else
++ if (!rsbac_no_defaults
++ && !rsbac_list_count(user_handles.mac)) {
++ struct rsbac_mac_user_aci_t sysadm_aci =
++ DEFAULT_MAC_U_SYSADM_ACI;
++ struct rsbac_mac_user_aci_t secoff_aci =
++ DEFAULT_MAC_U_SECOFF_ACI;
++ struct rsbac_mac_user_aci_t auditor_aci =
++ DEFAULT_MAC_U_AUDITOR_ACI;
++ rsbac_uid_t user;
++
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): USER MAC ACI could not be read - generating standard entries!\n");
++ user = RSBAC_SYSADM_UID;
++ if (rsbac_list_add
++ (user_handles.mac, &user, &sysadm_aci))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SYSADM USER MAC entry could not be added!\n");
++ user = RSBAC_SECOFF_UID;
++ if (rsbac_list_add
++ (user_handles.mac, &user, &secoff_aci))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SECOFF USER MAC entry could not be added!\n");
++ user = RSBAC_AUDITOR_UID;
++ if (rsbac_list_add
++ (user_handles.mac, &user, &auditor_aci))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): AUDITOR USER MAC entry could not be added!\n");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ {
++ struct rsbac_pm_user_aci_t def_aci = DEFAULT_PM_U_ACI;
++
++ list_info_p->version = RSBAC_PM_USER_ACI_VERSION;
++ list_info_p->key = RSBAC_PM_USER_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_uid_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_pm_user_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &user_handles.pm, list_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++#ifndef CONFIG_RSBAC_UM_VIRTUAL
++ RSBAC_LIST_DEF_DATA |
++#endif
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ pm_user_get_conv,
++ &def_aci, RSBAC_PM_ACI_USER_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_uid,
++ NULL);
++ if (err) {
++ registration_error(err, "USER PM");
++ } else
++ if (!rsbac_no_defaults
++ && !rsbac_list_count(user_handles.pm)) {
++ struct rsbac_pm_user_aci_t sysadm_aci =
++ DEFAULT_PM_U_SYSADM_ACI;
++ struct rsbac_pm_user_aci_t secoff_aci =
++ DEFAULT_PM_U_SECOFF_ACI;
++ struct rsbac_pm_user_aci_t dataprot_aci =
++ DEFAULT_PM_U_DATAPROT_ACI;
++ struct rsbac_pm_user_aci_t tpman_aci =
++ DEFAULT_PM_U_TPMAN_ACI;
++ rsbac_uid_t user;
++
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): USER PM ACI could not be read - generating standard entries!\n");
++ user = RSBAC_SYSADM_UID;
++ if (rsbac_list_add
++ (user_handles.pm, &user, &sysadm_aci))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SYSADM USER PM entry could not be added!\n");
++ user = RSBAC_SECOFF_UID;
++ if (rsbac_list_add
++ (user_handles.pm, &user, &secoff_aci))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SECOFF USER PM entry could not be added!\n");
++ user = RSBAC_DATAPROT_UID;
++ if (rsbac_list_add
++ (user_handles.pm, &user, &dataprot_aci))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): DATAPROT USER PM entry could not be added!\n");
++ user = RSBAC_TPMAN_UID;
++ if (rsbac_list_add
++ (user_handles.pm, &user, &tpman_aci))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): TPMAN USER PM entry could not be added!\n");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_DAZ)
++ {
++ rsbac_system_role_int_t def_aci = SR_user;
++
++ list_info_p->version = RSBAC_DAZ_USER_ACI_VERSION;
++ list_info_p->key = RSBAC_DAZ_USER_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_uid_t);
++ list_info_p->data_size = sizeof(rsbac_system_role_int_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &user_handles.daz, list_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++#ifndef CONFIG_RSBAC_UM_VIRTUAL
++ RSBAC_LIST_DEF_DATA |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_DEF_DATA,
++ NULL,
++ daz_user_get_conv,
++ &def_aci,
++ RSBAC_DAZ_ACI_USER_NAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "USER DAZ");
++ } else
++ if (!rsbac_no_defaults
++ && !rsbac_list_count(user_handles.daz)) {
++ rsbac_uid_t user;
++ rsbac_system_role_int_t role;
++
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): USER DAZ ACI could not be read - generating standard entries!\n");
++ user = RSBAC_SYSADM_UID;
++ role = SR_administrator;
++ if (rsbac_list_add(user_handles.daz, &user, &role))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SYSADM USER DAZ entry could not be added!\n");
++ user = RSBAC_SECOFF_UID;
++ role = SR_security_officer;
++ if (rsbac_list_add(user_handles.daz, &user, &role))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SECOFF USER DAZ entry could not be added!\n");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_FF)
++ {
++ rsbac_system_role_int_t def_aci = SR_user;
++
++ list_info_p->version = RSBAC_FF_USER_ACI_VERSION;
++ list_info_p->key = RSBAC_FF_USER_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_uid_t);
++ list_info_p->data_size = sizeof(rsbac_system_role_int_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &user_handles.ff, list_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++#ifndef CONFIG_RSBAC_UM_VIRTUAL
++ RSBAC_LIST_DEF_DATA |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_DEF_DATA,
++ NULL,
++ ff_user_get_conv,
++ &def_aci, RSBAC_FF_ACI_USER_NAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "USER FF");
++ } else
++ if (!rsbac_no_defaults
++ && !rsbac_list_count(user_handles.ff)) {
++ rsbac_uid_t user;
++ rsbac_system_role_int_t role;
++
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): USER FF ACI could not be read - generating standard entries!\n");
++ user = RSBAC_SYSADM_UID;
++ role = SR_administrator;
++ if (rsbac_list_add(user_handles.ff, &user, &role))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SYSADM USER FF entry could not be added!\n");
++ user = RSBAC_SECOFF_UID;
++ role = SR_security_officer;
++ if (rsbac_list_add(user_handles.ff, &user, &role))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SECOFF USER FF entry could not be added!\n");
++ user = RSBAC_AUDITOR_UID;
++ role = SR_auditor;
++ if (rsbac_list_add(user_handles.ff, &user, &role))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): AUDITOR USER FF entry could not be added!\n");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_CAP)
++ {
++ struct rsbac_cap_user_aci_t def_aci = DEFAULT_CAP_U_ACI;
++
++ list_info_p->version = RSBAC_CAP_USER_ACI_VERSION;
++ list_info_p->key = RSBAC_CAP_USER_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_uid_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_cap_user_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &user_handles.cap, list_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST |
++#ifndef CONFIG_RSBAC_UM_VIRTUAL
++ RSBAC_LIST_DEF_DATA |
++#endif
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ cap_user_get_conv,
++ &def_aci,
++ RSBAC_CAP_ACI_USER_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_uid,
++ NULL);
++ if (err) {
++ registration_error(err, "USER CAP");
++ } else
++ if (!rsbac_no_defaults
++ && !rsbac_list_count(user_handles.cap)) {
++ struct rsbac_cap_user_aci_t sysadm_aci =
++ DEFAULT_CAP_U_SYSADM_ACI;
++ struct rsbac_cap_user_aci_t secoff_aci =
++ DEFAULT_CAP_U_SECOFF_ACI;
++ struct rsbac_cap_user_aci_t auditor_aci =
++ DEFAULT_CAP_U_AUDITOR_ACI;
++ rsbac_uid_t user;
++
++			rsbac_printk(KERN_WARNING "rsbac_do_init(): USER CAP ACI could not be read - generating standard entries!\n");
++ user = RSBAC_SYSADM_UID;
++ if (rsbac_list_add
++ (user_handles.cap, &user, &sysadm_aci))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SYSADM USER CAP entry could not be added!\n");
++ user = RSBAC_SECOFF_UID;
++ if (rsbac_list_add
++ (user_handles.cap, &user, &secoff_aci))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SECOFF USER CAP entry could not be added!\n");
++ user = RSBAC_AUDITOR_UID;
++ if (rsbac_list_add
++ (user_handles.cap, &user, &auditor_aci))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): AUDITOR USER CAP entry could not be added!\n");
++ }
++ }
++#endif
++
++ rsbac_kfree(list_info_p);
++ return err;
++}
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int register_user_lists2(void)
++#else
++static int __init register_user_lists2(void)
++#endif
++{
++ int err = 0;
++ struct rsbac_list_info_t *list_info_p;
++
++ list_info_p = rsbac_kmalloc_unlocked(sizeof(*list_info_p));
++ if (!list_info_p) {
++ return -ENOMEM;
++ }
++
++#if defined(CONFIG_RSBAC_RC)
++ {
++ struct rsbac_rc_user_aci_t def_aci = DEFAULT_RC_U_ACI;
++
++ list_info_p->version = RSBAC_RC_USER_ACI_VERSION;
++ list_info_p->key = RSBAC_RC_USER_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_uid_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_rc_user_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &user_handles.rc, list_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++#ifndef CONFIG_RSBAC_UM_VIRTUAL
++ RSBAC_LIST_DEF_DATA |
++#endif
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ rc_user_get_conv, &def_aci,
++ RSBAC_RC_ACI_USER_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_uid,
++ NULL);
++ if (err) {
++ registration_error(err, "USER RC");
++ } else
++ if (!rsbac_no_defaults
++ && !rsbac_list_count(user_handles.rc)) {
++ rsbac_uid_t user;
++ struct rsbac_rc_user_aci_t sysadm_aci =
++ DEFAULT_RC_U_SYSADM_ACI;
++ struct rsbac_rc_user_aci_t secoff_aci =
++ DEFAULT_RC_U_SECOFF_ACI;
++ struct rsbac_rc_user_aci_t auditor_aci =
++ DEFAULT_RC_U_AUDITOR_ACI;
++
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): USER RC ACI could not be read - generating standard entries!\n");
++ user = RSBAC_SYSADM_UID;
++ if (rsbac_list_add
++ (user_handles.rc, &user, &sysadm_aci))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SYSADM USER RC entry could not be added!\n");
++ user = RSBAC_SECOFF_UID;
++ if (rsbac_list_add
++ (user_handles.rc, &user, &secoff_aci))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SECOFF USER RC entry could not be added!\n");
++ user = RSBAC_AUDITOR_UID;
++ if (rsbac_list_add
++ (user_handles.rc, &user, &auditor_aci))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): AUDITOR USER RC entry could not be added!\n");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ {
++ rsbac_system_role_int_t def_aci = SR_user;
++
++ list_info_p->version = RSBAC_AUTH_USER_ACI_VERSION;
++ list_info_p->key = RSBAC_AUTH_USER_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_uid_t);
++ list_info_p->data_size = sizeof(rsbac_system_role_int_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &user_handles.auth, list_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++#ifndef CONFIG_RSBAC_UM_VIRTUAL
++ RSBAC_LIST_DEF_DATA |
++#endif
++ RSBAC_LIST_PERSIST,
++ NULL,
++ auth_user_get_conv,
++ &def_aci,
++ RSBAC_AUTH_ACI_USER_NAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "USER AUTH");
++ } else
++ if (!rsbac_no_defaults
++ && !rsbac_list_count(user_handles.auth)) {
++ rsbac_uid_t user;
++ rsbac_system_role_int_t role;
++
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): USER AUTH ACI could not be read - generating standard entries!\n");
++ user = RSBAC_SYSADM_UID;
++ role = SR_administrator;
++ if (rsbac_list_add
++ (user_handles.auth, &user, &role))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SYSADM USER AUTH entry could not be added!\n");
++ user = RSBAC_SECOFF_UID;
++ role = SR_security_officer;
++ if (rsbac_list_add
++ (user_handles.auth, &user, &role))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SECOFF USER AUTH entry could not be added!\n");
++ user = RSBAC_AUDITOR_UID;
++ role = SR_auditor;
++ if (rsbac_list_add
++ (user_handles.auth, &user, &role))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): AUDITOR USER AUTH entry could not be added!\n");
++ }
++ }
++#endif /* AUTH */
++#if defined(CONFIG_RSBAC_JAIL)
++ {
++ rsbac_system_role_int_t def_aci = SR_user;
++
++ list_info_p->version = RSBAC_JAIL_USER_ACI_VERSION;
++ list_info_p->key = RSBAC_JAIL_USER_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_uid_t);
++ list_info_p->data_size = sizeof(rsbac_system_role_int_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &user_handles.jail, list_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++#ifndef CONFIG_RSBAC_UM_VIRTUAL
++ RSBAC_LIST_DEF_DATA |
++#endif
++ RSBAC_LIST_PERSIST,
++ NULL,
++ jail_user_get_conv,
++ &def_aci,
++ RSBAC_JAIL_ACI_USER_NAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "USER JAIL");
++ } else
++ if (!rsbac_no_defaults
++ && !rsbac_list_count(user_handles.jail)) {
++ rsbac_uid_t user;
++ rsbac_system_role_int_t role;
++
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): USER JAIL ACI could not be read - generating standard entries!\n");
++ user = RSBAC_SYSADM_UID;
++ role = SR_administrator;
++ if (rsbac_list_add
++ (user_handles.jail, &user, &role))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SYSADM USER JAIL entry could not be added!\n");
++ user = RSBAC_SECOFF_UID;
++ role = SR_security_officer;
++ if (rsbac_list_add
++ (user_handles.jail, &user, &role))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SECOFF USER JAIL entry could not be added!\n");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_RES)
++ {
++ list_info_p->version = RSBAC_RES_USER_ACI_VERSION;
++ list_info_p->key = RSBAC_RES_USER_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_uid_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_res_user_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &user_handles.res, list_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST,
++ NULL,
++ res_user_get_conv,
++ NULL,
++ RSBAC_RES_ACI_USER_NAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "USER RES");
++ } else
++ if (!rsbac_no_defaults
++ && !rsbac_list_count(user_handles.res)) {
++ struct rsbac_res_user_aci_t sysadm_aci =
++ DEFAULT_RES_U_SYSADM_ACI;
++ struct rsbac_res_user_aci_t secoff_aci =
++ DEFAULT_RES_U_SECOFF_ACI;
++ rsbac_uid_t user;
++
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): USER RES ACI could not be read - generating standard entries!\n");
++ user = RSBAC_SYSADM_UID;
++ if (rsbac_list_add
++ (user_handles.res, &user, &sysadm_aci))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SYSADM USER RES entry could not be added!\n");
++ user = RSBAC_SECOFF_UID;
++ if (rsbac_list_add
++ (user_handles.res, &user, &secoff_aci))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SECOFF USER RES entry could not be added!\n");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_PAX)
++ {
++ rsbac_system_role_int_t def_aci = SR_user;
++
++ list_info_p->version = RSBAC_PAX_USER_ACI_VERSION;
++ list_info_p->key = RSBAC_PAX_USER_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_uid_t);
++ list_info_p->data_size = sizeof(rsbac_system_role_int_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &user_handles.pax, list_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++#ifndef CONFIG_RSBAC_UM_VIRTUAL
++ RSBAC_LIST_DEF_DATA |
++#endif
++ RSBAC_LIST_PERSIST,
++ NULL,
++ pax_user_get_conv,
++ &def_aci,
++ RSBAC_PAX_ACI_USER_NAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "USER PAX");
++ } else
++ if (!rsbac_no_defaults
++ && !rsbac_list_count(user_handles.pax)) {
++ rsbac_uid_t user;
++ rsbac_system_role_int_t role;
++
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): USER PAX ACI could not be read - generating standard entries!\n");
++ user = RSBAC_SYSADM_UID;
++ role = SR_administrator;
++ if (rsbac_list_add(user_handles.pax, &user, &role))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SYSADM USER PAX entry could not be added!\n");
++ user = RSBAC_SECOFF_UID;
++ role = SR_security_officer;
++ if (rsbac_list_add(user_handles.pax, &user, &role))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): SECOFF USER PAX entry could not be added!\n");
++ }
++ }
++#endif
++
++ rsbac_kfree(list_info_p);
++ return err;
++}
++
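++/*
++ * Per-process ACI lists for all compiled-in modules (GEN, MAC, PM, DAZ,
++ * RC, AUTH, CAP, JAIL). Process attributes only live as long as the
++ * process does, so these lists are not persistent; they use default data
++ * and are hashed by pid where more than one hash list is configured.
++ */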
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int register_process_lists(void)
++#else
++static int __init register_process_lists(void)
++#endif
++{
++ int err = 0;
++ struct rsbac_list_info_t *list_info_p;
++
++ list_info_p = rsbac_kmalloc_unlocked(sizeof(*list_info_p));
++ if (!list_info_p) {
++ return -ENOMEM;
++ }
++ rsbac_pr_debug(ds, "registering PROCESS lists\n");
++ {
++ struct rsbac_gen_process_aci_t def_aci = DEFAULT_GEN_P_ACI;
++
++ list_info_p->version = RSBAC_GEN_PROCESS_ACI_VERSION;
++ list_info_p->key = RSBAC_GEN_PROCESS_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_pid_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_gen_process_aci_t);
++ list_info_p->max_age = 0;
++ gen_nr_p_hashes = CONFIG_RSBAC_GEN_NR_P_LISTS;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &process_handles.gen,
++ list_info_p,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ NULL, &def_aci,
++ RSBAC_GEN_ACI_PROCESS_NAME,
++ RSBAC_AUTO_DEV,
++ gen_nr_p_hashes,
++ (gen_nr_p_hashes > 1) ? rsbac_list_hash_pid : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "PROCESS GEN");
++ }
++ }
++#if defined(CONFIG_RSBAC_MAC)
++ {
++ struct rsbac_mac_process_aci_t def_aci = DEFAULT_MAC_P_ACI;
++
++ list_info_p->version = RSBAC_MAC_PROCESS_ACI_VERSION;
++ list_info_p->key = RSBAC_MAC_PROCESS_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_pid_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_mac_process_aci_t);
++ list_info_p->max_age = 0;
++ mac_nr_p_hashes = CONFIG_RSBAC_MAC_NR_P_LISTS;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &process_handles.mac,
++ list_info_p,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ NULL, &def_aci,
++ RSBAC_MAC_ACI_PROCESS_NAME,
++ RSBAC_AUTO_DEV,
++ mac_nr_p_hashes,
++ (mac_nr_p_hashes > 1) ? rsbac_list_hash_pid : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "PROCESS MAC");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ {
++ struct rsbac_pm_process_aci_t def_aci = DEFAULT_PM_P_ACI;
++
++ list_info_p->version = RSBAC_PM_PROCESS_ACI_VERSION;
++ list_info_p->key = RSBAC_PM_PROCESS_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_pid_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_pm_process_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &process_handles.pm,
++ list_info_p,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ NULL,
++ &def_aci,
++ RSBAC_PM_ACI_PROCESS_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_pid,
++ NULL);
++ if (err) {
++ registration_error(err, "PROCESS PM");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_DAZ)
++ {
++ struct rsbac_daz_process_aci_t def_aci = DEFAULT_DAZ_P_ACI;
++
++ list_info_p->version = RSBAC_DAZ_PROCESS_ACI_VERSION;
++ list_info_p->key = RSBAC_DAZ_PROCESS_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_pid_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_daz_process_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &process_handles.daz,
++ list_info_p,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL,
++ &def_aci,
++ RSBAC_DAZ_ACI_PROCESS_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_pid,
++ NULL);
++ if (err) {
++ registration_error(err, "PROCESS DAZ");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ {
++ struct rsbac_rc_process_aci_t def_aci = DEFAULT_RC_P_ACI;
++
++ list_info_p->version = RSBAC_RC_PROCESS_ACI_VERSION;
++ list_info_p->key = RSBAC_RC_PROCESS_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_pid_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_rc_process_aci_t);
++ list_info_p->max_age = 0;
++ rc_nr_p_hashes = CONFIG_RSBAC_RC_NR_P_LISTS;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &process_handles.rc,
++ list_info_p,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ NULL, &def_aci,
++ RSBAC_RC_ACI_PROCESS_NAME,
++ RSBAC_AUTO_DEV,
++ rc_nr_p_hashes,
++ (rc_nr_p_hashes > 1) ? rsbac_list_hash_pid : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "PROCESS RC");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ {
++ struct rsbac_auth_process_aci_t def_aci = DEFAULT_AUTH_P_ACI;
++
++ list_info_p->version = RSBAC_AUTH_PROCESS_ACI_VERSION;
++ list_info_p->key = RSBAC_AUTH_PROCESS_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_pid_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_auth_process_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &process_handles.auth,
++ list_info_p,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL,
++ &def_aci,
++ RSBAC_AUTH_ACI_PROCESS_NAME,
++ RSBAC_AUTO_DEV,
++#if defined(CONFIG_RSBAC_AUTH_LEARN)
++ RSBAC_LIST_MIN_MAX_HASHES,
++ rsbac_list_hash_pid,
++#else
++ 1,
++ NULL,
++#endif
++ NULL);
++ if (err) {
++ registration_error(err, "PROCESS AUTH");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_CAP)
++ {
++ struct rsbac_cap_process_aci_t def_aci = DEFAULT_CAP_P_ACI;
++
++#if defined(CONFIG_RSBAC_CAP_PROC_HIDE)
++ if (rsbac_cap_process_hiding)
++ def_aci.cap_process_hiding = PH_from_other_users;
++#endif
++ list_info_p->version = RSBAC_CAP_PROCESS_ACI_VERSION;
++ list_info_p->key = RSBAC_CAP_PROCESS_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_pid_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_cap_process_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &process_handles.cap,
++ list_info_p,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL,
++ &def_aci,
++ RSBAC_CAP_ACI_PROCESS_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_pid,
++ NULL);
++ if (err) {
++ registration_error(err, "PROCESS CAP");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ {
++ struct rsbac_jail_process_aci_t def_aci =
++ DEFAULT_JAIL_P_ACI;
++
++ list_info_p->version = RSBAC_JAIL_PROCESS_ACI_VERSION;
++ list_info_p->key = RSBAC_JAIL_PROCESS_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_pid_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_jail_process_aci_t);
++ list_info_p->max_age = 0;
++ jail_nr_p_hashes = CONFIG_RSBAC_JAIL_NR_P_LISTS;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &process_handles.jail,
++ list_info_p,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ NULL, &def_aci,
++ RSBAC_JAIL_ACI_PROCESS_NAME,
++ RSBAC_AUTO_DEV,
++ jail_nr_p_hashes,
++ (jail_nr_p_hashes > 1) ? rsbac_list_hash_pid : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "PROCESS JAIL");
++ }
++ }
++#endif
++
++ rsbac_kfree(list_info_p);
++ return err;
++}
++
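++/*
++ * Per-group ACI: currently only the RC type assignment for groups, and
++ * only when RC user management protection is enabled. Persistent and
++ * hashed by gid.
++ */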
++#ifdef CONFIG_RSBAC_UM
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int register_group_lists(void)
++#else
++static int __init register_group_lists(void)
++#endif
++{
++ int err = 0;
++ struct rsbac_list_info_t *list_info_p;
++
++ list_info_p = rsbac_kmalloc_unlocked(sizeof(*list_info_p));
++ if (!list_info_p) {
++ return -ENOMEM;
++ }
++ rsbac_pr_debug(ds, "registering GROUP lists\n");
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ {
++ rsbac_rc_type_id_t def_aci = RSBAC_RC_GENERAL_TYPE;
++
++ list_info_p->version = RSBAC_RC_GROUP_ACI_VERSION;
++ list_info_p->key = RSBAC_RC_GROUP_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_gid_t);
++ list_info_p->data_size = sizeof(rsbac_rc_type_id_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &group_handles.rc, list_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++#ifndef CONFIG_RSBAC_UM_VIRTUAL
++ RSBAC_LIST_DEF_DATA |
++#endif
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL, NULL,
++ &def_aci,
++ RSBAC_RC_ACI_GROUP_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_gid,
++ NULL);
++ if (err) {
++ registration_error(err, "GROUP RC");
++ }
++ }
++#endif
++
++ rsbac_kfree(list_info_p);
++ return err;
++}
++#endif /* UM */
++
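++/*
++ * Network device (NETDEV) ACI: individual logging settings (GEN) and the
++ * RC type of each device. Both lists are persistent, backed up and use
++ * netdev_compare as descriptor compare function.
++ */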
++#ifdef CONFIG_RSBAC_NET_DEV
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int register_netdev_lists(void)
++#else
++static int __init register_netdev_lists(void)
++#endif
++{
++ int err = 0;
++ struct rsbac_list_info_t *list_info_p;
++
++ list_info_p = rsbac_kmalloc_unlocked(sizeof(*list_info_p));
++ if (!list_info_p) {
++ return -ENOMEM;
++ }
++ rsbac_pr_debug(ds, "registering NETDEV lists\n");
++#if defined(CONFIG_RSBAC_IND_NETDEV_LOG)
++ {
++ struct rsbac_gen_netdev_aci_t def_aci =
++ DEFAULT_GEN_NETDEV_ACI;
++
++ list_info_p->version = RSBAC_GEN_NETDEV_ACI_VERSION;
++ list_info_p->key = RSBAC_GEN_NETDEV_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_netdev_id_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_gen_netdev_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &netdev_handles.gen,
++ list_info_p,
++ RSBAC_LIST_BACKUP |
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA,
++ netdev_compare, NULL, &def_aci,
++ RSBAC_GEN_ACI_NETDEV_NAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "NETDEV General");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ {
++ rsbac_rc_type_id_t def_aci = RSBAC_RC_GENERAL_TYPE;
++
++ list_info_p->version = RSBAC_RC_NETDEV_ACI_VERSION;
++ list_info_p->key = RSBAC_RC_NETDEV_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_netdev_id_t);
++ list_info_p->data_size = sizeof(rsbac_rc_type_id_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &netdev_handles.rc,
++ list_info_p,
++ RSBAC_LIST_BACKUP |
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA,
++ netdev_compare, NULL, &def_aci,
++ RSBAC_RC_ACI_NETDEV_NAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "NETDEV RC");
++ }
++ }
++#endif
++
++ rsbac_kfree(list_info_p);
++ return err;
++}
++#endif /* NET_DEV */
++
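++/*
++ * Seed the built-in network templates (Localnet, Internal LAN, Auto-IPv4,
++ * AF_INET, ALL) when no persistent template list could be read.
++ */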
++#ifdef CONFIG_RSBAC_NET_OBJ
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static void fill_default_nettemp(void)
++#else
++static void __init fill_default_nettemp(void)
++#endif
++{
++ rsbac_net_temp_id_t id;
++ struct rsbac_net_temp_data_t data;
++
++ id = RSBAC_NET_TEMP_LNET_ID;
++ memset(&data, 0, sizeof(data));
++ data.address_family = AF_INET;
++ data.type = RSBAC_NET_ANY;
++ data.protocol = RSBAC_NET_ANY;
++ strcpy(data.name, "Localnet");
++ data.address.inet.nr_addr = 1;
++ data.address.inet.valid_bits[0] = 8;
++ rsbac_net_str_to_inet(RSBAC_NET_TEMP_LNET_ADDRESS,
++ &data.address.inet.addr[0]);
++ data.ports.nr_ports = 0;
++ rsbac_list_add(net_temp_handle, &id, &data);
++
++ id = RSBAC_NET_TEMP_LAN_ID;
++ memset(&data, 0, sizeof(data));
++ data.address_family = AF_INET;
++ data.type = RSBAC_NET_ANY;
++ data.protocol = RSBAC_NET_ANY;
++ strcpy(data.name, "Internal LAN");
++ data.address.inet.nr_addr = 1;
++ data.address.inet.valid_bits[0] = 16;
++ rsbac_net_str_to_inet(RSBAC_NET_TEMP_LAN_ADDRESS,
++ &data.address.inet.addr[0]);
++ data.ports.nr_ports = 0;
++ rsbac_list_add(net_temp_handle, &id, &data);
++
++ id = RSBAC_NET_TEMP_AUTO_ID;
++ memset(&data, 0, sizeof(data));
++ data.address_family = AF_INET;
++ data.type = RSBAC_NET_ANY;
++ data.protocol = RSBAC_NET_ANY;
++ strcpy(data.name, "Auto-IPv4");
++ data.address.inet.nr_addr = 1;
++ data.address.inet.valid_bits[0] = 32;
++ data.ports.nr_ports = 0;
++ rsbac_list_add(net_temp_handle, &id, &data);
++
++ id = RSBAC_NET_TEMP_INET_ID;
++ memset(&data, 0, sizeof(data));
++ data.address_family = AF_INET;
++ data.type = RSBAC_NET_ANY;
++ data.protocol = RSBAC_NET_ANY;
++ strcpy(data.name, "AF_INET");
++ data.address.inet.nr_addr = 1;
++ data.address.inet.valid_bits[0] = 0;
++ data.ports.nr_ports = 0;
++ rsbac_list_add(net_temp_handle, &id, &data);
++
++ id = RSBAC_NET_TEMP_INET_ID;
++ memset(&data, 0, sizeof(data));
++ data.address_family = RSBAC_NET_ANY;
++ data.type = RSBAC_NET_ANY;
++ data.protocol = RSBAC_NET_ANY;
++ strcpy(data.name, "ALL");
++ data.ports.nr_ports = 0;
++ rsbac_list_add(net_temp_handle, &id, &data);
++}
++
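++/*
++ * Register the persistent network template list itself, falling back to
++ * the default templates above if it comes up empty.
++ */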
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int register_nettemp_list(void)
++#else
++static int __init register_nettemp_list(void)
++#endif
++{
++ int err = 0;
++ struct rsbac_list_info_t *list_info_p;
++
++ list_info_p = rsbac_kmalloc_unlocked(sizeof(*list_info_p));
++ if (!list_info_p) {
++ return -ENOMEM;
++ }
++ rsbac_pr_debug(ds, "registering network template list\n");
++ list_info_p->version = RSBAC_NET_TEMP_VERSION;
++ list_info_p->key = RSBAC_NET_TEMP_KEY;
++ list_info_p->desc_size = sizeof(rsbac_net_temp_id_t);
++ list_info_p->data_size = sizeof(struct rsbac_net_temp_data_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &net_temp_handle,
++ list_info_p,
++ RSBAC_LIST_BACKUP |
++ RSBAC_LIST_PERSIST,
++ rsbac_list_compare_u32,
++ net_temp_get_conv,
++ NULL,
++ RSBAC_NET_TEMP_NAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "Network Template");
++ } else
++ if (!rsbac_no_defaults && !rsbac_list_count(net_temp_handle)) {
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): Network Templates could not be read - generating standard entries!\n");
++ fill_default_nettemp();
++ }
++ rsbac_kfree(list_info_p);
++ return err;
++}
++
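++/*
++ * Per-template ACI lists: GEN logging settings plus MAC, PM and RC
++ * attributes, keyed by network template id. Persistent, backed up and
++ * hashed with rsbac_list_hash_nettemp.
++ */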
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int register_nettemp_aci_lists(void)
++#else
++static int __init register_nettemp_aci_lists(void)
++#endif
++{
++ int err = 0;
++ struct rsbac_list_info_t *list_info_p;
++
++ list_info_p = rsbac_kmalloc_unlocked(sizeof(*list_info_p));
++ if (!list_info_p) {
++ return -ENOMEM;
++ }
++ rsbac_pr_debug(ds, "registering NETTEMP lists\n");
++#if defined(CONFIG_RSBAC_IND_NETOBJ_LOG)
++ {
++ list_info_p->version = RSBAC_GEN_NETOBJ_ACI_VERSION;
++ list_info_p->key = RSBAC_GEN_NETOBJ_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_net_temp_id_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_gen_netobj_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &nettemp_handles.gen,
++ list_info_p,
++ RSBAC_LIST_BACKUP |
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL, NULL,
++ &def_gen_netobj_aci,
++ RSBAC_GEN_ACI_NETTEMP_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_nettemp,
++ NULL);
++ if (err) {
++ registration_error(err, "NETTEMP GEN");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_MAC)
++ {
++ struct rsbac_mac_netobj_aci_t def_aci =
++ DEFAULT_MAC_NETOBJ_ACI;
++
++ list_info_p->version = RSBAC_MAC_NETOBJ_ACI_VERSION;
++ list_info_p->key = RSBAC_MAC_NETOBJ_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_net_temp_id_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_mac_netobj_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &nettemp_handles.mac,
++ list_info_p,
++ RSBAC_LIST_BACKUP |
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL, NULL,
++ &def_aci,
++ RSBAC_MAC_ACI_NETTEMP_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_nettemp,
++ NULL);
++ if (err) {
++ registration_error(err, "NETTEMP MAC");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ {
++ struct rsbac_pm_netobj_aci_t def_aci =
++ DEFAULT_PM_NETOBJ_ACI;
++
++ list_info_p->version = RSBAC_PM_NETOBJ_ACI_VERSION;
++ list_info_p->key = RSBAC_PM_NETOBJ_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_net_temp_id_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_pm_netobj_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &nettemp_handles.pm,
++ list_info_p,
++ RSBAC_LIST_BACKUP |
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL, NULL,
++ &def_aci,
++ RSBAC_PM_ACI_NETTEMP_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_nettemp,
++ NULL);
++ if (err) {
++ registration_error(err, "NETTEMP PM");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ {
++ struct rsbac_rc_nettemp_aci_t def_aci =
++ DEFAULT_RC_NETTEMP_ACI;
++
++ list_info_p->version = RSBAC_RC_NETOBJ_ACI_VERSION;
++ list_info_p->key = RSBAC_RC_NETOBJ_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_net_temp_id_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_rc_nettemp_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &nettemp_handles.rc,
++ list_info_p,
++ RSBAC_LIST_BACKUP |
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL, NULL,
++ &def_aci,
++ RSBAC_RC_ACI_NETTEMP_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_nettemp,
++ NULL);
++ if (err) {
++ registration_error(err, "NETTEMP RC");
++ }
++ }
++#endif
++
++ rsbac_kfree(list_info_p);
++ return err;
++}
++
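++/*
++ * Local and remote network object (NETOBJ) ACI lists for MAC, PM and RC.
++ * These are not persistent and are hashed by network object id.
++ */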
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int register_netobj_lists(void)
++#else
++static int __init register_netobj_lists(void)
++#endif
++{
++ int err = 0;
++ struct rsbac_list_info_t *list_info_p;
++
++ list_info_p = rsbac_kmalloc_unlocked(sizeof(*list_info_p));
++ if (!list_info_p) {
++ return -ENOMEM;
++ }
++ rsbac_pr_debug(ds, "registering local NETOBJ lists\n");
++#if defined(CONFIG_RSBAC_MAC)
++ {
++ struct rsbac_mac_netobj_aci_t def_aci =
++ DEFAULT_MAC_NETOBJ_ACI;
++
++ list_info_p->version = RSBAC_MAC_NETOBJ_ACI_VERSION;
++ list_info_p->key = RSBAC_MAC_NETOBJ_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_net_obj_id_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_mac_netobj_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &lnetobj_handles.mac,
++ list_info_p,
++ RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ NULL,
++ &def_aci,
++ RSBAC_MAC_ACI_LNETOBJ_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_netobj,
++ NULL);
++ if (err) {
++ registration_error(err, "LNETOBJ MAC");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ {
++ struct rsbac_pm_netobj_aci_t def_aci =
++ DEFAULT_PM_NETOBJ_ACI;
++
++ list_info_p->version = RSBAC_PM_NETOBJ_ACI_VERSION;
++ list_info_p->key = RSBAC_PM_NETOBJ_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_net_obj_id_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_pm_netobj_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &lnetobj_handles.pm,
++ list_info_p,
++ RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ NULL,
++ &def_aci,
++ RSBAC_PM_ACI_LNETOBJ_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_netobj,
++ NULL);
++ if (err) {
++ registration_error(err, "LNETOBJ PM");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ {
++ rsbac_rc_type_id_t def_aci = RSBAC_RC_GENERAL_TYPE;
++
++ list_info_p->version = RSBAC_RC_NETOBJ_ACI_VERSION;
++ list_info_p->key = RSBAC_RC_NETOBJ_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_net_obj_id_t);
++ list_info_p->data_size = sizeof(rsbac_rc_type_id_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &lnetobj_handles.rc,
++ list_info_p,
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL,
++ &def_aci,
++ RSBAC_RC_ACI_LNETOBJ_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_netobj,
++ NULL);
++ if (err) {
++ registration_error(err, "LNETOBJ RC");
++ }
++ }
++#endif
++ rsbac_pr_debug(ds, "registering remote NETOBJ lists\n");
++#if defined(CONFIG_RSBAC_MAC)
++ {
++ struct rsbac_mac_netobj_aci_t def_aci =
++ DEFAULT_MAC_NETOBJ_ACI;
++
++ list_info_p->version = RSBAC_MAC_NETOBJ_ACI_VERSION;
++ list_info_p->key = RSBAC_MAC_NETOBJ_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_net_obj_id_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_mac_netobj_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &rnetobj_handles.mac,
++ list_info_p,
++ RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ NULL,
++ &def_aci,
++ RSBAC_MAC_ACI_RNETOBJ_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_netobj,
++ NULL);
++ if (err) {
++ registration_error(err, "RNETOBJ MAC");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ {
++ struct rsbac_pm_netobj_aci_t def_aci =
++ DEFAULT_PM_NETOBJ_ACI;
++
++ list_info_p->version = RSBAC_PM_NETOBJ_ACI_VERSION;
++ list_info_p->key = RSBAC_PM_NETOBJ_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_net_obj_id_t);
++ list_info_p->data_size =
++ sizeof(struct rsbac_pm_netobj_aci_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &rnetobj_handles.pm,
++ list_info_p,
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL,
++ &def_aci,
++ RSBAC_PM_ACI_RNETOBJ_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_netobj,
++ NULL);
++ if (err) {
++ registration_error(err, "RNETOBJ PM");
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ {
++ rsbac_rc_type_id_t def_aci = RSBAC_RC_GENERAL_TYPE;
++
++ list_info_p->version = RSBAC_RC_NETOBJ_ACI_VERSION;
++ list_info_p->key = RSBAC_RC_NETOBJ_ACI_KEY;
++ list_info_p->desc_size = sizeof(rsbac_net_obj_id_t);
++ list_info_p->data_size = sizeof(rsbac_rc_type_id_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &rnetobj_handles.rc,
++ list_info_p,
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL,
++ &def_aci,
++ RSBAC_RC_ACI_RNETOBJ_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_netobj,
++ NULL);
++ if (err) {
++ registration_error(err, "RNETOBJ RC");
++ }
++ }
++#endif
++
++ rsbac_kfree(list_info_p);
++ return err;
++}
++#endif /* NET_OBJ */
++
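++/*
++ * Central initialization: print the banner, allocate the device list
++ * heads, register the proc interface, read the root device's FD lists,
++ * register all generic lists above, run the module init functions and
++ * finally mark RSBAC as initialized and replay any delayed mounts.
++ */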
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int rsbac_do_init(void)
++#else
++static int __init rsbac_do_init(void)
++#endif
++{
++ int err = 0;
++ struct rsbac_device_list_item_t *device_p;
++ struct rsbac_device_list_item_t *new_device_p;
++ struct rsbac_list_info_t *list_info_p;
++ struct vfsmount *vfsmount_p;
++ u_int i;
++
++ rsbac_pr_debug(stack, "free stack: %lu\n", rsbac_stack_free_space());
++ list_info_p = rsbac_kmalloc_unlocked(sizeof(*list_info_p));
++ if (!list_info_p) {
++ return -ENOMEM;
++ }
++#ifdef CONFIG_RSBAC_INIT_DELAY
++ if (rsbac_root_vfsmount_p)
++ vfsmount_p = rsbac_root_vfsmount_p;
++ else
++#endif
++ {
++ spin_lock(&current->fs->lock);
++ vfsmount_p = mntget(current->fs->root.mnt);
++ spin_unlock(&current->fs->lock);
++ }
++ compiled_modules[0] = (char) 0;
++#ifdef CONFIG_RSBAC_REG
++ strcat(compiled_modules, " REG");
++#endif
++#ifdef CONFIG_RSBAC_MAC
++#ifdef CONFIG_RSBAC_MAC_LIGHT
++ strcat(compiled_modules, " MAC-L");
++#else
++ strcat(compiled_modules, " MAC");
++#endif
++#endif
++#ifdef CONFIG_RSBAC_PM
++ strcat(compiled_modules, " PM");
++#endif
++#ifdef CONFIG_RSBAC_DAZ
++ strcat(compiled_modules, " DAZ");
++#endif
++#ifdef CONFIG_RSBAC_FF
++ strcat(compiled_modules, " FF");
++#endif
++#ifdef CONFIG_RSBAC_RC
++ strcat(compiled_modules, " RC");
++#endif
++#ifdef CONFIG_RSBAC_AUTH
++ strcat(compiled_modules, " AUTH");
++#endif
++#ifdef CONFIG_RSBAC_ACL
++ strcat(compiled_modules, " ACL");
++#endif
++#ifdef CONFIG_RSBAC_CAP
++ strcat(compiled_modules, " CAP");
++#endif
++#ifdef CONFIG_RSBAC_JAIL
++ strcat(compiled_modules, " JAIL");
++#endif
++#ifdef CONFIG_RSBAC_RES
++ strcat(compiled_modules, " RES");
++#endif
++#ifdef CONFIG_RSBAC_PAX
++ strcat(compiled_modules, " PAX");
++#endif
++#ifdef CONFIG_RSBAC_MAINT
++ rsbac_printk(KERN_INFO "rsbac_do_init(): Initializing RSBAC %s (Maintenance Mode)\n",
++ RSBAC_VERSION);
++ /* Print banner we are initializing */
++ printk(KERN_INFO
++ "rsbac_do_init(): Initializing RSBAC %s on device %02u:%02u (Maintenance Mode)\n",
++ RSBAC_VERSION,
++ RSBAC_MAJOR(vfsmount_p->mnt_sb->s_dev),
++ RSBAC_MINOR(vfsmount_p->mnt_sb->s_dev));
++
++ rsbac_printk(KERN_INFO "rsbac_do_init(): Supported module data structures:%s\n",
++ compiled_modules);
++#else
++ rsbac_printk(KERN_INFO "rsbac_do_init(): Initializing RSBAC %s on device %02u:%02u\n",
++ RSBAC_VERSION,
++ RSBAC_MAJOR(vfsmount_p->mnt_sb->s_dev),
++ RSBAC_MINOR(vfsmount_p->mnt_sb->s_dev));
++ /* Print banner we are initializing */
++#ifdef CONFIG_RSBAC_RMSG_NOSYSLOG
++ if (rsbac_nosyslog)
++#endif
++ printk(KERN_INFO
++ "rsbac_do_init(): Initializing RSBAC %s\n",
++ RSBAC_VERSION);
++
++ rsbac_printk(KERN_INFO "rsbac_do_init(): compiled modules:%s\n",
++ compiled_modules);
++#endif
++
++ device_item_slab = rsbac_slab_create("rsbac_device_item",
++ sizeof(struct rsbac_device_list_item_t));
++
++ for (i = 0; i < RSBAC_NR_DEVICE_LISTS; i++) {
++ device_head_p[i] = rsbac_kmalloc_clear_unlocked(sizeof(*device_head_p[i]));
++ if (!device_head_p[i]) {
++ rsbac_printk(KERN_WARNING
++				     "rsbac_do_init(): Failed to allocate device_head_p[%u]\n", i);
++ return -ENOMEM;
++ }
++ spin_lock_init(&device_list_locks[i]);
++ init_srcu_struct(&device_list_srcu[i]);
++ lockdep_set_class(&device_list_locks[i], &device_list_lock_class);
++ }
++
++#if defined(CONFIG_RSBAC_PROC)
++ rsbac_pr_debug(stack, "free stack before registering proc dir: %lu\n",
++ rsbac_stack_free_space());
++ rsbac_printk(KERN_INFO "rsbac_do_init(): Registering RSBAC proc dir\n");
++ register_all_rsbac_proc();
++#endif
++ rsbac_pr_debug(stack, "free stack before get_super: %lu\n",
++ rsbac_stack_free_space());
++ /* read fd aci from root device */
++ rsbac_pr_debug(ds, "reading aci from device "
++ "number %02u:%02u\n",
++ RSBAC_MAJOR(rsbac_root_dev),
++ RSBAC_MINOR(rsbac_root_dev));
++ /* create a private device item */
++ new_device_p = create_device_item(vfsmount_p);
++ if (!new_device_p) {
++ rsbac_printk(KERN_CRIT
++ "rsbac_do_init(): Could not alloc device item!\n");
++ err = -RSBAC_ECOULDNOTADDDEVICE;
++ goto out;
++ }
++ /* Add new_device_p to device list */
++ /* OK, go on */
++ device_p = add_device_item(new_device_p);
++ if (!device_p) {
++ rsbac_printk(KERN_CRIT
++ "rsbac_do_init(): Could not add device!\n");
++ clear_device_item(new_device_p);
++ err = -RSBAC_ECOULDNOTADDDEVICE;
++ goto out;
++ }
++
++ /* init lists - we need the root device_p to be initialized, but no generic list registered */
++ rsbac_printk(KERN_INFO "rsbac_do_init(): Initializing generic lists\n");
++ rsbac_list_init();
++
++ rsbac_pr_debug(stack, "free stack before init_debug: %lu\n",
++ rsbac_stack_free_space());
++ rsbac_init_debug();
++
++ rsbac_printk(KERN_INFO "rsbac_do_init(): reading FD attributes from root dev\n");
++ rsbac_pr_debug(stack, "free stack before reading FD lists: %lu\n",
++ rsbac_stack_free_space());
++ /* no locking needed, device_p is known and there can be no parallel init! */
++ if ((err = register_fd_lists(device_p, rsbac_root_dev))) {
++ char *tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): File/Dir lists registration failed for dev %02u:%02u, err %s!\n",
++ RSBAC_MAJOR(rsbac_root_dev),
++ RSBAC_MINOR(rsbac_root_dev),
++ get_error_name(tmp, err));
++ rsbac_kfree(tmp);
++ }
++ }
++ rsbac_pr_debug(stack, "free stack before DEV lists registration: %lu\n",
++ rsbac_stack_free_space());
++ register_dev_lists();
++ rsbac_pr_debug(stack, "free stack before registering IPC lists: %lu\n",
++ rsbac_stack_free_space());
++ register_ipc_lists();
++ rsbac_pr_debug(stack, "free stack before registering USER lists 1: %lu\n",
++ rsbac_stack_free_space());
++ register_user_lists1();
++ rsbac_pr_debug(stack, "free stack before registering USER lists 2: %lu\n",
++ rsbac_stack_free_space());
++ register_user_lists2();
++ rsbac_pr_debug(stack, "free stack before registering PROCESS aci: %lu\n",
++ rsbac_stack_free_space());
++ register_process_lists();
++
++
++#ifdef CONFIG_RSBAC_UM
++ rsbac_pr_debug(stack, "free stack before GROUP lists registration: %lu\n",
++ rsbac_stack_free_space());
++ register_group_lists();
++#endif /* CONFIG_RSBAC_UM */
++
++#ifdef CONFIG_RSBAC_NET_DEV
++ register_netdev_lists();
++#endif
++
++#ifdef CONFIG_RSBAC_NET_OBJ
++ register_nettemp_list();
++ register_nettemp_aci_lists();
++ register_netobj_lists();
++#endif /* NET_OBJ */
++
++#ifdef CONFIG_RSBAC_FD_CACHE
++ if (!rsbac_fd_cache_disable)
++ register_fd_cache_lists();
++#endif
++
++/* Call other init functions */
++#if defined(CONFIG_RSBAC_MAC)
++ rsbac_pr_debug(stack, "free stack before init_mac: %lu\n",
++ rsbac_stack_free_space());
++ rsbac_init_mac();
++#endif
++
++#ifdef CONFIG_RSBAC_PM
++ rsbac_pr_debug(stack, "free stack before init_pm: %lu\n",
++ rsbac_stack_free_space());
++ rsbac_init_pm();
++#endif
++
++#if defined(CONFIG_RSBAC_DAZ) && !defined(CONFIG_RSBAC_MAINT)
++ rsbac_pr_debug(stack, "free stack before init_daz: %lu\n",
++ rsbac_stack_free_space());
++ rsbac_init_daz();
++#endif
++
++#if defined(CONFIG_RSBAC_RC)
++ rsbac_pr_debug(stack, "free stack before init_rc: %lu\n",
++ rsbac_stack_free_space());
++ rsbac_init_rc();
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH)
++ rsbac_pr_debug(stack, "free stack before init_auth: %lu\n",
++ rsbac_stack_free_space());
++ rsbac_init_auth();
++ if (rsbac_auth_enable_login) {
++ struct dentry *t_dentry;
++ struct dentry *dir_dentry = NULL;
++ struct rsbac_auth_fd_aci_t auth_fd_aci =
++ DEFAULT_AUTH_FD_ACI;
++
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): auth_enable_login is set: setting auth_may_setuid for %s\n",
++ RSBAC_AUTH_LOGIN_PATH);
++
++ /* lookup filename */
++ if (vfsmount_p) {
++ mutex_lock(&vfsmount_p->mnt_sb->s_root->d_inode->i_mutex);
++ dir_dentry =
++ rsbac_lookup_one_len(RSBAC_AUTH_LOGIN_PATH_DIR,
++ vfsmount_p->mnt_sb->s_root,
++ strlen
++ (RSBAC_AUTH_LOGIN_PATH_DIR));
++ mutex_unlock(&vfsmount_p->mnt_sb->s_root->d_inode->i_mutex);
++ }
++ if (!dir_dentry) {
++ err = -RSBAC_ENOTFOUND;
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): call to rsbac_lookup_one_len for /%s failed\n",
++ RSBAC_AUTH_LOGIN_PATH_DIR);
++ goto auth_out;
++ }
++ if (IS_ERR(dir_dentry)) {
++ err = PTR_ERR(dir_dentry);
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): call to rsbac_lookup_one_len for /%s returned %i\n",
++ RSBAC_AUTH_LOGIN_PATH_DIR, err);
++ goto auth_out;
++ }
++ if (!dir_dentry->d_inode) {
++ err = -RSBAC_ENOTFOUND;
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): call to rsbac_lookup_one_len for /%s failed\n",
++ RSBAC_AUTH_LOGIN_PATH_DIR);
++ dput(dir_dentry);
++ goto auth_out;
++ }
++
++ mutex_lock(&dir_dentry->d_inode->i_mutex);
++ t_dentry = rsbac_lookup_one_len(RSBAC_AUTH_LOGIN_PATH_FILE,
++ dir_dentry,
++ strlen
++ (RSBAC_AUTH_LOGIN_PATH_FILE));
++ mutex_unlock(&dir_dentry->d_inode->i_mutex);
++
++ if (!t_dentry) {
++ err = -RSBAC_ENOTFOUND;
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): call to rsbac_lookup_one_len for /%s/%s failed\n",
++ RSBAC_AUTH_LOGIN_PATH_DIR,
++ RSBAC_AUTH_LOGIN_PATH_FILE);
++ goto auth_out;
++ }
++ if (IS_ERR(t_dentry)) {
++ err = PTR_ERR(t_dentry);
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): call to rsbac_lookup_one_len for /%s/%s returned %i\n",
++ RSBAC_AUTH_LOGIN_PATH_DIR,
++ RSBAC_AUTH_LOGIN_PATH_FILE, err);
++ goto auth_out;
++ }
++ if (!t_dentry->d_inode) {
++ err = -RSBAC_ENOTFOUND;
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): call to rsbac_lookup_one_len for /%s/%s failed\n",
++ RSBAC_AUTH_LOGIN_PATH_DIR,
++ RSBAC_AUTH_LOGIN_PATH_FILE);
++ dput(t_dentry);
++ goto auth_out;
++ }
++
++ /* is inode of type file? */
++ if (!S_ISREG(t_dentry->d_inode->i_mode)) {
++			rsbac_printk(KERN_WARNING "rsbac_do_init(): %s is not a regular file\n",
++ RSBAC_AUTH_LOGIN_PATH);
++ err = -RSBAC_EINVALIDTARGET;
++ goto auth_out_dput;
++ }
++ rsbac_list_get_data(device_p->handles.auth,
++ &t_dentry->d_inode->i_ino,
++ &auth_fd_aci);
++ auth_fd_aci.auth_may_setuid = TRUE;
++ if (rsbac_list_add(device_p->handles.auth, &t_dentry->d_inode->i_ino, &auth_fd_aci)) { /* Adding failed! */
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): Could not add AUTH file/dir item!\n");
++ err = -RSBAC_ECOULDNOTADDITEM;
++ }
++
++ auth_out_dput:
++ auth_out:
++ {
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_ACL)
++ rsbac_pr_debug(stack, "free stack before init_acl: %lu\n",
++ rsbac_stack_free_space());
++ rsbac_init_acl();
++#endif
++
++#if defined(CONFIG_RSBAC_UM)
++ rsbac_pr_debug(stack, "free stack before init_um: %lu\n",
++ rsbac_stack_free_space());
++ rsbac_init_um();
++#endif
++ rsbac_pr_debug(stack, "free stack before init_adf: %lu\n",
++ rsbac_stack_free_space());
++ rsbac_init_adf();
++
++#if defined(CONFIG_RSBAC_PAX) && defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++ pax_set_initial_flags_func = rsbac_pax_set_flags_func;
++#endif
++
++/* Tell that rsbac is initialized */
++ rsbac_allow_mounts = TRUE;
++
++/* Add initrd mount */
++#if 0 && defined(CONFIG_BLK_DEV_INITRD)
++ if (initrd_start) {
++ sb_p = user_get_super(MKDEV(RAMDISK_MAJOR, 0));
++ if (sb_p) {
++ rsbac_mount(sb_p, NULL);
++ drop_super(sb_p);
++ }
++ sb_p = user_get_super(MKDEV(RAMDISK_MAJOR, INITRD_MINOR));
++ if (sb_p) {
++ rsbac_mount(sb_p, NULL);
++ drop_super(sb_p);
++ }
++ }
++#endif
++
++/* Add delayed mounts */
++ if (rsbac_mount_list) {
++ struct rsbac_mount_list_t * mount_p = rsbac_mount_list;
++
++ while (mount_p) {
++ /* skip root dev */
++ if(!lookup_device(mount_p->vfsmount_p->mnt_sb->s_dev, device_hash(mount_p->vfsmount_p->mnt_sb->s_dev))) {
++ rsbac_printk(KERN_INFO "rsbac_do_init(): mounting delayed device %02u:%02u, fs-type %s\n",
++ MAJOR(mount_p->vfsmount_p->mnt_sb->s_dev),
++ MINOR(mount_p->vfsmount_p->mnt_sb->s_dev),
++ mount_p->vfsmount_p->mnt_sb->s_type->name);
++ rsbac_mount(mount_p->vfsmount_p);
++ } else {
++ mntput(mount_p->vfsmount_p);
++ }
++			rsbac_mount_list = mount_p->next;
++			kfree(mount_p);
++			mount_p = rsbac_mount_list;
++ }
++ rsbac_mount_list = NULL;
++ }
++
++/* Tell that rsbac is initialized */
++ rsbac_initialized = TRUE;
++
++/* Force a check, if configured */
++#ifdef CONFIG_RSBAC_INIT_CHECK
++ rsbac_pr_debug(stack, "free stack before rsbac_check: %lu\n",
++ rsbac_stack_free_space());
++ rsbac_printk(KERN_INFO "rsbac_do_init(): Forcing consistency check.\n");
++ rsbac_check_lists(1);
++#if defined(CONFIG_RSBAC_ACL)
++ rsbac_check_acl(1);
++#endif
++#endif
++
++ if (!current->fs) {
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): current->fs is invalid!\n");
++ err = -RSBAC_EINVALIDPOINTER;
++ }
++ out:
++ /* We are up and running */
++ rsbac_printk(KERN_INFO "rsbac_do_init(): Ready.\n");
++
++	rsbac_kfree(list_info_p);
++ return err;
++}
++
++
++#if (defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)) \
++ || defined(CONFIG_RSBAC_INIT_THREAD)
++/* rsbac kernel timer for auto-write */
++void wakeup_rsbacd(u_long dummy)
++{
++ wake_up(&rsbacd_wait);
++}
++#endif
++
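++/*
++ * With CONFIG_RSBAC_INIT_THREAD, rsbac_do_init() runs in this separate
++ * kernel thread so that rsbac_init() can bound initialization with a
++ * timeout and gets woken up once it has finished.
++ */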
++#ifdef CONFIG_RSBAC_INIT_THREAD
++/* rsbac kernel daemon for init */
++static int rsbac_initd(void *dummy)
++{
++ rsbac_printk(KERN_INFO "rsbac_initd(): Initializing.\n");
++
++/* Dead loop for timeout testing */
++/* while(1) { } */
++
++ rsbac_pr_debug(stack, "free stack before rsbac_do_init(): %lu\n",
++ rsbac_stack_free_space());
++ /* init RSBAC */
++ rsbac_do_init();
++
++ rsbac_pr_debug(stack, "free stack after rsbac_do_init(): %lu\n",
++ rsbac_stack_free_space());
++ /* wake up init process */
++ wake_up(&rsbacd_wait);
++ /* ready */
++ rsbac_printk(KERN_INFO "rsbac_initd(): Exiting.\n");
++ do_exit(0);
++ return 0;
++}
++#endif
++
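++/*
++ * The rsbacd thread wakes up on a timer, cleans up the generic lists at
++ * the configured check interval and writes dirty persistent lists to
++ * disk unless writing is currently disabled.
++ */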
++#if defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)
++/* rsbac kernel daemon for auto-write */
++static int rsbacd(void *dummy)
++{
++ struct task_struct *tsk = current;
++ char *name = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ unsigned long list_check_time = jiffies + HZ * rsbac_list_check_interval;
++
++ rsbac_printk(KERN_INFO "rsbacd(): Initializing.\n");
++
++ sys_close(0);
++ sys_close(1);
++ sys_close(2);
++
++ rsbac_pr_debug(auto, "Setting auto timer.\n");
++/* This might already have been done for rsbac_initd thread */
++#ifndef CONFIG_RSBAC_INIT_THREAD
++ init_timer(&rsbac_timer);
++ rsbac_timer.function = wakeup_rsbacd;
++ rsbac_timer.data = 0;
++ rsbac_timer.expires = jiffies + auto_interval;
++ add_timer(&rsbac_timer);
++#endif
++ rsbac_pr_debug(stack, "free stack: %lu\n", rsbac_stack_free_space());
++ for (;;) {
++ /* wait */
++ /* Unblock all signals. */
++ flush_signals(tsk);
++ spin_lock_irq(&tsk->sighand->siglock);
++ flush_signal_handlers(tsk, 1);
++ sigemptyset(&tsk->blocked);
++ recalc_sigpending();
++ spin_unlock_irq(&tsk->sighand->siglock);
++ /* set new timer */
++ mod_timer(&rsbac_timer, jiffies + auto_interval);
++ interruptible_sleep_on(&rsbacd_wait);
++#ifdef CONFIG_PM
++ if (try_to_freeze())
++ continue;
++ /* sleep */
++#endif
++
++ /* Cleanup lists regularly */
++ if (time_after_eq(jiffies, list_check_time)) {
++ list_check_time =
++ jiffies +
++ HZ * rsbac_list_check_interval;
++ rsbac_pr_debug(auto, "cleaning up lists\n");
++ rsbac_check_lists(1);
++ }
++ /* Write lists */
++ if (rsbac_initialized && !rsbac_debug_no_write) {
++ int err = 0;
++ /* rsbac_pr_debug(auto, "calling rsbac_write()\n"); */
++ down(&rsbac_write_sem);
++ if (!rsbac_debug_no_write) {
++ up(&rsbac_write_sem);
++ err = rsbac_write();
++ } else
++ up(&rsbac_write_sem);
++ if (err < 0) {
++ if (name)
++ rsbac_printk(KERN_WARNING "rsbacd(): rsbac_write returned error %s!\n",
++ get_error_name(name,
++ err));
++ else
++ rsbac_printk(KERN_WARNING "rsbacd(): rsbac_write returned error %i!\n",
++ err);
++ } else if (err > 0)
++ rsbac_pr_debug(auto, "rsbac_write() wrote %i "
++ "lists\n", err);
++ }
++ }
++ return 0;
++}
++#endif
++
++/************************************************* */
++/* Init function */
++/************************************************* */
++
++/* All functions return 0, if no error occurred, and a negative error code */
++/* otherwise. The error codes are defined in rsbac_error.h. */
++
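++/*
++ * Kernel threads started before RSBAC is initialized are remembered in
++ * this list so that their process ACI can be set later from rsbac_init().
++ */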
++struct rsbac_kthread_t {
++ struct list_head list;
++ rsbac_pid_t pid;
++};
++struct rsbac_kthread_t * rsbac_kthread;
++int rsbac_kthread_size_t;
++
++int rsbac_kthreads_init(void)
++{
++ rsbac_kthread_size_t = sizeof(struct rsbac_kthread_t);
++	rsbac_kthread = kmalloc(rsbac_kthread_size_t, GFP_ATOMIC);
++	if (!rsbac_kthread)
++		return -ENOMEM;
++	INIT_LIST_HEAD(&rsbac_kthread->list);
++ return 0;
++}
++
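++/*
++ * Record a kernel thread created while RSBAC is still initializing;
++ * rsbac_init() later marks all recorded threads via rsbac_kthread_notify().
++ */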
++int rsbac_mark_kthread(rsbac_pid_t pid)
++{
++ struct rsbac_kthread_t * rsbac_kthread_new;
++
++ if (rsbac_initialized)
++ return 0;
++	rsbac_kthread_new = kmalloc(rsbac_kthread_size_t, GFP_ATOMIC);
++	if (!rsbac_kthread_new)
++		return -ENOMEM;
++	rsbac_kthread_new->pid = pid;
++ list_add(&rsbac_kthread_new->list, &rsbac_kthread->list);
++ return 0;
++}
++
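++/*
++ * Called from kernel initialization with the root device: run (or spawn)
++ * rsbac_do_init(), start the rsbacd auto-write thread and give all
++ * already existing processes and kernel threads their initial MAC/RC
++ * process ACI.
++ */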
++#ifdef CONFIG_RSBAC_INIT_DELAY
++int rsbac_init(kdev_t root_dev)
++#else
++int __init rsbac_init(kdev_t root_dev)
++#endif
++{
++#ifdef CONFIG_RSBAC_RC
++ struct rsbac_rc_process_aci_t rc_init_p_aci = DEFAULT_RC_P_INIT_ACI;
++#endif
++#ifdef CONFIG_RSBAC_INIT_THREAD
++ struct task_struct * rsbac_init_thread;
++#endif
++ struct task_struct * rsbacd_thread;
++#if defined(CONFIG_RSBAC_MAC) || defined(CONFIG_RSBAC_RC)
++ rsbac_pid_t init_pid;
++ struct rsbac_kthread_t * rsbac_kthread_entry;
++ struct list_head * p;
++#endif
++
++ int err = 0;
++#if defined(CONFIG_RSBAC_AUTO_WRITE) \
++ || defined(CONFIG_RSBAC_INIT_THREAD) || defined(CONFIG_RSBAC_NO_WRITE)
++ rsbac_pid_t rsbacd_pid;
++#endif
++
++ if (rsbac_initialized) {
++ rsbac_printk(KERN_WARNING "rsbac_init(): RSBAC already initialized\n");
++ return -RSBAC_EREINIT;
++ }
++ if (!current->fs) {
++ rsbac_printk(KERN_WARNING "rsbac_init(): current->fs is invalid!\n");
++ return -RSBAC_EINVALIDPOINTER;
++ }
++
++ rsbac_root_dev = root_dev;
++
++#if (defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)) \
++ || defined(CONFIG_RSBAC_INIT_THREAD)
++ /* init the rsbacd wait queue head */
++ init_waitqueue_head(&rsbacd_wait);
++#endif
++
++#ifdef CONFIG_RSBAC_INIT_THREAD
++/* trigger dependency */
++#ifdef CONFIG_RSBAC_MAX_INIT_TIME
++#endif
++ rsbac_printk(KERN_INFO "rsbac_init(): Setting init timeout to %u seconds (%u jiffies).\n",
++ RSBAC_MAX_INIT_TIME, RSBAC_MAX_INIT_TIME * HZ);
++ init_timer(&rsbac_timer);
++ rsbac_timer.function = wakeup_rsbacd;
++ rsbac_timer.data = 0;
++ rsbac_timer.expires = jiffies + (RSBAC_MAX_INIT_TIME * HZ);
++ add_timer(&rsbac_timer);
++
++/* Start rsbac thread for init */
++ rsbac_init_thread = kthread_create(rsbac_initd, NULL, "rsbac_initd");
++ if (IS_ERR(rsbac_init_thread))
++ goto panic;
++ rsbacd_pid = task_pid(rsbac_init_thread);
++ wake_up_process(rsbac_init_thread);
++ rsbac_printk(KERN_INFO "rsbac_init(): Started rsbac_initd thread with pid %u\n",
++ pid_nr(rsbacd_pid));
++
++ if (!rsbac_initialized)
++ interruptible_sleep_on(&rsbacd_wait);
++ if (!rsbac_initialized) {
++ rsbac_printk(KERN_ERR
++ "rsbac_init(): *** RSBAC init timed out - RSBAC not correctly initialized! ***\n");
++ rsbac_printk(KERN_ERR
++ "rsbac_init(): *** Killing rsbac_initd! ***\n");
++ sys_kill(pid_nr(rsbacd_pid), SIGKILL);
++ rsbac_initialized = FALSE;
++ }
++#else
++ rsbac_do_init();
++#endif
++
++#if defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)
++ if (rsbac_initialized) {
++ /* Start rsbacd thread for auto write */
++ rsbacd_thread = kthread_create(rsbacd, NULL, "rsbacd");
++ if (IS_ERR(rsbacd_thread)) {
++ rsbac_printk(KERN_ERR
++				     "rsbac_init(): *** Starting rsbacd thread failed with error %li! ***\n",
++ PTR_ERR(rsbacd_thread));
++ } else {
++ rsbacd_pid = task_pid(rsbacd_thread);
++ wake_up_process(rsbacd_thread);
++ rsbac_printk(KERN_INFO "rsbac_init(): Started rsbacd thread with pid %u\n",
++ pid_nr(rsbacd_pid));
++ }
++ }
++#endif
++
++/* Ready. */
++/* schedule(); */
++#ifdef CONFIG_RSBAC_INIT_THREAD
++ sys_wait4(-1, NULL, WNOHANG, NULL);
++#endif
++
++/* Add all processes to list of processes as init processes */
++#if defined(CONFIG_RSBAC_MAC) || defined(CONFIG_RSBAC_RC)
++ {
++#ifdef CONFIG_RSBAC_MAC
++ struct rsbac_mac_user_aci_t * mac_u_aci_p;
++#endif
++#ifdef CONFIG_RSBAC_RC
++ struct rsbac_rc_user_aci_t * rc_u_aci_p;
++#endif
++ rsbac_uid_t user = RSBAC_SYSADM_UID;
++ rsbac_pid_t pid = find_pid_ns(1, &init_pid_ns);
++ struct task_struct *p;
++
++#ifdef CONFIG_RSBAC_RC
++ union rsbac_target_id_t k_tid;
++ union rsbac_attribute_value_t k_attr_val;
++#endif
++
++ rsbac_printk(KERN_INFO "rsbac_init(): Adjusting attributes of existing processes\n");
++/* Prepare entries: change standard values to root's values */
++#ifdef CONFIG_RSBAC_MAC
++ mac_u_aci_p = rsbac_kmalloc_unlocked(sizeof(*mac_u_aci_p));
++ if (mac_u_aci_p) {
++ if(!rsbac_list_get_data
++ (user_handles.mac, &user, mac_u_aci_p)) {
++ mac_init_p_aci.owner_sec_level =
++ mac_u_aci_p->security_level;
++ mac_init_p_aci.owner_initial_sec_level =
++ mac_u_aci_p->initial_security_level;
++ mac_init_p_aci.current_sec_level =
++ mac_u_aci_p->initial_security_level;
++ mac_init_p_aci.owner_min_sec_level =
++ mac_u_aci_p->min_security_level;
++ mac_init_p_aci.mac_owner_categories =
++ mac_u_aci_p->mac_categories;
++ mac_init_p_aci.mac_owner_initial_categories =
++ mac_u_aci_p->mac_initial_categories;
++ mac_init_p_aci.mac_curr_categories =
++ mac_u_aci_p->mac_initial_categories;
++ mac_init_p_aci.mac_owner_min_categories =
++ mac_u_aci_p->mac_min_categories;
++ mac_init_p_aci.min_write_open =
++ mac_u_aci_p->security_level;
++ mac_init_p_aci.max_read_open =
++ mac_u_aci_p->min_security_level;
++ mac_init_p_aci.min_write_categories =
++ mac_u_aci_p->mac_categories;
++ mac_init_p_aci.max_read_categories =
++ mac_u_aci_p->mac_min_categories;
++ mac_init_p_aci.mac_process_flags =
++ (mac_u_aci_p->
++ mac_user_flags & RSBAC_MAC_P_FLAGS) |
++ RSBAC_MAC_DEF_INIT_P_FLAGS;
++ }
++ rsbac_kfree(mac_u_aci_p);
++ }
++#endif
++
++/* Set process aci - first init */
++#ifdef CONFIG_RSBAC_MAC
++ if (rsbac_list_add
++ (process_handles.mac, &pid,
++ &mac_init_p_aci))
++			rsbac_printk(KERN_WARNING "rsbac_do_init(): MAC ACI for Init process 1 could not be added!\n");
++#endif
++#ifdef CONFIG_RSBAC_RC
++ /* Get boot role */
++ if (rsbac_rc_get_boot_role(&rc_init_p_aci.rc_role)) { /* none: use root's role */
++ rc_u_aci_p = rsbac_kmalloc_unlocked(sizeof(*rc_u_aci_p));
++ if (rc_u_aci_p) {
++ if (!rsbac_list_get_data
++ (user_handles.rc, &user, rc_u_aci_p)) {
++ rc_init_p_aci.rc_role = rc_u_aci_p->rc_role;
++ } else { /* last resort: general role */
++ rsbac_ds_get_error("rsbac_do_init",
++ A_rc_def_role);
++ rc_init_p_aci.rc_role =
++ RSBAC_RC_GENERAL_ROLE;
++ }
++ rsbac_kfree(rc_u_aci_p);
++ }
++ }
++ rc_kernel_p_aci.rc_role = rc_init_p_aci.rc_role;
++ if (rsbac_list_add
++ (process_handles.rc, &pid,
++ &rc_init_p_aci))
++		rsbac_printk(KERN_WARNING "rsbac_do_init(): RC ACI for Init process 1 could not be added!\n");
++#endif
++ read_lock(&tasklist_lock);
++ for_each_process(p)
++ {
++ /* not for kernel and init though... */
++ if ((!p->pid) || (p->pid == 1))
++ continue;
++ pid = task_pid(p);
++			rsbac_pr_debug(ds, "setting aci for process %u (%s)\n", p->pid, p->comm);
++#ifdef CONFIG_RSBAC_MAC
++ if (rsbac_list_add
++ (process_handles.mac, &pid,
++ &mac_init_p_aci))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): MAC ACI for Init process %u could not be added!\n",
++					     p->pid);
++#endif
++#ifdef CONFIG_RSBAC_RC
++ k_tid.process = pid;
++ if (rsbac_get_attr(SW_GEN, T_PROCESS,
++ k_tid,
++ A_kernel_thread,
++ &k_attr_val,
++ FALSE)) {
++				rsbac_printk(KERN_WARNING "rsbac_do_init(): kernel_thread attribute for process %u could not be read!\n", p->pid);
++ }
++ if (k_attr_val.kernel_thread) {
++ if (rsbac_list_add
++ (process_handles.rc,
++ &pid, &rc_kernel_p_aci))
++ rsbac_printk(KERN_WARNING "rsbac_do_init(): RC ACI for Kernel thread %u could not be added!\n",
++						     p->pid);
++ }
++#endif
++ }
++ read_unlock(&tasklist_lock);
++ }
++ list_for_each(p, &rsbac_kthread->list) {
++ rsbac_kthread_entry = list_entry(p,
++ struct rsbac_kthread_t, list);
++ if (pid_nr(rsbac_kthread_entry->pid) != 1
++ && rsbac_kthread_entry->pid != rsbacd_pid)
++ {
++ read_lock(&tasklist_lock);
++ if(pid_task(rsbac_kthread_entry->pid, PIDTYPE_PID)) {
++ read_unlock(&tasklist_lock);
++ rsbac_kthread_notify(rsbac_kthread_entry->pid);
++ }
++ else {
++ read_unlock(&tasklist_lock);
++ rsbac_pr_debug(ds, "rsbac_do_init(): skipping gone away pid %u\n",
++ pid_nr(rsbac_kthread_entry->pid));
++ }
++			/* The kthread list is only needed during init.
++			 * Freeing it here has caused an oops in the past
++			 * and it costs almost no memory, so keep it
++			 * allocated for now. -- michal
++			 */
++
++ /* list_del(&rsbac_kthread_entry->list);
++ * kfree(rsbac_kthread_entry);*/
++ }
++ } /* explicitly mark init and rsbacd */
++ init_pid = find_pid_ns(1, &init_pid_ns);
++#ifdef CONFIG_RSBAC_MAC
++	if (rsbac_list_add(process_handles.mac, &init_pid, &mac_init_p_aci))
++		rsbac_printk(KERN_WARNING "rsbac_do_init(): MAC ACI for \"init\" process could not be added!\n");
++	if (rsbac_list_add(process_handles.mac, &rsbacd_pid, &mac_init_p_aci))
++		rsbac_printk(KERN_WARNING "rsbac_do_init(): MAC ACI for \"rsbacd\" process could not be added!\n");
++#endif
++#ifdef CONFIG_RSBAC_RC
++	if (rsbac_list_add(process_handles.rc, &init_pid, &rc_init_p_aci))
++		rsbac_printk(KERN_WARNING "rsbac_do_init(): RC ACI for \"init\" process could not be added!\n");
++	if (rsbac_list_add(process_handles.rc, &rsbacd_pid, &rc_kernel_p_aci))
++		rsbac_printk(KERN_WARNING "rsbac_do_init(): RC ACI for \"rsbacd\" process could not be added!\n");
++#endif
++
++ /*kfree(rsbac_kthread);*/
++#endif /* MAC or RC */
++
++ rsbac_printk(KERN_INFO "rsbac_init(): Ready.\n");
++ return err;
++
++#ifdef CONFIG_RSBAC_INIT_THREAD
++panic:
++ rsbac_printk(KERN_ERR "rsbac_init(): *** RSBAC init failed to start - RSBAC not correctly initialized! ***\n");
++ /* let's panic - but only when in secure mode, warn otherwise */
++#if !defined(CONFIG_RSBAC_MAINT)
++#ifdef CONFIG_RSBAC_SOFTMODE
++	if (!rsbac_softmode)
++#endif
++		panic("RSBAC: rsbac_init(): *** Unable to initialize - PANIC ***\n");
++#endif
++	return -RSBAC_ENOTINITIALIZED;
++#endif
++}
++
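++/*
++ * Give a newly created kernel thread the default MAC/RC kernel process
++ * ACI once RSBAC is up; earlier threads are only recorded via
++ * rsbac_mark_kthread().
++ */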
++int rsbac_kthread_notify(rsbac_pid_t pid)
++{
++ if (!rsbac_initialized)
++ return 0;
++// rsbac_printk(KERN_DEBUG "rsbac_kthread_notify: marking pid %u!\n",
++// pid);
++/* Set process aci */
++#ifdef CONFIG_RSBAC_MAC
++ if (rsbac_list_add
++ (process_handles.mac, &pid, &mac_init_p_aci))
++		rsbac_printk(KERN_WARNING "rsbac_kthread_notify(): MAC ACI for kernel process %u could not be added!\n",
++ pid_nr(pid));
++#endif
++#ifdef CONFIG_RSBAC_RC
++ if (rsbac_list_add
++ (process_handles.rc, &pid, &rc_kernel_p_aci))
++		rsbac_printk(KERN_WARNING "rsbac_kthread_notify(): RC ACI for kernel process %u could not be added!\n",
++ pid_nr(pid));
++#endif
++ return 0;
++}
++
++/* When mounting a device, its ACI must be read and added to the ACI lists. */
++
++EXPORT_SYMBOL(rsbac_mount);
++int rsbac_mount(struct vfsmount * vfsmount_p)
++{
++ int err = 0;
++ struct rsbac_device_list_item_t *device_p;
++ struct rsbac_device_list_item_t *new_device_p;
++ rsbac_boolean_t old_no_write;
++ u_int hash;
++ int srcu_idx;
++
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_mount(): called from interrupt, process %u(%s)!\n",
++ current->pid, current->comm);
++ return -RSBAC_EFROMINTERRUPT;
++ }
++ if (!vfsmount_p || !vfsmount_p->mnt_sb) {
++ rsbac_printk(KERN_WARNING "rsbac_mount(): called with NULL pointer\n");
++ return -RSBAC_EINVALIDPOINTER;
++ }
++ if (!rsbac_allow_mounts) {
++ struct rsbac_mount_list_t * mount_p;
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++ if (!RSBAC_MAJOR(rsbac_delayed_root)
++ && !RSBAC_MINOR(rsbac_delayed_root)
++ && rsbac_delayed_root_str[0]
++ ) { /* translate string to kdev_t */
++ char *p = rsbac_delayed_root_str;
++ u_int major = 0;
++ u_int minor = 0;
++
++ major = simple_strtoul(p, NULL, 0);
++ while ((*p != ':') && (*p != '\0'))
++ p++;
++ if (*p) {
++ p++;
++ minor = simple_strtoul(p, NULL, 0);
++ }
++ rsbac_delayed_root = RSBAC_MKDEV(major, minor);
++ }
++ if (!rsbac_no_delay_init
++ && ((!RSBAC_MAJOR(rsbac_delayed_root)
++ && !RSBAC_MINOR(rsbac_delayed_root)
++ && (MAJOR(vfsmount_p->mnt_sb->s_dev) > 1)
++ )
++ || ((RSBAC_MAJOR(rsbac_delayed_root)
++ || RSBAC_MINOR(rsbac_delayed_root)
++ )
++ &&
++ ((MAJOR(vfsmount_p->mnt_sb->s_dev) ==
++ RSBAC_MAJOR(rsbac_delayed_root))
++ && (!RSBAC_MINOR(rsbac_delayed_root)
++ || (MINOR(vfsmount_p->mnt_sb->s_dev) ==
++ RSBAC_MINOR(rsbac_delayed_root))
++ )
++ )
++ )
++ )
++ ) {
++ if (RSBAC_MAJOR(rsbac_delayed_root)
++ || RSBAC_MINOR(rsbac_delayed_root)) {
++ rsbac_printk(KERN_INFO "rsbac_mount(): forcing delayed RSBAC init on DEV %02u:%02u, matching %02u:%02u!\n",
++ MAJOR(vfsmount_p->mnt_sb->s_dev),
++ MINOR(vfsmount_p->mnt_sb->s_dev),
++ RSBAC_MAJOR
++ (rsbac_delayed_root),
++ RSBAC_MINOR
++ (rsbac_delayed_root));
++ } else {
++ rsbac_printk(KERN_INFO "rsbac_mount(): forcing delayed RSBAC init on DEV %02u:%02u!\n",
++ MAJOR(vfsmount_p->mnt_sb->s_dev),
++ MINOR(vfsmount_p->mnt_sb->s_dev));
++ }
++ rsbac_root_vfsmount_p = vfsmount_p;
++ rsbac_init(vfsmount_p->mnt_sb->s_dev);
++ return 0;
++ }
++#endif
++
++ rsbac_printk(KERN_WARNING "rsbac_mount(): RSBAC not initialized while mounting DEV %02u:%02u, fs-type %s, delaying\n",
++ MAJOR(vfsmount_p->mnt_sb->s_dev), MINOR(vfsmount_p->mnt_sb->s_dev),
++ vfsmount_p->mnt_sb->s_type->name);
++ mount_p = kmalloc(sizeof(*mount_p), GFP_KERNEL);
++ if (mount_p) {
++ mount_p->vfsmount_p = mntget(vfsmount_p);
++ mount_p->next = rsbac_mount_list;
++ rsbac_mount_list = mount_p;
++ }
++
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ rsbac_pr_debug(ds, "mounting device %02u:%02u\n",
++ MAJOR(vfsmount_p->mnt_sb->s_dev), MINOR(vfsmount_p->mnt_sb->s_dev));
++ rsbac_pr_debug(stack, "free stack: %lu\n", rsbac_stack_free_space());
++ down(&rsbac_write_sem);
++ old_no_write = rsbac_debug_no_write;
++ rsbac_debug_no_write = TRUE;
++ up(&rsbac_write_sem);
++ hash = device_hash(vfsmount_p->mnt_sb->s_dev);
++ srcu_idx = srcu_read_lock(&device_list_srcu[hash]);
++ device_p = lookup_device(vfsmount_p->mnt_sb->s_dev, hash);
++ /* repeated mount? */
++ if (device_p) {
++ rsbac_printk(KERN_INFO "rsbac_mount: repeated mount %u of device %02u:%02u\n",
++ device_p->mount_count, MAJOR(vfsmount_p->mnt_sb->s_dev),
++ MINOR(vfsmount_p->mnt_sb->s_dev));
++ device_p->mount_count++;
++ if (!device_p->vfsmount_p)
++ device_p->vfsmount_p = mntget(vfsmount_p);
++ else
++ if ( real_mount(device_p->vfsmount_p)->mnt_mountpoint
++ && (real_mount(device_p->vfsmount_p)->mnt_mountpoint->d_sb->s_dev == device_p->vfsmount_p->mnt_sb->s_dev)
++ ) {
++ mntput(device_p->vfsmount_p);
++ device_p->vfsmount_p = mntget(vfsmount_p);
++ }
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ } else {
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ /* OK, go on */
++ new_device_p = create_device_item(vfsmount_p);
++ rsbac_pr_debug(stack, "after creating device item: free stack: %lu\n",
++ rsbac_stack_free_space());
++ if (!new_device_p) {
++ rsbac_debug_no_write = old_no_write;
++ return -RSBAC_ECOULDNOTADDDEVICE;
++ }
++
++ srcu_idx = srcu_read_lock(&device_list_srcu[hash]);
++ /* make sure to only add, if this device item has not been added in the meantime */
++ device_p = lookup_device(vfsmount_p->mnt_sb->s_dev, hash);
++ if (device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_mount(): mount race for device %02u:%02u detected!\n",
++ MAJOR(vfsmount_p->mnt_sb->s_dev),
++ MINOR(vfsmount_p->mnt_sb->s_dev));
++ device_p->mount_count++;
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ clear_device_item(new_device_p);
++ } else {
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ device_p = add_device_item(new_device_p);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_mount: adding device %02u:%02u failed!\n",
++ MAJOR(vfsmount_p->mnt_sb->s_dev),
++ MINOR(vfsmount_p->mnt_sb->s_dev));
++ clear_device_item(new_device_p);
++ rsbac_debug_no_write = old_no_write;
++ return -RSBAC_ECOULDNOTADDDEVICE;
++ }
++ mntget(device_p->vfsmount_p);
++ }
++
++ /* we do not lock device head - we know the device_p and hope for the best... */
++ /* also, we are within kernel mount sem */
++ if ((err = register_fd_lists(new_device_p, vfsmount_p->mnt_sb->s_dev))) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_mount(): File/Dir ACI registration failed for dev %02u:%02u, err %s!\n",
++ MAJOR(vfsmount_p->mnt_sb->s_dev),
++ MINOR(vfsmount_p->mnt_sb->s_dev),
++ get_error_name(tmp, err));
++ rsbac_kfree(tmp);
++ }
++ }
++ rsbac_pr_debug(stack, "after registering fd lists: free stack: %lu\n",
++ rsbac_stack_free_space());
++ }
++
++/* call other mount functions */
++#if defined(CONFIG_RSBAC_MAC)
++ rsbac_mount_mac(vfsmount_p->mnt_sb->s_dev);
++ rsbac_pr_debug(stack, "after mount_mac: free stack: %lu\n",
++ rsbac_stack_free_space());
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ rsbac_mount_auth(vfsmount_p->mnt_sb->s_dev);
++ rsbac_pr_debug(stack, "after mount_auth: free stack: %lu\n",
++ rsbac_stack_free_space());
++#endif
++#if defined(CONFIG_RSBAC_ACL)
++ rsbac_mount_acl(vfsmount_p->mnt_sb->s_dev);
++ rsbac_pr_debug(stack, "after mount_acl: free stack: %lu\n",
++ rsbac_stack_free_space());
++#endif
++#if defined(CONFIG_RSBAC_REG)
++ rsbac_mount_reg(vfsmount_p->mnt_sb->s_dev);
++ rsbac_pr_debug(stack, "after mount_reg: free stack: %lu\n",
++ rsbac_stack_free_space());
++#endif /* REG */
++
++ rsbac_debug_no_write = old_no_write;
++ return err;
++}
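++
++/*
++ * Illustrative sketch, not part of the original RSBAC patch: the delayed-init
++ * handling above parses the rsbac_delayed_root_str boot parameter in the form
++ * "MAJOR:MINOR" (e.g. "8:1") before calling RSBAC_MKDEV().  The same parsing
++ * step, pulled out into a small helper, could look like this;
++ * rsbac_parse_delayed_root() is a made-up name used only for illustration.
++ */
++static inline void rsbac_parse_delayed_root(const char *str,
++					    u_int *major_p, u_int *minor_p)
++{
++	*major_p = simple_strtoul(str, NULL, 0);
++	*minor_p = 0;
++	while (*str && (*str != ':'))
++		str++;
++	if (*str)	/* found the ':' separator, minor number follows */
++		*minor_p = simple_strtoul(str + 1, NULL, 0);
++	/* the caller then builds the device id with RSBAC_MKDEV(major, minor) */
++}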
++
++/* When umounting a device, its ACI must be removed from the ACI lists. */
++/* Removing the device ACI should be no problem. */
++
++EXPORT_SYMBOL(rsbac_umount);
++int rsbac_umount(struct vfsmount *vfsmount_p)
++{
++ struct rsbac_device_list_item_t *device_p;
++ kdev_t kdev;
++ u_int hash;
++
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_umount(): called from interrupt, process %u(%s)!\n",
++ current->pid, current->comm);
++ return -RSBAC_EFROMINTERRUPT;
++ }
++ if (!vfsmount_p) {
++ rsbac_printk(KERN_WARNING "rsbac_umount(): called with NULL pointer\n");
++ return -RSBAC_EINVALIDPOINTER;
++ }
++ if (!rsbac_initialized) {
++ rsbac_printk(KERN_WARNING "rsbac_umount(): RSBAC not initialized\n");
++ if (rsbac_mount_list) {
++ struct rsbac_mount_list_t * mount_p;
++ struct rsbac_mount_list_t * prev_mount_p;
++
++ mount_p = rsbac_mount_list;
++ prev_mount_p = NULL;
++ while (mount_p) {
++ if (mount_p->vfsmount_p == vfsmount_p) {
++ mntput(vfsmount_p);
++ rsbac_printk(KERN_WARNING "rsbac_umount(): found delayed mount for device %02u:%02u, removing\n",
++ RSBAC_MAJOR(vfsmount_p->mnt_sb->s_dev), RSBAC_MINOR(vfsmount_p->mnt_sb->s_dev));
++ if (prev_mount_p) {
++ prev_mount_p->next = mount_p->next;
++ kfree (mount_p);
++ mount_p = prev_mount_p->next;
++ } else {
++ rsbac_mount_list = mount_p->next;
++ kfree (mount_p);
++ mount_p = rsbac_mount_list;
++ }
++ } else {
++ prev_mount_p = mount_p;
++ mount_p = mount_p->next;
++ }
++ }
++ }
++
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ kdev = vfsmount_p->mnt_sb->s_dev;
++ rsbac_pr_debug(ds, "umounting device %02u:%02u\n",
++ MAJOR(kdev), MINOR(kdev));
++
++ /* sync attribute lists */
++#if defined(CONFIG_RSBAC_AUTO_WRITE)
++ if (!rsbac_debug_no_write) {
++ down(&rsbac_write_sem);
++ /* recheck no_write with lock - might have been set in between */
++ if (!rsbac_debug_no_write) {
++ up(&rsbac_write_sem);
++ rsbac_write();
++ } else
++ up(&rsbac_write_sem);
++ }
++#endif
++/* call other umount functions */
++#if defined(CONFIG_RSBAC_MAC)
++ rsbac_umount_mac(kdev);
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ rsbac_umount_auth(kdev);
++#endif
++#if defined(CONFIG_RSBAC_ACL)
++ rsbac_umount_acl(kdev);
++#endif
++#if defined(CONFIG_RSBAC_REG)
++ rsbac_umount_reg(kdev);
++#endif
++
++ hash = device_hash(kdev);
++ /* wait for write access to device_list_head */
++ spin_lock(&device_list_locks[hash]);
++ while (!RSBAC_IS_AUTO_DEV(umount_device_in_progress)) {
++ DECLARE_WAIT_QUEUE_HEAD(auto_wait);
++ struct timer_list auto_timer;
++
++ spin_unlock(&device_list_locks[hash]);
++
++ init_timer(&auto_timer);
++ auto_timer.function = wakeup_auto;
++ auto_timer.data = (u_long) & auto_wait;
++ auto_timer.expires = jiffies + HZ;
++ add_timer(&auto_timer);
++ interruptible_sleep_on(&auto_wait);
++
++ spin_lock(&device_list_locks[hash]);
++ }
++ /* OK, nobody else is working on it... */
++ umount_device_in_progress = kdev;
++ device_p = lookup_device(kdev, hash);
++ if (device_p) {
++ if (device_p->mount_count == 1) {
++ /* remove_device_item unlocks device_list_locks[hash]! */
++ remove_device_item(kdev);
++ aci_detach_fd_lists(device_p);
++ if (device_p->vfsmount_p)
++ mntput(device_p->vfsmount_p);
++ clear_device_item(device_p);
++ spin_lock(&device_list_locks[hash]);
++ } else {
++ if (device_p->mount_count > 1) {
++ device_p->mount_count--;
++ if (device_p->vfsmount_p == vfsmount_p) {
++ device_p->vfsmount_p = NULL;
++ spin_unlock(&device_list_locks[hash]);
++ mntput(vfsmount_p);
++ rsbac_printk(KERN_WARNING "rsbac_umount: removed primary mount for device %02u:%02u, inheritance broken!\n",
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev));
++ spin_lock(&device_list_locks[hash]);
++ }
++ } else {
++ rsbac_printk(KERN_WARNING "rsbac_umount: device %02u:%02u has mount_count < 1!\n",
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev));
++ }
++ }
++ }
++ umount_device_in_progress = RSBAC_AUTO_DEV;
++ spin_unlock(&device_list_locks[hash]);
++
++#ifdef CONFIG_RSBAC_FD_CACHE
++ rsbac_fd_cache_invalidate_all();
++#endif
++
++ return 0;
++}
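++
++/*
++ * Sketch, not part of the original RSBAC code: the wait loop above polls
++ * umount_device_in_progress with a one-second timer and
++ * interruptible_sleep_on(), an interface that is racy and was removed from
++ * later kernels.  Assuming wait_event_interruptible_timeout() is available,
++ * the same "wait until no other umount is in progress" step could be written
++ * as below; rsbac_umount_wait and the helper name are invented for the
++ * illustration and the block is not compiled.
++ */
++#if 0
++static DECLARE_WAIT_QUEUE_HEAD(rsbac_umount_wait);
++
++static void rsbac_wait_for_umount_slot(u_int hash)
++{
++	spin_lock(&device_list_locks[hash]);
++	while (!RSBAC_IS_AUTO_DEV(umount_device_in_progress)) {
++		spin_unlock(&device_list_locks[hash]);
++		/* recheck at most once per second, like the timer above */
++		wait_event_interruptible_timeout(rsbac_umount_wait,
++				RSBAC_IS_AUTO_DEV(umount_device_in_progress),
++				HZ);
++		spin_lock(&device_list_locks[hash]);
++	}
++	/* returns with device_list_locks[hash] held, as in rsbac_umount() */
++}
++#endif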
++
++/* On pivot_root, we must unblock the dentry tree of the old root */
++/* by putting all cached rsbac.dat dentries */
++
++int rsbac_free_dat_dentries(void)
++{
++ struct rsbac_device_list_item_t *device_p;
++ u_int i;
++
++ if (!rsbac_initialized) {
++ rsbac_printk(KERN_WARNING "rsbac_free_dat_dentry(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++
++ rsbac_printk(KERN_INFO "rsbac_free_dat_dentry(): freeing dat dir dentries\n");
++
++ for (i = 0; i < RSBAC_NR_DEVICE_LISTS; i++) {
++ spin_lock(&device_list_locks[i]);
++ device_p = device_head_p[i]->head;
++ while (device_p) {
++ if (device_p->rsbac_dir_dentry_p) {
++ dput(device_p->rsbac_dir_dentry_p);
++ device_p->rsbac_dir_dentry_p = NULL;
++ }
++ device_p = device_p->next;
++ }
++ spin_unlock(&device_list_locks[i]);
++ }
++ return 0;
++}
++
++/***************************************************/
++/* We also need some status information... */
++
++int rsbac_stats(void)
++{
++ struct rsbac_device_list_item_t *device_p;
++ long fd_count;
++ u_long fd_sum = 0;
++ u_long dev_sum = 0;
++ u_long ipc_sum = 0;
++ u_long user_sum = 0;
++ u_long process_sum = 0;
++#if defined(CONFIG_RSBAC_UM)
++ u_long group_sum = 0;
++#endif
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ u_long nettemp_sum = 0;
++ u_long lnetobj_sum = 0;
++ u_long rnetobj_sum = 0;
++#endif
++ u_long total_sum = 0;
++ long tmp_count = 0;
++ u_int i;
++ int srcu_idx;
++
++ if (!rsbac_initialized) {
++ rsbac_printk(KERN_WARNING "rsbac_stats(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ for (i = 0; i < RSBAC_NR_DEVICE_LISTS; i++) {
++ srcu_idx = srcu_read_lock(&device_list_srcu[i]);
++/* rsbac_printk(KERN_INFO "rsbac_stats(): currently %u processes working on file/dir aci\n",
++ device_list_lock.lock); */
++ device_p = rcu_dereference(device_head_p[i])->head;
++ while (device_p) { /* for all sublists */
++ fd_count = rsbac_list_count(device_p->handles.gen);
++ if (fd_count > 0) {
++ rsbac_printk(", %lu GEN", fd_count);
++ fd_sum += fd_count;
++ }
++
++#if defined(CONFIG_RSBAC_MAC)
++ fd_count = rsbac_list_count(device_p->handles.mac);
++ if (fd_count > 0) {
++ rsbac_printk(", %lu MAC", fd_count);
++ fd_sum += fd_count;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_PM)
++ fd_count = rsbac_list_count(device_p->handles.pm);
++ if (fd_count > 0) {
++ rsbac_printk(", %lu PM", fd_count);
++ fd_sum += fd_count;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_DAZ)
++ fd_count = rsbac_list_count(device_p->handles.daz);
++ if (fd_count > 0) {
++ rsbac_printk(", %lu DAZ", fd_count);
++ fd_sum += fd_count;
++ }
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++ fd_count = rsbac_list_count(device_p->handles.dazs);
++ if (fd_count > 0) {
++ rsbac_printk(", %lu DAZ SCANNED", fd_count);
++ fd_sum += fd_count;
++ }
++#endif
++#endif
++
++#if defined(CONFIG_RSBAC_FF)
++ fd_count = rsbac_list_count(device_p->handles.ff);
++ if (fd_count > 0) {
++ rsbac_printk(", %lu FF", fd_count);
++ fd_sum += fd_count;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_RC)
++ fd_count = rsbac_list_count(device_p->handles.rc);
++ if (fd_count > 0) {
++ rsbac_printk(", %lu RC", fd_count);
++ fd_sum += fd_count;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH)
++ fd_count = rsbac_list_count(device_p->handles.auth);
++ if (fd_count > 0) {
++ rsbac_printk(", %lu AUTH", fd_count);
++ fd_sum += fd_count;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_CAP)
++ fd_count = rsbac_list_count(device_p->handles.cap);
++ if (fd_count > 0) {
++ rsbac_printk(", %lu CAP", fd_count);
++ fd_sum += fd_count;
++ }
++#endif
++#if defined(CONFIG_RSBAC_RES)
++ fd_count = rsbac_list_count(device_p->handles.res);
++ if (fd_count > 0) {
++ rsbac_printk(", %lu RES", fd_count);
++ fd_sum += fd_count;
++ }
++#endif
++#if defined(CONFIG_RSBAC_PAX)
++ fd_count = rsbac_list_count(device_p->handles.pax);
++ if (fd_count > 0) {
++ rsbac_printk(", %lu PAX", fd_count);
++ fd_sum += fd_count;
++ }
++#endif
++
++ rsbac_printk("\n");
++ device_p = device_p->next;
++ }
++ tmp_count += rcu_dereference(device_head_p[i])->count;
++ srcu_read_unlock(&device_list_srcu[i], srcu_idx);
++ }
++ rsbac_printk(KERN_INFO "rsbac_stats(): Sum of %u Devices with %lu fd-items\n",
++ tmp_count, fd_sum);
++ /* free access to device_list_head */
++ total_sum += fd_sum;
++
++ /* dev lists */
++ tmp_count = rsbac_list_count(dev_handles.gen);
++ rsbac_printk(KERN_INFO "DEV items: %lu GEN", tmp_count);
++ dev_sum += tmp_count;
++#if defined(CONFIG_RSBAC_MAC)
++ tmp_count = rsbac_list_count(dev_handles.mac);
++ rsbac_printk(", %lu MAC", tmp_count);
++ dev_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ tmp_count = rsbac_list_count(dev_handles.pm);
++ rsbac_printk(", %lu PM", tmp_count);
++ dev_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_list_count(dev_major_handles.rc);
++ rsbac_printk(", %lu major RC", tmp_count);
++ dev_sum += tmp_count;
++ tmp_count = rsbac_list_count(dev_handles.rc);
++ rsbac_printk(", %lu RC", tmp_count);
++ dev_sum += tmp_count;
++#endif
++ rsbac_printk("\n");
++ rsbac_printk(KERN_INFO "Sum of %lu DEV items\n", dev_sum);
++ total_sum += dev_sum;
++
++ /* ipc lists */
++ rsbac_printk(KERN_INFO "IPC items: no GEN");
++#if defined(CONFIG_RSBAC_MAC)
++ tmp_count = rsbac_list_count(ipc_handles.mac);
++ rsbac_printk(", %lu MAC", tmp_count);
++ ipc_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ tmp_count = rsbac_list_count(ipc_handles.pm);
++ rsbac_printk(", %lu PM", tmp_count);
++ ipc_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_list_count(ipc_handles.rc);
++ rsbac_printk(", %lu RC", tmp_count);
++ ipc_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ tmp_count = rsbac_list_count(ipc_handles.jail);
++ rsbac_printk(", %lu JAIL", tmp_count);
++ ipc_sum += tmp_count;
++#endif
++ rsbac_printk("\n");
++ rsbac_printk(KERN_INFO "Sum of %lu IPC items\n", ipc_sum);
++ total_sum += ipc_sum;
++
++ /* user lists */
++ tmp_count = rsbac_list_count(user_handles.gen);
++ rsbac_printk(KERN_INFO "USER items: %lu GEN", tmp_count);
++ user_sum += tmp_count;
++#if defined(CONFIG_RSBAC_MAC)
++ tmp_count = rsbac_list_count(user_handles.mac);
++ rsbac_printk(", %lu MAC", tmp_count);
++ user_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ tmp_count = rsbac_list_count(user_handles.pm);
++ rsbac_printk(", %lu PM", tmp_count);
++ user_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_DAZ)
++ tmp_count = rsbac_list_count(user_handles.daz);
++ rsbac_printk(", %lu DAZ", tmp_count);
++ user_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_list_count(user_handles.rc);
++ rsbac_printk(", %lu RC", tmp_count);
++ user_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ tmp_count = rsbac_list_count(user_handles.auth);
++ rsbac_printk(", %lu AUTH", tmp_count);
++ user_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_CAP)
++ tmp_count = rsbac_list_count(user_handles.cap);
++ rsbac_printk(", %lu CAP", tmp_count);
++ user_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ tmp_count = rsbac_list_count(user_handles.jail);
++ rsbac_printk(", %lu JAIL", tmp_count);
++ user_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RES)
++ tmp_count = rsbac_list_count(user_handles.res);
++ rsbac_printk(", %lu RES", tmp_count);
++ user_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PAX)
++ tmp_count = rsbac_list_count(user_handles.pax);
++ rsbac_printk(", %lu PAX", tmp_count);
++ user_sum += tmp_count;
++#endif
++ rsbac_printk("\n");
++ rsbac_printk(KERN_INFO "Sum of %lu USER items\n", user_sum);
++ total_sum += user_sum;
++
++ /* process lists */
++ tmp_count = rsbac_list_count(process_handles.gen);
++ rsbac_printk(KERN_INFO "PROCESS items: %lu GEN", tmp_count);
++ process_sum += tmp_count;
++#if defined(CONFIG_RSBAC_MAC)
++ tmp_count = rsbac_list_count(process_handles.mac);
++ rsbac_printk(", %lu MAC", tmp_count);
++ process_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ tmp_count = rsbac_list_count(process_handles.pm);
++ rsbac_printk(", %lu PM", tmp_count);
++ process_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_DAZ)
++ tmp_count = rsbac_list_count(process_handles.daz);
++ rsbac_printk(", %lu DAZ", tmp_count);
++ process_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_list_count(process_handles.rc);
++ rsbac_printk(", %lu RC", tmp_count);
++ process_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ tmp_count = rsbac_list_count(process_handles.auth);
++ rsbac_printk(", %lu AUTH", tmp_count);
++ process_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_CAP)
++ tmp_count = rsbac_list_count(process_handles.cap);
++ rsbac_printk(", %lu CAP", tmp_count);
++ process_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ tmp_count = rsbac_list_count(process_handles.jail);
++ rsbac_printk(", %lu JAIL", tmp_count);
++ process_sum += tmp_count;
++#endif
++ rsbac_printk("\n");
++ rsbac_printk(KERN_INFO "Sum of %lu PROCESS items\n", process_sum);
++ total_sum += process_sum;
++
++#if defined(CONFIG_RSBAC_UM)
++ /* group lists */
++ rsbac_printk(KERN_INFO "GROUP items: ");
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ tmp_count = rsbac_list_count(group_handles.rc);
++ rsbac_printk("%lu RC", tmp_count);
++	group_sum += tmp_count;
++#endif
++ rsbac_printk("\n");
++ rsbac_printk(KERN_INFO "Sum of %lu GROUP items\n", group_sum);
++ total_sum += group_sum;
++#endif
++
++#if defined(CONFIG_RSBAC_NET_OBJ)
++ /* nettemp lists */
++ rsbac_printk(KERN_INFO "NETTEMP items: ");
++#if defined(CONFIG_RSBAC_MAC)
++ tmp_count = rsbac_list_count(nettemp_handles.mac);
++ rsbac_printk("%lu MAC, ", tmp_count);
++ nettemp_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ tmp_count = rsbac_list_count(nettemp_handles.pm);
++ rsbac_printk("%lu PM, ", tmp_count);
++ nettemp_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_list_count(nettemp_handles.rc);
++ rsbac_printk("%lu RC, ", tmp_count);
++ nettemp_sum += tmp_count;
++#endif
++ rsbac_printk("\n");
++ rsbac_printk(KERN_INFO "Sum of %lu NETTEMP items\n", nettemp_sum);
++ total_sum += nettemp_sum;
++
++ /* local netobj lists */
++ rsbac_printk(KERN_INFO "Local NETOBJ items:");
++#if defined(CONFIG_RSBAC_MAC)
++ tmp_count = rsbac_list_count(lnetobj_handles.mac);
++ rsbac_printk(" %lu MAC,", tmp_count);
++ lnetobj_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ tmp_count = rsbac_list_count(lnetobj_handles.pm);
++ rsbac_printk(" %lu PM,", tmp_count);
++ lnetobj_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_list_count(lnetobj_handles.rc);
++ rsbac_printk(" %lu RC", tmp_count);
++ lnetobj_sum += tmp_count;
++#endif
++ rsbac_printk("\n");
++ rsbac_printk(KERN_INFO "Sum of %lu Local NETOBJ items\n",
++ lnetobj_sum);
++ total_sum += lnetobj_sum;
++
++ /* remote netobj lists */
++ rsbac_printk(KERN_INFO "Remote NETOBJ items:");
++#if defined(CONFIG_RSBAC_MAC)
++ tmp_count = rsbac_list_count(rnetobj_handles.mac);
++ rsbac_printk(" %lu MAC,", tmp_count);
++ rnetobj_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ tmp_count = rsbac_list_count(rnetobj_handles.pm);
++ rsbac_printk(" %lu PM,", tmp_count);
++ rnetobj_sum += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_list_count(rnetobj_handles.rc);
++ rsbac_printk(" %lu RC", tmp_count);
++ rnetobj_sum += tmp_count;
++#endif
++ rsbac_printk("\n");
++ rsbac_printk(KERN_INFO "Sum of %lu Remote NETOBJ items\n",
++ rnetobj_sum);
++ total_sum += rnetobj_sum;
++#endif /* NET_OBJ */
++
++ rsbac_printk(KERN_INFO "Total of %lu registered rsbac-items\n", total_sum);
++
++ rsbac_printk(KERN_INFO "adf_request calls: file: %lu, dir: %lu, fifo: %lu, symlink: %lu, dev: %lu, ipc: %lu, scd: %lu, user: %lu, process: %lu, netdev: %lu, nettemp: %lu, netobj: %lu, unixsock: %lu\n",
++ rsbac_adf_request_count[T_FILE],
++ rsbac_adf_request_count[T_DIR],
++ rsbac_adf_request_count[T_FIFO],
++ rsbac_adf_request_count[T_SYMLINK],
++ rsbac_adf_request_count[T_DEV],
++ rsbac_adf_request_count[T_IPC],
++ rsbac_adf_request_count[T_SCD],
++ rsbac_adf_request_count[T_USER],
++ rsbac_adf_request_count[T_PROCESS],
++ rsbac_adf_request_count[T_NETDEV],
++ rsbac_adf_request_count[T_NETTEMP],
++ rsbac_adf_request_count[T_NETOBJ],
++ rsbac_adf_request_count[T_UNIXSOCK]);
++ rsbac_printk(KERN_INFO "adf_set_attr calls: file: %lu, dir: %lu, fifo: %lu, symlink: %lu, dev: %lu, ipc: %lu, scd: %lu, user: %lu, process: %lu, netdev: %lu, nettemp: %lu, netobj: %lu, unixsock: %lu\n",
++ rsbac_adf_set_attr_count[T_FILE],
++ rsbac_adf_set_attr_count[T_DIR],
++ rsbac_adf_set_attr_count[T_FIFO],
++ rsbac_adf_set_attr_count[T_SYMLINK],
++ rsbac_adf_set_attr_count[T_DEV],
++ rsbac_adf_set_attr_count[T_IPC],
++ rsbac_adf_set_attr_count[T_SCD],
++ rsbac_adf_set_attr_count[T_USER],
++ rsbac_adf_set_attr_count[T_PROCESS],
++ rsbac_adf_set_attr_count[T_NETDEV],
++ rsbac_adf_set_attr_count[T_NETTEMP],
++ rsbac_adf_set_attr_count[T_NETOBJ],
++ rsbac_adf_set_attr_count[T_UNIXSOCK]);
++
++#if defined(CONFIG_RSBAC_PM)
++ rsbac_stats_pm();
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ rsbac_stats_rc();
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ rsbac_stats_auth();
++#endif
++#if defined(CONFIG_RSBAC_ACL)
++ rsbac_stats_acl();
++#endif
++ return 0;
++}
++
++/***************************************************/
++/* rsbac_write() to write all dirty lists to disk */
++/* returns no. of lists written */
++
++#if defined(CONFIG_RSBAC_AUTO_WRITE)
++int rsbac_write()
++{
++ int err = 0;
++ u_int count = 0;
++ int subcount;
++
++ if (!rsbac_initialized) {
++ rsbac_printk(KERN_WARNING "rsbac_write(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (rsbac_debug_no_write)
++ return 0;
++
++ subcount = rsbac_write_lists();
++ if (subcount > 0) {
++ count += subcount;
++ } else if (subcount < 0) {
++ err = subcount;
++ if (err != -RSBAC_ENOTWRITABLE) {
++ rsbac_printk(KERN_WARNING "rsbac_write(): rsbac_write_lists() returned error %i\n",
++ err);
++ }
++ }
++
++#if defined(CONFIG_RSBAC_REG)
++ subcount = rsbac_write_reg();
++ if (subcount > 0) {
++ count += subcount;
++ } else if (subcount < 0) {
++ err = subcount;
++ if (err != -RSBAC_ENOTWRITABLE) {
++ rsbac_printk(KERN_WARNING "rsbac_write(): rsbac_write_reg() returned error %i\n",
++ err);
++ }
++ }
++#endif
++
++ if (count > 0)
++ rsbac_pr_debug(write, "total of %u lists written\n", count);
++ return count;
++}
++#endif
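++
++/*
++ * Sketch only, an assumption rather than code from the RSBAC patch: with
++ * CONFIG_RSBAC_AUTO_WRITE the intention is that rsbac_write() gets called
++ * periodically so that dirty lists reach disk.  A minimal kernel thread doing
++ * that could look like the following; rsbacd_example() and the 30 second
++ * interval are made up, and the block is not compiled.
++ */
++#if 0
++static int rsbacd_example(void *dummy)
++{
++	while (!kthread_should_stop()) {
++		ssleep(30);		/* arbitrary example interval */
++		rsbac_write();		/* returns the number of lists written */
++	}
++	return 0;
++}
++/* started once during init, e.g.:
++ *	kthread_run(rsbacd_example, NULL, "rsbacd_example");
++ */
++#endif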
++
++/************************************************* */
++/* Attribute functions */
++/************************************************* */
++
++/* A rsbac_set_attr() call for a non-existing object, user */
++/* or process entry will first add the target and then set the attribute. */
++/* Invalid combinations and trying to set security_level to or from */
++/* SL_rsbac_internal return an error. */
++/* A rsbac_get_attr() call for a non-existing target will return the */
++/* default value stored in def_aci, which should be the first enum item.*/
++
++/* All these procedures take the locks (SRCU read locks and spinlocks) needed */
++/* to protect the targets during access. */
++
++/* get the parent of a target
++ * returns -RSBAC_EINVALIDTARGET for non-fs targets
++ * and -RSBAC_ENOTFOUND if no parent is available.
++ * For the root dentry of a mounted filesystem the mount point of the device
++ * item is used, and the device item is locked for reading (SRCU), so never
++ * call this with a write lock held on device_p!
++ */
++#if defined(CONFIG_RSBAC_REG)
++EXPORT_SYMBOL(rsbac_get_parent);
++#endif
++int rsbac_get_parent(enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_target_t *parent_target_p,
++ union rsbac_target_id_t *parent_tid_p)
++{
++ int srcu_idx;
++
++ if (!parent_target_p || !parent_tid_p)
++ return -RSBAC_EINVALIDPOINTER;
++/*
++ rsbac_pr_debug(ds, "Getting file/dir/fifo/symlink "
++ "parent for device %02u:%02u, inode %lu, dentry_p %p\n",
++ RSBAC_MAJOR(tid.file.device),
++ RSBAC_MINOR(tid.file.device),
++ (u_long)tid.file.inode, tid.file.dentry_p);
++*/
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ break;
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ if (!tid.file.dentry_p)
++ return -RSBAC_ENOTFOUND;
++
++#ifdef CONFIG_RSBAC_XSTATS
++ get_parent_count++;
++#endif
++ *parent_target_p = T_DIR;
++ /* Is this dentry root of a mounted device? */
++ if (tid.file.dentry_p->d_sb
++ && (tid.file.dentry_p->d_sb->s_root == tid.file.dentry_p)
++ ) {
++ struct rsbac_device_list_item_t *device_p;
++ u_int hash;
++
++ if (tid.file.device == rsbac_root_dev)
++ return -RSBAC_ENOTFOUND;
++ hash = device_hash(tid.file.device);
++ /* wait for read access to device_list_head */
++ srcu_idx = srcu_read_lock(&device_list_srcu[hash]);
++ device_p = lookup_device(tid.file.device, hash);
++ if (!device_p
++ || !device_p->vfsmount_p
++ || !real_mount(device_p->vfsmount_p)->mnt_mountpoint
++ || !real_mount(device_p->vfsmount_p)->mnt_mountpoint->d_parent
++ || (real_mount(device_p->vfsmount_p)->mnt_mountpoint->d_parent == real_mount(device_p->vfsmount_p)->mnt_mountpoint)
++ || !real_mount(device_p->vfsmount_p)->mnt_mountpoint->d_parent->d_inode
++ || !real_mount(device_p->vfsmount_p)->mnt_mountpoint->d_parent->d_inode->i_ino
++ || !real_mount(device_p->vfsmount_p)->mnt_mountpoint->d_sb
++ || !real_mount(device_p->vfsmount_p)->mnt_mountpoint->d_sb->s_dev
++ || (real_mount(device_p->vfsmount_p)->mnt_mountpoint->d_sb->s_dev == tid.file.device)) {
++ /* free access to device_list_head */
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ return -RSBAC_ENOTFOUND;
++ }
++ parent_tid_p->dir.device =
++ real_mount(device_p->vfsmount_p)->mnt_mountpoint->d_parent->d_sb->s_dev;
++ parent_tid_p->dir.inode =
++ real_mount(device_p->vfsmount_p)->mnt_mountpoint->d_parent->d_inode->i_ino;
++ parent_tid_p->dir.dentry_p = real_mount(device_p->vfsmount_p)->mnt_mountpoint->d_parent;
++ /* free access to device_list_head */
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ } else { /* no root of filesystem -> use d_parent, dev keeps unchanged */
++ if (!tid.file.dentry_p->d_parent) {
++ rsbac_printk(KERN_DEBUG "rsbac_get_parent(): oops - d_parent is NULL!\n");
++ return -RSBAC_ENOTFOUND;
++ }
++ if (tid.file.dentry_p == tid.file.dentry_p->d_parent) {
++ // rsbac_printk(KERN_DEBUG "rsbac_get_parent(): oops - d_parent == dentry_p!\n");
++ return -RSBAC_ENOTFOUND;
++ }
++ if (!tid.file.dentry_p->d_parent->d_inode) {
++ rsbac_printk(KERN_DEBUG "rsbac_get_parent(): oops - d_parent has no d_inode!\n");
++ return -RSBAC_ENOTFOUND;
++ }
++ if (!tid.file.dentry_p->d_parent->d_inode->i_ino)
++ {
++ rsbac_printk(KERN_DEBUG "rsbac_get_parent(): oops - d_parent d_inode->i_ino is 0!\n");
++ return -RSBAC_ENOTFOUND;
++ }
++ parent_tid_p->dir.device = tid.file.device;
++ parent_tid_p->dir.inode =
++ tid.file.dentry_p->d_parent->d_inode->i_ino;
++ parent_tid_p->dir.dentry_p = tid.file.dentry_p->d_parent;
++ }
++ return 0;
++}
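++
++/*
++ * Sketch, not part of the original sources: typical use of rsbac_get_parent()
++ * is to walk from an FD object towards the filesystem root until it returns
++ * non-zero (-RSBAC_ENOTFOUND once no parent is left, as described above).
++ * count_fd_ancestors() is a hypothetical helper, shown only to illustrate the
++ * calling convention.
++ */
++static inline u_int count_fd_ancestors(enum rsbac_target_t target,
++				       union rsbac_target_id_t tid)
++{
++	enum rsbac_target_t parent_target;
++	union rsbac_target_id_t parent_tid;
++	u_int depth = 0;
++
++	while (!rsbac_get_parent(target, tid, &parent_target, &parent_tid)) {
++		/* each parent returned here is a T_DIR, possibly on another device */
++		target = parent_target;
++		tid = parent_tid;
++		depth++;
++	}
++	return depth;
++}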
++
++static int get_attr_fd(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t *tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value,
++ rsbac_boolean_t inherit)
++{
++ int err = 0;
++ struct rsbac_device_list_item_t *device_p;
++#if defined(CONFIG_RSBAC_FF)
++ rsbac_ff_flags_t ff_flags = 0;
++ rsbac_ff_flags_t ff_tmp_flags;
++ rsbac_ff_flags_t ff_mask = -1;
++#endif
++ u_int hash;
++ int srcu_idx;
++
++ /* use loop for inheritance - used to be recursive calls */
++ for (;;) {
++/* rsbac_pr_debug(ds, "Getting file/dir/fifo/"
++ "symlink attribute %u for device %02u:%02u, "
++ "inode %lu, dentry_p %p\n", attr,
++ RSBAC_MAJOR(tid_p->file.device),
++ RSBAC_MINOR(tid_p->file.device),
++ (u_long)tid_p->file.inode,
++ tid_p->file.dentry_p); */
++ hash = device_hash(tid_p->file.device);
++ /* wait for read access to device_list_head */
++ srcu_idx = srcu_read_lock(&device_list_srcu[hash]);
++ /* OK, go on */
++ /* rsbac_pr_debug(ds, "passed device read lock\n"); */
++ /* lookup device */
++ device_p = lookup_device(tid_p->file.device, hash);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_get_attr(): unknown device %02u:%02u\n",
++ RSBAC_MAJOR(tid_p->file.device),
++ RSBAC_MINOR(tid_p->file.device));
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ switch (module) {
++ case SW_GEN:
++ {
++ struct rsbac_gen_fd_aci_t aci =
++ DEFAULT_GEN_FD_ACI;
++
++			if (attr == A_internal) {
++				if (!device_p->rsbac_dir_inode
++				    || !tid_p->file.inode)
++					value->internal = FALSE;
++				else if (device_p->rsbac_dir_inode ==
++					 tid_p->file.inode)
++					value->internal = TRUE;
++				else if (inherit) {
++					enum rsbac_target_t parent_target;
++					union rsbac_target_id_t parent_tid;
++
++					/* inheritance possible? */
++					if (!rsbac_get_parent(target, *tid_p,
++							      &parent_target,
++							      &parent_tid)) {
++						/* yes: inherit this single level */
++						if (device_p->rsbac_dir_inode ==
++						    parent_tid.file.inode)
++							value->internal = TRUE;
++						else
++							value->internal = FALSE;
++					} else {
++						value->internal = FALSE;
++					}
++				} else {
++					value->internal = FALSE;
++				}
++
++				/* free access to device_list_head */
++				srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++				return 0;
++			}
++ rsbac_ta_list_get_data_ttl(ta_number,
++ device_p->
++ handles.gen,
++ NULL,
++ &tid_p->file.
++ inode, &aci);
++ switch (attr) {
++ case A_log_array_low:
++ value->log_array_low =
++ aci.log_array_low;
++ break;
++ case A_log_array_high:
++ value->log_array_high =
++ aci.log_array_high;
++ break;
++ case A_log_program_based:
++ value->log_program_based =
++ aci.log_program_based;
++ break;
++ case A_symlink_add_remote_ip:
++ value->symlink_add_remote_ip =
++ aci.symlink_add_remote_ip;
++ break;
++ case A_symlink_add_uid:
++ value->symlink_add_uid =
++ aci.symlink_add_uid;
++ break;
++ case A_symlink_add_mac_level:
++ value->symlink_add_mac_level =
++ aci.symlink_add_mac_level;
++ break;
++ case A_symlink_add_rc_role:
++ value->symlink_add_rc_role =
++ aci.symlink_add_rc_role;
++ break;
++			case A_linux_dac_disable:
++				value->linux_dac_disable =
++					aci.linux_dac_disable;
++				if ((value->linux_dac_disable == LDD_inherit)
++				    && inherit) {
++					enum rsbac_target_t parent_target;
++					union rsbac_target_id_t parent_tid;
++
++					/* free access to device_list_head - see above */
++					srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++					/* inheritance possible? */
++					if (!rsbac_get_parent(target, *tid_p,
++							      &parent_target,
++							      &parent_tid)) {
++						target = parent_target;
++						*tid_p = parent_tid;
++						continue;
++					} else {
++						value->linux_dac_disable =
++							def_gen_root_dir_aci.linux_dac_disable;
++						return 0;
++					}
++				}
++				break;
++ case A_fake_root_uid:
++ value->fake_root_uid =
++ aci.fake_root_uid;
++ break;
++ case A_auid_exempt:
++ value->auid_exempt =
++ aci.auid_exempt;
++ break;
++ case A_vset:
++ value->vset =
++ aci.vset;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++
++#if defined(CONFIG_RSBAC_MAC)
++ case SW_MAC:
++ {
++ struct rsbac_mac_fd_aci_t aci =
++ DEFAULT_MAC_FD_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ device_p->
++ handles.mac,
++ NULL,
++ &tid_p->file.
++ inode, &aci);
++ switch (attr) {
++			case A_security_level:
++				value->security_level = aci.sec_level;
++				if ((value->security_level == SL_inherit)
++				    && inherit) {
++					enum rsbac_target_t parent_target;
++					union rsbac_target_id_t parent_tid;
++
++					/* free access to device_list_head - see above */
++					srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++					/* inheritance possible? */
++					if (!rsbac_get_parent(target, *tid_p,
++							      &parent_target,
++							      &parent_tid)) {
++						target = parent_target;
++						*tid_p = parent_tid;
++						continue;
++					} else {
++						value->security_level =
++							def_mac_root_dir_aci.sec_level;
++						return 0;
++					}
++				}
++				break;
++			case A_mac_categories:
++				value->mac_categories = aci.mac_categories;
++				if ((value->mac_categories ==
++				     RSBAC_MAC_INHERIT_CAT_VECTOR)
++				    && inherit) {
++					enum rsbac_target_t parent_target;
++					union rsbac_target_id_t parent_tid;
++
++					/* free access to device_list_head - see above */
++					srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++					/* inheritance possible? */
++					if (!rsbac_get_parent(target, *tid_p,
++							      &parent_target,
++							      &parent_tid)) {
++						target = parent_target;
++						*tid_p = parent_tid;
++						continue;
++					} else {
++						value->mac_categories =
++							def_mac_root_dir_aci.mac_categories;
++						return 0;
++					}
++				}
++				break;
++			case A_mac_auto:
++				value->mac_auto = aci.mac_auto;
++				if ((value->mac_auto == MA_inherit)
++				    && inherit) {
++					enum rsbac_target_t parent_target;
++					union rsbac_target_id_t parent_tid;
++
++					/* free access to device_list_head - see above */
++					srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++					/* inheritance possible? */
++					if (!rsbac_get_parent(target, *tid_p,
++							      &parent_target,
++							      &parent_tid)) {
++						target = parent_target;
++						*tid_p = parent_tid;
++						continue;
++					} else {
++						value->mac_auto =
++							def_mac_root_dir_aci.mac_auto;
++						return 0;
++					}
++				}
++				break;
++ case A_mac_prop_trusted:
++ value->mac_prop_trusted =
++ aci.mac_prop_trusted;
++ break;
++ case A_mac_file_flags:
++ value->mac_file_flags =
++ aci.mac_file_flags;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* MAC */
++
++#if defined(CONFIG_RSBAC_PM)
++ case SW_PM:
++ {
++ struct rsbac_pm_fd_aci_t aci =
++ DEFAULT_PM_FD_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ device_p->
++ handles.pm,
++ NULL,
++ &tid_p->file.
++ inode, &aci);
++ switch (attr) {
++ case A_pm_object_class:
++ value->pm_object_class =
++ aci.pm_object_class;
++ break;
++ case A_pm_tp:
++ value->pm_tp = aci.pm_tp;
++ break;
++ case A_pm_object_type:
++ value->pm_object_type =
++ aci.pm_object_type;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* PM */
++
++#if defined(CONFIG_RSBAC_DAZ)
++ case SW_DAZ:
++ {
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++ if (attr == A_daz_scanned) {
++ err = rsbac_ta_list_get_data_ttl
++ (ta_number,
++ device_p->handles.dazs,
++ NULL, &tid_p->file.inode,
++ &value->daz_scanned);
++ } else
++#endif
++ {
++ struct rsbac_daz_fd_aci_t aci =
++ DEFAULT_DAZ_FD_ACI;
++
++ rsbac_ta_list_get_data_ttl
++ (ta_number,
++ device_p->handles.daz,
++ NULL, &tid_p->file.inode,
++ &aci);
++ switch (attr) {
++ case A_daz_scanner:
++ value->daz_scanner =
++ aci.daz_scanner;
++ break;
++ case A_daz_do_scan:
++ value->daz_do_scan = aci.daz_do_scan;
++ if( (value->daz_do_scan == DAZ_inherit)
++ && inherit) {
++ enum rsbac_target_t parent_target;
++ union rsbac_target_id_t parent_tid;
++
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ if(!rsbac_get_parent(target, *tid_p, &parent_target, &parent_tid)) {
++ target = parent_target;
++ *tid_p = parent_tid;
++ continue;
++ } else {
++ value->daz_do_scan
++ = def_daz_root_dir_aci.daz_do_scan;
++ return 0;
++ }
++ }
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ }
++ break;
++#endif /* DAZ */
++
++#if defined(CONFIG_RSBAC_FF)
++ case SW_FF:
++ {
++ switch (attr) {
++			case A_ff_flags:
++				ff_tmp_flags = RSBAC_FF_DEF;
++				rsbac_ta_list_get_data_ttl(ta_number,
++							   device_p->handles.ff,
++							   NULL,
++							   &tid_p->file.inode,
++							   &ff_tmp_flags);
++				ff_flags |= ff_tmp_flags & ff_mask;
++				value->ff_flags = ff_flags;
++				if ((ff_tmp_flags & FF_add_inherited)
++				    && inherit) {
++					/* inheritance possible? */
++					if (!rsbac_get_parent(target, *tid_p,
++							      &target, tid_p)) {
++						/* free access to device_list_head - see above */
++						srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++						ff_mask &= ~(FF_no_delete_or_rename
++							     | FF_add_inherited);
++						ff_flags &= ~(FF_add_inherited);
++						continue;
++					} else
++						value->ff_flags &= ~(FF_add_inherited);
++				}
++				break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* FF */
++
++#if defined(CONFIG_RSBAC_RC)
++ case SW_RC:
++ {
++ struct rsbac_rc_fd_aci_t aci =
++ DEFAULT_RC_FD_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ device_p->
++ handles.rc,
++ NULL,
++ &tid_p->file.
++ inode, &aci);
++ switch (attr) {
++			case A_rc_type_fd:
++				value->rc_type_fd = aci.rc_type_fd;
++				if (value->rc_type_fd == RC_type_inherit_parent
++				    && inherit) {
++					enum rsbac_target_t parent_target;
++					union rsbac_target_id_t parent_tid;
++
++					/* free access to device_list_head - see above */
++					srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++					/* inheritance possible? */
++					if (!rsbac_get_parent(target, *tid_p,
++							      &parent_target,
++							      &parent_tid)) {
++						target = parent_target;
++						*tid_p = parent_tid;
++						continue;
++					} else {
++						value->rc_type_fd =
++							def_rc_root_dir_aci.rc_type_fd;
++						return 0;
++					}
++				}
++				break;
++			case A_rc_force_role:
++				value->rc_force_role = aci.rc_force_role;
++				if (value->rc_force_role == RC_role_inherit_parent
++				    && inherit) {
++					enum rsbac_target_t parent_target;
++					union rsbac_target_id_t parent_tid;
++
++					/* free access to device_list_head - see above */
++					srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++					/* inheritance possible? */
++					if (!rsbac_get_parent(target, *tid_p,
++							      &parent_target,
++							      &parent_tid)) {
++						target = parent_target;
++						*tid_p = parent_tid;
++						continue;
++					} else {
++						value->rc_force_role =
++							def_rc_root_dir_aci.rc_force_role;
++						return 0;
++					}
++				}
++				break;
++			case A_rc_initial_role:
++				value->rc_initial_role = aci.rc_initial_role;
++				if (value->rc_initial_role == RC_role_inherit_parent
++				    && inherit) {
++					enum rsbac_target_t parent_target;
++					union rsbac_target_id_t parent_tid;
++
++					/* free access to device_list_head - see above */
++					srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++					/* inheritance possible? */
++					if (!rsbac_get_parent(target, *tid_p,
++							      &parent_target,
++							      &parent_tid)) {
++						target = parent_target;
++						*tid_p = parent_tid;
++						continue;
++					} else {
++						value->rc_initial_role =
++							def_rc_root_dir_aci.rc_initial_role;
++						return 0;
++					}
++				}
++				break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* RC */
++
++#if defined(CONFIG_RSBAC_AUTH)
++ case SW_AUTH:
++ {
++ struct rsbac_auth_fd_aci_t aci =
++ DEFAULT_AUTH_FD_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ device_p->
++ handles.auth,
++ NULL,
++ &tid_p->file.
++ inode, &aci);
++ switch (attr) {
++ case A_auth_may_setuid:
++ value->auth_may_setuid =
++ aci.auth_may_setuid;
++ break;
++ case A_auth_may_set_cap:
++ value->auth_may_set_cap =
++ aci.auth_may_set_cap;
++ break;
++ case A_auth_learn:
++ value->auth_learn = aci.auth_learn;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* AUTH */
++
++#if defined(CONFIG_RSBAC_CAP)
++ case SW_CAP:
++ {
++ struct rsbac_cap_fd_aci_t aci =
++ DEFAULT_CAP_FD_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ device_p->
++ handles.cap,
++ NULL,
++ &tid_p->file.
++ inode, &aci);
++ switch (attr) {
++ case A_min_caps:
++ value->min_caps.cap[0] = aci.min_caps.cap[0];
++ value->min_caps.cap[1] = aci.min_caps.cap[1];
++ break;
++ case A_max_caps:
++ value->max_caps.cap[0] = aci.max_caps.cap[0];
++ value->max_caps.cap[1] = aci.max_caps.cap[1];
++ break;
++ case A_cap_ld_env:
++ value->cap_ld_env = aci.cap_ld_env;
++ if ((value->cap_ld_env == LD_inherit) && inherit) {
++ enum rsbac_target_t parent_target;
++ union rsbac_target_id_t parent_tid;
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ if (!rsbac_get_parent(target,
++ *tid_p,
++ &parent_target,
++ &parent_tid)) {
++ target = parent_target;
++ *tid_p = parent_tid;
++ continue;
++ } else {
++ value->cap_ld_env = LD_deny;
++ return 0;
++ }
++ }
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* CAP */
++
++#if defined(CONFIG_RSBAC_RES)
++ case SW_RES:
++ {
++ struct rsbac_res_fd_aci_t aci =
++ DEFAULT_RES_FD_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ device_p->
++ handles.res,
++ NULL,
++ &tid_p->file.
++ inode, &aci);
++ switch (attr) {
++ case A_res_min:
++ memcpy(&value->res_array,
++ &aci.res_min,
++ sizeof(aci.res_min));
++ break;
++ case A_res_max:
++ memcpy(&value->res_array,
++ &aci.res_max,
++ sizeof(aci.res_max));
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* RES */
++
++#if defined(CONFIG_RSBAC_PAX)
++ case SW_PAX:
++ {
++ switch (attr) {
++ case A_pax_flags:
++ value->pax_flags =
++ RSBAC_PAX_DEF_FLAGS;
++ rsbac_ta_list_get_data_ttl
++ (ta_number,
++ device_p->handles.pax,
++ NULL, &tid_p->file.inode,
++ &value->pax_flags);
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* PAX */
++
++ default:
++ err = -RSBAC_EINVALIDMODULE;
++ }
++ /* free access to device_list_head */
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ /* and return */
++ return err;
++ } /* end of for(;;) loop for inheritance */
++}
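++
++/*
++ * Sketch, not from the original code: the for (;;) loop in get_attr_fd()
++ * above implements one pattern for every module - look the value up on the
++ * current object, and if it carries the module's "inherit" marker, drop the
++ * SRCU read lock, move to the parent via rsbac_get_parent() and retry; once
++ * no parent is left, fall back to the compiled-in root-directory default.
++ * The helper below restates that pattern with the per-object lookup
++ * abstracted into a callback; resolve_inherited() and its parameters are
++ * invented names, not RSBAC interfaces.
++ */
++static inline long resolve_inherited(enum rsbac_target_t target,
++				     union rsbac_target_id_t tid,
++				     int (*get_local)(union rsbac_target_id_t *tid_p,
++						      long *value_p),
++				     long inherit_marker,
++				     long root_default)
++{
++	long value;
++
++	for (;;) {
++		if (!get_local(&tid, &value) && (value != inherit_marker))
++			return value;		/* found a real value */
++		if (rsbac_get_parent(target, tid, &target, &tid))
++			return root_default;	/* no parent left */
++	}
++}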
++
++static int get_attr_dev(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ struct rsbac_dev_desc_t dev,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value,
++ rsbac_boolean_t inherit)
++{
++ int err = 0;
++/* rsbac_pr_debug(ds, "Getting dev attribute\n"); */
++ switch (module) {
++ case SW_GEN:
++ {
++ struct rsbac_gen_dev_aci_t aci =
++ DEFAULT_GEN_DEV_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ dev_handles.gen,
++ NULL, &dev, &aci);
++ switch (attr) {
++ case A_log_array_low:
++ value->log_array_low = aci.log_array_low;
++ break;
++ case A_log_array_high:
++ value->log_array_high = aci.log_array_high;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++
++#if defined(CONFIG_RSBAC_MAC)
++ case SW_MAC:
++ {
++ struct rsbac_mac_dev_aci_t aci =
++ DEFAULT_MAC_DEV_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ dev_handles.mac,
++ NULL, &dev, &aci);
++ switch (attr) {
++ case A_security_level:
++ value->security_level = aci.sec_level;
++ break;
++ case A_mac_categories:
++ value->mac_categories = aci.mac_categories;
++ break;
++ case A_mac_check:
++ value->mac_check = aci.mac_check;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* MAC */
++
++#if defined(CONFIG_RSBAC_PM)
++ case SW_PM:
++ {
++ struct rsbac_pm_dev_aci_t aci = DEFAULT_PM_DEV_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ dev_handles.pm,
++ NULL, &dev, &aci);
++ switch (attr) {
++ case A_pm_object_class:
++ value->pm_object_class =
++ aci.pm_object_class;
++ break;
++ case A_pm_object_type:
++ value->pm_object_type = aci.pm_object_type;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* PM */
++
++#if defined(CONFIG_RSBAC_RC)
++ case SW_RC:
++ {
++ rsbac_rc_type_id_t type = RSBAC_RC_GENERAL_TYPE;
++
++			switch (dev.type) {
++			case D_char:
++			case D_block:
++				if (rsbac_ta_list_get_data_ttl(ta_number,
++							       dev_handles.rc,
++							       NULL, &dev, &type)
++				    || ((type == RC_type_inherit_parent)
++					&& inherit)) {
++					dev.minor = 0;
++					rsbac_ta_list_get_data_ttl(ta_number,
++								   dev_major_handles.rc,
++								   NULL, &dev, &type);
++				}
++				break;
++			case D_char_major:
++			case D_block_major:
++				dev.type -= (D_block_major - D_block);
++				dev.minor = 0;
++				rsbac_ta_list_get_data_ttl(ta_number,
++							   dev_major_handles.rc,
++							   NULL, &dev, &type);
++				break;
++			default:
++				return -RSBAC_EINVALIDTARGET;
++			}
++ switch (attr) {
++ case A_rc_type:
++ value->rc_type = type;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* RC */
++
++ default:
++ err = -RSBAC_EINVALIDMODULE;
++ }
++ /* and return */
++ return err;
++}
++
++static int get_attr_ipc(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t *tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value,
++ rsbac_boolean_t inherit)
++{
++ int err = 0;
++ /* rsbac_pr_debug(ds, "Getting ipc attribute\n"); */
++ /* lookup only, if not sock or (sock-id != NULL), OK with NULL fifo */
++ switch (module) {
++#if defined(CONFIG_RSBAC_MAC)
++ case SW_MAC:
++ {
++ struct rsbac_mac_ipc_aci_t aci =
++ DEFAULT_MAC_IPC_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ ipc_handles.mac,
++ NULL,
++ &tid_p->ipc, &aci);
++ switch (attr) {
++ case A_security_level:
++ value->security_level = aci.sec_level;
++ break;
++ case A_mac_categories:
++ value->mac_categories = aci.mac_categories;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* MAC */
++
++#if defined(CONFIG_RSBAC_PM)
++ case SW_PM:
++ {
++ struct rsbac_pm_ipc_aci_t aci = DEFAULT_PM_IPC_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ ipc_handles.pm,
++ NULL,
++ &tid_p->ipc, &aci);
++ switch (attr) {
++ case A_pm_object_class:
++ value->pm_object_class =
++ aci.pm_object_class;
++ break;
++ case A_pm_ipc_purpose:
++ value->pm_ipc_purpose = aci.pm_ipc_purpose;
++ break;
++ case A_pm_object_type:
++ value->pm_object_type = aci.pm_object_type;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* PM */
++
++#if defined(CONFIG_RSBAC_RC)
++ case SW_RC:
++ {
++ rsbac_rc_type_id_t type = RSBAC_RC_GENERAL_TYPE;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ ipc_handles.rc,
++ NULL,
++ &tid_p->ipc, &type);
++ switch (attr) {
++ case A_rc_type:
++ value->rc_type = type;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* RC */
++
++#if defined(CONFIG_RSBAC_JAIL)
++ case SW_JAIL:
++ {
++ rsbac_jail_id_t id = RSBAC_JAIL_DEF_ID;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ ipc_handles.jail,
++ NULL, &tid_p->ipc, &id);
++ switch (attr) {
++ case A_jail_id:
++ value->jail_id = id;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* JAIL */
++
++ default:
++ err = -RSBAC_EINVALIDMODULE;
++ }
++ /* and return */
++ return err;
++}
++
++static int get_attr_user(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t *tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value,
++ rsbac_boolean_t inherit)
++{
++ int err = 0;
++#if defined(CONFIG_RSBAC_UM_VIRTUAL) || defined(CONFIG_RSBAC_RES)
++ rsbac_uid_t all_user;
++#endif
++
++ /* rsbac_pr_debug(ds, "Getting user attribute\n"); */
++ switch (module) {
++ case SW_GEN:
++ {
++ struct rsbac_gen_user_aci_t aci =
++ DEFAULT_GEN_U_ACI;
++
++#if defined(CONFIG_RSBAC_UM_VIRTUAL)
++ err = rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.gen,
++ NULL,
++ &tid_p->user, &aci);
++ if (err == -RSBAC_ENOTFOUND) {
++ err = 0;
++ if(inherit) {
++ all_user = RSBAC_GEN_UID(RSBAC_UID_SET(tid_p->user), RSBAC_ALL_USERS);
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.gen,
++ NULL,
++ &all_user,
++ &aci);
++ }
++ }
++#else
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.gen,
++ NULL,
++ &tid_p->user, &aci);
++#endif
++ switch (attr) {
++ case A_pseudo:
++ value->pseudo = aci.pseudo;
++ break;
++ case A_log_user_based:
++ value->log_user_based = aci.log_user_based;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++
++#if defined(CONFIG_RSBAC_MAC)
++ case SW_MAC:
++ {
++ struct rsbac_mac_user_aci_t aci =
++ DEFAULT_MAC_U_ACI;
++
++#if defined(CONFIG_RSBAC_UM_VIRTUAL)
++ err = rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.mac,
++ NULL,
++ &tid_p->user, &aci);
++ if (err == -RSBAC_ENOTFOUND) {
++ err = 0;
++ if(inherit) {
++ all_user = RSBAC_GEN_UID(RSBAC_UID_SET(tid_p->user), RSBAC_ALL_USERS);
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.mac,
++ NULL,
++ &all_user,
++ &aci);
++ }
++ }
++#else
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.mac,
++ NULL,
++ &tid_p->user, &aci);
++#endif
++ switch (attr) {
++ case A_security_level:
++ value->security_level = aci.security_level;
++ break;
++ case A_initial_security_level:
++ value->security_level =
++ aci.initial_security_level;
++ break;
++ case A_min_security_level:
++ value->security_level =
++ aci.min_security_level;
++ break;
++ case A_mac_categories:
++ value->mac_categories = aci.mac_categories;
++ break;
++ case A_mac_initial_categories:
++ value->mac_categories =
++ aci.mac_initial_categories;
++ break;
++ case A_mac_min_categories:
++ value->mac_categories =
++ aci.mac_min_categories;
++ break;
++ case A_system_role:
++ case A_mac_role:
++ value->system_role = aci.system_role;
++ break;
++ case A_mac_user_flags:
++ value->mac_user_flags = aci.mac_user_flags;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* MAC */
++
++#if defined(CONFIG_RSBAC_PM)
++ case SW_PM:
++ {
++ struct rsbac_pm_user_aci_t aci = DEFAULT_PM_U_ACI;
++
++#if defined(CONFIG_RSBAC_UM_VIRTUAL)
++ err = rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.pm,
++ NULL,
++ &tid_p->user, &aci);
++ if (err == -RSBAC_ENOTFOUND) {
++ err = 0;
++ if(inherit) {
++ all_user = RSBAC_GEN_UID(RSBAC_UID_SET(tid_p->user), RSBAC_ALL_USERS);
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.pm,
++ NULL,
++ &all_user,
++ &aci);
++ }
++ }
++#else
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.pm,
++ NULL,
++ &tid_p->user, &aci);
++#endif
++ switch (attr) {
++ case A_pm_task_set:
++ value->pm_task_set = aci.pm_task_set;
++ break;
++ case A_pm_role:
++ value->pm_role = aci.pm_role;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* PM */
++
++#if defined(CONFIG_RSBAC_DAZ)
++ case SW_DAZ:
++ {
++ rsbac_system_role_int_t role = SR_user;
++
++#if defined(CONFIG_RSBAC_UM_VIRTUAL)
++ err = rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.daz,
++ NULL,
++ &tid_p->user, &role);
++ if (err == -RSBAC_ENOTFOUND) {
++ err = 0;
++ if(inherit) {
++ all_user = RSBAC_GEN_UID(RSBAC_UID_SET(tid_p->user), RSBAC_ALL_USERS);
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.daz,
++ NULL,
++ &all_user,
++ &role);
++ }
++ }
++#else
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.daz,
++ NULL,
++ &tid_p->user, &role);
++#endif
++ switch (attr) {
++ case A_system_role:
++ case A_daz_role:
++ value->system_role = role;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* DAZ */
++
++#if defined(CONFIG_RSBAC_FF)
++ case SW_FF:
++ {
++ rsbac_system_role_int_t role = SR_user;
++
++#if defined(CONFIG_RSBAC_UM_VIRTUAL)
++ err = rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.ff,
++ NULL,
++ &tid_p->user, &role);
++ if (err == -RSBAC_ENOTFOUND) {
++ err = 0;
++ if(inherit) {
++ all_user = RSBAC_GEN_UID(RSBAC_UID_SET(tid_p->user), RSBAC_ALL_USERS);
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.ff,
++ NULL,
++ &all_user,
++ &role);
++ }
++ }
++#else
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.ff,
++ NULL,
++ &tid_p->user, &role);
++#endif
++ switch (attr) {
++ case A_system_role:
++ case A_ff_role:
++ value->system_role = role;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* FF */
++
++#if defined(CONFIG_RSBAC_RC)
++ case SW_RC:
++ {
++ struct rsbac_rc_user_aci_t aci = DEFAULT_RC_U_ACI;
++
++#if defined(CONFIG_RSBAC_UM_VIRTUAL)
++ err = rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.rc,
++ NULL,
++ &tid_p->user, &aci);
++ if (err == -RSBAC_ENOTFOUND) {
++ err = 0;
++ if(inherit) {
++ all_user = RSBAC_GEN_UID(RSBAC_UID_SET(tid_p->user), RSBAC_ALL_USERS);
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.rc,
++ NULL,
++ &all_user,
++ &aci);
++ }
++ }
++#else
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.rc,
++ NULL,
++ &tid_p->user, &aci);
++#endif
++ switch (attr) {
++ case A_rc_def_role:
++ value->rc_def_role = aci.rc_role;
++ break;
++ case A_rc_type:
++ value->rc_type = aci.rc_type;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* RC */
++
++#if defined(CONFIG_RSBAC_AUTH)
++ case SW_AUTH:
++ {
++ rsbac_system_role_int_t role = SR_user;
++
++#if defined(CONFIG_RSBAC_UM_VIRTUAL)
++ err = rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.auth,
++ NULL,
++ &tid_p->user, &role);
++ if (err == -RSBAC_ENOTFOUND) {
++ err = 0;
++ if(inherit) {
++ all_user = RSBAC_GEN_UID(RSBAC_UID_SET(tid_p->user), RSBAC_ALL_USERS);
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.auth,
++ NULL,
++ &all_user,
++ &role);
++ }
++ }
++#else
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.auth,
++ NULL,
++ &tid_p->user, &role);
++#endif
++ switch (attr) {
++ case A_system_role:
++ case A_auth_role:
++ value->system_role = role;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* AUTH */
++
++#if defined(CONFIG_RSBAC_CAP)
++ case SW_CAP:
++ {
++ struct rsbac_cap_user_aci_t aci =
++ DEFAULT_CAP_U_ACI;
++
++#if defined(CONFIG_RSBAC_UM_VIRTUAL)
++ err = rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.cap,
++ NULL,
++ &tid_p->user, &aci);
++ if (err == -RSBAC_ENOTFOUND) {
++ err = 0;
++ if(inherit) {
++ all_user = RSBAC_GEN_UID(RSBAC_UID_SET(tid_p->user), RSBAC_ALL_USERS);
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.cap,
++ NULL,
++ &all_user,
++ &aci);
++ }
++ }
++#else
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.cap,
++ NULL,
++ &tid_p->user, &aci);
++#endif
++ switch (attr) {
++ case A_system_role:
++ case A_cap_role:
++ value->system_role = aci.cap_role;
++ break;
++ case A_min_caps:
++ value->min_caps.cap[0] = aci.min_caps.cap[0];
++ value->min_caps.cap[1] = aci.min_caps.cap[1];
++ break;
++ case A_max_caps:
++ value->max_caps.cap[0] = aci.max_caps.cap[0];
++ value->max_caps.cap[1] = aci.max_caps.cap[1];
++ break;
++ case A_cap_ld_env:
++ value->cap_ld_env = aci.cap_ld_env;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* CAP */
++
++#if defined(CONFIG_RSBAC_JAIL)
++ case SW_JAIL:
++ {
++ rsbac_system_role_int_t role = SR_user;
++
++#if defined(CONFIG_RSBAC_UM_VIRTUAL)
++ err = rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.jail,
++ NULL,
++ &tid_p->user, &role);
++ if (err == -RSBAC_ENOTFOUND) {
++ err = 0;
++ if(inherit) {
++ all_user = RSBAC_GEN_UID(RSBAC_UID_SET(tid_p->user), RSBAC_ALL_USERS);
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.jail,
++ NULL,
++ &all_user,
++ &role);
++ }
++ }
++#else
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.jail,
++ NULL,
++ &tid_p->user, &role);
++#endif
++ switch (attr) {
++ case A_system_role:
++ case A_jail_role:
++ value->system_role = role;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* JAIL */
++
++#if defined(CONFIG_RSBAC_RES)
++ case SW_RES:
++ {
++ struct rsbac_res_user_aci_t aci =
++ DEFAULT_RES_U_ACI;
++
++ err = rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.res,
++ NULL,
++ &tid_p->user, &aci);
++ if (err == -RSBAC_ENOTFOUND) {
++ err = 0;
++ if(inherit) {
++ all_user = RSBAC_GEN_UID(RSBAC_UID_SET(tid_p->user), RSBAC_ALL_USERS);
++ err = rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.res,
++ NULL,
++ &all_user,
++ &aci);
++ if (err == -RSBAC_ENOTFOUND) {
++ err = 0;
++ if (RSBAC_UID_SET(tid_p->user)) {
++ all_user = RSBAC_ALL_USERS;
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.res,
++ NULL,
++ &all_user,
++ &aci);
++ }
++ }
++ }
++ }
++ switch (attr) {
++ case A_system_role:
++ case A_res_role:
++ value->system_role = aci.res_role;
++ break;
++ case A_res_min:
++ memcpy(&value->res_array, &aci.res_min,
++ sizeof(aci.res_min));
++ break;
++ case A_res_max:
++ memcpy(&value->res_array, &aci.res_max,
++ sizeof(aci.res_max));
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* RES */
++
++#if defined(CONFIG_RSBAC_PAX)
++ case SW_PAX:
++ {
++ rsbac_system_role_int_t role = SR_user;
++
++#if defined(CONFIG_RSBAC_UM_VIRTUAL)
++ err = rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.pax,
++ NULL,
++ &tid_p->user, &role);
++ if (err == -RSBAC_ENOTFOUND) {
++ err = 0;
++ if(inherit) {
++ all_user = RSBAC_GEN_UID(RSBAC_UID_SET(tid_p->user), RSBAC_ALL_USERS);
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.pax,
++ NULL,
++ &all_user,
++ &role);
++ }
++ }
++#else
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.pax,
++ NULL,
++ &tid_p->user, &role);
++#endif
++ switch (attr) {
++ case A_system_role:
++ case A_pax_role:
++ value->system_role = role;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* PAX */
++
++ default:
++ err = -RSBAC_EINVALIDMODULE;
++ }
++ /* and return */
++ return err;
++}
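++
++/*
++ * Sketch, not part of the original sources: with CONFIG_RSBAC_UM_VIRTUAL the
++ * branches above all follow the same fallback - try the per-user entry first
++ * and, if the list returns -RSBAC_ENOTFOUND, retry with the RSBAC_ALL_USERS
++ * entry of the same virtual set.  Shown here for the FF role list as a
++ * hypothetical helper; it assumes CONFIG_RSBAC_FF and CONFIG_RSBAC_UM_VIRTUAL
++ * are enabled so that user_handles.ff exists.
++ */
++static inline rsbac_system_role_int_t example_ff_role(rsbac_list_ta_number_t ta_number,
++						      rsbac_uid_t user)
++{
++	rsbac_system_role_int_t role = SR_user;	/* compiled-in default */
++
++	if (rsbac_ta_list_get_data_ttl(ta_number, user_handles.ff, NULL,
++				       &user, &role) == -RSBAC_ENOTFOUND) {
++		/* no per-user entry: fall back to ALL_USERS in the same set */
++		rsbac_uid_t all_user = RSBAC_GEN_UID(RSBAC_UID_SET(user),
++						     RSBAC_ALL_USERS);
++
++		rsbac_ta_list_get_data_ttl(ta_number, user_handles.ff, NULL,
++					   &all_user, &role);
++	}
++	return role;
++}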
++
++static int get_attr_process(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t *tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value,
++ rsbac_boolean_t inherit)
++{
++ int err = 0;
++/* rsbac_pr_debug(ds, "Getting process attribute"); */
++ switch (module) {
++ case SW_GEN:
++ {
++ struct rsbac_gen_process_aci_t aci =
++ DEFAULT_GEN_P_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ process_handles.gen,
++ NULL, &tid_p->process,
++ &aci);
++ switch (attr) {
++ case A_vset:
++ value->vset = aci.vset;
++ break;
++ case A_log_program_based:
++ value->log_program_based =
++ aci.log_program_based;
++ break;
++ case A_fake_root_uid:
++ value->fake_root_uid = aci.fake_root_uid;
++ break;
++ case A_audit_uid:
++ value->audit_uid = aci.audit_uid;
++ break;
++ case A_auid_exempt:
++ value->auid_exempt = aci.auid_exempt;
++ break;
++ case A_remote_ip:
++ value->remote_ip = aci.remote_ip;
++ break;
++ case A_kernel_thread:
++ value->kernel_thread = aci.kernel_thread;
++ break;
++#if defined(CONFIG_RSBAC_AUTH_LEARN) || defined(CONFIG_RSBAC_CAP_LEARN)
++ case A_program_file:
++ value->program_file = aci.program_file;
++ break;
++#endif
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++
++#if defined(CONFIG_RSBAC_MAC)
++ case SW_MAC:
++ {
++ struct rsbac_mac_process_aci_t aci =
++ DEFAULT_MAC_P_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ process_handles.mac,
++ NULL, &tid_p->process,
++ &aci);
++ switch (attr) {
++ case A_security_level:
++ value->security_level =
++ aci.owner_sec_level;
++ break;
++ case A_initial_security_level:
++ value->security_level =
++ aci.owner_initial_sec_level;
++ break;
++ case A_min_security_level:
++ value->security_level =
++ aci.owner_min_sec_level;
++ break;
++ case A_mac_categories:
++ value->mac_categories =
++ aci.mac_owner_categories;
++ break;
++ case A_mac_initial_categories:
++ value->mac_categories =
++ aci.mac_owner_initial_categories;
++ break;
++ case A_mac_min_categories:
++ value->mac_categories =
++ aci.mac_owner_min_categories;
++ break;
++ case A_current_sec_level:
++ value->current_sec_level =
++ aci.current_sec_level;
++ break;
++ case A_mac_curr_categories:
++ value->mac_categories =
++ aci.mac_curr_categories;
++ break;
++ case A_min_write_open:
++ value->min_write_open = aci.min_write_open;
++ break;
++ case A_min_write_categories:
++ value->mac_categories =
++ aci.min_write_categories;
++ break;
++ case A_max_read_open:
++ value->max_read_open = aci.max_read_open;
++ break;
++ case A_max_read_categories:
++ value->mac_categories =
++ aci.max_read_categories;
++ break;
++ case A_mac_process_flags:
++ value->mac_process_flags =
++ aci.mac_process_flags;
++ break;
++ case A_mac_auto:
++ if (aci.mac_process_flags & MAC_auto)
++ value->mac_auto = TRUE;
++ else
++ value->mac_auto = FALSE;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* MAC */
++
++#if defined(CONFIG_RSBAC_PM)
++ case SW_PM:
++ {
++ struct rsbac_pm_process_aci_t aci =
++ DEFAULT_PM_P_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ process_handles.pm,
++ NULL,
++ &tid_p->process, &aci);
++ switch (attr) {
++ case A_pm_tp:
++ value->pm_tp = aci.pm_tp;
++ break;
++ case A_pm_current_task:
++ value->pm_current_task =
++ aci.pm_current_task;
++ break;
++ case A_pm_process_type:
++ value->pm_process_type =
++ aci.pm_process_type;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* PM */
++
++#if defined(CONFIG_RSBAC_DAZ)
++ case SW_DAZ:
++ {
++ struct rsbac_daz_process_aci_t aci =
++ DEFAULT_DAZ_P_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ process_handles.daz,
++ NULL,
++ &tid_p->process, &aci);
++ switch (attr) {
++ case A_daz_scanner:
++ value->daz_scanner = aci.daz_scanner;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* DAZ */
++
++#if defined(CONFIG_RSBAC_RC)
++ case SW_RC:
++ {
++ struct rsbac_rc_process_aci_t aci =
++ DEFAULT_RC_P_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ process_handles.rc,
++ NULL, &tid_p->process,
++ &aci);
++ switch (attr) {
++ case A_rc_role:
++ value->rc_role = aci.rc_role;
++ break;
++ case A_rc_type:
++ value->rc_type = aci.rc_type;
++ break;
++ case A_rc_select_type:
++ value->rc_select_type = aci.rc_select_type;
++ break;
++ case A_rc_force_role:
++ value->rc_force_role = aci.rc_force_role;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* RC */
++
++#if defined(CONFIG_RSBAC_AUTH)
++ case SW_AUTH:
++ {
++ struct rsbac_auth_process_aci_t aci =
++ DEFAULT_AUTH_P_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ process_handles.auth,
++ NULL,
++ &tid_p->process, &aci);
++ switch (attr) {
++ case A_auth_may_setuid:
++ value->auth_may_setuid =
++ aci.auth_may_setuid;
++ break;
++ case A_auth_may_set_cap:
++ value->auth_may_set_cap =
++ aci.auth_may_set_cap;
++ break;
++#if defined(CONFIG_RSBAC_AUTH_LEARN)
++ case A_auth_start_uid:
++ value->auth_start_uid = aci.auth_start_uid;
++ break;
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ case A_auth_start_euid:
++ value->auth_start_euid =
++ aci.auth_start_euid;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ case A_auth_start_gid:
++ value->auth_start_gid = aci.auth_start_gid;
++ break;
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ case A_auth_start_egid:
++ value->auth_start_egid =
++ aci.auth_start_egid;
++ break;
++#endif
++#endif
++ case A_auth_learn:
++ value->auth_learn = aci.auth_learn;
++ break;
++#else
++ case A_auth_learn:
++ value->auth_learn = FALSE;
++ break;
++#endif
++ case A_auth_last_auth:
++ value->auth_last_auth = aci.auth_last_auth;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* AUTH */
++
++#if defined(CONFIG_RSBAC_CAP)
++ case SW_CAP:
++ {
++ struct rsbac_cap_process_aci_t aci =
++ DEFAULT_CAP_P_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ process_handles.cap,
++ NULL,
++ &tid_p->process, &aci);
++ switch (attr) {
++ case A_cap_process_hiding:
++ value->cap_process_hiding =
++ aci.cap_process_hiding;
++ break;
++#if defined(CONFIG_RSBAC_CAP_LOG_MISSING) || defined(CONFIG_RSBAC_CAP_LEARN)
++ case A_max_caps_user:
++ value->max_caps_user.cap[0] = aci.max_caps_user.cap[0];
++ value->max_caps_user.cap[1] = aci.max_caps_user.cap[1];
++ break;
++ case A_max_caps_program:
++ value->max_caps_program.cap[0] = aci.max_caps_program.cap[0];
++ value->max_caps_program.cap[1] = aci.max_caps_program.cap[1];
++ break;
++#endif
++ case A_cap_ld_env:
++ value->cap_ld_env = aci.cap_ld_env;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* CAP */
++
++#if defined(CONFIG_RSBAC_JAIL)
++ case SW_JAIL:
++ {
++ struct rsbac_jail_process_aci_t aci =
++ DEFAULT_JAIL_P_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ process_handles.jail,
++ NULL, &tid_p->process,
++ &aci);
++ switch (attr) {
++ case A_jail_id:
++ value->jail_id = aci.id;
++ break;
++ case A_jail_parent:
++ value->jail_parent = aci.parent;
++ break;
++ case A_jail_ip:
++ value->jail_ip = aci.ip;
++ break;
++ case A_jail_flags:
++ value->jail_flags = aci.flags;
++ break;
++ case A_jail_max_caps:
++ value->jail_max_caps.cap[0] = aci.max_caps.cap[0];
++ value->jail_max_caps.cap[1] = aci.max_caps.cap[1];
++ break;
++ case A_jail_scd_get:
++ value->jail_scd_get = aci.scd_get;
++ break;
++ case A_jail_scd_modify:
++ value->jail_scd_modify = aci.scd_modify;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* JAIL */
++
++#if defined(CONFIG_RSBAC_PAX)
++ case SW_PAX:
++ {
++ struct task_struct *task_p;
++
++ switch (attr) {
++ case A_pax_flags:
++ read_lock(&tasklist_lock);
++ task_p = pid_task(tid_p->process, PIDTYPE_PID);
++ if (task_p) {
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ if (task_p->mm)
++ value->pax_flags =
++ task_p->mm->
++ pax_flags &
++ RSBAC_PAX_ALL_FLAGS;
++ else
++#endif
++ value->pax_flags = 0;
++ } else
++ err = -RSBAC_EINVALIDTARGET;
++ read_unlock(&tasklist_lock);
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* PAX */
++
++ default:
++ err = -RSBAC_EINVALIDMODULE;
++ }
++ return err;
++}
++
++#ifdef CONFIG_RSBAC_UM
++static int get_attr_group(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t *tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value,
++ rsbac_boolean_t inherit)
++{
++ int err = 0;
++
++ /* rsbac_pr_debug(ds, "Getting group attribute\n"); */
++ switch (module) {
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ case SW_RC:
++ {
++ rsbac_rc_type_id_t type = RSBAC_RC_GENERAL_TYPE;
++
++#if defined(CONFIG_RSBAC_UM_VIRTUAL)
++ err = rsbac_ta_list_get_data_ttl(ta_number,
++ group_handles.rc,
++ NULL,
++ &tid_p->group, &type);
++ if (err == -RSBAC_ENOTFOUND) {
++ err = 0;
++ if(inherit) {
++ rsbac_gid_t all_group;
++
++ all_group = RSBAC_GEN_GID(RSBAC_GID_SET(tid_p->group), RSBAC_ALL_GROUPS);
++ rsbac_ta_list_get_data_ttl(ta_number,
++ group_handles.rc,
++ NULL,
++ &all_group,
++ &type);
++ }
++ }
++#else
++ rsbac_ta_list_get_data_ttl(ta_number,
++ group_handles.rc,
++ NULL,
++ &tid_p->group, &type);
++#endif
++ switch (attr) {
++ case A_rc_type:
++ value->rc_type = type;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* RC */
++
++ default:
++ err = -RSBAC_EINVALIDMODULE;
++ }
++ /* and return */
++ return err;
++}
++#endif
++
++#ifdef CONFIG_RSBAC_NET_DEV
++static int get_attr_netdev(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t *tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value,
++ rsbac_boolean_t inherit)
++{
++ int err = 0;
++ /* rsbac_pr_debug(ds, "Getting netdev attribute\n"); */
++ switch (module) {
++#if defined(CONFIG_RSBAC_IND_NETDEV_LOG)
++ case SW_GEN:
++ {
++ struct rsbac_gen_netdev_aci_t aci =
++ DEFAULT_GEN_NETDEV_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ netdev_handles.gen,
++ NULL,
++ &tid_p->netdev, &aci);
++ switch (attr) {
++ case A_log_array_low:
++ value->log_array_low = aci.log_array_low;
++ break;
++ case A_log_array_high:
++ value->log_array_high = aci.log_array_high;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ case SW_RC:
++ {
++ rsbac_rc_type_id_t type = RSBAC_RC_GENERAL_TYPE;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ netdev_handles.rc,
++ NULL,
++ &tid_p->netdev, &type);
++ switch (attr) {
++ case A_rc_type:
++ value->rc_type = type;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* RC */
++
++ default:
++ err = -RSBAC_EINVALIDMODULE;
++ }
++ /* and return */
++ return err;
++}
++#endif
++
++#ifdef CONFIG_RSBAC_NET_OBJ
++static int get_attr_nettemp(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t *tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value,
++ rsbac_boolean_t inherit)
++{
++ int err = 0;
++ /* rsbac_pr_debug(ds, "Getting nettemp attribute"); */
++ if (tid_p->nettemp
++ && !rsbac_ta_list_exist(ta_number, net_temp_handle, &tid_p->nettemp)
++ )
++ return -RSBAC_EINVALIDTARGET;
++ switch (module) {
++#if defined(CONFIG_RSBAC_IND_NETOBJ_LOG)
++ case SW_GEN:
++ {
++ struct rsbac_gen_fd_aci_t aci =
++ DEFAULT_GEN_NETOBJ_ACI;
++
++ if (tid_p->nettemp)
++ rsbac_ta_list_get_data_ttl(ta_number,
++ nettemp_handles.
++ gen, NULL,
++ &tid_p->nettemp,
++ &aci);
++ switch (attr) {
++ case A_log_array_low:
++ value->log_array_low = aci.log_array_low;
++ break;
++ case A_log_array_high:
++ value->log_array_high = aci.log_array_high;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif
++#if defined(CONFIG_RSBAC_MAC)
++ case SW_MAC:
++ {
++ struct rsbac_mac_netobj_aci_t aci =
++ DEFAULT_MAC_NETOBJ_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ nettemp_handles.mac,
++ NULL,
++ &tid_p->nettemp, &aci);
++ switch (attr) {
++ case A_security_level:
++ value->security_level = aci.sec_level;
++ break;
++ case A_mac_categories:
++ value->mac_categories = aci.mac_categories;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* MAC */
++
++#if defined(CONFIG_RSBAC_PM)
++ case SW_PM:
++ {
++ struct rsbac_pm_netobj_aci_t aci =
++ DEFAULT_PM_NETOBJ_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ nettemp_handles.pm,
++ NULL,
++ &tid_p->nettemp, &aci);
++ switch (attr) {
++ case A_pm_object_class:
++ value->pm_object_class =
++ aci.pm_object_class;
++ break;
++ case A_pm_ipc_purpose:
++ value->pm_ipc_purpose = aci.pm_ipc_purpose;
++ break;
++ case A_pm_object_type:
++ value->pm_object_type = aci.pm_object_type;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* PM */
++
++#if defined(CONFIG_RSBAC_RC)
++ case SW_RC:
++ {
++ struct rsbac_rc_nettemp_aci_t aci =
++ DEFAULT_RC_NETTEMP_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ nettemp_handles.rc,
++ NULL,
++ &tid_p->nettemp, &aci);
++ switch (attr) {
++ case A_rc_type:
++ value->rc_type = aci.netobj_type;
++ break;
++
++ case A_rc_type_nt:
++ value->rc_type = aci.nettemp_type;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* RC */
++
++ default:
++ err = -RSBAC_EINVALIDMODULE;
++ }
++ return err;
++}
++
++static int get_attr_netobj(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t *tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value,
++ rsbac_boolean_t inherit)
++{
++ int err = 0;
++ /* rsbac_pr_debug(ds, "Getting netobj attribute"); */
++ switch (module) {
++#if defined(CONFIG_RSBAC_IND_NETOBJ_LOG)
++ case SW_GEN:
++ {
++ struct rsbac_gen_netobj_aci_t aci =
++ DEFAULT_GEN_NETOBJ_ACI;
++			rsbac_net_temp_id_t temp = 0;
++
++ switch (attr) {
++ case A_local_log_array_low:
++ case A_local_log_array_high:
++ if(!ta_number && tid_p->netobj.local_temp)
++ temp = tid_p->netobj.local_temp;
++ else
++ rsbac_ta_net_lookup_templates(ta_number,
++ &tid_p->
++ netobj,
++ &temp, NULL);
++ break;
++ case A_remote_log_array_low:
++ case A_remote_log_array_high:
++ if(!ta_number && tid_p->netobj.remote_temp)
++ temp = tid_p->netobj.remote_temp;
++ else
++ rsbac_ta_net_lookup_templates(ta_number,
++ &tid_p->
++ netobj, NULL,
++ &temp);
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (temp)
++ rsbac_ta_list_get_data_ttl(ta_number,
++ nettemp_handles.
++ gen, NULL,
++ &temp, &aci);
++ switch (attr) {
++ case A_local_log_array_low:
++ case A_remote_log_array_low:
++ value->log_array_low = aci.log_array_low;
++ break;
++ case A_local_log_array_high:
++ case A_remote_log_array_high:
++ value->log_array_high = aci.log_array_high;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif
++#if defined(CONFIG_RSBAC_MAC)
++ case SW_MAC:
++ {
++ struct rsbac_mac_netobj_aci_t aci =
++ DEFAULT_MAC_NETOBJ_ACI;
++
++ switch (attr) {
++ case A_local_sec_level:
++ case A_local_mac_categories:
++ if (rsbac_ta_list_get_data_ttl(ta_number, lnetobj_handles.mac, NULL, &tid_p->netobj.sock_p, &aci)) { /* not found -> fallback to template */
++ rsbac_net_temp_id_t temp = 0;
++
++ if(!ta_number && tid_p->netobj.local_temp)
++ temp = tid_p->netobj.local_temp;
++ else
++ rsbac_ta_net_lookup_templates
++ (ta_number, &tid_p->netobj,
++ &temp, NULL);
++ if (temp)
++ rsbac_ta_list_get_data_ttl
++ (ta_number,
++ nettemp_handles.mac,
++ NULL, &temp, &aci);
++ }
++ break;
++
++ case A_remote_sec_level:
++ case A_remote_mac_categories:
++ if (rsbac_ta_list_get_data_ttl(ta_number, rnetobj_handles.mac, NULL, &tid_p->netobj.sock_p, &aci)) { /* not found -> fallback to template */
++ rsbac_net_temp_id_t temp = 0;
++
++ if(!ta_number && tid_p->netobj.remote_temp)
++ temp = tid_p->netobj.remote_temp;
++ else
++ rsbac_ta_net_lookup_templates
++ (ta_number, &tid_p->netobj,
++ NULL, &temp);
++ if (temp)
++ rsbac_ta_list_get_data_ttl
++ (ta_number,
++ nettemp_handles.mac,
++ NULL, &temp, &aci);
++ }
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (err)
++ break;
++ switch (attr) {
++ case A_local_sec_level:
++ case A_remote_sec_level:
++ value->security_level = aci.sec_level;
++ break;
++ case A_local_mac_categories:
++ case A_remote_mac_categories:
++ value->mac_categories = aci.mac_categories;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* MAC */
++
++#if defined(CONFIG_RSBAC_PM)
++ case SW_PM:
++ {
++ struct rsbac_pm_netobj_aci_t aci =
++ DEFAULT_PM_NETOBJ_ACI;
++
++ switch (attr) {
++ case A_local_pm_object_class:
++ case A_local_pm_ipc_purpose:
++ case A_local_pm_object_type:
++ if (rsbac_ta_list_get_data_ttl(ta_number, lnetobj_handles.pm, NULL, &tid_p->netobj.sock_p, &aci)) { /* not found -> fallback to template */
++ rsbac_net_temp_id_t temp = 0;
++
++ if(!ta_number && tid_p->netobj.local_temp)
++ temp = tid_p->netobj.local_temp;
++ else
++ rsbac_ta_net_lookup_templates
++ (ta_number, &tid_p->netobj,
++ &temp, NULL);
++ if (temp)
++ rsbac_ta_list_get_data_ttl
++ (ta_number,
++ nettemp_handles.pm,
++ NULL, &temp, &aci);
++ }
++ break;
++
++ case A_remote_pm_object_class:
++ case A_remote_pm_ipc_purpose:
++ case A_remote_pm_object_type:
++ if (rsbac_ta_list_get_data_ttl(ta_number, rnetobj_handles.pm, NULL, &tid_p->netobj.sock_p, &aci)) { /* not found -> fallback to template */
++ rsbac_net_temp_id_t temp = 0;
++
++ if(!ta_number && tid_p->netobj.remote_temp)
++ temp = tid_p->netobj.remote_temp;
++ else
++ rsbac_ta_net_lookup_templates
++ (ta_number, &tid_p->netobj,
++ NULL, &temp);
++ if (temp)
++ rsbac_ta_list_get_data_ttl
++ (ta_number,
++ nettemp_handles.pm,
++ NULL, &temp, &aci);
++ }
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (err)
++ break;
++ switch (attr) {
++ case A_local_pm_object_class:
++ case A_remote_pm_object_class:
++ value->pm_object_class =
++ aci.pm_object_class;
++ break;
++ case A_local_pm_ipc_purpose:
++ case A_remote_pm_ipc_purpose:
++ value->pm_ipc_purpose = aci.pm_ipc_purpose;
++ break;
++ case A_local_pm_object_type:
++ case A_remote_pm_object_type:
++ value->pm_object_type = aci.pm_object_type;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* PM */
++
++#if defined(CONFIG_RSBAC_RC)
++ case SW_RC:
++ {
++ rsbac_rc_type_id_t type = RSBAC_RC_GENERAL_TYPE;
++
++ switch (attr) {
++ case A_local_rc_type:
++ if (rsbac_ta_list_get_data_ttl(ta_number, lnetobj_handles.rc, NULL, &tid_p->netobj.sock_p, &type)) { /* not found -> fallback to template */
++ rsbac_net_temp_id_t temp = 0;
++ struct rsbac_rc_nettemp_aci_t aci;
++
++ if(!ta_number && tid_p->netobj.local_temp)
++ temp = tid_p->netobj.local_temp;
++ else
++ rsbac_ta_net_lookup_templates
++ (ta_number, &tid_p->netobj,
++ &temp, NULL);
++ if (temp) {
++ if (!rsbac_ta_list_get_data_ttl(ta_number, nettemp_handles.rc, NULL, &temp, &aci))
++ type = aci.netobj_type;
++ }
++ }
++ break;
++
++ case A_remote_rc_type:
++ if (rsbac_ta_list_get_data_ttl(ta_number, rnetobj_handles.rc, NULL, &tid_p->netobj.sock_p, &type)) { /* not found -> fallback to template */
++ rsbac_net_temp_id_t temp = 0;
++ struct rsbac_rc_nettemp_aci_t aci;
++
++ if(!ta_number && tid_p->netobj.remote_temp)
++ temp = tid_p->netobj.remote_temp;
++ else
++ rsbac_ta_net_lookup_templates
++ (ta_number, &tid_p->netobj,
++ NULL, &temp);
++ if (temp) {
++ if (!rsbac_ta_list_get_data_ttl(ta_number, nettemp_handles.rc, NULL, &temp, &aci))
++							type = aci.netobj_type;
++ }
++ }
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err)
++ value->rc_type = type;
++ }
++ break;
++#endif /* RC */
++
++ default:
++ err = -RSBAC_EINVALIDMODULE;
++ }
++ return err;
++}
++#endif /* NET_OBJ */
++
++#ifdef CONFIG_RSBAC_FD_CACHE
++int rsbac_fd_cache_invalidate(struct rsbac_fs_file_t * file_p)
++{
++ struct rsbac_fd_cache_desc_t fd_desc;
++ int i;
++
++ fd_desc.device = file_p->device;
++ fd_desc.inode = file_p->inode;
++
++ for (i = 0; i < SW_NONE; i++) {
++ if (fd_cache_handle[i])
++ rsbac_list_lol_remove(fd_cache_handle[i], &fd_desc);
++ }
++#ifdef CONFIG_RSBAC_XSTATS
++ fd_cache_invalidates++;
++#endif
++ return 0;
++}
++
++int rsbac_fd_cache_invalidate_all(void)
++{
++ int i;
++
++ for (i = 0; i < SW_NONE; i++) {
++ if (fd_cache_handle[i])
++ rsbac_list_lol_remove_all(fd_cache_handle[i]);
++ }
++#ifdef CONFIG_RSBAC_XSTATS
++ fd_cache_invalidate_alls++;
++#endif
++ return 0;
++}
++#endif
++
++/* The value parameter to rsbac_get_attr(s) and rsbac_set_attr() is a pointer */
++/* to the appropriate data structure holding the attribute value. */
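++/*
++ * Minimal usage sketch (illustrative only; error handling omitted):
++ * reading the GEN log_program_based attribute of the current process,
++ * outside any transaction (ta_number 0) and without inheritance.
++ *
++ *	union rsbac_target_id_t tid;
++ *	union rsbac_attribute_value_t value;
++ *
++ *	tid.process = task_pid(current);
++ *	if (!rsbac_ta_get_attr(0, SW_GEN, T_PROCESS, tid,
++ *			       A_log_program_based, &value, FALSE)) {
++ *		... value.log_program_based now holds the attribute ...
++ *	}
++ */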
++
++int rsbac_ta_get_attr(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value_p,
++ rsbac_boolean_t inherit)
++{
++ int err = 0;
++
++ if (!rsbac_initialized) {
++ rsbac_printk(KERN_WARNING "rsbac_get_attr(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (!value_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_get_attr(): called from interrupt, process %u(%s)!\n",
++ current->pid, current->comm);
++ return -RSBAC_EFROMINTERRUPT;
++ }
++#ifdef CONFIG_RSBAC_XSTATS
++ get_attr_count[target]++;
++#endif
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++#ifdef CONFIG_RSBAC_FD_CACHE
++ if (inherit && !ta_number && fd_cache_handle[module] && (RSBAC_MAJOR(tid.file.device) > 1)) {
++ struct rsbac_fd_cache_desc_t fd_desc;
++ rsbac_enum_t cache_attr = attr;
++
++ fd_desc.device = tid.file.device;
++ fd_desc.inode = tid.file.inode;
++ if (!rsbac_list_lol_get_subdata(fd_cache_handle[module],
++ &fd_desc, &cache_attr,
++ value_p)) {
++#ifdef CONFIG_RSBAC_XSTATS
++ fd_cache_hits[module]++;
++#endif
++ return 0;
++ }
++ err = get_attr_fd(0, module, target, &tid,
++ attr, value_p, TRUE);
++ if (!err && !ta_number) {
++#if 0
++ rsbac_pr_debug(auto, "Adding fd cache item device %02u:%02u inode %u attr %u\n",
++ RSBAC_MAJOR(fd_desc.device), RSBAC_MINOR(fd_desc.device),
++ fd_desc.inode, attr);
++#endif
++ rsbac_list_lol_subadd_ttl(fd_cache_handle[module],
++ rsbac_fd_cache_ttl,
++ &fd_desc, &cache_attr,
++ value_p);
++#ifdef CONFIG_RSBAC_XSTATS
++ fd_cache_misses[module]++;
++#endif
++ }
++ return err;
++ }
++#endif
++
++ return get_attr_fd(ta_number, module, target, &tid,
++ attr, value_p, inherit);
++
++ case T_DEV:
++ return get_attr_dev(ta_number, module, target, tid.dev,
++ attr, value_p, inherit);
++
++ case T_IPC:
++ return get_attr_ipc(ta_number, module, target, &tid,
++ attr, value_p, inherit);
++
++ case T_USER:
++ return get_attr_user(ta_number, module, target, &tid,
++ attr, value_p, inherit);
++
++ case T_PROCESS:
++ return get_attr_process(ta_number, module, target, &tid,
++ attr, value_p, inherit);
++
++#ifdef CONFIG_RSBAC_UM
++ case T_GROUP:
++ return get_attr_group(ta_number, module, target, &tid,
++ attr, value_p, inherit);
++#endif /* CONFIG_RSBAC_UM */
++
++#ifdef CONFIG_RSBAC_NET_DEV
++ case T_NETDEV:
++ return get_attr_netdev(ta_number, module, target, &tid,
++ attr, value_p, inherit);
++#endif
++
++#ifdef CONFIG_RSBAC_NET_OBJ
++ case T_NETTEMP:
++ return get_attr_nettemp(ta_number, module, target, &tid,
++ attr, value_p, inherit);
++
++ case T_NETOBJ:
++ return get_attr_netobj(ta_number, module, target, &tid,
++ attr, value_p, inherit);
++#endif
++
++ /* switch target: no valid target */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ return err;
++}
++
++/************************************************************************** */
++
++static int set_attr_fd(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t *tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value_p)
++{
++ int err = 0;
++ struct rsbac_device_list_item_t *device_p;
++ u_int hash;
++ int srcu_idx;
++
++ /* rsbac_pr_debug(ds, "Setting file/dir/fifo/symlink "
++ "attribute %u for device %02u:%02u, inode %lu, "
++ "dentry_p %p\n", attr,
++ RSBAC_MAJOR(tid_p->file.device),
++ RSBAC_MINOR(tid_p->file.device),
++ (u_long)tid_p->file.inode, tid_p->file.dentry_p); */
++ hash = device_hash(tid_p->file.device);
++ srcu_idx = srcu_read_lock(&device_list_srcu[hash]);
++ /* rsbac_pr_debug(ds, "passed device read lock\n"); */
++ /* lookup device */
++ device_p = lookup_device(tid_p->file.device, hash);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_set_attr(): unknown device %02u:%02u\n",
++ RSBAC_MAJOR(tid_p->file.
++ device),
++ RSBAC_MINOR(tid_p->file.
++ device));
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ switch (module) {
++ case SW_GEN:
++ {
++ struct rsbac_gen_fd_aci_t aci = DEFAULT_GEN_FD_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ device_p->handles.gen,
++ NULL,
++ &tid_p->file.inode,
++ &aci);
++ switch (attr) {
++ case A_log_array_low:
++ aci.log_array_low = value_p->log_array_low;
++ break;
++ case A_log_array_high:
++ aci.log_array_high =
++ value_p->log_array_high;
++ break;
++ case A_log_program_based:
++ aci.log_program_based =
++ value_p->log_program_based;
++ break;
++ case A_symlink_add_remote_ip:
++ aci.symlink_add_remote_ip =
++ value_p->symlink_add_remote_ip;
++ break;
++ case A_symlink_add_uid:
++ aci.symlink_add_uid =
++ value_p->symlink_add_uid;
++ break;
++ case A_symlink_add_mac_level:
++ aci.symlink_add_mac_level =
++ value_p->symlink_add_mac_level;
++ break;
++ case A_symlink_add_rc_role:
++ aci.symlink_add_rc_role =
++ value_p->symlink_add_rc_role;
++ break;
++ case A_linux_dac_disable:
++ aci.linux_dac_disable =
++ value_p->linux_dac_disable;
++ break;
++ case A_fake_root_uid:
++ aci.fake_root_uid = value_p->fake_root_uid;
++ break;
++ case A_auid_exempt:
++ aci.auid_exempt = value_p->auid_exempt;
++ break;
++ case A_vset:
++ aci.vset = value_p->vset;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ device_p->
++ handles.gen,
++ 0,
++ &tid_p->file.
++ inode, &aci);
++ }
++ }
++ break;
++
++#if defined(CONFIG_RSBAC_MAC)
++ case SW_MAC:
++ {
++ struct rsbac_mac_fd_aci_t aci = DEFAULT_MAC_FD_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ device_p->handles.mac,
++ NULL,
++ &tid_p->file.inode,
++ &aci);
++ switch (attr) {
++ case A_security_level:
++ aci.sec_level = value_p->security_level;
++ break;
++ case A_mac_categories:
++ aci.mac_categories =
++ value_p->mac_categories;
++ break;
++ case A_mac_auto:
++ aci.mac_auto = value_p->mac_auto;
++ break;
++ case A_mac_prop_trusted:
++ aci.mac_prop_trusted =
++ value_p->mac_prop_trusted;
++ break;
++ case A_mac_file_flags:
++ aci.mac_file_flags =
++ value_p->
++ mac_file_flags & RSBAC_MAC_F_FLAGS;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ device_p->
++ handles.mac,
++ 0,
++ &tid_p->file.
++ inode, &aci);
++ }
++ }
++ break;
++#endif /* MAC */
++
++#if defined(CONFIG_RSBAC_PM)
++ case SW_PM:
++ {
++ struct rsbac_pm_fd_aci_t aci = DEFAULT_PM_FD_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ device_p->handles.pm,
++ NULL,
++ &tid_p->file.inode,
++ &aci);
++ switch (attr) {
++ case A_pm_object_class:
++ aci.pm_object_class =
++ value_p->pm_object_class;
++ break;
++ case A_pm_tp:
++ aci.pm_tp = value_p->pm_tp;
++ break;
++ case A_pm_object_type:
++ aci.pm_object_type =
++ value_p->pm_object_type;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ device_p->
++ handles.pm,
++ 0,
++ &tid_p->file.
++ inode, &aci);
++ }
++ }
++ break;
++#endif /* PM */
++
++#if defined(CONFIG_RSBAC_DAZ)
++ case SW_DAZ:
++ {
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++ if (attr == A_daz_scanned) {
++ err =
++ rsbac_list_add_ttl(device_p->handles.dazs,
++ rsbac_daz_ttl,
++ &tid_p->file.inode,
++ &value_p->
++ daz_scanned);
++ } else
++#endif
++ {
++ struct rsbac_daz_fd_aci_t aci =
++ DEFAULT_DAZ_FD_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ device_p->
++ handles.daz,
++ NULL,
++ &tid_p->file.inode,
++ &aci);
++ switch (attr) {
++ case A_daz_scanner:
++ aci.daz_scanner =
++ value_p->daz_scanner;
++ break;
++ case A_daz_do_scan:
++ aci.daz_do_scan = value_p->daz_do_scan;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl
++ (ta_number,
++ device_p->handles.daz,
++ 0,
++ &tid_p->file.inode, &aci);
++ }
++ }
++ }
++ break;
++#endif /* DAZ */
++
++#if defined(CONFIG_RSBAC_FF)
++ case SW_FF:
++ {
++ switch (attr) {
++ case A_ff_flags:
++ err = rsbac_ta_list_add_ttl(ta_number,
++ device_p->
++ handles.ff,
++ 0,
++ &tid_p->file.
++ inode,
++ &value_p->ff_flags);
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* FF */
++
++#if defined(CONFIG_RSBAC_RC)
++ case SW_RC:
++ {
++ struct rsbac_rc_fd_aci_t aci = DEFAULT_RC_FD_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ device_p->handles.rc,
++ NULL,
++ &tid_p->file.inode,
++ &aci);
++ switch (attr) {
++ case A_rc_type_fd:
++ aci.rc_type_fd = value_p->rc_type_fd;
++ break;
++ case A_rc_force_role:
++ aci.rc_force_role = value_p->rc_force_role;
++ break;
++ case A_rc_initial_role:
++ aci.rc_initial_role =
++ value_p->rc_initial_role;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ device_p->
++ handles.rc,
++ 0,
++ &tid_p->file.
++ inode, &aci);
++ }
++ }
++ break;
++#endif /* RC */
++
++#if defined(CONFIG_RSBAC_AUTH)
++ case SW_AUTH:
++ {
++ struct rsbac_auth_fd_aci_t aci =
++ DEFAULT_AUTH_FD_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ device_p->handles.auth,
++ NULL,
++ &tid_p->file.inode,
++ &aci);
++ switch (attr) {
++ case A_auth_may_setuid:
++ aci.auth_may_setuid =
++ value_p->auth_may_setuid;
++ break;
++ case A_auth_may_set_cap:
++ aci.auth_may_set_cap =
++ value_p->auth_may_set_cap;
++ break;
++ case A_auth_learn:
++ aci.auth_learn = value_p->auth_learn;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ device_p->
++ handles.auth,
++ 0,
++ &tid_p->file.
++ inode, &aci);
++ }
++ }
++ break;
++#endif /* AUTH */
++
++#if defined(CONFIG_RSBAC_CAP)
++ case SW_CAP:
++ {
++ struct rsbac_cap_fd_aci_t aci = DEFAULT_CAP_FD_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ device_p->handles.cap,
++ NULL,
++ &tid_p->file.inode,
++ &aci);
++ switch (attr) {
++ case A_min_caps:
++ aci.min_caps.cap[0] = value_p->min_caps.cap[0];
++ aci.min_caps.cap[1] = value_p->min_caps.cap[1];
++ break;
++ case A_max_caps:
++ aci.max_caps.cap[0] = value_p->max_caps.cap[0];
++ aci.max_caps.cap[1] = value_p->max_caps.cap[1];
++ break;
++ case A_cap_ld_env:
++ aci.cap_ld_env = value_p->cap_ld_env;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ device_p->
++ handles.cap,
++ 0,
++ &tid_p->file.
++ inode, &aci);
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_RES)
++ case SW_RES:
++ {
++ struct rsbac_res_fd_aci_t aci = DEFAULT_RES_FD_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ device_p->handles.res,
++ NULL,
++ &tid_p->file.inode,
++ &aci);
++ switch (attr) {
++ case A_res_min:
++ memcpy(&aci.res_min, &value_p->res_array,
++ sizeof(aci.res_min));
++ break;
++ case A_res_max:
++ memcpy(&aci.res_max, &value_p->res_array,
++ sizeof(aci.res_max));
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ struct rsbac_res_fd_aci_t def_aci =
++ DEFAULT_RES_FD_ACI;
++
++ if (memcmp(&aci, &def_aci, sizeof(aci)))
++ err = rsbac_ta_list_add_ttl
++ (ta_number,
++ device_p->handles.res,
++ 0,
++ &tid_p->file.inode, &aci);
++ else
++ err =
++ rsbac_ta_list_remove(ta_number,
++ device_p->
++ handles.res,
++ &tid_p->file.
++ inode);
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_PAX)
++ case SW_PAX:
++ {
++ switch (attr) {
++ case A_pax_flags:
++ value_p->pax_flags &= RSBAC_PAX_ALL_FLAGS;
++ err = rsbac_ta_list_add_ttl(ta_number,
++ device_p->
++ handles.pax,
++ 0,
++ &tid_p->file.
++ inode,
++ &value_p->pax_flags);
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* PAX */
++
++ default:
++ err = -RSBAC_EINVALIDMODULE;
++ }
++ /* free access to device_list_head */
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++
++#ifdef CONFIG_RSBAC_FD_CACHE
++ if (fd_cache_handle[module]) {
++ if (target == T_DIR)
++ rsbac_list_lol_remove_all(fd_cache_handle[module]);
++ else {
++ struct rsbac_fd_cache_desc_t fd_desc;
++
++ fd_desc.device = tid_p->file.device;
++ fd_desc.inode = tid_p->file.inode;
++ rsbac_list_lol_remove(fd_cache_handle[module], &fd_desc);
++ }
++ }
++#endif
++
++ return err;
++}
++
++static int set_attr_dev(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ struct rsbac_dev_desc_t dev,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value_p)
++{
++ int err = 0;
++ /* rsbac_pr_debug(ds, "Setting dev attribute\n"); */
++ switch (module) {
++ case SW_GEN:
++ {
++ struct rsbac_gen_dev_aci_t aci =
++ DEFAULT_GEN_DEV_ACI;
++
++ if (dev.type > D_char)
++ return -RSBAC_EINVALIDTARGET;
++ rsbac_ta_list_get_data_ttl(ta_number,
++ dev_handles.gen,
++ NULL, &dev, &aci);
++ switch (attr) {
++ case A_log_array_low:
++ aci.log_array_low = value_p->log_array_low;
++ break;
++ case A_log_array_high:
++ aci.log_array_high =
++ value_p->log_array_high;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ dev_handles.
++ gen, 0, &dev,
++ &aci);
++ }
++ }
++ break;
++
++#if defined(CONFIG_RSBAC_MAC)
++ case SW_MAC:
++ {
++ struct rsbac_mac_dev_aci_t aci =
++ DEFAULT_MAC_DEV_ACI;
++
++ if (dev.type > D_char)
++ return -RSBAC_EINVALIDTARGET;
++ rsbac_ta_list_get_data_ttl(ta_number,
++ dev_handles.mac,
++ NULL, &dev, &aci);
++ switch (attr) {
++ case A_security_level:
++ aci.sec_level = value_p->security_level;
++ break;
++ case A_mac_categories:
++ aci.mac_categories =
++ value_p->mac_categories;
++ break;
++ case A_mac_check:
++ aci.mac_check = value_p->mac_check;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ dev_handles.
++ mac, 0, &dev,
++ &aci);
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_PM)
++ case SW_PM:
++ {
++ struct rsbac_pm_dev_aci_t aci = DEFAULT_PM_DEV_ACI;
++
++ if (dev.type > D_char)
++ return -RSBAC_EINVALIDTARGET;
++ rsbac_ta_list_get_data_ttl(ta_number,
++ dev_handles.pm,
++ NULL, &dev, &aci);
++ switch (attr) {
++ case A_pm_object_type:
++ aci.pm_object_type =
++ value_p->pm_object_type;
++ break;
++ case A_pm_object_class:
++ aci.pm_object_class =
++ value_p->pm_object_class;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ dev_handles.pm,
++ 0, &dev, &aci);
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_RC)
++ case SW_RC:
++ {
++ rsbac_rc_type_id_t type = value_p->rc_type;
++ struct rsbac_dev_desc_t dev_desc;
++ rsbac_list_handle_t handle;
++
++ dev_desc.major = dev.major;
++ dev_desc.minor = dev.minor;
++ switch (dev.type) {
++ case D_char:
++ dev_desc.type = D_char;
++ handle = dev_handles.rc;
++ break;
++ case D_block:
++ dev_desc.type = D_block;
++ handle = dev_handles.rc;
++ break;
++ case D_char_major:
++ if (type > RC_type_max_value)
++ return -RSBAC_EINVALIDVALUE;
++ dev_desc.type = D_char;
++ dev_desc.minor = 0;
++ handle = dev_major_handles.rc;
++ break;
++ case D_block_major:
++ if (type > RC_type_max_value)
++ return -RSBAC_EINVALIDVALUE;
++ dev_desc.type = D_block;
++ dev_desc.minor = 0;
++ handle = dev_major_handles.rc;
++ break;
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ switch (attr) {
++ case A_rc_type:
++ err = rsbac_ta_list_add_ttl(ta_number,
++ handle,
++ 0,
++ &dev_desc,
++ &type);
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif
++
++ default:
++ err = -RSBAC_EINVALIDMODULE;
++ }
++
++ return err;
++}
++
++static int set_attr_ipc(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t *tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value_p)
++{
++ int err = 0;
++ /* rsbac_pr_debug(ds, "Setting ipc attribute"); */
++ switch (module) {
++#if defined(CONFIG_RSBAC_MAC)
++ case SW_MAC:
++ {
++ struct rsbac_mac_ipc_aci_t aci =
++ DEFAULT_MAC_IPC_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ ipc_handles.mac,
++ NULL,
++ &tid_p->ipc, &aci);
++ switch (attr) {
++ case A_security_level:
++ aci.sec_level = value_p->security_level;
++ break;
++ case A_mac_categories:
++ aci.mac_categories =
++ value_p->mac_categories;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ ipc_handles.
++ mac, 0,
++ &tid_p->ipc,
++ &aci);
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_PM)
++ case SW_PM:
++ {
++ struct rsbac_pm_ipc_aci_t aci = DEFAULT_PM_IPC_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ ipc_handles.pm,
++ NULL,
++ &tid_p->ipc, &aci);
++ switch (attr) {
++ case A_pm_object_type:
++ aci.pm_object_type =
++ value_p->pm_object_type;
++ break;
++ case A_pm_ipc_purpose:
++ aci.pm_ipc_purpose =
++ value_p->pm_ipc_purpose;
++ break;
++ case A_pm_object_class:
++ aci.pm_object_class =
++ value_p->pm_object_class;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ ipc_handles.pm,
++ 0,
++ &tid_p->ipc,
++ &aci);
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_RC)
++ case SW_RC:
++ {
++ rsbac_rc_type_id_t type = value_p->rc_type;
++
++ switch (attr) {
++ case A_rc_type:
++ err = rsbac_ta_list_add_ttl(ta_number,
++ ipc_handles.rc,
++ 0,
++ &tid_p->ipc,
++ &type);
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_JAIL)
++ case SW_JAIL:
++ {
++ rsbac_jail_id_t id = value_p->jail_id;
++
++ switch (attr) {
++ case A_jail_id:
++/* if (id)
++ rsbac_pr_debug(aef,
++ "Setting jail_id for IPC "
++ "%s %lu to %u\n",
++ get_ipc_target_name(tmp,
++ tid_p->ipc.type),
++ tid_p->ipc.id.id_nr,
++ id); */
++ err = rsbac_ta_list_add_ttl(ta_number,
++ ipc_handles.
++ jail, 0,
++ &tid_p->ipc,
++ &id);
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif
++
++ default:
++ err = -RSBAC_EINVALIDMODULE;
++ }
++
++ return err;
++}
++
++static int set_attr_user(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t *tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value_p)
++{
++ int err = 0;
++ /* rsbac_pr_debug(ds, "Setting %s user attribute %i "
++ "for %u to %i\n",
++ get_switch_target_name(tmp, module), attr,
++ tid_p->user, value_p->dummy); */
++ switch (module) {
++ case SW_GEN:
++ {
++ struct rsbac_gen_user_aci_t aci =
++ DEFAULT_GEN_U_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.gen,
++ NULL,
++ &tid_p->user, &aci);
++ switch (attr) {
++ case A_pseudo:
++ aci.pseudo = value_p->pseudo;
++ break;
++ case A_log_user_based:
++ aci.log_user_based =
++ value_p->log_user_based;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ user_handles.
++ gen, 0,
++ &tid_p->user,
++ &aci);
++ }
++ }
++ break;
++
++#if defined(CONFIG_RSBAC_MAC)
++ case SW_MAC:
++ {
++ struct rsbac_mac_user_aci_t aci =
++ DEFAULT_MAC_U_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.mac,
++ NULL,
++ &tid_p->user, &aci);
++ switch (attr) {
++ case A_security_level:
++ if (value_p->security_level <
++ aci.min_security_level)
++ err = -RSBAC_EINVALIDVALUE;
++ else
++ aci.security_level =
++ value_p->security_level;
++ break;
++ case A_initial_security_level:
++ if ((value_p->security_level <
++ aci.min_security_level)
++ || (value_p->security_level >
++ aci.security_level)
++ )
++ err = -RSBAC_EINVALIDVALUE;
++ else
++ aci.initial_security_level =
++ value_p->security_level;
++ break;
++ case A_min_security_level:
++ if (value_p->security_level >
++ aci.security_level)
++ err = -RSBAC_EINVALIDVALUE;
++ else
++ aci.min_security_level =
++ value_p->security_level;
++ break;
++ case A_mac_categories:
++ if ((value_p->mac_categories & aci.
++ mac_min_categories) !=
++ aci.mac_min_categories)
++ err = -RSBAC_EINVALIDVALUE;
++ else
++ aci.mac_categories =
++ value_p->mac_categories;
++ break;
++ case A_mac_initial_categories:
++ if (((value_p->mac_categories & aci.
++ mac_min_categories) !=
++ aci.mac_min_categories)
++ ||
++ ((value_p->mac_categories & aci.
++ mac_categories) !=
++ value_p->mac_categories)
++ )
++ err = -RSBAC_EINVALIDVALUE;
++ else
++ aci.mac_initial_categories =
++ value_p->mac_categories;
++ break;
++ case A_mac_min_categories:
++ if ((value_p->mac_categories & aci.
++ mac_categories) !=
++ value_p->mac_categories)
++ err = -RSBAC_EINVALIDVALUE;
++ else
++ aci.mac_min_categories =
++ value_p->mac_categories;
++ break;
++ case A_system_role:
++ case A_mac_role:
++ aci.system_role = value_p->system_role;
++ break;
++ case A_mac_user_flags:
++ aci.mac_user_flags =
++ value_p->
++ mac_user_flags & RSBAC_MAC_U_FLAGS;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ user_handles.
++ mac, 0,
++ &tid_p->user,
++ &aci);
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_PM)
++ case SW_PM:
++ {
++ struct rsbac_pm_user_aci_t aci = DEFAULT_PM_U_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.pm,
++ NULL,
++ &tid_p->user, &aci);
++ switch (attr) {
++ case A_pm_task_set:
++ aci.pm_task_set = value_p->pm_task_set;
++ break;
++ case A_pm_role:
++ aci.pm_role = value_p->pm_role;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ user_handles.
++ pm, 0,
++ &tid_p->user,
++ &aci);
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_DAZ)
++ case SW_DAZ:
++ {
++ rsbac_system_role_int_t role =
++ value_p->system_role;
++
++ switch (attr) {
++ case A_system_role:
++ case A_daz_role:
++ err = rsbac_ta_list_add_ttl(ta_number,
++ user_handles.
++ daz, 0,
++ &tid_p->user,
++ &role);
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_FF)
++ case SW_FF:
++ {
++ rsbac_system_role_int_t role =
++ value_p->system_role;
++
++ switch (attr) {
++ case A_system_role:
++ case A_ff_role:
++ err = rsbac_ta_list_add_ttl(ta_number,
++ user_handles.
++ ff, 0,
++ &tid_p->user,
++ &role);
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_RC)
++ case SW_RC:
++ {
++ struct rsbac_rc_user_aci_t aci = DEFAULT_RC_U_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.rc,
++ NULL,
++ &tid_p->user, &aci);
++ switch (attr) {
++ case A_rc_def_role:
++ aci.rc_role = value_p->rc_def_role;
++ break;
++ case A_rc_type:
++ aci.rc_type = value_p->rc_type;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ user_handles.
++ rc, 0,
++ &tid_p->user,
++ &aci);
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH)
++ case SW_AUTH:
++ {
++ rsbac_system_role_int_t role =
++ value_p->system_role;
++
++ switch (attr) {
++ case A_system_role:
++ case A_auth_role:
++ err = rsbac_ta_list_add_ttl(ta_number,
++ user_handles.
++ auth, 0,
++ &tid_p->user,
++ &role);
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_CAP)
++ case SW_CAP:
++ {
++ struct rsbac_cap_user_aci_t aci =
++ DEFAULT_CAP_U_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.cap,
++ NULL,
++ &tid_p->user, &aci);
++ switch (attr) {
++ case A_system_role:
++ case A_cap_role:
++ aci.cap_role = value_p->system_role;
++ break;
++ case A_min_caps:
++ aci.min_caps.cap[0] = value_p->min_caps.cap[0];
++ aci.min_caps.cap[1] = value_p->min_caps.cap[1];
++ break;
++ case A_max_caps:
++ aci.max_caps.cap[0] = value_p->max_caps.cap[0];
++ aci.max_caps.cap[1] = value_p->max_caps.cap[1];
++ break;
++ case A_cap_ld_env:
++ aci.cap_ld_env = value_p->cap_ld_env;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ user_handles.
++ cap, 0,
++ &tid_p->user,
++ &aci);
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_JAIL)
++ case SW_JAIL:
++ {
++ rsbac_system_role_int_t role =
++ value_p->system_role;
++
++ switch (attr) {
++ case A_system_role:
++ case A_jail_role:
++ err = rsbac_ta_list_add_ttl(ta_number,
++ user_handles.
++ jail, 0,
++ &tid_p->user,
++ &role);
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_RES)
++ case SW_RES:
++ {
++ struct rsbac_res_user_aci_t aci =
++ DEFAULT_RES_U_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ user_handles.res,
++ NULL,
++ &tid_p->user, &aci);
++ switch (attr) {
++ case A_system_role:
++ case A_res_role:
++ aci.res_role = value_p->system_role;
++ break;
++ case A_res_min:
++ memcpy(&aci.res_min, &value_p->res_array,
++ sizeof(aci.res_min));
++ break;
++ case A_res_max:
++ memcpy(&aci.res_max, &value_p->res_array,
++ sizeof(aci.res_max));
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ struct rsbac_res_user_aci_t def_aci =
++ DEFAULT_RES_U_ACI;
++
++ if (tid_p->user != RSBAC_ALL_USERS) {
++ rsbac_uid_t all_users =
++ RSBAC_ALL_USERS;
++
++ rsbac_ta_list_get_data_ttl
++ (ta_number, user_handles.res,
++ NULL, &all_users, &def_aci);
++ }
++ if (memcmp(&aci, &def_aci, sizeof(aci)))
++ err =
++ rsbac_ta_list_add_ttl
++ (ta_number, user_handles.res,
++ 0, &tid_p->user, &aci);
++ else
++ err =
++ rsbac_ta_list_remove(ta_number,
++ user_handles.
++ res,
++ &tid_p->
++ user);
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_PAX)
++ case SW_PAX:
++ {
++ rsbac_system_role_int_t role =
++ value_p->system_role;
++
++ switch (attr) {
++ case A_system_role:
++ case A_pax_role:
++ err = rsbac_ta_list_add_ttl(ta_number,
++ user_handles.
++ pax, 0,
++ &tid_p->user,
++ &role);
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif
++
++ default:
++ err = -RSBAC_EINVALIDMODULE;
++ }
++
++ return err;
++}
++
++static int set_attr_process(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t *tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value_p)
++{
++ int err = 0;
++ /* rsbac_pr_debug(ds, "Setting process attribute\n"); */
++ if (!tid_p->process) {
++ rsbac_printk(KERN_WARNING "rsbac_set_attr(): Trying to set attribute for process 0!\n");
++ return -RSBAC_EINVALIDTARGET;
++ }
++ switch (module) {
++ case SW_GEN:
++ {
++ struct rsbac_gen_process_aci_t aci =
++ DEFAULT_GEN_P_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ process_handles.gen,
++ NULL, &tid_p->process,
++ &aci);
++ switch (attr) {
++ case A_vset:
++ aci.vset = value_p->vset;
++ break;
++ case A_log_program_based:
++ aci.log_program_based =
++ value_p->log_program_based;
++ break;
++ case A_fake_root_uid:
++ aci.fake_root_uid = value_p->fake_root_uid;
++ break;
++ case A_audit_uid:
++ aci.audit_uid = value_p->audit_uid;
++ break;
++ case A_auid_exempt:
++ aci.auid_exempt = value_p->auid_exempt;
++ break;
++ case A_remote_ip:
++ aci.remote_ip = value_p->remote_ip;
++ break;
++ case A_kernel_thread:
++ aci.kernel_thread = value_p->kernel_thread;
++ break;
++#if defined(CONFIG_RSBAC_AUTH_LEARN) || defined(CONFIG_RSBAC_CAP_LEARN)
++ case A_program_file:
++ aci.program_file =
++ value_p->program_file;
++ break;
++#endif
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ process_handles.gen,
++ 0,
++ &tid_p->
++ process, &aci);
++ }
++ }
++ break;
++
++#if defined(CONFIG_RSBAC_MAC)
++ case SW_MAC:
++ {
++ struct rsbac_mac_process_aci_t aci =
++ DEFAULT_MAC_P_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ process_handles.mac,
++ NULL, &tid_p->process,
++ &aci);
++ switch (attr) {
++ case A_security_level:
++ aci.owner_sec_level =
++ value_p->security_level;
++ break;
++ case A_initial_security_level:
++ aci.owner_initial_sec_level =
++ value_p->security_level;
++ break;
++ case A_min_security_level:
++ aci.owner_min_sec_level =
++ value_p->security_level;
++ break;
++ case A_mac_categories:
++ aci.mac_owner_categories =
++ value_p->mac_categories;
++ break;
++ case A_mac_initial_categories:
++ aci.mac_owner_initial_categories =
++ value_p->mac_categories;
++ break;
++ case A_mac_min_categories:
++ aci.mac_owner_min_categories =
++ value_p->mac_categories;
++ break;
++ case A_current_sec_level:
++ aci.current_sec_level =
++ value_p->current_sec_level;
++ break;
++ case A_mac_curr_categories:
++ aci.mac_curr_categories =
++ value_p->mac_categories;
++ break;
++ case A_min_write_open:
++ aci.min_write_open =
++ value_p->min_write_open;
++ break;
++ case A_min_write_categories:
++ aci.min_write_categories =
++ value_p->mac_categories;
++ break;
++ case A_max_read_open:
++ aci.max_read_open = value_p->max_read_open;
++ break;
++ case A_max_read_categories:
++ aci.max_read_categories =
++ value_p->mac_categories;
++ break;
++ case A_mac_process_flags:
++ aci.mac_process_flags =
++ value_p->
++ mac_process_flags & RSBAC_MAC_P_FLAGS;
++ break;
++ case A_mac_auto:
++ if (value_p->mac_auto)
++ aci.mac_process_flags |= MAC_auto;
++ else
++ aci.mac_process_flags &= ~MAC_auto;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ process_handles.mac,
++ 0,
++ &tid_p->
++ process, &aci);
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_PM)
++ case SW_PM:
++ {
++ struct rsbac_pm_process_aci_t aci =
++ DEFAULT_PM_P_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ process_handles.pm,
++ NULL,
++ &tid_p->process, &aci);
++ switch (attr) {
++ case A_pm_tp:
++ aci.pm_tp = value_p->pm_tp;
++ break;
++ case A_pm_current_task:
++ aci.pm_current_task =
++ value_p->pm_current_task;
++ break;
++ case A_pm_process_type:
++ aci.pm_process_type =
++ value_p->pm_process_type;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ process_handles.
++ pm, 0,
++ &tid_p->
++ process, &aci);
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_DAZ)
++ case SW_DAZ:
++ {
++ struct rsbac_daz_process_aci_t aci =
++ DEFAULT_DAZ_P_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ process_handles.daz,
++ NULL,
++ &tid_p->process, &aci);
++ switch (attr) {
++ case A_daz_scanner:
++ aci.daz_scanner = value_p->daz_scanner;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ process_handles.
++ daz, 0,
++ &tid_p->
++ process, &aci);
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_RC)
++ case SW_RC:
++ {
++ struct rsbac_rc_process_aci_t aci =
++ DEFAULT_RC_P_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ process_handles.rc,
++ NULL, &tid_p->process,
++ &aci);
++ switch (attr) {
++ case A_rc_role:
++ aci.rc_role = value_p->rc_role;
++ break;
++ case A_rc_type:
++ aci.rc_type = value_p->rc_type;
++ break;
++ case A_rc_select_type:
++ aci.rc_select_type = value_p->rc_select_type;
++ break;
++ case A_rc_force_role:
++ aci.rc_force_role = value_p->rc_force_role;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ process_handles.rc,
++ 0,
++ &tid_p->
++ process, &aci);
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH)
++ case SW_AUTH:
++ {
++ struct rsbac_auth_process_aci_t aci =
++ DEFAULT_AUTH_P_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ process_handles.auth,
++ NULL,
++ &tid_p->process, &aci);
++ switch (attr) {
++ case A_auth_may_setuid:
++ aci.auth_may_setuid =
++ value_p->auth_may_setuid;
++ break;
++ case A_auth_may_set_cap:
++ aci.auth_may_set_cap =
++ value_p->auth_may_set_cap;
++ break;
++#if defined(CONFIG_RSBAC_AUTH_LEARN)
++ case A_auth_start_uid:
++ aci.auth_start_uid =
++ value_p->auth_start_uid;
++ break;
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ case A_auth_start_euid:
++ aci.auth_start_euid =
++ value_p->auth_start_euid;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ case A_auth_start_gid:
++ aci.auth_start_gid =
++ value_p->auth_start_gid;
++ break;
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ case A_auth_start_egid:
++ aci.auth_start_egid =
++ value_p->auth_start_egid;
++ break;
++#endif
++#endif
++ case A_auth_learn:
++ aci.auth_learn = value_p->auth_learn;
++ break;
++#endif
++ case A_auth_last_auth:
++ aci.auth_last_auth =
++ value_p->auth_last_auth;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ process_handles.auth,
++ 0,
++ &tid_p->process,
++ &aci);
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_CAP)
++ case SW_CAP:
++ {
++ struct rsbac_cap_process_aci_t aci =
++ DEFAULT_CAP_P_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ process_handles.cap,
++ NULL,
++ &tid_p->process, &aci);
++ switch (attr) {
++ case A_cap_process_hiding:
++ aci.cap_process_hiding =
++ value_p->cap_process_hiding;
++ break;
++#if defined(CONFIG_RSBAC_CAP_LOG_MISSING) || defined(CONFIG_RSBAC_CAP_LEARN)
++ case A_max_caps_user:
++ aci.max_caps_user.cap[0] = value_p->max_caps_user.cap[0];
++ aci.max_caps_user.cap[1] = value_p->max_caps_user.cap[1];
++ break;
++			case A_max_caps_program:
++				aci.max_caps_program.cap[0] = value_p->max_caps_program.cap[0];
++				aci.max_caps_program.cap[1] = value_p->max_caps_program.cap[1];
++				break;
++#endif
++ case A_cap_ld_env:
++ aci.cap_ld_env = value_p->cap_ld_env;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ process_handles.
++ cap, 0,
++ &tid_p->
++ process, &aci);
++ }
++ }
++ break;
++#endif
++
++#if defined(CONFIG_RSBAC_JAIL)
++ case SW_JAIL:
++ {
++ struct rsbac_jail_process_aci_t aci =
++ DEFAULT_JAIL_P_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ process_handles.jail,
++ NULL, &tid_p->process,
++ &aci);
++ switch (attr) {
++ case A_jail_id:
++ aci.id = value_p->jail_id;
++ break;
++ case A_jail_parent:
++ aci.parent = value_p->jail_parent;
++ break;
++ case A_jail_ip:
++ aci.ip = value_p->jail_ip;
++ break;
++ case A_jail_flags:
++ aci.flags = value_p->jail_flags;
++ break;
++ case A_jail_max_caps:
++ aci.max_caps.cap[0] = value_p->jail_max_caps.cap[0];
++ aci.max_caps.cap[1] = value_p->jail_max_caps.cap[1];
++ break;
++ case A_jail_scd_get:
++ aci.scd_get = value_p->jail_scd_get;
++ break;
++ case A_jail_scd_modify:
++ aci.scd_modify = value_p->jail_scd_modify;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ process_handles.jail,
++ 0,
++ &tid_p->
++ process, &aci);
++ }
++ }
++ break;
++#endif
++
++ default:
++ err = -RSBAC_EINVALIDMODULE;
++ }
++
++ return err;
++}
++
++#ifdef CONFIG_RSBAC_UM
++static int set_attr_group(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t *tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value_p)
++{
++ int err = 0;
++ /* rsbac_pr_debug(ds, "Setting group attribute\n"); */
++ switch (module) {
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ case SW_RC:
++ {
++ rsbac_rc_type_id_t type = value_p->rc_type;
++ rsbac_gid_t group_desc;
++
++ group_desc = tid_p->group;
++
++ switch (attr) {
++ case A_rc_type:
++ err = rsbac_ta_list_add_ttl(ta_number,
++ group_handles.
++ rc, 0,
++ &group_desc,
++ &type);
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif
++
++ default:
++ err = -RSBAC_EINVALIDMODULE;
++ }
++
++ return err;
++}
++#endif /* UM */
++
++#ifdef CONFIG_RSBAC_NET_DEV
++static int set_attr_netdev(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t *tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value_p)
++{
++ int err = 0;
++ /* rsbac_pr_debug(ds, "Setting netdev attribute\n"); */
++ switch (module) {
++#if defined(CONFIG_RSBAC_IND_NETDEV_LOG)
++ case SW_GEN:
++ {
++ struct rsbac_gen_netdev_aci_t aci =
++ DEFAULT_GEN_NETDEV_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ netdev_handles.gen,
++ NULL,
++ &tid_p->netdev, &aci);
++ switch (attr) {
++ case A_log_array_low:
++ aci.log_array_low = value_p->log_array_low;
++ break;
++ case A_log_array_high:
++ aci.log_array_high =
++ value_p->log_array_high;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ netdev_handles.
++ gen, 0,
++ &tid_p->netdev,
++ &aci);
++ }
++ }
++ break;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ case SW_RC:
++ {
++ rsbac_rc_type_id_t type = value_p->rc_type;
++
++ switch (attr) {
++ case A_rc_type:
++ err = rsbac_ta_list_add_ttl(ta_number,
++ netdev_handles.
++ rc, 0,
++ &tid_p->netdev,
++ &type);
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif
++
++ default:
++ err = -RSBAC_EINVALIDMODULE;
++ }
++
++ return err;
++}
++#endif
++
++#ifdef CONFIG_RSBAC_NET_OBJ
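++/*
++ * Set an attribute on a network template.  The template must already exist
++ * in the global template list, otherwise -RSBAC_EINVALIDTARGET is returned.
++ */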
++static int set_attr_nettemp(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t *tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value_p)
++{
++ int err = 0;
++ /* rsbac_pr_debug(ds, "Setting nettemp attribute\n"); */
++ if (!rsbac_ta_list_exist(ta_number, net_temp_handle, &tid_p->nettemp))
++ return -RSBAC_EINVALIDTARGET;
++ switch (module) {
++#if defined(CONFIG_RSBAC_IND_NETOBJ_LOG)
++ case SW_GEN:
++ {
++ struct rsbac_gen_netobj_aci_t aci =
++ DEFAULT_GEN_NETOBJ_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ nettemp_handles.gen,
++ NULL,
++ &tid_p->nettemp, &aci);
++ switch (attr) {
++ case A_log_array_low:
++ aci.log_array_low = value_p->log_array_low;
++ break;
++ case A_log_array_high:
++ aci.log_array_high =
++ value_p->log_array_high;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ nettemp_handles.
++ gen, 0,
++ &tid_p->
++ nettemp, &aci);
++ }
++ }
++ break;
++#endif /* IND_NETOBJ_LOG */
++#if defined(CONFIG_RSBAC_MAC)
++ case SW_MAC:
++ {
++ struct rsbac_mac_netobj_aci_t aci =
++ DEFAULT_MAC_NETOBJ_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ nettemp_handles.mac,
++ NULL,
++ &tid_p->nettemp, &aci);
++ switch (attr) {
++ case A_security_level:
++ aci.sec_level = value_p->security_level;
++ break;
++ case A_mac_categories:
++ aci.mac_categories =
++ value_p->mac_categories;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ nettemp_handles.
++ mac, 0,
++ &tid_p->
++ nettemp, &aci);
++ }
++ }
++ break;
++#endif /* MAC */
++
++#if defined(CONFIG_RSBAC_PM)
++ case SW_PM:
++ {
++ struct rsbac_pm_netobj_aci_t aci =
++ DEFAULT_PM_NETOBJ_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ nettemp_handles.pm,
++ NULL,
++ &tid_p->nettemp, &aci);
++ switch (attr) {
++ case A_pm_object_class:
++ aci.pm_object_class =
++ value_p->pm_object_class;
++ break;
++ case A_pm_ipc_purpose:
++ aci.pm_ipc_purpose =
++ value_p->pm_ipc_purpose;
++ break;
++ case A_pm_object_type:
++ aci.pm_object_type =
++ value_p->pm_object_type;
++ break;
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ nettemp_handles.
++ pm, 0,
++ &tid_p->
++ nettemp, &aci);
++ }
++ }
++ break;
++#endif /* PM */
++
++#if defined(CONFIG_RSBAC_RC)
++ case SW_RC:
++ {
++ struct rsbac_rc_nettemp_aci_t aci =
++ DEFAULT_RC_NETTEMP_ACI;
++
++ rsbac_ta_list_get_data_ttl(ta_number,
++ nettemp_handles.rc,
++ NULL,
++ &tid_p->nettemp, &aci);
++ switch (attr) {
++ case A_rc_type:
++ aci.netobj_type = value_p->rc_type;
++ break;
++ case A_rc_type_nt:
++ aci.nettemp_type = value_p->rc_type;
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (!err) {
++ err = rsbac_ta_list_add_ttl(ta_number,
++ nettemp_handles.
++ rc, 0,
++ &tid_p->
++ nettemp, &aci);
++ }
++ }
++ break;
++#endif /* RC */
++
++ default:
++ err = -RSBAC_EINVALIDMODULE;
++ }
++
++ return err;
++}
++
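++/*
++ * NETOBJ attributes are stored per socket in separate local and remote
++ * lists.  For MAC and PM the current values are first looked up there and,
++ * if the socket has no entry yet, inherited from the matching network
++ * template before the requested field is modified and written back.
++ */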
++static int set_attr_netobj(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t *tid_p,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *value_p)
++{
++ int err = 0;
++ /* rsbac_pr_debug(ds, "Setting netobj attribute\n"); */
++ switch (module) {
++#if defined(CONFIG_RSBAC_MAC)
++ case SW_MAC:
++ {
++ struct rsbac_mac_netobj_aci_t aci =
++ DEFAULT_MAC_NETOBJ_ACI;
++
++ switch (attr) {
++ case A_local_sec_level:
++ case A_local_mac_categories:
++ if (rsbac_ta_list_get_data_ttl(ta_number, lnetobj_handles.mac, NULL, &tid_p->netobj.sock_p, &aci)) { /* not found -> fallback to template */
++ rsbac_net_temp_id_t temp = 0;
++
++ rsbac_ta_net_lookup_templates
++ (ta_number, &tid_p->netobj,
++ &temp, NULL);
++ if (temp)
++ rsbac_ta_list_get_data_ttl
++ (ta_number,
++ nettemp_handles.mac,
++ NULL, &temp, &aci);
++ }
++ break;
++
++ case A_remote_sec_level:
++ case A_remote_mac_categories:
++ if (rsbac_ta_list_get_data_ttl(ta_number, rnetobj_handles.mac, NULL, &tid_p->netobj.sock_p, &aci)) { /* not found -> fallback to template */
++ rsbac_net_temp_id_t temp = 0;
++
++ rsbac_ta_net_lookup_templates
++ (ta_number, &tid_p->netobj,
++ NULL, &temp);
++ if (temp)
++ rsbac_ta_list_get_data_ttl
++ (ta_number,
++ nettemp_handles.mac,
++ NULL, &temp, &aci);
++ }
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (err)
++ break;
++ {
++ switch (attr) {
++ case A_local_sec_level:
++ aci.sec_level =
++ value_p->security_level;
++ err =
++ rsbac_ta_list_add_ttl
++ (ta_number,
++ lnetobj_handles.mac, 0,
++ &tid_p->netobj.sock_p, &aci);
++ break;
++ case A_remote_sec_level:
++ aci.sec_level =
++ value_p->security_level;
++ err =
++ rsbac_ta_list_add_ttl
++ (ta_number,
++ rnetobj_handles.mac, 0,
++ &tid_p->netobj.sock_p, &aci);
++ break;
++ case A_local_mac_categories:
++ aci.mac_categories =
++ value_p->mac_categories;
++ err =
++ rsbac_ta_list_add_ttl
++ (ta_number,
++ lnetobj_handles.mac, 0,
++ &tid_p->netobj.sock_p, &aci);
++ break;
++ case A_remote_mac_categories:
++ aci.mac_categories =
++ value_p->mac_categories;
++ err =
++ rsbac_ta_list_add_ttl
++ (ta_number,
++ rnetobj_handles.mac, 0,
++ &tid_p->netobj.sock_p, &aci);
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ }
++ break;
++#endif /* MAC */
++
++#if defined(CONFIG_RSBAC_PM)
++ case SW_PM:
++ {
++ struct rsbac_pm_netobj_aci_t aci =
++ DEFAULT_PM_NETOBJ_ACI;
++
++ switch (attr) {
++ case A_local_pm_object_class:
++ case A_local_pm_ipc_purpose:
++ case A_local_pm_object_type:
++ if (rsbac_ta_list_get_data_ttl(ta_number, lnetobj_handles.pm, NULL, &tid_p->netobj.sock_p, &aci)) { /* not found -> fallback to template */
++ rsbac_net_temp_id_t temp = 0;
++
++ rsbac_ta_net_lookup_templates
++ (ta_number, &tid_p->netobj,
++ &temp, NULL);
++ if (temp)
++ rsbac_ta_list_get_data_ttl
++ (ta_number,
++ nettemp_handles.pm,
++ NULL, &temp, &aci);
++ }
++ break;
++
++ case A_remote_pm_object_class:
++ case A_remote_pm_ipc_purpose:
++ case A_remote_pm_object_type:
++ if (rsbac_ta_list_get_data_ttl(ta_number, rnetobj_handles.pm, NULL, &tid_p->netobj.sock_p, &aci)) { /* not found -> fallback to template */
++ rsbac_net_temp_id_t temp = 0;
++
++ rsbac_ta_net_lookup_templates
++ (ta_number, &tid_p->netobj,
++ NULL, &temp);
++ if (temp)
++ rsbac_ta_list_get_data_ttl
++ (ta_number,
++ nettemp_handles.pm,
++ NULL, &temp, &aci);
++ }
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ if (err)
++ break;
++ {
++ switch (attr) {
++ case A_local_pm_object_class:
++ aci.pm_object_class =
++ value_p->pm_object_class;
++ err =
++ rsbac_ta_list_add_ttl
++ (ta_number, lnetobj_handles.pm,
++ 0, &tid_p->netobj.sock_p,
++ &aci);
++ break;
++ case A_remote_pm_object_class:
++ aci.pm_object_class =
++ value_p->pm_object_class;
++ err =
++ rsbac_ta_list_add_ttl
++ (ta_number, rnetobj_handles.pm,
++ 0, &tid_p->netobj.sock_p,
++ &aci);
++ break;
++ case A_local_pm_ipc_purpose:
++ aci.pm_ipc_purpose =
++ value_p->pm_ipc_purpose;
++ err =
++ rsbac_ta_list_add_ttl
++ (ta_number, lnetobj_handles.pm,
++ 0, &tid_p->netobj.sock_p,
++ &aci);
++ break;
++ case A_remote_pm_ipc_purpose:
++ aci.pm_ipc_purpose =
++ value_p->pm_ipc_purpose;
++ err =
++ rsbac_ta_list_add_ttl
++ (ta_number, rnetobj_handles.pm,
++ 0, &tid_p->netobj.sock_p,
++ &aci);
++ break;
++ case A_local_pm_object_type:
++ aci.pm_object_type =
++ value_p->pm_object_type;
++ err =
++ rsbac_ta_list_add_ttl
++ (ta_number, lnetobj_handles.pm,
++ 0, &tid_p->netobj.sock_p,
++ &aci);
++ break;
++ case A_remote_pm_object_type:
++ aci.pm_object_type =
++ value_p->pm_object_type;
++ err =
++ rsbac_ta_list_add_ttl
++ (ta_number, rnetobj_handles.pm,
++ 0, &tid_p->netobj.sock_p,
++ &aci);
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ }
++ break;
++#endif /* PM */
++
++#if defined(CONFIG_RSBAC_RC)
++ case SW_RC:
++ {
++ rsbac_rc_type_id_t type = value_p->rc_type;
++
++ switch (attr) {
++ case A_local_rc_type:
++ err = rsbac_ta_list_add_ttl(ta_number,
++ lnetobj_handles.
++ rc, 0,
++ &tid_p->netobj.
++ sock_p, &type);
++ break;
++
++ case A_remote_rc_type:
++ err = rsbac_ta_list_add_ttl(ta_number,
++ rnetobj_handles.
++ rc, 0,
++ &tid_p->netobj.
++ sock_p, &type);
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ }
++ break;
++#endif /* RC */
++
++ default:
++ err = -RSBAC_EINVALIDMODULE;
++ }
++
++ return err;
++}
++#endif /* NET_OBJ */
++
++
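++/*
++ * Generic attribute setter: dispatches on the target type to the matching
++ * set_attr_*() helper above.  Must not be called before RSBAC is
++ * initialized or from interrupt context.
++ */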
++int rsbac_ta_set_attr(rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t value)
++{
++ int err = 0;
++/*
++#ifdef CONFIG_RSBAC_DEBUG
++ char tmp[RSBAC_MAXNAMELEN];
++#endif
++*/
++ if (!rsbac_initialized) {
++ rsbac_printk(KERN_WARNING "rsbac_set_attr(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_set_attr(): called from interrupt, process %u(%s)!\n",
++ current->pid, current->comm);
++ return -RSBAC_EFROMINTERRUPT;
++ }
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ err = set_attr_fd(ta_number, module, target, &tid, attr, &value);
++ break;
++
++ case T_DEV:
++ err =
++ set_attr_dev(ta_number, module, target, tid.dev, attr,
++ &value);
++ break;
++
++ case T_IPC:
++ err =
++ set_attr_ipc(ta_number, module, target, &tid, attr,
++ &value);
++ break;
++
++ case T_USER:
++ err =
++ set_attr_user(ta_number, module, target, &tid, attr,
++ &value);
++ break;
++
++ case T_PROCESS:
++ err =
++ set_attr_process(ta_number, module, target, &tid, attr,
++ &value);
++ break;
++
++#ifdef CONFIG_RSBAC_UM
++ case T_GROUP:
++ err =
++ set_attr_group(ta_number, module, target, &tid, attr,
++ &value);
++ break;
++#endif /* CONFIG_RSBAC_UM */
++
++#ifdef CONFIG_RSBAC_NET_DEV
++ case T_NETDEV:
++ err =
++ set_attr_netdev(ta_number, module, target, &tid, attr,
++ &value);
++ break;
++#endif
++
++#ifdef CONFIG_RSBAC_NET_OBJ
++ case T_NETTEMP:
++ err =
++ set_attr_nettemp(ta_number, module, target, &tid, attr,
++ &value);
++ break;
++
++ case T_NETOBJ:
++ err =
++ set_attr_netobj(ta_number, module, target, &tid, attr,
++ &value);
++ break;
++#endif /* NET_OBJ */
++
++ /* switch(target): no valid target */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++#ifdef CONFIG_RSBAC_XSTATS
++ if (!err)
++ set_attr_count[target]++;
++#endif
++ return err;
++}
++
++/************************************************************************** */
++
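++/*
++ * Remove every attribute entry that any enabled module keeps for the given
++ * target, including attached ACLs, MAC trusted user sets and AUTH
++ * capability sets where applicable, e.g. when the underlying object
++ * disappears.
++ */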
++int rsbac_ta_remove_target(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid)
++{
++ int error = 0;
++ struct rsbac_device_list_item_t *device_p;
++ u_int hash;
++ int srcu_idx;
++
++ if (!rsbac_initialized) {
++ // rsbac_printk(KERN_WARNING "rsbac_remove_target(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_remove_target(): called from interrupt!\n");
++ return -RSBAC_EFROMINTERRUPT;
++ }
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ /* rsbac_pr_debug(ds, "Removing file/dir/fifo/symlink ACI\n"); */
++#if defined(CONFIG_RSBAC_MAC)
++ /* file and dir items can also have mac_f_trusets -> remove first */
++ if ((target == T_FILE)
++ || (target == T_DIR)
++ )
++ error = rsbac_mac_remove_f_trusets(tid.file);
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ /* file and dir items can also have auth_f_capsets -> remove first */
++ if ((target == T_FILE)
++ || (target == T_DIR)
++ )
++ error = rsbac_auth_remove_f_capsets(tid.file);
++#endif
++#if defined(CONFIG_RSBAC_ACL)
++ /* items can also have an acl_fd_item -> remove first */
++ error = rsbac_acl_remove_acl(ta_number, target, tid);
++#endif
++ hash = device_hash(tid.file.device);
++ /* wait for read access to device_list_head */
++ srcu_idx = srcu_read_lock(&device_list_srcu[hash]);
++
++ /* lookup device */
++ device_p = lookup_device(tid.file.device, hash);
++ if (!device_p) {
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_remove_target(): unknown device %02u:%02u\n",
++ RSBAC_MAJOR(tid.file.
++ device),
++ RSBAC_MINOR(tid.file.
++ device));
++ return -RSBAC_EINVALIDDEV;
++ }
++ rsbac_ta_list_remove(ta_number,
++ device_p->handles.gen,
++ &tid.file.inode);
++#if defined(CONFIG_RSBAC_MAC)
++ rsbac_ta_list_remove(ta_number,
++ device_p->handles.mac,
++ &tid.file.inode);
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ rsbac_ta_list_remove(ta_number,
++ device_p->handles.pm,
++ &tid.file.inode);
++#endif
++#if defined(CONFIG_RSBAC_DAZ)
++ rsbac_ta_list_remove(ta_number,
++ device_p->handles.daz,
++ &tid.file.inode);
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++ rsbac_ta_list_remove(ta_number,
++ device_p->handles.dazs,
++ &tid.file.inode);
++#endif
++#endif
++#if defined(CONFIG_RSBAC_FF)
++ rsbac_ta_list_remove(ta_number,
++ device_p->handles.ff,
++ &tid.file.inode);
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ rsbac_ta_list_remove(ta_number,
++ device_p->handles.rc,
++ &tid.file.inode);
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ rsbac_ta_list_remove(ta_number,
++ device_p->handles.auth,
++ &tid.file.inode);
++#endif
++#if defined(CONFIG_RSBAC_CAP)
++ rsbac_ta_list_remove(ta_number,
++ device_p->handles.cap,
++ &tid.file.inode);
++#endif
++#if defined(CONFIG_RSBAC_PAX)
++ rsbac_ta_list_remove(ta_number,
++ device_p->handles.pax,
++ &tid.file.inode);
++#endif
++#if defined(CONFIG_RSBAC_RES)
++ rsbac_ta_list_remove(ta_number,
++ device_p->handles.res,
++ &tid.file.inode);
++#endif
++
++ /* free access to device_list_head */
++ srcu_read_unlock(&device_list_srcu[hash], srcu_idx);
++#ifdef CONFIG_RSBAC_FD_CACHE
++ rsbac_fd_cache_invalidate(&tid.file);
++#endif
++ break;
++
++ case T_DEV:
++ {
++ switch (tid.dev.type)
++ {
++ case D_block:
++ case D_char:
++ rsbac_ta_list_remove(ta_number,
++ dev_handles.gen,
++ &tid.dev);
++#if defined(CONFIG_RSBAC_MAC)
++ rsbac_ta_list_remove(ta_number,
++ dev_handles.mac,
++ &tid.dev);
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ rsbac_ta_list_remove(ta_number,
++ dev_handles.pm,
++ &tid.dev);
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ rsbac_ta_list_remove(ta_number,
++ dev_handles.rc,
++ &tid.dev);
++#endif
++ break;
++ case D_block_major:
++ case D_char_major:
++ {
++ enum rsbac_dev_type_t orig_devtype=tid.dev.type;
++
++ if (tid.dev.type==D_block_major)
++ tid.dev.type=D_block;
++ else
++ tid.dev.type=D_char;
++ rsbac_ta_list_remove(ta_number,
++ dev_major_handles.gen,
++ &tid.dev);
++#if defined(CONFIG_RSBAC_MAC)
++ rsbac_ta_list_remove(ta_number,
++ dev_major_handles.mac,
++ &tid.dev);
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ rsbac_ta_list_remove(ta_number,
++ dev_major_handles.pm,
++ &tid.dev);
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ rsbac_ta_list_remove(ta_number,
++ dev_major_handles.rc,
++ &tid.dev);
++#endif
++ tid.dev.type=orig_devtype;
++ break;
++ }
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++ }
++ break;
++
++ case T_IPC:
++ /* rsbac_pr_debug(ds, "Removing ipc ACI\n"); */
++#if defined(CONFIG_RSBAC_MAC)
++ rsbac_ta_list_remove(ta_number, ipc_handles.mac, &tid.ipc);
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ rsbac_ta_list_remove(ta_number, ipc_handles.pm, &tid.ipc);
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ rsbac_ta_list_remove(ta_number, ipc_handles.rc, &tid.ipc);
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ rsbac_ta_list_remove(ta_number,
++ ipc_handles.jail, &tid.ipc);
++#endif
++ break;
++
++ case T_USER:
++		/* rsbac_pr_debug(ds, "Removing user ACI\n"); */
++ rsbac_ta_list_remove(ta_number,
++ user_handles.gen, &tid.user);
++#if defined(CONFIG_RSBAC_MAC)
++ rsbac_ta_list_remove(ta_number,
++ user_handles.mac, &tid.user);
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ rsbac_ta_list_remove(ta_number,
++ user_handles.pm, &tid.user);
++#endif
++#if defined(CONFIG_RSBAC_DAZ)
++ rsbac_ta_list_remove(ta_number,
++ user_handles.daz, &tid.user);
++#endif
++#if defined(CONFIG_RSBAC_FF)
++ rsbac_ta_list_remove(ta_number,
++ user_handles.ff, &tid.user);
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ rsbac_ta_list_remove(ta_number,
++ user_handles.rc, &tid.user);
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ rsbac_ta_list_remove(ta_number,
++ user_handles.auth, &tid.user);
++#endif
++#if defined(CONFIG_RSBAC_CAP)
++ rsbac_ta_list_remove(ta_number,
++ user_handles.cap, &tid.user);
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ rsbac_ta_list_remove(ta_number,
++ user_handles.jail, &tid.user);
++#endif
++#if defined(CONFIG_RSBAC_PAX)
++ rsbac_ta_list_remove(ta_number,
++ user_handles.pax, &tid.user);
++#endif
++#if defined(CONFIG_RSBAC_RES)
++ rsbac_ta_list_remove(ta_number,
++ user_handles.res, &tid.user);
++#endif
++ break;
++
++ case T_PROCESS:
++/* too noisy... kicked out.
++ rsbac_pr_debug(ds, "Removing process ACI\n");
++*/
++#if defined(CONFIG_RSBAC_ACL)
++ /* process items can also have an acl_p_item -> remove first */
++ error = rsbac_acl_remove_acl(ta_number, target, tid);
++#endif
++ rsbac_ta_list_remove(ta_number,
++ process_handles.gen,
++ &tid.process);
++#if defined(CONFIG_RSBAC_MAC)
++ /* process items can also have mac_p_trusets -> remove first */
++ error = rsbac_mac_remove_p_trusets(tid.process);
++ rsbac_ta_list_remove(ta_number,
++ process_handles.mac,
++ &tid.process);
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ rsbac_ta_list_remove(ta_number,
++ process_handles.pm, &tid.process);
++#endif
++#if defined(CONFIG_RSBAC_DAZ)
++ rsbac_ta_list_remove(ta_number,
++ process_handles.daz, &tid.process);
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ rsbac_ta_list_remove(ta_number,
++ process_handles.rc,
++ &tid.process);
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ /* process items can also have auth_p_capsets -> remove first */
++ error = rsbac_auth_remove_p_capsets(tid.process);
++ rsbac_ta_list_remove(ta_number,
++ process_handles.auth, &tid.process);
++#endif
++#if defined(CONFIG_RSBAC_CAP)
++ rsbac_ta_list_remove(ta_number,
++ process_handles.cap, &tid.process);
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ rsbac_ta_list_remove(ta_number,
++ process_handles.jail,
++ &tid.process);
++#endif
++ break;
++
++#ifdef CONFIG_RSBAC_UM
++ case T_GROUP:
++ /* rsbac_pr_debug(ds, "Removing group ACI\n"); */
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ rsbac_ta_list_remove(ta_number,
++ group_handles.rc, &tid.group);
++#endif
++ break;
++#endif /* CONFIG_RSBAC_UM */
++
++#ifdef CONFIG_RSBAC_NET_DEV
++ case T_NETDEV:
++#if defined(CONFIG_RSBAC_IND_NETDEV_LOG)
++ rsbac_ta_list_remove(ta_number,
++ netdev_handles.gen, &tid.netdev);
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ rsbac_ta_list_remove(ta_number,
++ netdev_handles.rc, &tid.netdev);
++#endif
++ break;
++#endif
++
++#ifdef CONFIG_RSBAC_NET_OBJ
++ case T_NETTEMP:
++/* too noisy... kicked out.
++ rsbac_pr_debug(ds, "Removing nettemp ACI\n");
++*/
++#if defined(CONFIG_RSBAC_IND_NETOBJ_LOG)
++ rsbac_ta_list_remove(ta_number,
++ nettemp_handles.gen, &tid.nettemp);
++#endif
++#if defined(CONFIG_RSBAC_MAC)
++ rsbac_ta_list_remove(ta_number,
++ nettemp_handles.mac, &tid.nettemp);
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ rsbac_ta_list_remove(ta_number,
++ nettemp_handles.pm, &tid.nettemp);
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ rsbac_ta_list_remove(ta_number,
++ nettemp_handles.rc, &tid.nettemp);
++#endif
++#if defined(CONFIG_RSBAC_ACL_NET_OBJ_PROT)
++ rsbac_acl_remove_acl(ta_number, T_NETTEMP_NT, tid);
++ rsbac_acl_remove_acl(ta_number, T_NETTEMP, tid);
++#endif
++ break;
++
++ case T_NETOBJ:
++/* too noisy... kicked out.
++ rsbac_pr_debug(ds, "Removing netobj ACI\n");
++*/
++#if defined(CONFIG_RSBAC_MAC)
++ rsbac_ta_list_remove(ta_number,
++ lnetobj_handles.mac,
++ &tid.netobj.sock_p);
++ rsbac_ta_list_remove(ta_number,
++ rnetobj_handles.mac,
++ &tid.netobj.sock_p);
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ rsbac_ta_list_remove(ta_number,
++ lnetobj_handles.pm,
++ &tid.netobj.sock_p);
++ rsbac_ta_list_remove(ta_number,
++ rnetobj_handles.pm,
++ &tid.netobj.sock_p);
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ rsbac_ta_list_remove(ta_number,
++ lnetobj_handles.rc,
++ &tid.netobj.sock_p);
++ rsbac_ta_list_remove(ta_number,
++ rnetobj_handles.rc,
++ &tid.netobj.sock_p);
++#endif
++ break;
++
++#endif
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++#ifdef CONFIG_RSBAC_XSTATS
++ remove_count[target]++;
++#endif
++ return error;
++}
++EXPORT_SYMBOL(rsbac_ta_remove_target);
++
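++/*
++ * Collect the descriptors of all DEV targets known to any enabled module
++ * list (GEN, MAC, PM, RC), skipping duplicates.  If id_pp is given, a
++ * freshly allocated array is returned through it and must be freed by the
++ * caller with rsbac_kfree(); the return value is the number of entries.
++ */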
++int rsbac_ta_list_all_dev(rsbac_list_ta_number_t ta_number,
++ struct rsbac_dev_desc_t **id_pp)
++{
++ int count = 0;
++ int tmp_count;
++
++ tmp_count = rsbac_ta_list_count(ta_number, dev_handles.gen);
++ if (tmp_count > 0)
++ count += tmp_count;
++#if defined(CONFIG_RSBAC_MAC)
++ tmp_count = rsbac_ta_list_count(ta_number, dev_handles.mac);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ tmp_count = rsbac_ta_list_count(ta_number, dev_handles.pm);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_ta_list_count(ta_number, dev_major_handles.rc);
++ if (tmp_count > 0)
++ count += tmp_count;
++ tmp_count = rsbac_ta_list_count(ta_number, dev_handles.rc);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++ if (id_pp) {
++ struct rsbac_dev_desc_t *i_id_p = NULL;
++ char *pos = NULL;
++#if defined(CONFIG_RSBAC_MAC) || defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_RC)
++ u_int i;
++#endif
++
++ if (count > 0) {
++ int i_count = 0;
++
++ i_count = count + 20; /* max value to expect */
++ *id_pp = rsbac_kmalloc_unlocked(i_count * sizeof(**id_pp));
++ if (!*id_pp)
++ return -RSBAC_ENOMEM;
++ pos = (char *) *id_pp;
++ tmp_count = rsbac_ta_list_get_all_desc(ta_number,
++ dev_handles.
++ gen,
++ (void **)
++ &i_id_p);
++ if (tmp_count > 0) {
++ if (tmp_count > i_count)
++ tmp_count = i_count;
++ memcpy(pos, i_id_p,
++ tmp_count * sizeof(*i_id_p));
++ rsbac_kfree(i_id_p);
++ count = tmp_count;
++ i_count -= tmp_count;
++ pos += tmp_count * sizeof(*i_id_p);
++ } else
++ count = 0;
++#if defined(CONFIG_RSBAC_MAC)
++ if (i_count) {
++ tmp_count =
++ rsbac_ta_list_get_all_desc(ta_number,
++ dev_handles.
++ mac,
++ (void **)
++ &i_id_p);
++ if (tmp_count > 0) {
++ if (tmp_count > i_count)
++ tmp_count = i_count;
++ for (i = 0; i < tmp_count; i++) {
++ if (!rsbac_ta_list_exist
++ (ta_number,
++ dev_handles.gen,
++ &i_id_p[i])) {
++ memcpy(pos,
++ &i_id_p[i],
++ sizeof
++ (*i_id_p));
++ pos +=
++ sizeof
++ (*i_id_p);
++ count++;
++ i_count--;
++ }
++ }
++ rsbac_kfree(i_id_p);
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ if (i_count) {
++ tmp_count =
++ rsbac_ta_list_get_all_desc(ta_number,
++ dev_handles.
++ pm,
++ (void **)
++ &i_id_p);
++ if (tmp_count > 0) {
++ if (tmp_count > i_count)
++ tmp_count = i_count;
++ for (i = 0; i < tmp_count; i++) {
++ if (!rsbac_ta_list_exist
++ (ta_number,
++ dev_handles.gen,
++ &i_id_p[i]))
++#if defined(CONFIG_RSBAC_MAC)
++ if (!rsbac_ta_list_exist(ta_number, dev_handles.mac, &i_id_p[i]))
++#endif
++ {
++ memcpy(pos,
++ &i_id_p
++ [i],
++ sizeof
++ (*i_id_p));
++ pos +=
++ sizeof
++ (*i_id_p);
++ count++;
++ i_count--;
++ }
++ }
++ rsbac_kfree(i_id_p);
++ }
++ }
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ if (i_count) {
++ tmp_count =
++ rsbac_ta_list_get_all_desc(ta_number,
++ dev_major_handles.
++ rc,
++ (void **)
++ &i_id_p);
++ if (tmp_count > 0) {
++ if (tmp_count > i_count)
++ tmp_count = i_count;
++ for (i = 0; i < tmp_count; i++) {
++ i_id_p[i].type +=
++ (D_block_major -
++ D_block);
++ memcpy(pos, &i_id_p[i],
++ sizeof(*i_id_p));
++ pos += sizeof(*i_id_p);
++ count++;
++ i_count--;
++ }
++ rsbac_kfree(i_id_p);
++ }
++ }
++ if (i_count) {
++ tmp_count =
++ rsbac_ta_list_get_all_desc(ta_number,
++ dev_handles.
++ rc,
++ (void **)
++ &i_id_p);
++ if (tmp_count > 0) {
++ if (tmp_count > i_count)
++ tmp_count = i_count;
++ for (i = 0; i < tmp_count; i++) {
++ if (!rsbac_ta_list_exist
++ (ta_number,
++ dev_handles.gen,
++ &i_id_p[i]))
++#if defined(CONFIG_RSBAC_MAC)
++ if (!rsbac_ta_list_exist(ta_number, dev_handles.mac, &i_id_p[i]))
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ if (!rsbac_ta_list_exist(ta_number, dev_handles.pm, &i_id_p[i]))
++#endif
++ {
++ memcpy
++ (pos,
++ &i_id_p
++ [i],
++ sizeof
++ (*i_id_p));
++ pos += sizeof(*i_id_p);
++ count++;
++ i_count--;
++ }
++ }
++ rsbac_kfree(i_id_p);
++ }
++ }
++#endif
++ if (!count)
++ rsbac_kfree(*id_pp);
++ }
++ }
++ return count;
++}
++
++/* Copy new items if they do not exist yet. Adjust list counters. */
++static int copy_new_uids(rsbac_list_handle_t list,
++ rsbac_list_ta_number_t ta_number,
++ int *count_p,
++ int *i_count_p, rsbac_uid_t * res_id_p)
++{
++ rsbac_uid_t *i_id_p = NULL;
++ rsbac_boolean_t found;
++ int tmp_count;
++ int i;
++ int j;
++
++ if (!list || !count_p || !i_count_p || !res_id_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!*i_count_p)
++ return 0;
++/* rsbac_pr_debug(ds, "list %p, ta_number %u, count %u, "
++ "i_count %u, res_id_p %p, res_id_p[0] %u\n",
++ list, ta_number, *count_p, *i_count_p, res_id_p,
++ res_id_p[0]); */
++ tmp_count =
++ rsbac_ta_list_get_all_desc(ta_number, list, (void **) &i_id_p);
++ if (tmp_count > 0) {
++ if (tmp_count > *i_count_p)
++ tmp_count = *i_count_p;
++ for (i = 0; i < tmp_count; i++) {
++ found = FALSE;
++ for (j = 0; j < *count_p; j++) {
++ if (res_id_p[j] == i_id_p[i]) {
++ found = TRUE;
++ break;
++ }
++ }
++ if (found == FALSE) {
++ res_id_p[*count_p] = i_id_p[i];
++ (*count_p)++;
++ (*i_count_p)--;
++ }
++ }
++ rsbac_kfree(i_id_p);
++ }
++ return 0;
++}
++
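++/*
++ * List all USER targets that have attributes in at least one enabled
++ * module list, deduplicated via copy_new_uids().  Allocation and ownership
++ * rules match rsbac_ta_list_all_dev() above.
++ */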
++int rsbac_ta_list_all_user(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t ** id_pp)
++{
++ int count = 0;
++ int tmp_count;
++
++ tmp_count = rsbac_ta_list_count(ta_number, user_handles.gen);
++ if (tmp_count > 0)
++ count += tmp_count;
++#if defined(CONFIG_RSBAC_MAC)
++ tmp_count = rsbac_ta_list_count(ta_number, user_handles.mac);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ tmp_count = rsbac_ta_list_count(ta_number, user_handles.pm);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_DAZ)
++ tmp_count = rsbac_ta_list_count(ta_number, user_handles.daz);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_FF)
++ tmp_count = rsbac_ta_list_count(ta_number, user_handles.ff);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_ta_list_count(ta_number, user_handles.rc);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ tmp_count = rsbac_ta_list_count(ta_number, user_handles.auth);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_CAP)
++ tmp_count = rsbac_ta_list_count(ta_number, user_handles.cap);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ tmp_count = rsbac_ta_list_count(ta_number, user_handles.jail);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PAX)
++ tmp_count = rsbac_ta_list_count(ta_number, user_handles.pax);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RES)
++ tmp_count = rsbac_ta_list_count(ta_number, user_handles.res);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++ if (id_pp) {
++ if (count > 0) {
++ int i_count;
++ rsbac_uid_t *i_id_p = NULL;
++
++ i_count = count + 20; /* max value to expect */
++ *id_pp = rsbac_kmalloc_unlocked(i_count * sizeof(**id_pp));
++ if (!*id_pp)
++ return -RSBAC_ENOMEM;
++ tmp_count = rsbac_ta_list_get_all_desc(ta_number,
++ user_handles.
++ gen,
++ (void **)
++ &i_id_p);
++ if (tmp_count > 0) {
++ if (tmp_count > i_count)
++ tmp_count = i_count;
++ memcpy(*id_pp, i_id_p,
++ tmp_count * sizeof(*i_id_p));
++ rsbac_kfree(i_id_p);
++ count = tmp_count;
++ i_count -= tmp_count;
++ } else
++ count = 0;
++#if defined(CONFIG_RSBAC_MAC)
++ copy_new_uids(user_handles.mac, ta_number, &count,
++ &i_count, *id_pp);
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ copy_new_uids(user_handles.pm, ta_number, &count,
++ &i_count, *id_pp);
++#endif
++#if defined(CONFIG_RSBAC_DAZ)
++ copy_new_uids(user_handles.daz, ta_number, &count,
++ &i_count, *id_pp);
++#endif
++#if defined(CONFIG_RSBAC_FF)
++ copy_new_uids(user_handles.ff, ta_number, &count,
++ &i_count, *id_pp);
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ copy_new_uids(user_handles.rc, ta_number, &count,
++ &i_count, *id_pp);
++#endif
++#if defined(CONFIG_RSBAC_AUTH)
++ copy_new_uids(user_handles.auth, ta_number, &count,
++ &i_count, *id_pp);
++#endif
++#if defined(CONFIG_RSBAC_CAP)
++ copy_new_uids(user_handles.cap, ta_number, &count,
++ &i_count, *id_pp);
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ copy_new_uids(user_handles.jail, ta_number, &count,
++ &i_count, *id_pp);
++#endif
++#if defined(CONFIG_RSBAC_PAX)
++ copy_new_uids(user_handles.pax, ta_number, &count,
++ &i_count, *id_pp);
++#endif
++#if defined(CONFIG_RSBAC_RES)
++ copy_new_uids(user_handles.res, ta_number, &count,
++ &i_count, *id_pp);
++#endif
++ if (!count)
++ rsbac_kfree(*id_pp);
++ }
++ }
++ return count;
++}
++
++/* Copy new items if they do not exist yet. Adjust list counters. */
++static int copy_new_ipcs(rsbac_list_handle_t list,
++ rsbac_list_ta_number_t ta_number,
++ int *count_p,
++ int *i_count_p, struct rsbac_ipc_t * res_id_p)
++{
++ struct rsbac_ipc_t *i_id_p = NULL;
++ rsbac_boolean_t found;
++ int tmp_count;
++ int i;
++ int j;
++
++ if (!list || !count_p || !i_count_p || !res_id_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!*i_count_p)
++ return 0;
++/* rsbac_pr_debug(ds, "list %p, ta_number %u, count %u, "
++ "i_count %u, res_id_p %p, res_id_p[0] %u\n",
++ list, ta_number, *count_p, *i_count_p, res_id_p,
++ res_id_p[0]); */
++ tmp_count =
++ rsbac_ta_list_get_all_desc(ta_number, list, (void **) &i_id_p);
++ if (tmp_count > 0) {
++ if (tmp_count > *i_count_p)
++ tmp_count = *i_count_p;
++ for (i = 0; i < tmp_count; i++) {
++ found = FALSE;
++ for (j = 0; j < *count_p; j++) {
++ if (!ipc_compare(&res_id_p[j], &i_id_p[i])) {
++ found = TRUE;
++ break;
++ }
++ }
++ if (found == FALSE) {
++ res_id_p[*count_p] = i_id_p[i];
++ (*count_p)++;
++ (*i_count_p)--;
++ }
++ }
++ rsbac_kfree(i_id_p);
++ }
++ return 0;
++}
++
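++/*
++ * Same for IPC targets: merge the descriptors from the MAC, PM, RC and
++ * JAIL IPC lists into one duplicate-free array.
++ */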
++int rsbac_ta_list_all_ipc(rsbac_list_ta_number_t ta_number,
++ struct rsbac_ipc_t ** id_pp)
++{
++ int count = 0;
++ int tmp_count;
++
++#if defined(CONFIG_RSBAC_MAC)
++ tmp_count = rsbac_ta_list_count(ta_number, ipc_handles.mac);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ tmp_count = rsbac_ta_list_count(ta_number, ipc_handles.pm);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_ta_list_count(ta_number, ipc_handles.rc);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ tmp_count = rsbac_ta_list_count(ta_number, ipc_handles.jail);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++ if (id_pp) {
++ if (count > 0) {
++ int i_count;
++
++ i_count = count + 20; /* max value to expect */
++ *id_pp = rsbac_kmalloc_unlocked(i_count * sizeof(**id_pp));
++ if (!*id_pp)
++ return -RSBAC_ENOMEM;
++ count = 0;
++#if defined(CONFIG_RSBAC_MAC)
++ copy_new_ipcs(ipc_handles.mac, ta_number, &count,
++ &i_count, *id_pp);
++#endif
++#if defined(CONFIG_RSBAC_PM)
++ copy_new_ipcs(ipc_handles.pm, ta_number, &count,
++ &i_count, *id_pp);
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ copy_new_ipcs(ipc_handles.rc, ta_number, &count,
++ &i_count, *id_pp);
++#endif
++#if defined(CONFIG_RSBAC_JAIL)
++ copy_new_ipcs(ipc_handles.jail, ta_number, &count,
++ &i_count, *id_pp);
++#endif
++ if (!count)
++ rsbac_kfree(*id_pp);
++ }
++ }
++ return count;
++}
++
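++/*
++ * List all GROUP targets.  Groups only carry attributes when
++ * CONFIG_RSBAC_RC_UM_PROT is enabled, so otherwise 0 is returned.
++ */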
++int rsbac_ta_list_all_group(rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t ** id_pp)
++{
++#if defined(CONFIG_RSBAC_RC_UM_PROT)
++ int count = 0;
++ int tmp_count;
++
++ tmp_count = rsbac_ta_list_count(ta_number, group_handles.rc);
++ if (tmp_count > 0)
++ count += tmp_count;
++ if (id_pp) {
++ if (count > 0) {
++ int i_count;
++ rsbac_gid_t *i_id_p = NULL;
++
++ i_count = count + 20; /* max value to expect */
++ *id_pp = rsbac_kmalloc_unlocked(i_count * sizeof(**id_pp));
++ if (!*id_pp)
++ return -RSBAC_ENOMEM;
++ tmp_count = rsbac_ta_list_get_all_desc(ta_number,
++ group_handles.
++ rc,
++ (void **)
++ &i_id_p);
++ if (tmp_count > 0) {
++ if (tmp_count > i_count)
++ tmp_count = i_count;
++ memcpy(*id_pp, i_id_p,
++ tmp_count * sizeof(*i_id_p));
++ rsbac_kfree(i_id_p);
++ count = tmp_count;
++ i_count -= tmp_count;
++ } else
++ count = 0;
++ if (!count)
++ rsbac_kfree(*id_pp);
++ }
++ }
++ return count;
++#else
++ return 0;
++#endif
++}
++
++
++#ifdef CONFIG_RSBAC_NET_DEV
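++/*
++ * Collect all NETDEV descriptors known to the GEN and/or RC netdev lists,
++ * skipping duplicates.  Allocation and ownership rules match
++ * rsbac_ta_list_all_dev().
++ */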
++int rsbac_ta_net_list_all_netdev(rsbac_list_ta_number_t ta_number,
++ rsbac_netdev_id_t ** id_pp)
++{
++ int count = 0;
++ int tmp_count;
++
++#if defined(CONFIG_RSBAC_IND_NETDEV_LOG)
++ tmp_count = rsbac_ta_list_count(ta_number, netdev_handles.gen);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ tmp_count = rsbac_ta_list_count(ta_number, netdev_handles.rc);
++ if (tmp_count > 0)
++ count += tmp_count;
++#endif
++ if (id_pp) {
++ rsbac_netdev_id_t *i_id_p = NULL;
++ char *pos = NULL;
++#if defined(CONFIG_RSBAC_RC)
++ u_int i;
++#endif
++
++ if (count > 0) {
++ int i_count = 0;
++
++ i_count = count + 20; /* max value to expect */
++ *id_pp = rsbac_kmalloc_unlocked(i_count * sizeof(**id_pp));
++ if (!*id_pp)
++ return -RSBAC_ENOMEM;
++ pos = (char *) *id_pp;
++#if defined(CONFIG_RSBAC_IND_NETDEV_LOG)
++ tmp_count = rsbac_ta_list_get_all_desc(ta_number,
++ netdev_handles.
++ gen,
++ (void **)
++ &i_id_p);
++ if (tmp_count > 0) {
++ if (tmp_count > i_count)
++ tmp_count = i_count;
++ memcpy(pos, i_id_p,
++ tmp_count * sizeof(*i_id_p));
++ rsbac_kfree(i_id_p);
++ count = tmp_count;
++ i_count -= tmp_count;
++ pos += tmp_count * sizeof(*i_id_p);
++ } else
++ count = 0;
++#endif
++#if defined(CONFIG_RSBAC_RC)
++ if (i_count) {
++ tmp_count =
++ rsbac_ta_list_get_all_desc(ta_number,
++ netdev_handles.
++ rc,
++ (void **)
++ &i_id_p);
++ if (tmp_count > 0) {
++ if (tmp_count > i_count)
++ tmp_count = i_count;
++ for (i = 0; i < tmp_count; i++) {
++#if defined(CONFIG_RSBAC_IND_NETDEV_LOG)
++ if (!rsbac_ta_list_exist
++ (ta_number,
++ netdev_handles.gen,
++ &i_id_p[i]))
++#endif
++ {
++ memcpy(pos,
++ &i_id_p[i],
++ sizeof
++ (*i_id_p));
++ pos +=
++ sizeof
++ (*i_id_p);
++ count++;
++ i_count--;
++ }
++ }
++ rsbac_kfree(i_id_p);
++ }
++ }
++#endif
++ if (!count)
++ rsbac_kfree(*id_pp);
++ }
++ }
++ return count;
++}
++#endif
++
++#ifdef CONFIG_RSBAC_NET_OBJ
++/* Get a template id from a net description */
++int rsbac_net_get_id(rsbac_list_ta_number_t ta_number,
++ struct rsbac_net_description_t *desc_p,
++ rsbac_net_temp_id_t * id_p)
++{
++ if (!rsbac_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++ if (!id_p || !desc_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (rsbac_ta_list_get_desc(ta_number,
++ net_temp_handle,
++ id_p, desc_p, rsbac_net_compare_data)
++ )
++ *id_p = RSBAC_NET_UNKNOWN;
++ return 0;
++}
++
++/* Get the template ids for a netobj. */
++/* Set *_temp_p to NULL if you do not need it. */
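++/*
++ * The description (family, type, protocol, net device, address, port) is
++ * built from the socket and, if provided, from the explicit local/remote
++ * sockaddr in the netobj, then matched against the template list with
++ * rsbac_net_get_id(); unmatched lookups fall back to RSBAC_NET_UNKNOWN.
++ */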
++int rsbac_ta_net_lookup_templates(rsbac_list_ta_number_t ta_number,
++ struct rsbac_net_obj_desc_t *netobj_p,
++ rsbac_net_temp_id_t * local_temp_p,
++ rsbac_net_temp_id_t * remote_temp_p)
++{
++ struct rsbac_net_description_t *rsbac_net_desc_p;
++ int err = 0;
++ struct net_device *dev;
++
++ if (!netobj_p || !netobj_p->sock_p || !netobj_p->sock_p->sk
++ || !netobj_p->sock_p->ops)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!local_temp_p && !remote_temp_p)
++ return -RSBAC_EINVALIDVALUE;
++
++ rsbac_net_desc_p = rsbac_kmalloc_unlocked(sizeof(*rsbac_net_desc_p));
++ if (!rsbac_net_desc_p)
++ return -RSBAC_ENOMEM;
++
++ rsbac_net_desc_p->address_family = netobj_p->sock_p->ops->family;
++ rsbac_net_desc_p->type = netobj_p->sock_p->type;
++ rsbac_net_desc_p->protocol = netobj_p->sock_p->sk->sk_protocol;
++ if (netobj_p->sock_p->sk->sk_bound_dev_if) {
++ dev = dev_get_by_index(&init_net, netobj_p->sock_p->sk->
++ sk_bound_dev_if);
++ if (dev) {
++ strcpy(rsbac_net_desc_p->netdev, dev->name);
++ dev_put(dev);
++ } else
++ rsbac_net_desc_p->netdev[0] = RSBAC_NET_UNKNOWN;
++ } else
++ rsbac_net_desc_p->netdev[0] = RSBAC_NET_UNKNOWN;
++ if (local_temp_p) {
++ switch (rsbac_net_desc_p->address_family) {
++ case AF_INET:
++ if (netobj_p->local_addr) {
++ struct sockaddr_in *addr =
++ netobj_p->local_addr;
++
++ rsbac_net_desc_p->address =
++ &addr->sin_addr.s_addr;
++ rsbac_net_desc_p->address_len =
++ sizeof(__u32);
++ rsbac_net_desc_p->port =
++ ntohs(addr->sin_port);
++ } else {
++ rsbac_net_desc_p->address =
++ &inet_sk(netobj_p->sock_p->sk)->
++ inet_rcv_saddr;
++ rsbac_net_desc_p->address_len =
++ sizeof(__u32);
++ rsbac_net_desc_p->port =
++ inet_sk(netobj_p->sock_p->sk)->inet_num;
++ }
++ dev = ip_dev_find(&init_net, *(__u32 *) rsbac_net_desc_p->address);
++
++ if (dev) {
++ strcpy(rsbac_net_desc_p->netdev,
++ dev->name);
++ dev_put(dev);
++ }
++ break;
++ case AF_UNIX:
++ rsbac_printk(KERN_WARNING "rsbac_ta_net_lookup_templates(): unsupported family AF_UNIX, should be target UNIXSOCK or IPC-anonunix\n");
++ BUG();
++ return -RSBAC_EINVALIDTARGET;
++
++ default:
++ rsbac_net_desc_p->address = NULL;
++ rsbac_net_desc_p->port = RSBAC_NET_UNKNOWN;
++ }
++ if ((err = rsbac_net_get_id(ta_number, rsbac_net_desc_p,
++ local_temp_p))) {
++ *local_temp_p = 0;
++ rsbac_printk(KERN_WARNING "rsbac_net_lookup_templates(): rsbac_net_get_id for local returned error %u\n",
++ err);
++ }
++ if (rsbac_net_desc_p->address_family == AF_INET)
++ rsbac_pr_debug(ds_net,
++ "user %u temp id for local is %u\n",
++ current_uid(), *local_temp_p);
++ }
++ if (remote_temp_p) {
++ switch (rsbac_net_desc_p->address_family) {
++ case AF_INET:
++ if (netobj_p->remote_addr) {
++ struct sockaddr_in *addr =
++ netobj_p->remote_addr;
++
++ rsbac_net_desc_p->address =
++ &addr->sin_addr.s_addr;
++ rsbac_net_desc_p->address_len =
++ sizeof(__u32);
++ rsbac_net_desc_p->port =
++ ntohs(addr->sin_port);
++ } else {
++ rsbac_net_desc_p->address =
++ &inet_sk(netobj_p->sock_p->sk)->inet_daddr;
++ rsbac_net_desc_p->address_len =
++ sizeof(__u32);
++ rsbac_net_desc_p->port =
++ ntohs(inet_sk(netobj_p->sock_p->sk)->
++ inet_dport);
++ }
++ dev = ip_dev_find(&init_net, *(__u32 *) rsbac_net_desc_p->address);
++
++ if (dev) {
++ strcpy(rsbac_net_desc_p->netdev,
++ dev->name);
++ dev_put(dev);
++ }
++ break;
++		case AF_UNIX:
++			rsbac_printk(KERN_WARNING "rsbac_ta_net_lookup_templates(): unsupported family AF_UNIX, should be target UNIXSOCK or IPC-anonunix\n");
++			/* free the work buffer before bailing out to avoid a leak */
++			rsbac_kfree(rsbac_net_desc_p);
++			return -RSBAC_EINVALIDTARGET;
++
++ default:
++ rsbac_net_desc_p->address = NULL;
++ rsbac_net_desc_p->address_len = 0;
++ rsbac_net_desc_p->port = RSBAC_NET_UNKNOWN;
++ }
++ if ((err =
++ rsbac_net_get_id(ta_number, rsbac_net_desc_p,
++ remote_temp_p))) {
++ *remote_temp_p = 0;
++ rsbac_printk(KERN_WARNING "rsbac_net_lookup_templates(): rsbac_net_get_id for remote returned error %u\n",
++ err);
++ }
++ if (rsbac_net_desc_p->address_family == AF_INET)
++ rsbac_pr_debug(ds_net,
++ "user %u temp id for remote is %u\n",
++ current_uid(), *remote_temp_p);
++ }
++ rsbac_kfree(rsbac_net_desc_p);
++ return 0;
++}
++
++void rsbac_net_obj_cleanup(rsbac_net_obj_id_t netobj)
++{
++ union rsbac_target_id_t tid;
++
++ tid.netobj.sock_p = netobj;
++ rsbac_remove_target(T_NETOBJ, tid);
++}
++
++int rsbac_ta_net_template_exists(rsbac_list_ta_number_t ta_number,
++ rsbac_net_temp_id_t id)
++{
++ return rsbac_ta_list_exist(ta_number, net_temp_handle, &id);
++}
++
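++/*
++ * Central dispatcher for the network template syscalls: NTS_set_* calls
++ * load the existing template, modify one field and store it back,
++ * NTS_get_* calls copy the stored values to the caller, and
++ * NTS_new_template / NTS_copy_template / NTS_delete_template create,
++ * duplicate or remove entries in the global template list.
++ */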
++int rsbac_ta_net_template(rsbac_list_ta_number_t ta_number,
++ enum rsbac_net_temp_syscall_t call,
++ rsbac_net_temp_id_t id,
++ union rsbac_net_temp_syscall_data_t *data_p)
++{
++ struct rsbac_net_temp_data_t int_data;
++ int err;
++
++ memset(&int_data, 0, sizeof(int_data));
++ int_data.address_family = AF_MAX;
++ int_data.type = RSBAC_NET_ANY;
++ int_data.protocol = RSBAC_NET_ANY;
++ strcpy(int_data.name, "DEFAULT");
++
++ switch (call) {
++ case NTS_new_template:
++ case NTS_check_id:
++ break;
++ case NTS_copy_template:
++ err = rsbac_ta_list_get_data_ttl(ta_number,
++ net_temp_handle,
++ NULL,
++ &data_p->id, &int_data);
++ if (err)
++ return err;
++ break;
++ default:
++ err = rsbac_ta_list_get_data_ttl(ta_number,
++ net_temp_handle,
++ NULL, &id, &int_data);
++ if (err)
++ return err;
++ }
++ /* get data values from user space */
++ switch (call) {
++ case NTS_set_address:
++ if(int_data.address_family == AF_INET) {
++ int i;
++
++ memcpy(&int_data.address.inet, &data_p->address.inet,
++ sizeof(int_data.address.inet));
++ if(int_data.address.inet.nr_addr > RSBAC_NET_NR_INET_ADDR)
++ return -RSBAC_EINVALIDVALUE;
++ for(i=0; i<int_data.address.inet.nr_addr; i++)
++ if(int_data.address.inet.valid_bits[i] > 32)
++ return -RSBAC_EINVALIDVALUE;
++ } else {
++ memcpy(&int_data.address.other, &data_p->address.other,
++ sizeof(int_data.address.other));
++ }
++ return rsbac_ta_list_add_ttl(ta_number, net_temp_handle, 0,
++ &id, &int_data);
++ case NTS_set_address_family:
++ if(int_data.address_family != data_p->address_family) {
++ int_data.address_family = data_p->address_family;
++ memset(&int_data.address, 0, sizeof(int_data.address));
++ }
++ return rsbac_ta_list_add_ttl(ta_number,
++ net_temp_handle,
++ 0, &id, &int_data);
++ case NTS_set_type:
++ int_data.type = data_p->type;
++ return rsbac_ta_list_add_ttl(ta_number,
++ net_temp_handle,
++ 0, &id, &int_data);
++ case NTS_set_protocol:
++ int_data.protocol = data_p->protocol;
++ return rsbac_ta_list_add_ttl(ta_number,
++ net_temp_handle,
++ 0, &id, &int_data);
++ case NTS_set_netdev:
++ strncpy(int_data.netdev, data_p->netdev, RSBAC_IFNAMSIZ);
++ int_data.netdev[RSBAC_IFNAMSIZ] = 0;
++ return rsbac_ta_list_add_ttl(ta_number,
++ net_temp_handle,
++ 0, &id, &int_data);
++ case NTS_set_ports:
++ memcpy(&int_data.ports, &data_p->ports,
++ sizeof(int_data.ports));
++ if(int_data.ports.nr_ports > RSBAC_NET_NR_PORTS)
++ return -RSBAC_EINVALIDVALUE;
++ return rsbac_ta_list_add_ttl(ta_number,
++ net_temp_handle,
++ 0, &id, &int_data);
++ case NTS_set_name:
++ strncpy(int_data.name, data_p->name,
++ RSBAC_NET_TEMP_NAMELEN - 1);
++ int_data.name[RSBAC_NET_TEMP_NAMELEN - 1] = 0;
++ return rsbac_ta_list_add_ttl(ta_number,
++ net_temp_handle,
++ 0, &id, &int_data);
++ case NTS_new_template:
++ if (rsbac_ta_list_exist(ta_number, net_temp_handle, &id))
++ return -RSBAC_EEXISTS;
++ strncpy(int_data.name, data_p->name,
++ RSBAC_NET_TEMP_NAMELEN - 1);
++ int_data.name[RSBAC_NET_TEMP_NAMELEN - 1] = 0;
++ return rsbac_ta_list_add_ttl(ta_number,
++ net_temp_handle,
++ 0, &id, &int_data);
++ case NTS_copy_template:
++ if (rsbac_ta_list_exist(ta_number, net_temp_handle, &id))
++ return -RSBAC_EEXISTS;
++ return rsbac_ta_list_add_ttl(ta_number,
++ net_temp_handle,
++ 0, &id, &int_data);
++ case NTS_delete_template:
++ return rsbac_ta_list_remove(ta_number, net_temp_handle,
++ &id);
++ case NTS_check_id:
++ if (rsbac_ta_list_exist(ta_number, net_temp_handle, &id)) {
++ data_p->id = id;
++ return 0;
++ } else
++ return -RSBAC_ENOTFOUND;
++ case NTS_get_address:
++ memcpy(&data_p->address, &int_data.address,
++ sizeof(int_data.address));
++ return 0;
++ case NTS_get_address_family:
++ data_p->address_family = int_data.address_family;
++ return 0;
++ case NTS_get_type:
++ data_p->type = int_data.type;
++ return 0;
++ case NTS_get_protocol:
++ data_p->protocol = int_data.protocol;
++ return 0;
++ case NTS_get_netdev:
++ strncpy(data_p->netdev, int_data.netdev, RSBAC_IFNAMSIZ);
++ return 0;
++ case NTS_get_ports:
++ memcpy(&data_p->ports, &int_data.ports,
++ sizeof(int_data.ports));
++ return 0;
++ case NTS_get_name:
++ strcpy(data_p->name, int_data.name);
++ return 0;
++
++ default:
++ return -RSBAC_EINVALIDREQUEST;
++ }
++}
++
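++/*
++ * Without id_pp this only returns the number of templates; with id_pp an
++ * allocated array of all template ids is handed back as well.
++ */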
++int rsbac_ta_net_list_all_template(rsbac_list_ta_number_t ta_number,
++ rsbac_net_temp_id_t ** id_pp)
++{
++ if (id_pp)
++ return rsbac_ta_list_get_all_desc(ta_number,
++ net_temp_handle,
++ (void **) id_pp);
++ else
++ return rsbac_ta_list_count(ta_number, net_temp_handle);
++}
++
++int rsbac_ta_net_template_exist(rsbac_list_ta_number_t ta_number,
++ rsbac_net_temp_id_t temp)
++{
++ return rsbac_ta_list_exist(ta_number, net_temp_handle, &temp);
++}
++
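++/*
++ * TRUE only for SEND, RECEIVE, READ, WRITE, ACCEPT and CONNECT, i.e. the
++ * requests that may concern the remote side of a network object.
++ */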
++int rsbac_net_remote_request(enum rsbac_adf_request_t request)
++{
++ switch (request) {
++ case R_SEND:
++ case R_RECEIVE:
++ case R_READ:
++ case R_WRITE:
++ case R_ACCEPT:
++ case R_CONNECT:
++ return TRUE;
++
++ default:
++ return FALSE;
++ }
++}
++
++#endif /* NET_OBJ */
++
++#if defined(CONFIG_RSBAC_DAZ)
++EXPORT_SYMBOL(rsbac_daz_get_ttl);
++/* Get ttl for new cache items in seconds */
++rsbac_time_t rsbac_daz_get_ttl(void)
++{
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++ return rsbac_daz_ttl;
++#else
++ return 0;
++#endif
++}
++
++EXPORT_SYMBOL(rsbac_daz_set_ttl);
++void rsbac_daz_set_ttl(rsbac_time_t ttl)
++{
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++ if (ttl) {
++ if (ttl > RSBAC_LIST_MAX_AGE_LIMIT)
++ ttl = RSBAC_LIST_MAX_AGE_LIMIT;
++ rsbac_daz_ttl = ttl;
++ }
++#endif
++}
++
++EXPORT_SYMBOL(rsbac_daz_flush_cache);
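++/* Drop all cached DAZ scan results by clearing the dazs list on every
++ * known device. */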
++int rsbac_daz_flush_cache(void)
++{
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++ struct rsbac_device_list_item_t *device_p;
++ u_int i;
++ int srcu_idx;
++
++ for (i = 0; i < RSBAC_NR_DEVICE_LISTS; i++) {
++ srcu_idx = srcu_read_lock(&device_list_srcu[i]);
++ device_p = rcu_dereference(device_head_p[i])->head;
++ while (device_p) {
++ rsbac_list_remove_all(device_p->handles.dazs);
++ device_p = device_p->next;
++ }
++ srcu_read_unlock(&device_list_srcu[i], srcu_idx);
++ }
++#endif
++ return 0;
++}
++#endif
++
++#if defined(CONFIG_RSBAC_JAIL)
++static int rsbac_jail_exists_compare(void * data1, void * data2)
++{
++ struct rsbac_jail_process_aci_t * aci_p = data1;
++
++ return memcmp(&aci_p->id, data2, sizeof(rsbac_jail_id_t));
++}
++
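++/* A jail id exists as long as at least one process in the JAIL process
++ * list still carries it. */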
++rsbac_boolean_t rsbac_jail_exists(rsbac_jail_id_t jail_id)
++{
++ rsbac_pid_t pid;
++
++ if(!rsbac_ta_list_get_desc(0,
++ process_handles.jail,
++ &pid,
++ &jail_id,
++ rsbac_jail_exists_compare))
++ return TRUE;
++ else
++ return FALSE;
++}
++#endif
++
++void rsbac_flags_set(unsigned long int rsbac_flags)
++{
++}
+diff --git a/rsbac/data_structures/acl_data_structures.c b/rsbac/data_structures/acl_data_structures.c
+new file mode 100644
+index 0000000..6f71257
+--- /dev/null
++++ b/rsbac/data_structures/acl_data_structures.c
+@@ -0,0 +1,8398 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of ACL data structures */
++/* Author and (c) 1999-2011: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 12/Jul/2011 */
++/*************************************************** */
++
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <asm/uaccess.h>
++#include <rsbac/types.h>
++#include <rsbac/aci_data_structures.h>
++#include <rsbac/acl_data_structures.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/adf.h>
++#include <rsbac/aci.h>
++#include <rsbac/acl.h>
++#include <rsbac/lists.h>
++#include <rsbac/proc_fs.h>
++#include <rsbac/getname.h>
++#include <rsbac/acl_getname.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/network.h>
++#include <linux/string.h>
++#include <linux/srcu.h>
++#include <linux/seq_file.h>
++#include <linux/module.h>
++
++/************************************************************************** */
++/* Global Variables */
++/************************************************************************** */
++
++/* The following global variables are needed for access to ACL data. */
++
++static struct rsbac_acl_device_list_head_t * device_list_head_p;
++static spinlock_t device_list_lock;
++static struct srcu_struct device_list_srcu;
++static struct lock_class_key device_list_lock_class;
++
++static rsbac_list_handle_t dev_handle = NULL;
++static rsbac_list_handle_t dev_major_handle = NULL;
++static rsbac_list_handle_t scd_handle = NULL;
++static rsbac_list_handle_t group_handle = NULL;
++static rsbac_list_handle_t gm_handle = NULL;
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++static rsbac_list_handle_t netdev_handle = NULL;
++#endif
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++static rsbac_list_handle_t nettemp_nt_handle = NULL;
++static rsbac_list_handle_t nettemp_handle = NULL;
++static rsbac_list_handle_t netobj_handle = NULL;
++#endif
++
++static rsbac_list_handle_t default_fd_handle = NULL;
++static rsbac_list_handle_t default_dev_handle = NULL;
++static rsbac_list_handle_t default_ipc_handle = NULL;
++static rsbac_list_handle_t default_scd_handle = NULL;
++static rsbac_list_handle_t u_handle = NULL;
++static rsbac_list_handle_t default_u_handle = NULL;
++static rsbac_list_handle_t default_p_handle = NULL;
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++static rsbac_list_handle_t g_handle = NULL;
++static rsbac_list_handle_t default_g_handle = NULL;
++#endif
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++static rsbac_list_handle_t default_netdev_handle = NULL;
++static rsbac_acl_rights_vector_t default_netdev_rights = 0;
++#endif
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++static rsbac_list_handle_t default_nettemp_nt_handle = NULL;
++static rsbac_list_handle_t default_netobj_handle = NULL;
++static rsbac_acl_rights_vector_t default_nettemp_nt_rights = 0;
++static rsbac_acl_rights_vector_t default_netobj_rights = 0;
++#endif
++
++static rsbac_acl_group_id_t group_last_new = 0;
++
++static rsbac_acl_rights_vector_t default_fd_rights = 0;
++static rsbac_acl_rights_vector_t default_dev_rights = 0;
++static rsbac_acl_rights_vector_t default_ipc_rights = 0;
++static rsbac_acl_rights_vector_t default_scd_rights = 0;
++static rsbac_acl_rights_vector_t default_u_rights = 0;
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++static rsbac_acl_rights_vector_t default_g_rights = 0;
++#endif
++static rsbac_acl_rights_vector_t default_p_rights = 0;
++
++static struct kmem_cache * acl_device_item_slab = NULL;
++
++/**************************************************/
++/* Declarations of external functions */
++/**************************************************/
++
++rsbac_boolean_t writable(struct super_block *sb_p);
++
++/**************************************************/
++/* Declarations of internal functions */
++/**************************************************/
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++/* nr_hashes is always 2^n, no matter what the macros say */
++
++static u_int nr_fd_hashes = RSBAC_ACL_NR_FD_LISTS;
++
++static u_int group_hash(void * desc, __u32 nr_hashes)
++{
++ return (*((rsbac_acl_group_id_t *) desc) & (nr_hashes - 1));
++}
++
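++/* Order ACL entries by subject type first, then by subject id. */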
++static int entry_compare(void *desc1, void *desc2)
++{
++ int result;
++ struct rsbac_acl_entry_desc_t *i_desc1 = desc1;
++ struct rsbac_acl_entry_desc_t *i_desc2 = desc2;
++
++ result = memcmp(&i_desc1->subj_type,
++ &i_desc2->subj_type, sizeof(i_desc1->subj_type));
++ if (result)
++ return result;
++ else
++ return memcmp(&i_desc1->subj_id,
++ &i_desc2->subj_id, sizeof(i_desc1->subj_id));
++}
++
++static int dev_compare(void *desc1, void *desc2)
++{
++ int result;
++ struct rsbac_dev_desc_t *i_desc1 = desc1;
++ struct rsbac_dev_desc_t *i_desc2 = desc2;
++
++ result = memcmp(&i_desc1->type,
++ &i_desc2->type, sizeof(i_desc1->type));
++ if (result)
++ return result;
++ result = memcmp(&i_desc1->major,
++ &i_desc2->major, sizeof(i_desc1->major));
++ if (result)
++ return result;
++ return memcmp(&i_desc1->minor,
++ &i_desc2->minor, sizeof(i_desc1->minor));
++}
++
++static int dev_major_compare(void *desc1, void *desc2)
++{
++ int result;
++ struct rsbac_dev_desc_t *i_desc1 = desc1;
++ struct rsbac_dev_desc_t *i_desc2 = desc2;
++
++ result = memcmp(&i_desc1->type,
++ &i_desc2->type, sizeof(i_desc1->type));
++ if (result)
++ return result;
++ return memcmp(&i_desc1->major,
++ &i_desc2->major, sizeof(i_desc1->major));
++}
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++static int netdev_compare(void *desc1, void *desc2)
++{
++ return strncmp(desc1, desc2, RSBAC_IFNAMSIZ);
++}
++#endif
++
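++/*
++ * On-disk list conversion helpers: each *_conv() translates one entry from
++ * an older list version to the current layout, and the *_get_conv()
++ * functions select the matching converter for a given stored version.
++ * Rights vectors from the oldest formats additionally get their special
++ * rights shifted from RSBAC_ACL_OLD_SPECIAL_RIGHT_BASE to
++ * RSBAC_ACL_SPECIAL_RIGHT_BASE.
++ */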
++static int fd_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ memcpy(new_desc, old_desc, sizeof(rsbac_inode_nr_t));
++ memcpy(new_data, old_data, sizeof(rsbac_acl_rights_vector_t));
++ return 0;
++}
++
++static int fd_old_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ rsbac_acl_rights_vector_t *new = new_data;
++ rsbac_acl_rights_vector_t *old = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_inode_nr_t));
++ *new = (*old & RSBAC_ALL_REQUEST_VECTOR)
++ | ((*old & ~(RSBAC_ALL_REQUEST_VECTOR)) <<
++ (RSBAC_ACL_SPECIAL_RIGHT_BASE -
++ RSBAC_ACL_OLD_SPECIAL_RIGHT_BASE));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *fd_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_FD_OLD_LIST_VERSION:
++ return fd_conv;
++ case RSBAC_ACL_FD_OLD_OLD_LIST_VERSION:
++ return fd_old_conv;
++ default:
++ return NULL;
++ }
++}
++
++static int dev_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ memcpy(new_desc, old_desc, sizeof(struct rsbac_dev_desc_t));
++ memcpy(new_data, old_data, sizeof(rsbac_acl_rights_vector_t));
++ return 0;
++}
++
++static int dev_old_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ rsbac_acl_rights_vector_t *new = new_data;
++ rsbac_acl_rights_vector_t *old = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(struct rsbac_dev_desc_t));
++ *new = (*old & RSBAC_ALL_REQUEST_VECTOR)
++ | ((*old & ~(RSBAC_ALL_REQUEST_VECTOR)) <<
++ (RSBAC_ACL_SPECIAL_RIGHT_BASE -
++ RSBAC_ACL_OLD_SPECIAL_RIGHT_BASE));
++ return 0;
++}
++
++static int dev_old_old_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_dev_desc_t *new = new_desc;
++ struct rsbac_dev_t *old = old_desc;
++ rsbac_acl_rights_vector_t *newd = new_data;
++ rsbac_acl_rights_vector_t *oldd = old_data;
++
++
++ memcpy(new_data, old_data, sizeof(rsbac_acl_rights_vector_t));
++ new->type = old->type;
++ new->major = RSBAC_MAJOR(old->id);
++ new->minor = RSBAC_MINOR(old->id);
++ *newd = (*oldd & RSBAC_ALL_REQUEST_VECTOR)
++ | ((*oldd & ~(RSBAC_ALL_REQUEST_VECTOR)) <<
++ (RSBAC_ACL_SPECIAL_RIGHT_BASE -
++ RSBAC_ACL_OLD_SPECIAL_RIGHT_BASE));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *dev_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_DEV_OLD_LIST_VERSION:
++ return dev_conv;
++ case RSBAC_ACL_DEV_OLD_OLD_LIST_VERSION:
++ return dev_old_conv;
++ case RSBAC_ACL_DEV_OLD_OLD_OLD_LIST_VERSION:
++ return dev_old_old_conv;
++ default:
++ return NULL;
++ }
++}
++
++static int scd_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ memcpy(new_desc, old_desc, sizeof(__u8));
++ memcpy(new_data, old_data, sizeof(rsbac_acl_rights_vector_t));
++ return 0;
++}
++
++static int scd_old_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ rsbac_acl_rights_vector_t *new = new_data;
++ rsbac_acl_rights_vector_t *old = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(__u8));
++ *new = (*old & RSBAC_ALL_REQUEST_VECTOR)
++ | ((*old & ~(RSBAC_ALL_REQUEST_VECTOR)) <<
++ (RSBAC_ACL_SPECIAL_RIGHT_BASE -
++ RSBAC_ACL_OLD_SPECIAL_RIGHT_BASE));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *scd_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_SCD_OLD_LIST_VERSION:
++ return scd_conv;
++ case RSBAC_ACL_SCD_OLD_OLD_LIST_VERSION:
++ return scd_old_conv;
++ default:
++ return NULL;
++ }
++}
++
++static int u_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ rsbac_uid_t *new = new_desc;
++ rsbac_old_uid_t *old = old_desc;
++
++ *new = *old;
++ memcpy(new_data, old_data, sizeof(rsbac_acl_rights_vector_t));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *u_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_U_OLD_LIST_VERSION:
++ return u_conv;
++ default:
++ return NULL;
++ }
++}
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++static int g_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ rsbac_gid_t *new = new_desc;
++ rsbac_old_gid_t *old = old_desc;
++
++ *new = *old;
++ memcpy(new_data, old_data, sizeof(rsbac_acl_rights_vector_t));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *g_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_G_OLD_LIST_VERSION:
++ return g_conv;
++ default:
++ return NULL;
++ }
++}
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++static int netdev_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ memcpy(new_desc, old_desc, sizeof(rsbac_netdev_id_t));
++ memcpy(new_data, old_data, sizeof(rsbac_acl_rights_vector_t));
++ return 0;
++}
++
++static int netdev_old_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ rsbac_acl_rights_vector_t *new = new_data;
++ rsbac_acl_rights_vector_t *old = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_netdev_id_t));
++ *new = (*old & RSBAC_ALL_REQUEST_VECTOR)
++ | ((*old & ~(RSBAC_ALL_REQUEST_VECTOR)) <<
++ (RSBAC_ACL_SPECIAL_RIGHT_BASE -
++ RSBAC_ACL_OLD_SPECIAL_RIGHT_BASE));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *netdev_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_NETDEV_OLD_LIST_VERSION:
++ return netdev_conv;
++ case RSBAC_ACL_NETDEV_OLD_OLD_LIST_VERSION:
++ return netdev_old_conv;
++ default:
++ return NULL;
++ }
++}
++#endif
++
++static int nettemp_nt_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ memcpy(new_desc, old_desc, sizeof(rsbac_net_temp_id_t));
++ memcpy(new_data, old_data, sizeof(rsbac_acl_rights_vector_t));
++ return 0;
++}
++
++static int nettemp_nt_old_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ rsbac_acl_rights_vector_t *new = new_data;
++ rsbac_acl_rights_vector_t *old = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_net_temp_id_t));
++ *new = (*old & RSBAC_ALL_REQUEST_VECTOR)
++ | ((*old & ~(RSBAC_ALL_REQUEST_VECTOR)) <<
++ (RSBAC_ACL_SPECIAL_RIGHT_BASE -
++ RSBAC_ACL_OLD_SPECIAL_RIGHT_BASE));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *nettemp_nt_get_conv(rsbac_version_t
++ old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_NETTEMP_NT_OLD_LIST_VERSION:
++ return nettemp_nt_conv;
++ case RSBAC_ACL_NETTEMP_NT_OLD_OLD_LIST_VERSION:
++ return nettemp_nt_old_conv;
++ default:
++ return NULL;
++ }
++}
++
++static int nettemp_old_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ memcpy(new_desc, old_desc, sizeof(rsbac_net_temp_id_t));
++ memcpy(new_data, old_data, sizeof(rsbac_acl_rights_vector_t));
++ return 0;
++}
++
++static int nettemp_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ rsbac_acl_rights_vector_t *new = new_data;
++ rsbac_acl_rights_vector_t *old = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_net_temp_id_t));
++ *new = (*old & RSBAC_ALL_REQUEST_VECTOR)
++ | ((*old & ~(RSBAC_ALL_REQUEST_VECTOR)) <<
++ (RSBAC_ACL_SPECIAL_RIGHT_BASE -
++ RSBAC_ACL_OLD_SPECIAL_RIGHT_BASE));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *nettemp_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_NETTEMP_OLD_LIST_VERSION:
++ return nettemp_conv;
++ case RSBAC_ACL_NETTEMP_OLD_OLD_LIST_VERSION:
++ return nettemp_old_conv;
++ default:
++ return NULL;
++ }
++}
++
++static int netobj_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ memcpy(new_desc, old_desc, sizeof(rsbac_net_obj_id_t));
++ memcpy(new_data, old_data, sizeof(rsbac_acl_rights_vector_t));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *netobj_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_NETOBJ_OLD_LIST_VERSION:
++ return netobj_conv;
++ default:
++ return NULL;
++ }
++}
++
++static int gm_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ *((rsbac_uid_t *) new_desc) = *((rsbac_old_uid_t *) old_desc);
++ return 0;
++}
++
++static rsbac_list_conv_function_t *gm_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_GM_OLD_VERSION:
++ return gm_conv;
++ default:
++ return NULL;
++ }
++}
++
++
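++/*
++ * Subentry converters shared by all target types: the subentry descriptor
++ * (subject type and id) is copied, and common_old_subconv() additionally
++ * rebases the special rights as in the *_old_conv functions above.
++ */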
++static int common_subconv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_acl_entry_desc_t *new_d = new_desc;
++ struct rsbac_acl_old_entry_desc_t *old_d = old_desc;
++
++ memcpy(new_data, old_data, sizeof(rsbac_acl_rights_vector_t));
++ new_d->subj_type = old_d->subj_type;
++ new_d->subj_id = old_d->subj_id;
++ return 0;
++}
++
++static int common_old_subconv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ rsbac_acl_rights_vector_t *new = new_data;
++ rsbac_acl_rights_vector_t *old = old_data;
++ struct rsbac_acl_entry_desc_t *new_d = new_desc;
++ struct rsbac_acl_old_entry_desc_t *old_d = old_desc;
++
++ new_d->subj_type = old_d->subj_type;
++ new_d->subj_id = old_d->subj_id;
++ *new = (*old & RSBAC_ALL_REQUEST_VECTOR)
++ | ((*old & ~(RSBAC_ALL_REQUEST_VECTOR)) <<
++ (RSBAC_ACL_SPECIAL_RIGHT_BASE -
++ RSBAC_ACL_OLD_SPECIAL_RIGHT_BASE));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *fd_get_subconv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_FD_OLD_LIST_VERSION:
++ return common_subconv;
++ case RSBAC_ACL_FD_OLD_OLD_LIST_VERSION:
++ return common_old_subconv;
++ default:
++ return NULL;
++ }
++}
++
++static rsbac_list_conv_function_t *dev_get_subconv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_DEV_OLD_LIST_VERSION:
++ return common_subconv;
++ case RSBAC_ACL_DEV_OLD_OLD_LIST_VERSION:
++ return common_old_subconv;
++ default:
++ return NULL;
++ }
++}
++
++static rsbac_list_conv_function_t *scd_get_subconv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_SCD_OLD_LIST_VERSION:
++ return common_subconv;
++ case RSBAC_ACL_SCD_OLD_OLD_LIST_VERSION:
++ return common_old_subconv;
++ default:
++ return NULL;
++ }
++}
++
++static rsbac_list_conv_function_t *u_get_subconv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_U_OLD_LIST_VERSION:
++ return common_subconv;
++ default:
++ return NULL;
++ }
++}
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++static rsbac_list_conv_function_t *g_get_subconv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_G_OLD_LIST_VERSION:
++ return common_subconv;
++ default:
++ return NULL;
++ }
++}
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++static rsbac_list_conv_function_t *netdev_get_subconv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_NETDEV_OLD_LIST_VERSION:
++ return common_subconv;
++ default:
++ return NULL;
++ }
++}
++#endif
++
++static rsbac_list_conv_function_t *nettemp_nt_get_subconv(rsbac_version_t
++ old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_NETTEMP_NT_OLD_LIST_VERSION:
++ return common_subconv;
++ default:
++ return NULL;
++ }
++}
++
++static rsbac_list_conv_function_t *nettemp_get_subconv(rsbac_version_t
++ old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_NETTEMP_OLD_LIST_VERSION:
++ return common_subconv;
++ default:
++ return NULL;
++ }
++}
++
++static rsbac_list_conv_function_t *netobj_get_subconv(rsbac_version_t
++ old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_NETOBJ_OLD_LIST_VERSION:
++ return common_subconv;
++ default:
++ return NULL;
++ }
++}
++
++static int gm_subconv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ memcpy(new_desc, old_desc, sizeof(rsbac_acl_group_id_t));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *gm_get_subconv(rsbac_version_t
++ old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_GM_OLD_VERSION:
++ return gm_subconv;
++ default:
++ return NULL;
++ }
++}
++
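++/*
++ * The default ACLs are flat lists keyed directly by the ACL entry
++ * descriptor, so their conversion handlers simply reuse common_subconv()
++ * and common_old_subconv().
++ */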
++static rsbac_list_conv_function_t *def_fd_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_DEF_FD_OLD_LIST_VERSION:
++ return common_subconv;
++ case RSBAC_ACL_DEF_FD_OLD_OLD_LIST_VERSION:
++ return common_old_subconv;
++ default:
++ return NULL;
++ }
++}
++
++static rsbac_list_conv_function_t *def_dev_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_DEF_DEV_OLD_LIST_VERSION:
++ return common_subconv;
++ case RSBAC_ACL_DEF_DEV_OLD_OLD_LIST_VERSION:
++ return common_old_subconv;
++ default:
++ return NULL;
++ }
++}
++
++static rsbac_list_conv_function_t *def_ipc_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_DEF_IPC_OLD_LIST_VERSION:
++ return common_subconv;
++ case RSBAC_ACL_DEF_IPC_OLD_OLD_LIST_VERSION:
++ return common_old_subconv;
++ default:
++ return NULL;
++ }
++}
++
++static rsbac_list_conv_function_t *def_scd_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_DEF_SCD_OLD_LIST_VERSION:
++ return common_subconv;
++ case RSBAC_ACL_DEF_SCD_OLD_OLD_LIST_VERSION:
++ return common_old_subconv;
++ default:
++ return NULL;
++ }
++}
++
++static rsbac_list_conv_function_t *def_u_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_DEF_U_OLD_LIST_VERSION:
++ return common_subconv;
++ case RSBAC_ACL_DEF_U_OLD_OLD_LIST_VERSION:
++ return common_old_subconv;
++ default:
++ return NULL;
++ }
++}
++
++static rsbac_list_conv_function_t *def_p_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_DEF_P_OLD_LIST_VERSION:
++ return common_subconv;
++ case RSBAC_ACL_DEF_P_OLD_OLD_LIST_VERSION:
++ return common_old_subconv;
++ default:
++ return NULL;
++ }
++}
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++static rsbac_list_conv_function_t *def_g_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_DEF_G_OLD_LIST_VERSION:
++ return common_subconv;
++ case RSBAC_ACL_DEF_G_OLD_OLD_LIST_VERSION:
++ return common_old_subconv;
++ default:
++ return NULL;
++ }
++}
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++static rsbac_list_conv_function_t *def_netdev_get_conv(rsbac_version_t
++ old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_DEF_NETDEV_OLD_LIST_VERSION:
++ return common_subconv;
++ case RSBAC_ACL_DEF_NETDEV_OLD_OLD_LIST_VERSION:
++ return common_old_subconv;
++ default:
++ return NULL;
++ }
++}
++#endif
++
++static rsbac_list_conv_function_t *def_nettemp_nt_get_conv(rsbac_version_t
++ old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_DEF_NETTEMP_NT_OLD_LIST_VERSION:
++ return common_subconv;
++ case RSBAC_ACL_DEF_NETTEMP_NT_OLD_OLD_LIST_VERSION:
++ return common_old_subconv;
++ default:
++ return NULL;
++ }
++}
++
++static rsbac_list_conv_function_t *def_netobj_get_conv(rsbac_version_t
++ old_version)
++{
++ switch (old_version) {
++ case RSBAC_ACL_DEF_NETOBJ_OLD_LIST_VERSION:
++ return common_subconv;
++ case RSBAC_ACL_DEF_NETOBJ_OLD_OLD_LIST_VERSION:
++ return common_old_subconv;
++ default:
++ return NULL;
++ }
++}
++
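++/*
++ * The get_conv()/get_subconv() callbacks above are handed to the list
++ * registration calls (see acl_register_fd_lists() below), so that entries
++ * from older on-disk list versions can be converted to the current layout
++ * when the lists are read back in.
++ */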
++
++/* acl_register_fd_lists() */
++/* register fd ACL lists for device */
++
++static int acl_register_fd_lists(struct rsbac_acl_device_list_item_t
++ *device_p, kdev_t kdev)
++{
++ int err = 0;
++ int tmperr;
++ struct rsbac_list_lol_info_t lol_info;
++ rsbac_acl_rights_vector_t def_mask = RSBAC_ACL_DEFAULT_FD_MASK;
++
++ if (!device_p)
++ return -RSBAC_EINVALIDPOINTER;
++
++ /* register all the ACL lists of lists */
++ lol_info.version = RSBAC_ACL_FD_LIST_VERSION;
++ lol_info.key = RSBAC_ACL_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_inode_nr_t);
++ lol_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* mask */
++ lol_info.subdesc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ lol_info.subdata_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ lol_info.max_age = 0;
++ tmperr = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->handle,
++ &lol_info,
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ entry_compare,
++ fd_get_conv,
++ fd_get_subconv, &def_mask,
++ NULL,
++ RSBAC_ACL_FD_FILENAME, kdev,
++ nr_fd_hashes,
++ (nr_fd_hashes > 0) ? rsbac_list_hash_fd : NULL,
++ RSBAC_ACL_FD_OLD_FILENAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "acl_register_fd_lists(): registering list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_ACL_FD_FILENAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++ return err;
++}
++
++/* acl_detach_fd_lists() */
++/* detach from fd ACL lists for device */
++
++static int acl_detach_fd_lists(struct rsbac_acl_device_list_item_t
++ *device_p)
++{
++ int err = 0;
++ int tmperr;
++
++ if (!device_p)
++ return -RSBAC_EINVALIDPOINTER;
++
++ /* detach all the ACL lists of lists */
++ tmperr = rsbac_list_lol_detach(&device_p->handle,
++ RSBAC_ACL_LIST_KEY);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "acl_detach_fd_lists(): detaching from list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_ACL_FD_FILENAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++ return err;
++}
++
++/************************************************************************** */
++/* The lookup functions return NULL if the item is not found, and a */
++/* pointer to the item otherwise. */
++
++/* first the device item lookup */
++static struct rsbac_acl_device_list_item_t *acl_lookup_device(kdev_t kdev)
++{
++ struct rsbac_acl_device_list_item_t *curr = rcu_dereference(device_list_head_p)->curr;
++
++ /* if there is no current item or it is not the right one, search... */
++ if (!curr || (RSBAC_MAJOR(curr->id) != RSBAC_MAJOR(kdev))
++ || (RSBAC_MINOR(curr->id) != RSBAC_MINOR(kdev))
++ ) {
++ curr = rcu_dereference(device_list_head_p)->head;
++ while (curr
++ && ((RSBAC_MAJOR(curr->id) != RSBAC_MAJOR(kdev))
++ || (RSBAC_MINOR(curr->id) != RSBAC_MINOR(kdev))
++ )
++ ) {
++ curr = curr->next;
++ }
++ if (curr)
++ rcu_dereference(device_list_head_p)->curr = curr;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++/************************************************************************** */
++/* The add_item() functions add an item to the list, set head.curr to it, */
++/* and return a pointer to the item. */
++/* These functions will NOT check whether there is already an item with the */
++/* same ID! If this happens, the lookup functions will return the old item! */
++
++/* Create a device item without adding to list. No locking needed. */
++static struct rsbac_acl_device_list_item_t
++*create_device_item(kdev_t kdev)
++{
++ struct rsbac_acl_device_list_item_t *new_item_p;
++
++ /* allocate memory for new device, return NULL, if failed */
++ if (!(new_item_p = rsbac_smalloc_clear_unlocked(acl_device_item_slab)))
++ return NULL;
++ new_item_p->id = kdev;
++ new_item_p->mount_count = 1;
++ return new_item_p;
++}
++
++/* Add an existing device item to list. Locking needed. */
++static struct rsbac_acl_device_list_item_t
++*add_device_item(struct rsbac_acl_device_list_item_t *device_p)
++{
++ struct rsbac_acl_device_list_head_t * new_p;
++ struct rsbac_acl_device_list_head_t * old_p;
++
++ if (!device_p)
++ return NULL;
++
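++	/*
++	 * RCU-style update of the device list head: copy the current head
++	 * under device_list_lock, link the new device into the copy, publish
++	 * it with rcu_assign_pointer() and free the old head only after
++	 * synchronize_srcu() has let existing readers finish.
++	 */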
++ spin_lock(&device_list_lock);
++ old_p = device_list_head_p;
++ new_p = rsbac_kmalloc(sizeof(*new_p));
++ *new_p = *old_p;
++ /* add new device to device list */
++ if (!new_p->head) { /* first device */
++ new_p->head = device_p;
++ new_p->tail = device_p;
++ new_p->curr = device_p;
++ new_p->count = 1;
++ device_p->prev = NULL;
++ device_p->next = NULL;
++ } else { /* there is another device -> hang to tail */
++ device_p->prev = new_p->tail;
++ device_p->next = NULL;
++ new_p->tail->next = device_p;
++ new_p->tail = device_p;
++ new_p->curr = device_p;
++ new_p->count++;
++ }
++ rcu_assign_pointer(device_list_head_p, new_p);
++ spin_unlock(&device_list_lock);
++ synchronize_srcu(&device_list_srcu);
++ rsbac_kfree(old_p);
++ return device_p;
++}
++
++/************************************************************************** */
++/* The remove_item() functions remove an item from the list. If this item */
++/* is head, tail or curr, these pointers are set accordingly. */
++/* To speed up removing several subsequent items, curr is set to the next */
++/* item, if possible. */
++/* If the item is not found, nothing is done. */
++
++static void clear_device_item(struct rsbac_acl_device_list_item_t
++ *device_p)
++{
++ if (!device_p)
++ return;
++ acl_detach_fd_lists(device_p);
++	rsbac_sfree(acl_device_item_slab, device_p);
++}
++
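++/*
++ * Note: remove_device_item() expects device_list_lock to be held by the
++ * caller and releases it on both the found and the not-found path; the
++ * replaced list head is freed only after synchronize_rcu().
++ */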
++static void remove_device_item(kdev_t kdev)
++{
++ struct rsbac_acl_device_list_item_t *item_p;
++ struct rsbac_acl_device_list_head_t * new_p;
++ struct rsbac_acl_device_list_head_t * old_p;
++
++ old_p = device_list_head_p;
++ new_p = rsbac_kmalloc(sizeof(*new_p));
++ *new_p = *old_p;
++
++ /* first we must locate the item. */
++ if ((item_p = acl_lookup_device(kdev))) { /* ok, item was found */
++ if (new_p->head == item_p) { /* item is head */
++ if (new_p->tail == item_p) { /* item is head and tail = only item -> list will be empty */
++ new_p->head = NULL;
++ new_p->tail = NULL;
++ } else { /* item is head, but not tail -> next item becomes head */
++ item_p->next->prev = NULL;
++ new_p->head = item_p->next;
++ }
++ } else { /* item is not head */
++ if (new_p->tail == item_p) { /*item is not head, but tail -> previous item becomes tail */
++ item_p->prev->next = NULL;
++ new_p->tail = item_p->prev;
++ } else { /* item is neither head nor tail -> item is cut out */
++ item_p->prev->next = item_p->next;
++ item_p->next->prev = item_p->prev;
++ }
++ }
++
++ /* curr is no longer valid -> reset. */
++ new_p->curr = NULL;
++ /* adjust counter */
++ new_p->count--;
++ rcu_assign_pointer(device_list_head_p, new_p);
++ spin_unlock(&device_list_lock);
++ synchronize_rcu();
++ rsbac_kfree(old_p);
++
++ /* now we can remove the item from memory. This means cleaning up */
++ /* everything below. */
++ clear_device_item(item_p);
++ } /* end of if: item was found */
++ else
++ spin_unlock(&device_list_lock);
++}
++
++/************************************************* */
++/* proc functions */
++/************************************************* */
++
++#if defined(CONFIG_RSBAC_PROC)
++static int
++acl_devices_proc_show(struct seq_file *m, void *v)
++{
++ struct rsbac_acl_device_list_item_t *device_p;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized())
++ return -ENOSYS;
++
++ seq_printf(m, "%u RSBAC ACL Devices\n-------------------\n",
++ rcu_dereference(device_list_head_p)->count);
++
++ /* wait for read access to device_list_head */
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ for (device_p = rcu_dereference(device_list_head_p)->head; device_p;
++ device_p = device_p->next) {
++ seq_printf(m,
++ "%02u:%02u with mount_count = %u\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ device_p->mount_count);
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++
++ return 0;
++}
++
++static int acl_devices_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, acl_devices_proc_show, NULL);
++}
++
++static const struct file_operations acl_devices_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = acl_devices_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *acl_devices;
++
++static int
++stats_acl_proc_show(struct seq_file *m, void *v)
++{
++ u_int item_count = 0;
++ u_int member_count = 0;
++ struct rsbac_acl_device_list_item_t *device_p;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++		rsbac_printk(KERN_WARNING "stats_acl_proc_show(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
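++	/* Ask the ADF whether the caller may read RSBAC status data
++	 * (GET_STATUS_DATA on the rsbac SCD target) before printing. */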
++ rsbac_pr_debug(aef_acl, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ seq_printf(m, "ACL Status\n-----------\n");
++
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = rcu_dereference(device_list_head_p)->head;
++ while (device_p) {
++ item_count = rsbac_list_lol_count(device_p->handle);
++ member_count = rsbac_list_lol_all_subcount(device_p->handle);
++ seq_printf(m,
++ "device %02u:%02u has %i file ACLs, sum of %i members\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id), item_count,
++ member_count);
++ device_p = device_p->next;
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++
++ /* dev list */
++ seq_printf(m,
++ "%li device ACL items, sum of %li members\n",
++ rsbac_list_lol_count(dev_handle),
++ rsbac_list_lol_all_subcount(dev_handle));
++ seq_printf(m,
++ "%li device major ACL items, sum of %li members\n",
++ rsbac_list_lol_count(dev_major_handle),
++ rsbac_list_lol_all_subcount(dev_major_handle));
++
++ /* SCD list */
++ seq_printf(m,
++ "%li scd ACL items, sum of %li members\n",
++ rsbac_list_lol_count(scd_handle),
++ rsbac_list_lol_all_subcount(scd_handle));
++
++ /* user list */
++ seq_printf(m,
++ "%li user ACL items, sum of %li members\n",
++ rsbac_list_lol_count(u_handle),
++ rsbac_list_lol_all_subcount(u_handle));
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ /* Linux group list */
++ seq_printf(m,
++ "%li Linux group ACL items, sum of %li members\n",
++ rsbac_list_lol_count(g_handle),
++ rsbac_list_lol_all_subcount(g_handle));
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ /* netdev list */
++ seq_printf(m,
++ "%li network device ACL items, sum of %li members\n",
++ rsbac_list_lol_count(netdev_handle),
++ rsbac_list_lol_all_subcount(netdev_handle));
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ /* nettemp_nt list */
++ seq_printf(m,
++ "%li network template NT ACL items, sum of %li members\n",
++ rsbac_list_lol_count(nettemp_nt_handle),
++ rsbac_list_lol_all_subcount(nettemp_nt_handle));
++ /* nettemp list */
++ seq_printf(m,
++ "%li network template ACL items, sum of %li members\n",
++ rsbac_list_lol_count(nettemp_handle),
++ rsbac_list_lol_all_subcount(nettemp_handle));
++ /* netobj list */
++ seq_printf(m,
++ "%li network object ACL items, sum of %li members\n",
++ rsbac_list_lol_count(netobj_handle),
++ rsbac_list_lol_all_subcount(netobj_handle));
++#endif
++
++ seq_printf(m, "%li groups, last new is %u\n",
++ rsbac_list_count(group_handle), group_last_new);
++
++ /* protect gm list */
++ seq_printf(m,
++ "%li group member items, sum of %li group memberships\n",
++ rsbac_list_lol_count(gm_handle),
++ rsbac_list_lol_all_subcount(gm_handle));
++ return 0;
++}
++
++static int stats_acl_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, stats_acl_proc_show, NULL);
++}
++
++static const struct file_operations stats_acl_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = stats_acl_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *stats_acl;
++
++static int
++acl_acllist_proc_show(struct seq_file *m, void *v)
++{
++ u_int i, j, k;
++ char tmp1[80], tmp2[80];
++ u_int count = 0;
++ int tmp_count;
++ int tmp_sub_count;
++ u_int member_count = 0;
++ struct rsbac_acl_device_list_item_t *device_p;
++ rsbac_inode_nr_t *fd_desc_p;
++ struct rsbac_dev_desc_t *dev_desc_p;
++ __u8 *scd_desc_p;
++ rsbac_uid_t *u_desc_p;
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ rsbac_gid_t *g_desc_p;
++#endif
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ rsbac_netdev_id_t *netdev_desc_p;
++#endif
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ rsbac_net_temp_id_t *nettemp_desc_p;
++ rsbac_net_obj_id_t *netobj_desc_p;
++#endif
++ struct rsbac_acl_entry_desc_t *sub_desc_p;
++ rsbac_acl_rights_vector_t rights;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++		rsbac_printk(KERN_WARNING "acl_acllist_proc_show(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ rsbac_pr_debug(aef_acl, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ seq_printf(m, "ACL Lists\n----------\n");
++
++ seq_printf(m,
++ "Default FD ACL: %li members:",
++ rsbac_list_count(default_fd_handle));
++ tmp_count =
++ rsbac_list_get_all_desc(default_fd_handle,
++ (void **) &sub_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (RSBAC_UID_SET(sub_desc_p[i].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[i].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ else
++ seq_printf(m, " %s %u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++
++ /* default_dev list */
++ seq_printf(m,
++ "\nDefault Device ACL: %li members:",
++ rsbac_list_count(default_dev_handle));
++ tmp_count =
++ rsbac_list_get_all_desc(default_dev_handle,
++ (void **) &sub_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (RSBAC_UID_SET(sub_desc_p[i].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[i].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ else
++ seq_printf(m, " %s %u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++
++ /* default_ipc_list */
++ seq_printf(m,
++ "\nDefault IPC ACL: %li members:",
++ rsbac_list_count(default_ipc_handle));
++ tmp_count =
++ rsbac_list_get_all_desc(default_ipc_handle,
++ (void **) &sub_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (RSBAC_UID_SET(sub_desc_p[i].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[i].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ else
++ seq_printf(m, " %s %u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++
++ /* default_scd_list */
++ seq_printf(m,
++ "\nDefault SCD ACL: %li members:",
++ rsbac_list_count(default_scd_handle));
++ tmp_count =
++ rsbac_list_get_all_desc(default_scd_handle,
++ (void **) &sub_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (RSBAC_UID_SET(sub_desc_p[i].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[i].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ else
++ seq_printf(m, " %s %u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++
++ /* default_u_list */
++ seq_printf(m,
++ "\nDefault User ACL: %li members:",
++ rsbac_list_count(default_u_handle));
++ tmp_count =
++ rsbac_list_get_all_desc(default_u_handle,
++ (void **) &sub_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (RSBAC_UID_SET(sub_desc_p[i].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[i].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ else
++ seq_printf(m, " %s %u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++
++ /* default_p list */
++ seq_printf(m,
++ "\nDefault Process ACL: %li members:",
++ rsbac_list_count(default_p_handle));
++ tmp_count =
++ rsbac_list_get_all_desc(default_p_handle,
++ (void **) &sub_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (RSBAC_UID_SET(sub_desc_p[i].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[i].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ else
++ seq_printf(m, " %s %u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ /* default_g_list */
++ seq_printf(m,
++ "\nDefault Linux Group ACL: %li members:",
++ rsbac_list_count(default_g_handle));
++ tmp_count =
++ rsbac_list_get_all_desc(default_g_handle,
++ (void **) &sub_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (RSBAC_UID_SET(sub_desc_p[i].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[i].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ else
++ seq_printf(m, " %s %u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ /* default_netdev list */
++ seq_printf(m,
++ "\nDefault Network Device ACL: %li members:",
++ rsbac_list_count(default_netdev_handle));
++ tmp_count =
++ rsbac_list_get_all_desc(default_netdev_handle,
++ (void **) &sub_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (RSBAC_UID_SET(sub_desc_p[i].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[i].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ else
++ seq_printf(m, " %s %u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ /* default_netdev list */
++ seq_printf(m,
++ "\nDefault Network Template NT ACL: %li members:",
++ rsbac_list_count(default_nettemp_nt_handle));
++ tmp_count =
++ rsbac_list_get_all_desc(default_nettemp_nt_handle,
++ (void **) &sub_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (RSBAC_UID_SET(sub_desc_p[i].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[i].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ else
++ seq_printf(m, " %s %u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++ /* default_netobj list */
++ seq_printf(m,
++ "\nDefault Network Object ACL: %li members:",
++ rsbac_list_count(default_netobj_handle));
++ tmp_count =
++ rsbac_list_get_all_desc(default_netobj_handle,
++ (void **) &sub_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (RSBAC_UID_SET(sub_desc_p[i].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[i].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ else
++ seq_printf(m, " %s %u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [i].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[i].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++#endif
++
++ seq_printf(m, "\n\nFile/Dir/Fifo/Symlink ACLs:\n");
++
++ /* protect device list */
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = rcu_dereference(device_list_head_p)->head;
++ while (device_p) {
++ /* reset counters */
++ count = 0;
++ member_count = 0;
++ seq_printf(m,
++ "\nDevice %02u:%02u\n inode count mask+members",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id));
++ tmp_count = rsbac_list_lol_get_all_desc(device_p->handle,
++ (void **)
++ &fd_desc_p);
++ if (tmp_count > 0) {
++ for (j = 0; j < tmp_count; j++) {
++ seq_printf(m,
++ "\n%6u\t %li\t",
++ fd_desc_p[j],
++ rsbac_list_lol_subcount
++ (device_p->handle,
++ &fd_desc_p[j]));
++				if (!rsbac_list_lol_get_data
++				    (device_p->handle,
++				     &fd_desc_p[j], &rights)) {
++ seq_printf(m,
++ "%s\n\t\t",
++ u64tostracl
++ (tmp1,
++ rights));
++ }
++ tmp_sub_count =
++ rsbac_list_lol_get_all_subdesc
++ (device_p->handle,
++ &fd_desc_p[j],
++ (void **) &sub_desc_p);
++ if (tmp_sub_count > 0) {
++ for (k = 0;
++ k < tmp_sub_count;
++ k++) {
++ if (RSBAC_UID_SET(sub_desc_p[k].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [k].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[k].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[k].subj_id));
++ else
++ seq_printf(m,
++ "%s %u, ",
++ get_acl_subject_type_name
++ (tmp1,
++ sub_desc_p
++ [k].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p
++ [k].
++ subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ member_count +=
++ tmp_sub_count;
++ }
++			}
++			count += tmp_count;
++ rsbac_kfree(fd_desc_p);
++ }
++ seq_printf(m,
++ "\n%u file ACLs, sum of %u members\n", count,
++ member_count);
++ device_p = device_p->next;
++ }
++ /* unprotect device list */
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++
++ /* dev list */
++ seq_printf(m,
++ "\nDevice ACLs:\ntype+id count mask+members");
++
++ member_count = 0;
++ tmp_count =
++ rsbac_list_lol_get_all_desc(dev_handle, (void **) &dev_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (!rsbac_list_lol_get_data
++ (dev_handle, &dev_desc_p[i], &rights)) {
++ seq_printf(m,
++ "\n%c%02u:%02u\t %3li\t%s\n\t\t",
++ 'B' + dev_desc_p[i].type,
++ dev_desc_p[i].major,
++ dev_desc_p[i].minor,
++ rsbac_list_lol_subcount
++ (dev_handle, &dev_desc_p[i]),
++ u64tostracl(tmp1, rights));
++ }
++ tmp_sub_count =
++ rsbac_list_lol_get_all_subdesc(dev_handle,
++ &dev_desc_p[i],
++ (void **)
++ &sub_desc_p);
++ if (tmp_sub_count > 0) {
++ for (j = 0; j < tmp_sub_count; j++) {
++ if (RSBAC_UID_SET(sub_desc_p[j].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [j].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[j].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[j].subj_id));
++ else
++ seq_printf(m,
++ "%s %u, ",
++ get_acl_subject_type_name
++ (tmp1,
++ sub_desc_p[j].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[j].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ member_count += tmp_sub_count;
++ }
++ }
++ rsbac_kfree(dev_desc_p);
++ }
++ seq_printf(m,
++ "\n\n%i device ACL items, sum of %u members\n",
++ tmp_count, member_count);
++
++ /* dev major list */
++ seq_printf(m,
++ "\nDevice major ACLs:\ntype+id count mask+members");
++
++ member_count = 0;
++ tmp_count =
++ rsbac_list_lol_get_all_desc(dev_major_handle,
++ (void **) &dev_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (!rsbac_list_lol_get_data
++ (dev_major_handle, &dev_desc_p[i], &rights)) {
++ seq_printf(m,
++ "\n%c%02u\t %3li\t%s\n\t\t",
++ 'B' + dev_desc_p[i].type,
++ dev_desc_p[i].major,
++ rsbac_list_lol_subcount
++ (dev_major_handle,
++ &dev_desc_p[i]),
++ u64tostracl(tmp1, rights));
++ }
++ tmp_sub_count =
++ rsbac_list_lol_get_all_subdesc
++ (dev_major_handle, &dev_desc_p[i],
++ (void **) &sub_desc_p);
++ if (tmp_sub_count > 0) {
++ for (j = 0; j < tmp_sub_count; j++) {
++ if (RSBAC_UID_SET(sub_desc_p[j].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [j].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[j].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[j].subj_id));
++ else
++ seq_printf(m,
++ "%s %u, ",
++ get_acl_subject_type_name
++ (tmp1,
++ sub_desc_p[j].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[j].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ member_count += tmp_sub_count;
++ }
++ }
++ rsbac_kfree(dev_desc_p);
++ }
++ seq_printf(m,
++ "\n\n%i device major ACL items, sum of %u members\n",
++ tmp_count, member_count);
++ /* scd list */
++ member_count = 0;
++ seq_printf(m,
++ "\nSCD ACLs:\nname count mask+members");
++ tmp_count =
++ rsbac_list_lol_get_all_desc(scd_handle, (void **) &scd_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (!rsbac_list_lol_get_data
++ (scd_handle, &scd_desc_p[i], &rights)) {
++ seq_printf(m,
++ "\n%-16s %3li\t%s\n\t\t\t",
++ get_acl_scd_type_name(tmp1,
++ scd_desc_p
++ [i]),
++ rsbac_list_lol_subcount
++ (scd_handle, &scd_desc_p[i]),
++ u64tostracl(tmp2, rights));
++ }
++ tmp_sub_count =
++ rsbac_list_lol_get_all_subdesc(scd_handle,
++ &scd_desc_p[i],
++ (void **)
++ &sub_desc_p);
++ if (tmp_sub_count > 0) {
++ for (j = 0; j < tmp_sub_count; j++) {
++ if (RSBAC_UID_SET(sub_desc_p[j].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [j].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[j].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[j].subj_id));
++ else
++ seq_printf(m,
++ "%s %u, ",
++ get_acl_subject_type_name
++ (tmp1,
++ sub_desc_p[j].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[j].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ member_count += tmp_sub_count;
++ }
++ }
++ rsbac_kfree(scd_desc_p);
++ }
++ seq_printf(m,
++ "\n\n%u SCD ACL items, sum of %u members\n", tmp_count,
++ member_count);
++
++ /* user list */
++ seq_printf(m,
++ "\nUser ACLs:\nuid count mask+members");
++
++ member_count = 0;
++ tmp_count =
++ rsbac_list_lol_get_all_desc(u_handle, (void **) &u_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (!rsbac_list_lol_get_data
++ (u_handle, &u_desc_p[i], &rights)) {
++ if (RSBAC_UID_SET(u_desc_p[i]))
++ seq_printf(m,
++ "\n%u/%u\t %3li\t%s\n\t\t",
++ RSBAC_UID_SET(u_desc_p[i]),
++ RSBAC_UID_NUM(u_desc_p[i]),
++ rsbac_list_lol_subcount
++ (u_handle, &u_desc_p[i]),
++ u64tostracl(tmp1, rights));
++ else
++ seq_printf(m,
++ "\n%u\t %3li\t%s\n\t\t",
++ RSBAC_UID_NUM(u_desc_p[i]),
++ rsbac_list_lol_subcount
++ (u_handle, &u_desc_p[i]),
++ u64tostracl(tmp1, rights));
++ }
++ tmp_sub_count =
++ rsbac_list_lol_get_all_subdesc(u_handle,
++ &u_desc_p[i],
++ (void **)
++ &sub_desc_p);
++ if (tmp_sub_count > 0) {
++ for (j = 0; j < tmp_sub_count; j++) {
++ if (RSBAC_UID_SET(sub_desc_p[j].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [j].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[j].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[j].subj_id));
++ else
++ seq_printf(m,
++ "%s %u, ",
++ get_acl_subject_type_name
++ (tmp1,
++ sub_desc_p[j].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[j].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ member_count += tmp_sub_count;
++ }
++ }
++ rsbac_kfree(u_desc_p);
++ }
++ seq_printf(m,
++ "\n\n%i user ACL items, sum of %u members\n",
++ tmp_count, member_count);
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ /* Linux group list */
++ seq_printf(m,
++ "\nLinux group ACLs:\ngid count mask+members");
++
++ member_count = 0;
++ tmp_count =
++ rsbac_list_lol_get_all_desc(g_handle, (void **) &g_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (!rsbac_list_lol_get_data
++ (g_handle, &g_desc_p[i], &rights)) {
++ if (RSBAC_GID_SET(g_desc_p[i]))
++ seq_printf(m,
++ "\n%u/%u\t %3li\t%s\n\t\t",
++ RSBAC_GID_SET(g_desc_p[i]),
++ RSBAC_GID_NUM(g_desc_p[i]),
++ rsbac_list_lol_subcount
++ (g_handle, &g_desc_p[i]),
++ u64tostracl(tmp1, rights));
++ else
++ seq_printf(m,
++ "\n%u\t %3li\t%s\n\t\t",
++ RSBAC_GID_NUM(g_desc_p[i]),
++ rsbac_list_lol_subcount
++ (g_handle, &g_desc_p[i]),
++ u64tostracl(tmp1, rights));
++ }
++ tmp_sub_count =
++ rsbac_list_lol_get_all_subdesc(g_handle,
++ &g_desc_p[i],
++ (void **)
++ &sub_desc_p);
++ if (tmp_sub_count > 0) {
++ for (j = 0; j < tmp_sub_count; j++) {
++ if (RSBAC_GID_SET(sub_desc_p[j].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [j].
++ subj_type),
++ RSBAC_GID_SET(sub_desc_p[j].subj_id),
++ RSBAC_GID_NUM(sub_desc_p[j].subj_id));
++ else
++ seq_printf(m,
++ "%s %u, ",
++ get_acl_subject_type_name
++ (tmp1,
++ sub_desc_p[j].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[j].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ member_count += tmp_sub_count;
++ }
++ }
++ rsbac_kfree(g_desc_p);
++ }
++ seq_printf(m,
++ "\n\n%i Linux group ACL items, sum of %u members\n",
++ tmp_count, member_count);
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ /* netdev list */
++ seq_printf(m,
++ "\nNetwork Device ACLs:\nname\t\t count mask+members");
++ member_count = 0;
++ tmp_count =
++ rsbac_list_lol_get_all_desc(netdev_handle,
++ (void **) &netdev_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (!rsbac_list_lol_get_data
++ (netdev_handle, &netdev_desc_p[i], &rights)) {
++ seq_printf(m,
++ "\n%-16s %3li\t %s\n\t\t",
++ netdev_desc_p[i],
++ rsbac_list_lol_subcount
++ (netdev_handle,
++ &netdev_desc_p[i]),
++ u64tostracl(tmp1, rights));
++ }
++ tmp_sub_count =
++ rsbac_list_lol_get_all_subdesc(netdev_handle,
++ &netdev_desc_p
++ [i],
++ (void **)
++ &sub_desc_p);
++ if (tmp_sub_count > 0) {
++ for (j = 0; j < tmp_sub_count; j++) {
++ if (RSBAC_UID_SET(sub_desc_p[j].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [j].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[j].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[j].subj_id));
++ else
++ seq_printf(m,
++ "%s %u, ",
++ get_acl_subject_type_name
++ (tmp1,
++ sub_desc_p[j].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[j].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ member_count += tmp_sub_count;
++ }
++ }
++ rsbac_kfree(netdev_desc_p);
++ }
++ seq_printf(m,
++ "\n\n%i network device ACL items, sum of %u members\n",
++ tmp_count, member_count);
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ /* nettemp_nt list */
++ seq_printf(m,
++ "\nNetwork Template NT (template protection) ACLs:\nTemplate count mask+members");
++
++ member_count = 0;
++ tmp_count =
++ rsbac_list_lol_get_all_desc(nettemp_nt_handle,
++ (void **) &nettemp_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (!rsbac_list_lol_get_data
++ (nettemp_nt_handle, &nettemp_desc_p[i],
++ &rights)) {
++ seq_printf(m,
++ "\n%10u %3li\t%s\n\t\t",
++ nettemp_desc_p[i],
++ rsbac_list_lol_subcount
++ (nettemp_nt_handle,
++ &nettemp_desc_p[i]),
++ u64tostracl(tmp1, rights));
++ }
++ tmp_sub_count =
++ rsbac_list_lol_get_all_subdesc
++ (nettemp_nt_handle, &nettemp_desc_p[i],
++ (void **) &sub_desc_p);
++ if (tmp_sub_count > 0) {
++ for (j = 0; j < tmp_sub_count; j++) {
++ if (RSBAC_UID_SET(sub_desc_p[j].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [j].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[j].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[j].subj_id));
++ else
++ seq_printf(m,
++ "%s %u, ",
++ get_acl_subject_type_name
++ (tmp1,
++ sub_desc_p[j].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[j].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ member_count += tmp_sub_count;
++ }
++ }
++ rsbac_kfree(nettemp_desc_p);
++ }
++ seq_printf(m,
++ "\n\n%i network template NT ACL items, sum of %u members\n",
++ tmp_count, member_count);
++
++ /* nettemp list */
++ seq_printf(m,
++ "\nNetwork Template (netobj protection) ACLs:\nTemplate count mask+members");
++ member_count = 0;
++ tmp_count =
++ rsbac_list_lol_get_all_desc(nettemp_handle,
++ (void **) &nettemp_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (!rsbac_list_lol_get_data
++ (nettemp_handle, &nettemp_desc_p[i],
++ &rights)) {
++ seq_printf(m,
++ "\n%10u %3li\t%s\n\t\t",
++ nettemp_desc_p[i],
++ rsbac_list_lol_subcount
++ (nettemp_handle,
++ &nettemp_desc_p[i]),
++ u64tostracl(tmp1, rights));
++ }
++ tmp_sub_count =
++ rsbac_list_lol_get_all_subdesc(nettemp_handle,
++ &nettemp_desc_p
++ [i],
++ (void **)
++ &sub_desc_p);
++ if (tmp_sub_count > 0) {
++ for (j = 0; j < tmp_sub_count; j++) {
++ if (RSBAC_UID_SET(sub_desc_p[j].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [j].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[j].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[j].subj_id));
++ else
++ seq_printf(m,
++ "%s %u, ",
++ get_acl_subject_type_name
++ (tmp1,
++ sub_desc_p[j].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[j].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ member_count += tmp_sub_count;
++ }
++ }
++ rsbac_kfree(nettemp_desc_p);
++ }
++ seq_printf(m,
++ "\n\n%i network template ACL items, sum of %u members\n",
++ tmp_count, member_count);
++
++ /* netobj list */
++ seq_printf(m,
++ "\nNetwork Object ACLs:\nObject-ID count mask+members");
++
++ member_count = 0;
++ tmp_count =
++ rsbac_list_lol_get_all_desc(netobj_handle,
++ (void **) &netobj_desc_p);
++ if (tmp_count > 0) {
++ for (i = 0; i < tmp_count; i++) {
++ if (!rsbac_list_lol_get_data
++ (netobj_handle, &netobj_desc_p[i], &rights)) {
++ seq_printf(m,
++ "\n%p %3li\t%s\n\t\t",
++ netobj_desc_p[i],
++ rsbac_list_lol_subcount
++ (netobj_handle,
++ &netobj_desc_p[i]),
++ u64tostracl(tmp1, rights));
++ }
++ tmp_sub_count =
++ rsbac_list_lol_get_all_subdesc(netobj_handle,
++ &netobj_desc_p
++ [i],
++ (void **)
++ &sub_desc_p);
++ if (tmp_sub_count > 0) {
++ for (j = 0; j < tmp_sub_count; j++) {
++ if (RSBAC_UID_SET(sub_desc_p[j].subj_id))
++ seq_printf(m, " %s %u/%u,",
++ get_acl_subject_type_name(tmp1,
++ sub_desc_p
++ [j].
++ subj_type),
++ RSBAC_UID_SET(sub_desc_p[j].subj_id),
++ RSBAC_UID_NUM(sub_desc_p[j].subj_id));
++ else
++ seq_printf(m,
++ "%s %u, ",
++ get_acl_subject_type_name
++ (tmp1,
++ sub_desc_p[j].
++ subj_type),
++ RSBAC_UID_NUM(sub_desc_p[j].subj_id));
++ }
++ rsbac_kfree(sub_desc_p);
++ member_count += tmp_sub_count;
++ }
++ }
++ rsbac_kfree(netobj_desc_p);
++ }
++ seq_printf(m,
++ "\n\n%i network object ACL items, sum of %u members\n",
++ tmp_count, member_count);
++#endif
++
++ return 0;
++}
++
++static int acl_acllist_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, acl_acllist_proc_show, NULL);
++}
++
++static const struct file_operations acl_acllist_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = acl_acllist_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *acl_acllist;
++
++static int
++acl_grouplist_proc_show(struct seq_file *m, void *v)
++{
++ char type;
++ int count, sub_count;
++ int i, j;
++ u_int member_count = 0;
++ struct rsbac_acl_group_entry_t *entry_p;
++ rsbac_uid_t *user_p;
++ rsbac_acl_group_id_t *group_p;
++ rsbac_time_t *ttl_p;
++
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_is_initialized()) {
++		rsbac_printk(KERN_WARNING "acl_grouplist_proc_show(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ rsbac_pr_debug(aef_acl, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ seq_printf(m, "ACL Groups\n----------\n");
++
++ /* group list */
++ seq_printf(m,
++ "Group list: %li groups, last new is %u\nID\ttype name\t\towner\n",
++ rsbac_list_count(group_handle), group_last_new);
++
++ count = rsbac_list_get_all_data(group_handle, (void **) &entry_p);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ if (entry_p[i].type == ACLG_GLOBAL)
++ type = 'G';
++ else
++ type = 'P';
++ if (RSBAC_UID_SET(entry_p[i].owner))
++ seq_printf(m, "%u\t%c %-18s %u/%u\n",
++ entry_p[i].id, type, entry_p[i].name,
++ RSBAC_UID_SET(entry_p[i].owner),
++ RSBAC_UID_NUM(entry_p[i].owner));
++ else
++ seq_printf(m, "%u\t%c %-18s %u\n",
++ entry_p[i].id, type, entry_p[i].name,
++ RSBAC_UID_NUM(entry_p[i].owner));
++ }
++ rsbac_kfree(entry_p);
++ }
++
++ /* group member list */
++ member_count = 0;
++ seq_printf(m,
++ "\nGroup memberships:\nuser count\tgroups");
++
++ count = rsbac_list_lol_get_all_desc(gm_handle, (void **) &user_p);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ sub_count =
++ rsbac_list_lol_get_all_subdesc_ttl(gm_handle,
++ &user_p[i],
++ (void **)
++ &group_p,
++ &ttl_p);
++ if (RSBAC_UID_SET(user_p[i]))
++ seq_printf(m, "\n%u/%u\t%i\t",
++ RSBAC_UID_SET(user_p[i]),
++ RSBAC_UID_NUM(user_p[i]),
++ sub_count);
++ else
++ seq_printf(m, "\n%u\t%i\t",
++ RSBAC_UID_NUM(user_p[i]),
++ sub_count);
++ if (sub_count > 0) {
++ for (j = 0; j < sub_count; j++) {
++ if (ttl_p[j])
++ seq_printf(m,
++ "%u(ttl:%i) ",
++ group_p[j],
++ ttl_p[j]);
++ else
++ seq_printf(m,
++ "%u ",
++ group_p[j]);
++ }
++ member_count += sub_count;
++ rsbac_kfree(group_p);
++ rsbac_kfree(ttl_p);
++ }
++ }
++ rsbac_kfree(user_p);
++ }
++ seq_printf(m,
++ "\n\n%u user items, sum of %u memberships\n", count,
++ member_count);
++ return 0;
++}
++
++static int acl_grouplist_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, acl_grouplist_proc_show, NULL);
++}
++
++static const struct file_operations acl_grouplist_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = acl_grouplist_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *acl_grouplist;
++
++#endif
++
++
++/************************************************* */
++/* Init functions */
++/************************************************* */
++
++/* All functions return 0 if no error occurred, and a negative error code */
++/* otherwise. The error codes are defined in rsbac/error.h. */
++
++/************************************************************************** */
++/* Initialization of all ACL data structures. After this call, all ACL */
++/* data is kept in memory for performance reasons, but is written to disk */
++/* on every change. */
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static void registration_error(int err, char *listname)
++#else
++static void __init registration_error(int err, char *listname)
++#endif
++{
++ if (err) {
++ char *tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_init_acl(): Registering ACL %s list failed with error %s\n",
++ listname, get_error_name(tmp, err));
++ rsbac_kfree(tmp);
++ }
++ }
++}
++
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++void acl_create_def(void)
++#else
++void __init acl_create_def(void)
++#endif
++{
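++	/*
++	 * Seed every default ACL that is still empty with the compiled-in
++	 * standard entries (the RSBAC_ACL_ACMAN_*, RSBAC_ACL_SYSADM_* and
++	 * RSBAC_ACL_GENERAL_* entries used below).
++	 */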
++ if (!rsbac_list_count(default_fd_handle)) {
++ struct rsbac_acl_entry_desc_t desc;
++ struct rsbac_acl_entry_t acman_entry =
++ RSBAC_ACL_ACMAN_FD_ENTRY;
++ struct rsbac_acl_entry_t sysadm_entry =
++ RSBAC_ACL_SYSADM_FD_ENTRY;
++ struct rsbac_acl_entry_t gen_entry =
++ RSBAC_ACL_GENERAL_FD_ENTRY;
++
++ rsbac_printk(KERN_WARNING "rsbac_init_acl(): File/Dir default ACL empty on dev %02u:%02u, generating standard ACL!\n",
++ RSBAC_MAJOR(rsbac_root_dev),
++ RSBAC_MINOR(rsbac_root_dev));
++ desc.subj_type = acman_entry.subj_type;
++ desc.subj_id = acman_entry.subj_id;
++ rsbac_list_add(default_fd_handle, &desc,
++ &acman_entry.rights);
++ desc.subj_type = sysadm_entry.subj_type;
++ desc.subj_id = sysadm_entry.subj_id;
++ rsbac_list_add(default_fd_handle, &desc,
++ &sysadm_entry.rights);
++ desc.subj_type = gen_entry.subj_type;
++ desc.subj_id = gen_entry.subj_id;
++ rsbac_list_add(default_fd_handle, &desc,
++ &gen_entry.rights);
++ }
++ if (!rsbac_list_count(default_dev_handle)) {
++ struct rsbac_acl_entry_desc_t desc;
++ struct rsbac_acl_entry_t acman_entry =
++ RSBAC_ACL_ACMAN_DEV_ENTRY;
++ struct rsbac_acl_entry_t sysadm_entry =
++ RSBAC_ACL_SYSADM_DEV_ENTRY;
++ struct rsbac_acl_entry_t gen_entry =
++ RSBAC_ACL_GENERAL_DEV_ENTRY;
++
++ rsbac_printk(KERN_WARNING "rsbac_init_acl(): Device default ACL empty on dev %02u:%02u, generating standard ACL!\n",
++ RSBAC_MAJOR(rsbac_root_dev),
++ RSBAC_MINOR(rsbac_root_dev));
++ desc.subj_type = acman_entry.subj_type;
++ desc.subj_id = acman_entry.subj_id;
++ rsbac_list_add(default_dev_handle, &desc,
++ &acman_entry.rights);
++ desc.subj_type = sysadm_entry.subj_type;
++ desc.subj_id = sysadm_entry.subj_id;
++ rsbac_list_add(default_dev_handle, &desc,
++ &sysadm_entry.rights);
++ desc.subj_type = gen_entry.subj_type;
++ desc.subj_id = gen_entry.subj_id;
++ rsbac_list_add(default_dev_handle, &desc,
++ &gen_entry.rights);
++ }
++ if (!rsbac_list_count(default_ipc_handle)) {
++ struct rsbac_acl_entry_desc_t desc;
++ struct rsbac_acl_entry_t acman_entry =
++ RSBAC_ACL_ACMAN_IPC_ENTRY;
++ struct rsbac_acl_entry_t sysadm_entry =
++ RSBAC_ACL_SYSADM_IPC_ENTRY;
++ struct rsbac_acl_entry_t gen_entry =
++ RSBAC_ACL_GENERAL_IPC_ENTRY;
++
++ rsbac_printk(KERN_WARNING "rsbac_init_acl(): IPC default ACL empty on dev %02u:%02u, generating standard ACL!\n",
++ RSBAC_MAJOR(rsbac_root_dev),
++ RSBAC_MINOR(rsbac_root_dev));
++ desc.subj_type = acman_entry.subj_type;
++ desc.subj_id = acman_entry.subj_id;
++ rsbac_list_add(default_ipc_handle, &desc,
++ &acman_entry.rights);
++ desc.subj_type = sysadm_entry.subj_type;
++ desc.subj_id = sysadm_entry.subj_id;
++ rsbac_list_add(default_ipc_handle, &desc,
++ &sysadm_entry.rights);
++ desc.subj_type = gen_entry.subj_type;
++ desc.subj_id = gen_entry.subj_id;
++ rsbac_list_add(default_ipc_handle, &desc,
++ &gen_entry.rights);
++ }
++ if (!rsbac_list_count(default_scd_handle)) {
++ struct rsbac_acl_entry_desc_t desc;
++ struct rsbac_acl_entry_t acman_entry =
++ RSBAC_ACL_ACMAN_SCD_ENTRY;
++
++ rsbac_printk(KERN_WARNING "rsbac_init_acl(): SCD default ACL empty on dev %02u:%02u, generating standard ACL!\n",
++ RSBAC_MAJOR(rsbac_root_dev),
++ RSBAC_MINOR(rsbac_root_dev));
++ desc.subj_type = acman_entry.subj_type;
++ desc.subj_id = acman_entry.subj_id;
++ rsbac_list_add(default_scd_handle, &desc,
++ &acman_entry.rights);
++ }
++ if (!rsbac_list_lol_count(scd_handle)) {
++ struct rsbac_acl_entry_desc_t desc;
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_SCD_MASK;
++ struct rsbac_acl_entry_t gen_entry =
++ RSBAC_ACL_GENERAL_SCD_ENTRY;
++#ifdef CONFIG_RSBAC_USER_MOD_IOPERM
++ struct rsbac_acl_entry_t gen_ioports_entry =
++ RSBAC_ACL_GENERAL_SCD_IOPORTS_ENTRY;
++#endif
++ struct rsbac_acl_entry_t gen_other_entry =
++ RSBAC_ACL_GENERAL_SCD_OTHER_ENTRY;
++ struct rsbac_acl_entry_t gen_network_entry =
++ RSBAC_ACL_GENERAL_SCD_NETWORK_ENTRY;
++ struct rsbac_acl_entry_t sysadm_entry =
++ RSBAC_ACL_SYSADM_SCD_ENTRY;
++ struct rsbac_acl_entry_t sysadm_other_entry =
++ RSBAC_ACL_SYSADM_SCD_OTHER_ENTRY;
++#ifdef CONFIG_RSBAC_USER_MOD_IOPERM
++ struct rsbac_acl_entry_t sysadm_kmem_entry =
++ RSBAC_ACL_SYSADM_SCD_KMEM_ENTRY;
++#endif
++ struct rsbac_acl_entry_t acman_other_entry =
++ RSBAC_ACL_ACMAN_SCD_OTHER_ENTRY;
++ struct rsbac_acl_entry_t auditor_rsbaclog_entry =
++ RSBAC_ACL_AUDITOR_SCD_RSBACLOG_ENTRY;
++ __u8 scd;
++
++ rsbac_printk(KERN_WARNING "rsbac_init_acl(): SCD ACLs empty on dev %02u:%02u, generating standard entries!\n",
++ RSBAC_MAJOR(rsbac_root_dev),
++ RSBAC_MINOR(rsbac_root_dev));
++ scd = ST_rlimit;
++ if (!rsbac_list_lol_add(scd_handle, &scd, &mask)) {
++ desc.subj_type = gen_entry.subj_type;
++ desc.subj_id = gen_entry.subj_id;
++ rsbac_list_lol_subadd(scd_handle, &scd, &desc,
++ &gen_entry.rights);
++ }
++ for (scd = ST_time_strucs; scd <= ST_rsbac; scd++) {
++ if (!rsbac_list_lol_add(scd_handle, &scd, &mask)) {
++ desc.subj_type = sysadm_entry.subj_type;
++ desc.subj_id = sysadm_entry.subj_id;
++ rsbac_list_lol_subadd(scd_handle, &scd,
++ &desc,
++ &sysadm_entry.
++ rights);
++ }
++ }
++ scd = ST_rsbac_log;
++ if (!rsbac_list_lol_add(scd_handle, &scd, &mask)) {
++ desc.subj_type = auditor_rsbaclog_entry.subj_type;
++ desc.subj_id = auditor_rsbaclog_entry.subj_id;
++ rsbac_list_lol_subadd(scd_handle, &scd, &desc,
++ &auditor_rsbaclog_entry.
++ rights);
++ }
++ scd = ST_network;
++ if (!rsbac_list_lol_add(scd_handle, &scd, &mask)) {
++ desc.subj_type = sysadm_entry.subj_type;
++ desc.subj_id = sysadm_entry.subj_id;
++ rsbac_list_lol_subadd(scd_handle, &scd, &desc,
++ &sysadm_entry.rights);
++ desc.subj_type = gen_network_entry.subj_type;
++ desc.subj_id = gen_network_entry.subj_id;
++ rsbac_list_lol_subadd(scd_handle, &scd, &desc,
++ &gen_network_entry.rights);
++ }
++ scd = ST_firewall;
++ if (!rsbac_list_lol_add(scd_handle, &scd, &mask)) {
++ desc.subj_type = sysadm_entry.subj_type;
++ desc.subj_id = sysadm_entry.subj_id;
++ rsbac_list_lol_subadd(scd_handle, &scd, &desc,
++ &sysadm_entry.rights);
++ desc.subj_type = gen_network_entry.subj_type;
++ desc.subj_id = gen_network_entry.subj_id;
++ rsbac_list_lol_subadd(scd_handle, &scd, &desc,
++ &gen_network_entry.rights);
++ }
++ scd = ST_priority;
++ if (!rsbac_list_lol_add(scd_handle, &scd, &mask)) {
++ desc.subj_type = sysadm_entry.subj_type;
++ desc.subj_id = sysadm_entry.subj_id;
++ rsbac_list_lol_subadd(scd_handle, &scd, &desc,
++ &sysadm_entry.rights);
++ }
++ scd = ST_sysfs;
++ if (!rsbac_list_lol_add(scd_handle, &scd, &mask)) {
++ desc.subj_type = sysadm_entry.subj_type;
++ desc.subj_id = sysadm_entry.subj_id;
++ rsbac_list_lol_subadd(scd_handle, &scd, &desc,
++ &sysadm_entry.rights);
++ }
++ for (scd = ST_quota; scd < ST_none; scd++)
++ if (!rsbac_list_lol_add(scd_handle, &scd, &mask)) {
++ desc.subj_type = sysadm_entry.subj_type;
++ desc.subj_id = sysadm_entry.subj_id;
++ rsbac_list_lol_subadd(scd_handle, &scd,
++ &desc,
++ &sysadm_entry.
++ rights);
++ }
++#ifdef CONFIG_RSBAC_USER_MOD_IOPERM
++ scd = ST_ioports;
++ if (!rsbac_list_lol_add(scd_handle, &scd, &mask)) {
++ desc.subj_type = gen_ioports_entry.subj_type;
++ desc.subj_id = gen_ioports_entry.subj_id;
++ rsbac_list_lol_subadd(scd_handle, &scd, &desc,
++ &gen_ioports_entry.rights);
++ }
++ scd = ST_kmem;
++ if (!rsbac_list_lol_add(scd_handle, &scd, &mask)) {
++ desc.subj_type = sysadm_kmem_entry.subj_type;
++ desc.subj_id = sysadm_kmem_entry.subj_id;
++ rsbac_list_lol_subadd(scd_handle, &scd, &desc,
++ &sysadm_kmem_entry.rights);
++ }
++#endif
++
++ scd = ST_other;
++ if (!rsbac_list_lol_add(scd_handle, &scd, &mask)) {
++ desc.subj_type = sysadm_other_entry.subj_type;
++ desc.subj_id = sysadm_other_entry.subj_id;
++ rsbac_list_lol_subadd(scd_handle, &scd, &desc,
++ &sysadm_other_entry.rights);
++ desc.subj_type = acman_other_entry.subj_type;
++ desc.subj_id = acman_other_entry.subj_id;
++ rsbac_list_lol_subadd(scd_handle, &scd, &desc,
++ &acman_other_entry.rights);
++ desc.subj_type = gen_other_entry.subj_type;
++ desc.subj_id = gen_other_entry.subj_id;
++ rsbac_list_lol_subadd(scd_handle, &scd, &desc,
++ &gen_other_entry.rights);
++ }
++ }
++}
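
The default-ACL generation above only runs while the persistent lists are still empty on the root device, so locally administered entries are never overwritten. A minimal stand-alone sketch of that seed-if-empty policy follows; struct simple_list and seed_defaults() are invented stand-ins, not part of the RSBAC API shown in the patch.

#include <stdio.h>
#include <stddef.h>

/* invented stand-in for one RSBAC generic list */
struct simple_list {
	size_t count;
};

/* seed standard entries only while the list is still empty,
 * mirroring what acl_create_def()/acl_create_def2() do above */
static void seed_defaults(struct simple_list *l, const char *name)
{
	if (l->count != 0) {
		printf("%s: already populated (%zu entries), left untouched\n",
		       name, l->count);
		return;
	}
	printf("%s: empty, generating standard ACL!\n", name);
	l->count = 3;	/* e.g. sysadm, acman and general entries */
}

int main(void)
{
	struct simple_list fresh = { .count = 0 };
	struct simple_list admin = { .count = 7 };

	seed_defaults(&fresh, "default_scd");	/* gets standard entries */
	seed_defaults(&admin, "scd");		/* kept as administered  */
	return 0;
}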
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++void acl_create_def2(void)
++#else
++void __init acl_create_def2(void)
++#endif
++{
++ if (!rsbac_list_count(default_u_handle)) {
++ struct rsbac_acl_entry_desc_t desc;
++ struct rsbac_acl_entry_t acman_entry =
++ RSBAC_ACL_ACMAN_U_ENTRY;
++ struct rsbac_acl_entry_t sysadm_entry =
++ RSBAC_ACL_SYSADM_U_ENTRY;
++ struct rsbac_acl_entry_t gen_entry =
++ RSBAC_ACL_GENERAL_U_ENTRY;
++
++ rsbac_printk(KERN_WARNING "rsbac_init_acl(): User default ACL empty on dev %02u:%02u, generating standard ACL!\n",
++ RSBAC_MAJOR(rsbac_root_dev),
++ RSBAC_MINOR(rsbac_root_dev));
++ desc.subj_type = sysadm_entry.subj_type;
++ desc.subj_id = sysadm_entry.subj_id;
++ rsbac_list_add(default_u_handle, &desc,
++ &sysadm_entry.rights);
++ desc.subj_type = acman_entry.subj_type;
++ desc.subj_id = acman_entry.subj_id;
++ rsbac_list_add(default_u_handle, &desc,
++ &acman_entry.rights);
++ desc.subj_type = gen_entry.subj_type;
++ desc.subj_id = gen_entry.subj_id;
++ rsbac_list_add(default_u_handle, &desc, &gen_entry.rights);
++ }
++ if (!rsbac_list_count(default_p_handle)) {
++ struct rsbac_acl_entry_desc_t desc;
++ struct rsbac_acl_entry_t acman_entry =
++ RSBAC_ACL_ACMAN_P_ENTRY;
++ struct rsbac_acl_entry_t sysadm_entry =
++ RSBAC_ACL_SYSADM_P_ENTRY;
++ struct rsbac_acl_entry_t gen_entry =
++ RSBAC_ACL_GENERAL_P_ENTRY;
++
++ rsbac_printk(KERN_WARNING "rsbac_init_acl(): Process default ACL empty on dev %02u:%02u, generating standard ACL!\n",
++ RSBAC_MAJOR(rsbac_root_dev),
++ RSBAC_MINOR(rsbac_root_dev));
++ desc.subj_type = sysadm_entry.subj_type;
++ desc.subj_id = sysadm_entry.subj_id;
++ rsbac_list_add(default_p_handle, &desc,
++ &sysadm_entry.rights);
++ desc.subj_type = acman_entry.subj_type;
++ desc.subj_id = acman_entry.subj_id;
++ rsbac_list_add(default_p_handle, &desc,
++ &acman_entry.rights);
++ desc.subj_type = gen_entry.subj_type;
++ desc.subj_id = gen_entry.subj_id;
++ rsbac_list_add(default_p_handle, &desc, &gen_entry.rights);
++ }
++ if (!rsbac_list_lol_count(gm_handle)) {
++ rsbac_printk(KERN_WARNING "rsbac_init_acl(): Group membership list empty on dev %02u:%02u!\n",
++ RSBAC_MAJOR(rsbac_root_dev),
++ RSBAC_MINOR(rsbac_root_dev));
++ }
++ if (!rsbac_list_count(group_handle)) {
++ rsbac_printk(KERN_WARNING "rsbac_init_acl(): Group list empty on dev %02u:%02u!\n",
++ RSBAC_MAJOR(rsbac_root_dev),
++ RSBAC_MINOR(rsbac_root_dev));
++ } else {
++ rsbac_list_get_max_desc(group_handle, &group_last_new);
++ }
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ if (!rsbac_list_count(default_g_handle)) {
++ struct rsbac_acl_entry_desc_t desc;
++ struct rsbac_acl_entry_t acman_entry =
++ RSBAC_ACL_ACMAN_G_ENTRY;
++ struct rsbac_acl_entry_t sysadm_entry =
++ RSBAC_ACL_SYSADM_G_ENTRY;
++ struct rsbac_acl_entry_t gen_entry =
++ RSBAC_ACL_GENERAL_G_ENTRY;
++
++ rsbac_printk(KERN_WARNING "rsbac_init_acl(): Linux group default ACL empty on dev %02u:%02u, generating standard ACL!\n",
++ RSBAC_MAJOR(rsbac_root_dev),
++ RSBAC_MINOR(rsbac_root_dev));
++ desc.subj_type = sysadm_entry.subj_type;
++ desc.subj_id = sysadm_entry.subj_id;
++ rsbac_list_add(default_g_handle, &desc,
++ &sysadm_entry.rights);
++ desc.subj_type = acman_entry.subj_type;
++ desc.subj_id = acman_entry.subj_id;
++ rsbac_list_add(default_g_handle, &desc,
++ &acman_entry.rights);
++ desc.subj_type = gen_entry.subj_type;
++ desc.subj_id = gen_entry.subj_id;
++ rsbac_list_add(default_g_handle, &desc, &gen_entry.rights);
++ }
++#endif
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ if (!rsbac_list_count(default_netdev_handle)) {
++ struct rsbac_acl_entry_desc_t desc;
++ struct rsbac_acl_entry_t acman_entry =
++ RSBAC_ACL_ACMAN_NETDEV_ENTRY;
++ struct rsbac_acl_entry_t sysadm_entry =
++ RSBAC_ACL_SYSADM_NETDEV_ENTRY;
++ struct rsbac_acl_entry_t gen_entry =
++ RSBAC_ACL_GENERAL_NETDEV_ENTRY;
++
++ rsbac_printk(KERN_WARNING "rsbac_init_acl(): Network Device default ACL empty on dev %02u:%02u, generating standard ACL!\n",
++ RSBAC_MAJOR(rsbac_root_dev),
++ RSBAC_MINOR(rsbac_root_dev));
++ desc.subj_type = acman_entry.subj_type;
++ desc.subj_id = acman_entry.subj_id;
++ rsbac_list_add(default_netdev_handle, &desc,
++ &acman_entry.rights);
++ desc.subj_type = sysadm_entry.subj_type;
++ desc.subj_id = sysadm_entry.subj_id;
++ rsbac_list_add(default_netdev_handle, &desc,
++ &sysadm_entry.rights);
++ desc.subj_type = gen_entry.subj_type;
++ desc.subj_id = gen_entry.subj_id;
++ rsbac_list_add(default_netdev_handle, &desc,
++ &gen_entry.rights);
++ }
++#endif
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ if (!rsbac_no_defaults
++ && !rsbac_list_count(default_nettemp_nt_handle)) {
++ struct rsbac_acl_entry_desc_t desc;
++ struct rsbac_acl_entry_t acman_entry =
++ RSBAC_ACL_ACMAN_NETTEMP_NT_ENTRY;
++ struct rsbac_acl_entry_t sysadm_entry =
++ RSBAC_ACL_SYSADM_NETTEMP_NT_ENTRY;
++ struct rsbac_acl_entry_t gen_entry =
++ RSBAC_ACL_GENERAL_NETTEMP_NT_ENTRY;
++
++ rsbac_printk(KERN_WARNING "rsbac_init_acl(): Network Template NT (template protection) default ACL empty on dev %02u:%02u, generating standard ACL!\n",
++ RSBAC_MAJOR(rsbac_root_dev),
++ RSBAC_MINOR(rsbac_root_dev));
++ desc.subj_type = acman_entry.subj_type;
++ desc.subj_id = acman_entry.subj_id;
++ rsbac_list_add(default_nettemp_nt_handle, &desc,
++ &acman_entry.rights);
++ desc.subj_type = sysadm_entry.subj_type;
++ desc.subj_id = sysadm_entry.subj_id;
++ rsbac_list_add(default_nettemp_nt_handle, &desc,
++ &sysadm_entry.rights);
++ desc.subj_type = gen_entry.subj_type;
++ desc.subj_id = gen_entry.subj_id;
++ rsbac_list_add(default_nettemp_nt_handle, &desc,
++ &gen_entry.rights);
++ }
++ if (!rsbac_list_count(default_netobj_handle)) {
++ struct rsbac_acl_entry_desc_t desc;
++ struct rsbac_acl_entry_t acman_entry =
++ RSBAC_ACL_ACMAN_NETOBJ_ENTRY;
++ struct rsbac_acl_entry_t sysadm_entry =
++ RSBAC_ACL_SYSADM_NETOBJ_ENTRY;
++ struct rsbac_acl_entry_t gen_entry =
++ RSBAC_ACL_GENERAL_NETOBJ_ENTRY;
++
++ rsbac_printk(KERN_WARNING "rsbac_init_acl(): Network Object default ACL empty on dev %02u:%02u, generating standard ACL!\n",
++ RSBAC_MAJOR(rsbac_root_dev),
++ RSBAC_MINOR(rsbac_root_dev));
++ desc.subj_type = acman_entry.subj_type;
++ desc.subj_id = acman_entry.subj_id;
++ rsbac_list_add(default_netobj_handle, &desc,
++ &acman_entry.rights);
++ desc.subj_type = sysadm_entry.subj_type;
++ desc.subj_id = sysadm_entry.subj_id;
++ rsbac_list_add(default_netobj_handle, &desc,
++ &sysadm_entry.rights);
++ desc.subj_type = gen_entry.subj_type;
++ desc.subj_id = gen_entry.subj_id;
++ rsbac_list_add(default_netobj_handle, &desc,
++ &gen_entry.rights);
++ }
++#endif
++}
++
++/* Because there can be no access to aci data structures before init, */
++/* rsbac_init_acl() will initialize all rw-spinlocks to unlocked. */
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++int rsbac_init_acl(void)
++#else
++int __init rsbac_init_acl(void)
++#endif
++{
++ int err = 0;
++ struct rsbac_acl_device_list_item_t *device_p = NULL;
++ char tmp[80];
++ struct rsbac_list_lol_info_t lol_info;
++ struct rsbac_list_info_t list_info;
++ rsbac_acl_rights_vector_t def_mask;
++
++ if (rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_init_acl(): RSBAC already initialized\n");
++ return -RSBAC_EREINIT;
++ }
++
++ /* set rw-spinlocks to unlocked status and init data structures */
++ rsbac_printk(KERN_INFO "rsbac_init_acl(): Initializing RSBAC: ACL subsystem\n");
++
++ acl_device_item_slab = rsbac_slab_create("rsbac_acl_device_item",
++ sizeof(struct rsbac_acl_device_list_item_t));
++
++ /* Init device list */
++ device_list_head_p = kmalloc(sizeof(*device_list_head_p), GFP_KERNEL);
++ if (!device_list_head_p) {
++ rsbac_printk(KERN_WARNING
++ "rsbac_init_acl(): Failed to allocate device_list_head\n");
++ return -ENOMEM;
++ }
++ spin_lock_init(&device_list_lock);
++ init_srcu_struct(&device_list_srcu);
++ lockdep_set_class(&device_list_lock, &device_list_lock_class);
++ device_list_head_p->head = NULL;
++ device_list_head_p->tail = NULL;
++ device_list_head_p->curr = NULL;
++ device_list_head_p->count = 0;
++
++ /* register ACL lists */
++ rsbac_pr_debug(ds_acl, "Registering lists\n");
++ device_p = create_device_item(rsbac_root_dev);
++ if (!device_p) {
++ rsbac_printk(KERN_CRIT
++ "rsbac_init_acl(): Could not create device!\n");
++ return -RSBAC_ECOULDNOTADDDEVICE;
++ }
++ if ((err = acl_register_fd_lists(device_p, rsbac_root_dev))) {
++ rsbac_printk(KERN_WARNING "rsbac_init_acl(): File/Dir ACL registration failed for dev %02u:%02u, err %s!\n",
++ RSBAC_MAJOR(rsbac_root_dev),
++ RSBAC_MINOR(rsbac_root_dev),
++ get_error_name(tmp, err));
++ }
++ device_p = add_device_item(device_p);
++ if (!device_p) {
++ rsbac_printk(KERN_CRIT
++ "rsbac_init_acl(): Could not add device!\n");
++ return -RSBAC_ECOULDNOTADDDEVICE;
++ }
++
++ list_info.version = RSBAC_ACL_DEF_FD_LIST_VERSION;
++ list_info.key = RSBAC_ACL_LIST_KEY;
++ list_info.desc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ list_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ list_info.max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &default_fd_handle, &list_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST,
++ entry_compare,
++ def_fd_get_conv,
++ NULL,
++ RSBAC_ACL_DEF_FD_FILENAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "default fd");
++ }
++
++ lol_info.version = RSBAC_ACL_DEV_LIST_VERSION;
++ lol_info.key = RSBAC_ACL_LIST_KEY;
++ lol_info.desc_size = sizeof(struct rsbac_dev_desc_t);
++ lol_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* mask */
++ lol_info.subdesc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ lol_info.subdata_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ lol_info.max_age = 0;
++ def_mask = RSBAC_ACL_DEFAULT_DEV_MASK;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &dev_handle, &lol_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ dev_compare,
++ entry_compare, dev_get_conv,
++ dev_get_subconv, &def_mask, NULL,
++ RSBAC_ACL_DEV_FILENAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_dev,
++ NULL);
++ if (err) {
++ registration_error(err, "dev");
++ }
++ lol_info.version = RSBAC_ACL_DEV_LIST_VERSION;
++ lol_info.key = RSBAC_ACL_LIST_KEY;
++ lol_info.desc_size = sizeof(struct rsbac_dev_desc_t);
++ lol_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* mask */
++ lol_info.subdesc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ lol_info.subdata_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ lol_info.max_age = 0;
++ def_mask = RSBAC_ACL_DEFAULT_DEV_MASK;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &dev_major_handle, &lol_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ dev_major_compare, entry_compare,
++ dev_get_conv, dev_get_subconv,
++ &def_mask, NULL,
++ RSBAC_ACL_DEV_MAJOR_FILENAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_dev,
++ NULL);
++ if (err) {
++ registration_error(err, "dev major");
++ }
++ list_info.version = RSBAC_ACL_DEF_DEV_LIST_VERSION;
++ list_info.key = RSBAC_ACL_LIST_KEY;
++ list_info.desc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ list_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ list_info.max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &default_dev_handle, &list_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST,
++ entry_compare,
++ def_dev_get_conv,
++ NULL,
++ RSBAC_ACL_DEF_DEV_FILENAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "default dev");
++ }
++
++ list_info.version = RSBAC_ACL_DEF_IPC_LIST_VERSION;
++ list_info.key = RSBAC_ACL_LIST_KEY;
++ list_info.desc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ list_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ list_info.max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &default_ipc_handle, &list_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST,
++ entry_compare,
++ def_ipc_get_conv,
++ NULL,
++ RSBAC_ACL_DEF_IPC_FILENAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "default ipc");
++ }
++
++ lol_info.version = RSBAC_ACL_SCD_LIST_VERSION;
++ lol_info.key = RSBAC_ACL_LIST_KEY;
++ lol_info.desc_size = sizeof(__u8);
++ lol_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* mask */
++ lol_info.subdesc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ lol_info.subdata_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ lol_info.max_age = 0;
++ def_mask = RSBAC_ACL_DEFAULT_SCD_MASK;
++ err = rsbac_list_lol_register(RSBAC_LIST_VERSION,
++ &scd_handle, &lol_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA, NULL,
++ entry_compare, scd_get_conv,
++ scd_get_subconv, &def_mask, NULL,
++ RSBAC_ACL_SCD_FILENAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "scd");
++ }
++
++ list_info.version = RSBAC_ACL_DEF_SCD_LIST_VERSION;
++ list_info.key = RSBAC_ACL_LIST_KEY;
++ list_info.desc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ list_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ list_info.max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &default_scd_handle, &list_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST,
++ entry_compare,
++ def_scd_get_conv,
++ NULL,
++ RSBAC_ACL_DEF_SCD_FILENAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "default scd");
++ }
++
++ lol_info.version = RSBAC_ACL_U_LIST_VERSION;
++ lol_info.key = RSBAC_ACL_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_uid_t);
++ lol_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* mask */
++ lol_info.subdesc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ lol_info.subdata_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ lol_info.max_age = 0;
++ def_mask = RSBAC_ACL_DEFAULT_U_MASK;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &u_handle, &lol_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ entry_compare,
++ u_get_conv,
++ u_get_subconv,
++ &def_mask,
++ NULL,
++ RSBAC_ACL_U_FILENAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_uid,
++ NULL);
++ if (err) {
++ registration_error(err, "user");
++ }
++ list_info.version = RSBAC_ACL_DEF_U_LIST_VERSION;
++ list_info.key = RSBAC_ACL_LIST_KEY;
++ list_info.desc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ list_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ list_info.max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &default_u_handle, &list_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST,
++ entry_compare,
++ def_u_get_conv,
++ NULL,
++ RSBAC_ACL_DEF_U_FILENAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "default user");
++ }
++
++ list_info.version = RSBAC_ACL_DEF_P_LIST_VERSION;
++ list_info.key = RSBAC_ACL_LIST_KEY;
++ list_info.desc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ list_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ list_info.max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &default_p_handle, &list_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST,
++ entry_compare,
++ def_p_get_conv,
++ NULL,
++ RSBAC_ACL_DEF_P_FILENAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "default process");
++ }
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ lol_info.version = RSBAC_ACL_G_LIST_VERSION;
++ lol_info.key = RSBAC_ACL_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_gid_t);
++ lol_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* mask */
++ lol_info.subdesc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ lol_info.subdata_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ lol_info.max_age = 0;
++ def_mask = RSBAC_ACL_DEFAULT_G_MASK;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &g_handle, &lol_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ entry_compare,
++ g_get_conv,
++ g_get_subconv,
++ &def_mask,
++ NULL,
++ RSBAC_ACL_G_FILENAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_gid,
++ NULL);
++ if (err) {
++ registration_error(err, "Linux group");
++ }
++ list_info.version = RSBAC_ACL_DEF_G_LIST_VERSION;
++ list_info.key = RSBAC_ACL_LIST_KEY;
++ list_info.desc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ list_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ list_info.max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &default_g_handle, &list_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST,
++ entry_compare,
++ def_g_get_conv,
++ NULL,
++ RSBAC_ACL_DEF_G_FILENAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "default Linux group");
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ lol_info.version = RSBAC_ACL_NETDEV_LIST_VERSION;
++ lol_info.key = RSBAC_ACL_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_netdev_id_t);
++ lol_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* mask */
++ lol_info.subdesc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ lol_info.subdata_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ lol_info.max_age = 0;
++ def_mask = RSBAC_ACL_DEFAULT_NETDEV_MASK;
++ err = rsbac_list_lol_register(RSBAC_LIST_VERSION,
++ &netdev_handle, &lol_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA, netdev_compare,
++ entry_compare, netdev_get_conv,
++ netdev_get_subconv, &def_mask, NULL,
++ RSBAC_ACL_NETDEV_FILENAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "netdev");
++ }
++ list_info.version = RSBAC_ACL_DEF_NETDEV_LIST_VERSION;
++ list_info.key = RSBAC_ACL_LIST_KEY;
++ list_info.desc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ list_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ list_info.max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &default_netdev_handle, &list_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST,
++ entry_compare,
++ def_netdev_get_conv,
++ NULL,
++ RSBAC_ACL_DEF_NETDEV_FILENAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "default netdev");
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ lol_info.version = RSBAC_ACL_NETTEMP_NT_LIST_VERSION;
++ lol_info.key = RSBAC_ACL_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_net_temp_id_t);
++ lol_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* mask */
++ lol_info.subdesc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ lol_info.subdata_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ lol_info.max_age = 0;
++ def_mask = RSBAC_ACL_DEFAULT_NETTEMP_MASK;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &nettemp_nt_handle, &lol_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ entry_compare,
++ nettemp_nt_get_conv,
++ nettemp_nt_get_subconv,
++ &def_mask,
++ NULL,
++ RSBAC_ACL_NETTEMP_NT_FILENAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_nettemp,
++ NULL);
++ if (err) {
++ registration_error(err, "nettemp_nt");
++ }
++ list_info.version = RSBAC_ACL_DEF_NETTEMP_NT_LIST_VERSION;
++ list_info.key = RSBAC_ACL_LIST_KEY;
++ list_info.desc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ list_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ list_info.max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &default_nettemp_nt_handle, &list_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST,
++ entry_compare,
++ def_nettemp_nt_get_conv,
++ NULL,
++ RSBAC_ACL_DEF_NETTEMP_NT_FILENAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "default nettemp_nt");
++ }
++ lol_info.version = RSBAC_ACL_NETTEMP_LIST_VERSION;
++ lol_info.key = RSBAC_ACL_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_net_temp_id_t);
++ lol_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* mask */
++ lol_info.subdesc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ lol_info.subdata_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ lol_info.max_age = 0;
++ def_mask = RSBAC_ACL_DEFAULT_NETOBJ_MASK;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &nettemp_handle, &lol_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ entry_compare,
++ nettemp_get_conv,
++ nettemp_get_subconv,
++ &def_mask,
++ NULL,
++ RSBAC_ACL_NETTEMP_FILENAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_nettemp,
++ NULL);
++ if (err) {
++ registration_error(err, "nettemp");
++ }
++ lol_info.version = RSBAC_ACL_NETOBJ_LIST_VERSION;
++ lol_info.key = RSBAC_ACL_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_net_obj_id_t);
++ lol_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* mask */
++ lol_info.subdesc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ lol_info.subdata_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ lol_info.max_age = 0;
++ def_mask = RSBAC_ACL_DEFAULT_NETOBJ_MASK;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &netobj_handle,
++ &lol_info,
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ entry_compare,
++ netobj_get_conv,
++ netobj_get_subconv,
++ &def_mask,
++ NULL,
++ RSBAC_ACL_NETOBJ_FILENAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_netobj,
++ NULL);
++ if (err) {
++ registration_error(err, "netobj");
++ }
++ list_info.version = RSBAC_ACL_DEF_NETOBJ_LIST_VERSION;
++ list_info.key = RSBAC_ACL_LIST_KEY;
++ list_info.desc_size = sizeof(struct rsbac_acl_entry_desc_t); /* subj_type + subj_id */
++ list_info.data_size = sizeof(rsbac_acl_rights_vector_t); /* rights */
++ list_info.max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &default_netobj_handle, &list_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST,
++ entry_compare,
++ def_netobj_get_conv,
++ NULL,
++ RSBAC_ACL_DEF_NETOBJ_FILENAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "default netobj");
++ }
++#endif /* NET_OBJ_PROT */
++
++ /* groups */
++ list_info.version = RSBAC_ACL_GROUP_VERSION;
++ list_info.key = RSBAC_ACL_LIST_KEY;
++ list_info.desc_size = sizeof(rsbac_acl_group_id_t);
++ list_info.data_size = sizeof(struct rsbac_acl_group_entry_t);
++ list_info.max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &group_handle, &list_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ NULL,
++ NULL,
++ RSBAC_ACL_GROUP_FILENAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ group_hash,
++ NULL);
++ if (err) {
++ registration_error(err, "group");
++ }
++
++ /* group memberships */
++ lol_info.version = RSBAC_ACL_GM_VERSION;
++ lol_info.key = RSBAC_ACL_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_uid_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_acl_group_id_t);
++ lol_info.subdata_size = 0;
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &gm_handle, &lol_info,
++#if defined(CONFIG_RSBAC_ACL_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ NULL,
++ gm_get_conv,
++ gm_get_subconv,
++ NULL, NULL, RSBAC_ACL_GM_FILENAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_uid,
++ NULL);
++ if (err) {
++ registration_error(err, "gm");
++ }
++
++/* Create default lists */
++ if (!rsbac_no_defaults) {
++ acl_create_def();
++ acl_create_def2();
++ }
++#if defined(CONFIG_RSBAC_PROC)
++ acl_devices = proc_create("acl_devices",
++ S_IFREG | S_IRUGO,
++ proc_rsbac_root_p, &acl_devices_proc_fops);
++ stats_acl = proc_create("stats_acl",
++ S_IFREG | S_IRUGO,
++ proc_rsbac_root_p, &stats_acl_proc_fops);
++ acl_acllist = proc_create("acl_acllist",
++ S_IFREG | S_IRUGO,
++ proc_rsbac_root_p, &acl_acllist_proc_fops);
++ acl_grouplist = proc_create("acl_grouplist",
++ S_IFREG | S_IRUGO,
++ proc_rsbac_root_p, &acl_grouplist_proc_fops);
++#endif
++
++ rsbac_pr_debug(ds_acl, "Ready.\n");
++ return err;
++}
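
rsbac_init_acl() repeats one pattern for every ACL list: fill an info structure (version, key, descriptor and data sizes, max_age), choose persistence/backup flags, register the list under a handle, and report any error. The sketch below models only that pattern in a self-contained form; struct acl_list_info and acl_register_list() are invented stand-ins for struct rsbac_list_info_t and rsbac_list_register()/rsbac_list_lol_register*() used above.

#include <stdio.h>
#include <stddef.h>

/* models struct rsbac_list_info_t from the patch above */
struct acl_list_info {
	unsigned int version;
	unsigned int key;
	size_t desc_size;	/* size of one descriptor (list key)   */
	size_t data_size;	/* size of one data item (rights mask) */
	unsigned int max_age;	/* 0 = entries never expire            */
};

/* invented stand-in for rsbac_list_register(); the real call also takes
 * flags (PERSIST, BACKUP, ...), compare/conversion functions, a file
 * name and a device, and fills in a list handle */
static int acl_register_list(const char *name,
			     const struct acl_list_info *info)
{
	printf("registering %s: desc=%zu data=%zu version=%u\n",
	       name, info->desc_size, info->data_size, info->version);
	return 0;	/* 0 = success, as in the kernel convention */
}

int main(void)
{
	struct acl_list_info info = {
		.version   = 1,
		.key       = 0x1234,			 /* arbitrary key    */
		.desc_size = sizeof(unsigned long long), /* entry descriptor */
		.data_size = sizeof(unsigned long long), /* rights vector    */
		.max_age   = 0,
	};
	int err;

	err = acl_register_list("default_fd", &info);
	if (err)
		fprintf(stderr, "registration failed: %d\n", err);
	return err;
}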
++
++int rsbac_mount_acl(kdev_t kdev)
++{
++ int err = 0;
++ struct rsbac_acl_device_list_item_t *device_p;
++ struct rsbac_acl_device_list_item_t *new_device_p;
++ int srcu_idx;
++
++ rsbac_pr_debug(ds_acl, "mounting device %02u:%02u\n",
++ RSBAC_MAJOR(kdev), RSBAC_MINOR(kdev));
++ /* wait for read access to device_list_head */
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = acl_lookup_device(kdev);
++ /* repeated mount? */
++ if (device_p) {
++ rsbac_printk(KERN_INFO "rsbac_mount_acl: repeated mount %u of device %02u:%02u\n",
++ device_p->mount_count, RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev));
++ device_p->mount_count++;
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return 0;
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ /* OK, go on */
++ new_device_p = create_device_item(kdev);
++ if (!new_device_p)
++ return -RSBAC_ECOULDNOTADDDEVICE;
++
++ if ((err = acl_register_fd_lists(new_device_p, kdev))) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_mount_acl(): File/Dir ACL registration failed for dev %02u:%02u, err %s!\n",
++ RSBAC_MAJOR(kdev), RSBAC_MINOR(kdev),
++ get_error_name(tmp, err));
++ rsbac_kfree(tmp);
++ }
++ }
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++	/* make sure to add only if this device item has not been added in the meantime */
++ device_p = acl_lookup_device(kdev);
++ if (device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_mount_acl(): mount race for device %02u:%02u detected!\n",
++ RSBAC_MAJOR(kdev), RSBAC_MINOR(kdev));
++ device_p->mount_count++;
++ /* also detaches lists */
++ clear_device_item(new_device_p);
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ } else {
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ device_p = add_device_item(new_device_p);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_mount_acl: adding device %02u:%02u failed!\n",
++ RSBAC_MAJOR(kdev), RSBAC_MINOR(kdev));
++ /* also detaches lists */
++ clear_device_item(new_device_p);
++ err = -RSBAC_ECOULDNOTADDDEVICE;
++ }
++ }
++
++ return err;
++}
++
++/* When umounting a device, its file/dir ACLs must be removed. */
++
++int rsbac_umount_acl(kdev_t kdev)
++{
++ struct rsbac_acl_device_list_item_t *device_p;
++
++ if (!rsbac_is_initialized()) {
++		rsbac_printk(KERN_WARNING "rsbac_umount_acl(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ rsbac_pr_debug(ds_acl, "umounting device %02u:%02u\n",
++ RSBAC_MAJOR(kdev), RSBAC_MINOR(kdev));
++ /* sync of attribute lists was done in rsbac_umount */
++ spin_lock(&device_list_lock);
++ /* OK, nobody else is working on it... */
++ device_p = acl_lookup_device(kdev);
++ if (device_p) {
++ if (device_p->mount_count == 1)
++ remove_device_item(kdev);
++ else {
++ if (device_p->mount_count > 1) {
++ device_p->mount_count--;
++ spin_unlock(&device_list_lock);
++ } else {
++ spin_unlock(&device_list_lock);
++ rsbac_printk(KERN_WARNING "rsbac_umount_acl: device %02u:%02u has mount_count < 1!\n",
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev));
++ }
++ }
++ }
++ else
++ spin_unlock(&device_list_lock);
++ return 0;
++}
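
rsbac_mount_acl() and rsbac_umount_acl() above keep a per-device mount_count: the first mount registers the file/dir ACL lists, repeated mounts only bump the counter, and only the last umount removes the device item. A minimal userspace model of that reference counting, with invented names in place of the RSBAC structures:

#include <stdio.h>

/* invented stand-in for struct rsbac_acl_device_list_item_t */
struct dev_item {
	unsigned int mount_count;
};

static void mount_dev(struct dev_item *d)
{
	if (d->mount_count == 0)
		printf("first mount: registering file/dir ACL lists\n");
	else
		printf("repeated mount %u\n", d->mount_count);
	d->mount_count++;
}

static void umount_dev(struct dev_item *d)
{
	if (d->mount_count == 1) {
		printf("last umount: removing device item\n");
		d->mount_count = 0;
	} else if (d->mount_count > 1) {
		d->mount_count--;
	} else {
		fprintf(stderr, "umount with mount_count < 1!\n");
	}
}

int main(void)
{
	struct dev_item d = { 0 };

	mount_dev(&d);		/* registers lists, count = 1 */
	mount_dev(&d);		/* repeated mount, count = 2  */
	umount_dev(&d);		/* count back to 1            */
	umount_dev(&d);		/* removes the device item    */
	return 0;
}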
++
++/***************************************************/
++/* We also need some status information... */
++
++int rsbac_stats_acl(void)
++{
++ struct rsbac_acl_device_list_item_t *device_p;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_stats_acl(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ rsbac_pr_debug(aef_acl, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ rsbac_printk(KERN_INFO "ACL Status\n-----------\n");
++
++ /* protect device list */
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = rcu_dereference(device_list_head_p)->head;
++ while (device_p) {
++ rsbac_printk(KERN_INFO "device %02u:%02u has %u file ACLs, sum of %u members\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ rsbac_list_lol_count(device_p->handle),
++ rsbac_list_lol_all_subcount(device_p->handle));
++ device_p = device_p->next;
++ }
++ /* unprotect device list */
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++
++ /* dev list */
++ rsbac_printk(KERN_INFO "%li device major ACL items, sum of %li members\n",
++ rsbac_list_lol_count(dev_major_handle),
++ rsbac_list_lol_all_subcount(dev_major_handle));
++ rsbac_printk(KERN_INFO "%li device ACL items, sum of %li members\n",
++ rsbac_list_lol_count(dev_handle),
++ rsbac_list_lol_all_subcount(dev_handle));
++
++ /* SCD list */
++ rsbac_printk(KERN_INFO "%li scd ACL items, sum of %li members\n",
++ rsbac_list_lol_count(scd_handle),
++ rsbac_list_lol_all_subcount(scd_handle));
++
++ /* user list */
++ rsbac_printk(KERN_INFO "%li user ACL items, sum of %li members\n",
++ rsbac_list_lol_count(u_handle),
++ rsbac_list_lol_all_subcount(u_handle));
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ /* Linux group list */
++ rsbac_printk(KERN_INFO "%li Linux group ACL items, sum of %li members\n",
++ rsbac_list_lol_count(g_handle),
++ rsbac_list_lol_all_subcount(g_handle));
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ /* netdev list */
++ rsbac_printk(KERN_INFO "%li network device ACL items, sum of %li members\n",
++ rsbac_list_lol_count(netdev_handle),
++ rsbac_list_lol_all_subcount(netdev_handle));
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ /* nettemp_nt list */
++ rsbac_printk(KERN_INFO "%li network template NT ACL items, sum of %li members\n",
++ rsbac_list_lol_count(nettemp_nt_handle),
++ rsbac_list_lol_all_subcount(nettemp_nt_handle));
++ /* nettemp list */
++ rsbac_printk(KERN_INFO "%li network template ACL items, sum of %li members\n",
++ rsbac_list_lol_count(nettemp_handle),
++ rsbac_list_lol_all_subcount(nettemp_handle));
++ /* netobj list */
++ rsbac_printk(KERN_INFO "%li network object ACL items, sum of %li members\n",
++ rsbac_list_lol_count(netobj_handle),
++ rsbac_list_lol_all_subcount(netobj_handle));
++#endif
++
++ rsbac_printk(KERN_INFO "%li groups, last new is %u\n",
++ rsbac_list_count(group_handle), group_last_new);
++
++ /* protect gm list */
++ rsbac_printk(KERN_INFO "%li group member items, sum of %li group memberships\n",
++ rsbac_list_lol_count(gm_handle),
++ rsbac_list_lol_all_subcount(gm_handle));
++
++ return 0;
++}
++
++/***************************************************/
++/* consistency checking (as far as possible) */
++
++int rsbac_check_acl(int correct)
++{
++ struct rsbac_acl_device_list_item_t *device_p;
++ u_long f_count = 0, f_sum = 0, tmp_count,
++ r_count, u_count, b_count, no_member_count;
++ long desc_count;
++ long sub_desc_count;
++ rsbac_inode_nr_t *fd_desc_p;
++ struct rsbac_dev_desc_t *dev_desc_p;
++ __u8 *scd_desc_p;
++ rsbac_uid_t *u_desc_p;
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ rsbac_gid_t *g_desc_p;
++#endif
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ rsbac_netdev_id_t *netdev_desc_p;
++#endif
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ rsbac_net_temp_id_t *nettemp_desc_p;
++ rsbac_net_obj_id_t *netobj_desc_p;
++#endif
++ struct rsbac_acl_entry_desc_t *sub_desc_p;
++ rsbac_uid_t *user_p;
++ rsbac_acl_group_id_t *group_p;
++ u_int i, j;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_check_acl(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++
++ /* group membership list */
++ tmp_count = 0;
++ desc_count =
++ rsbac_list_lol_get_all_desc(gm_handle, (void **) &user_p);
++ if (desc_count > 0) {
++ for (i = 0; i < desc_count; i++) {
++ sub_desc_count =
++ rsbac_list_lol_get_all_subdesc(gm_handle,
++ &user_p[i],
++ (void **)
++ &group_p);
++ if (sub_desc_count > 0) {
++ for (j = 0; j < sub_desc_count; j++) {
++ if (!rsbac_list_exist
++ (group_handle, &group_p[j])) {
++ rsbac_printk(KERN_WARNING "rsbac_check_acl(): removing user %u membership in non-existent group %u!\n",
++ user_p[i],
++ group_p[j]);
++ rsbac_list_lol_subremove
++ (gm_handle, &user_p[i],
++ &group_p[j]);
++ }
++ }
++ rsbac_kfree(group_p);
++ } else {
++ /* remove empty membership list */
++ if (!sub_desc_count)
++ rsbac_list_lol_remove(gm_handle,
++ &user_p[i]);
++ }
++ }
++ rsbac_kfree(user_p);
++ }
++ /* recalculated values! */
++ rsbac_printk(KERN_INFO "rsbac_check_acl(): %li group membership items\n",
++ rsbac_list_lol_count(gm_handle));
++
++ /* group list */
++ rsbac_printk(KERN_INFO "rsbac_check_acl(): %li group items\n",
++ rsbac_list_count(group_handle));
++
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++/* rsbac_printk(KERN_INFO "rsbac_check_acl(): currently %u processes working on file/dir aci\n",
++ device_list_head.lock.lock); */
++ device_p = rcu_dereference(device_list_head_p)->head;
++ while (device_p) { /* for all sublists */
++ f_count = 0;
++ r_count = 0;
++ u_count = 0;
++ b_count = 0;
++ no_member_count = 0;
++
++ tmp_count = 0;
++ desc_count = rsbac_list_lol_get_all_desc(device_p->handle,
++ (void **)
++ &fd_desc_p);
++ if (desc_count > 0) {
++ for (i = 0; i < desc_count; i++) {
++ /* check for group existence of all ACL entries for groups */
++ sub_desc_count =
++ rsbac_list_lol_get_all_subdesc
++ (device_p->handle,
++ &fd_desc_p[i],
++ (void **) &sub_desc_p);
++ if (sub_desc_count > 0) {
++ for (j = 0;
++ j < sub_desc_count;
++ j++) {
++ if ((sub_desc_p[j].
++ subj_type ==
++ ACLS_GROUP)
++ &&
++ sub_desc_p[j].
++ subj_id
++ &&
++ !rsbac_list_exist
++ (group_handle,
++ &sub_desc_p
++ [j].
++ subj_id)) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "fd_item for inode %u on device %02u:%02u has invalid group %u in ACL -> removing entry!\n",
++ fd_desc_p[i],
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (device_p->handle,
++ &fd_desc_p
++ [i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "fd_item for inode %u on device %02u:%02u has invalid group %u in ACL!\n",
++ fd_desc_p[i],
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ sub_desc_p[j].subj_id);
++ }
++#if defined(CONFIG_RSBAC_RC)
++ else if ((sub_desc_p[j].subj_type == ACLS_ROLE)
++ &&
++ (sub_desc_p
++ [j].
++ subj_id >
++ RC_role_max_value)
++ ) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "fd_item for inode %u on device %02u:%02u has invalid RC role %u in ACL -> removing entry!\n",
++ fd_desc_p[i],
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (device_p->handle,
++ &fd_desc_p
++ [i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "fd_item for inode %u on device %02u:%02u has invalid role %u in ACL!\n",
++ fd_desc_p[i],
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ sub_desc_p[j].subj_id);
++ }
++#endif
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++ }
++ tmp_count++;
++ rsbac_kfree(fd_desc_p);
++ f_count += desc_count;
++ }
++
++ switch (correct) {
++ case 2:
++ rsbac_printk(KERN_INFO "rsbac_check_acl(): Device %02u:%02u has %lu file/dir ACLs (%lu removed (%lu bad inodes, %lu dtimed inodes, %lu unlinked inodes, %lu had no members and default mask))\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id), f_count,
++ b_count + r_count + u_count +
++ no_member_count, b_count, r_count,
++ u_count, no_member_count);
++ break;
++ case 1:
++ rsbac_printk(KERN_INFO "rsbac_check_acl(): Device %02u:%02u has %lu file/dir ACLs (%lu removed (%lu bad inodes, %lu dtimed inodes, %lu had no members and default mask), %lu unlinked inodes)\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id), f_count,
++ b_count + r_count + no_member_count,
++ b_count, r_count, no_member_count,
++ u_count);
++ break;
++ default:
++ rsbac_printk(KERN_INFO "rsbac_check_acl(): Device %02u:%02u has %lu file/dir ACLs (%lu with bad inodes, %lu with dtimed inodes, %lu unlinked inodes, %lu without members and with default mask)\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id), f_count,
++ b_count, r_count, u_count,
++ no_member_count);
++ }
++ f_sum += f_count;
++ /* go on */
++ device_p = device_p->next;
++ }
++ rsbac_printk(KERN_INFO "rsbac_check_acl(): Sum of %u Devices with %lu file/dir ACLs\n",
++ rcu_dereference(device_list_head_p)->count, f_sum);
++ /* free access to device_list_head */
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++
++ /* dev list */
++ tmp_count = 0;
++ desc_count =
++ rsbac_list_lol_get_all_desc(dev_handle, (void **) &dev_desc_p);
++ if (desc_count > 0) {
++ for (i = 0; i < desc_count; i++) {
++ /* check for group existence of all ACL entries for groups */
++ sub_desc_count =
++ rsbac_list_lol_get_all_subdesc(dev_handle,
++ &dev_desc_p[i],
++ (void **)
++ &sub_desc_p);
++ if (sub_desc_count > 0) {
++ for (j = 0; j < sub_desc_count; j++) {
++ if ((sub_desc_p[j].subj_type ==
++ ACLS_GROUP)
++ && sub_desc_p[j].subj_id
++ &&
++ !rsbac_list_exist(group_handle,
++ &sub_desc_p
++ [j].
++ subj_id)) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "dev_item %c%02u:%02u, has invalid group %u in ACL -> removing entry!\n",
++ 'B' + dev_desc_p[i].type,
++ dev_desc_p[i].major,
++ dev_desc_p[i].minor,
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (dev_handle,
++ &dev_desc_p
++ [i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "dev_item %c%02u:%02u, has invalid group %u in ACL!\n",
++ 'B' + dev_desc_p[i].type,
++ dev_desc_p[i].major,
++ dev_desc_p[i].minor,
++ sub_desc_p[j].subj_id);
++ }
++#if defined(CONFIG_RSBAC_RC)
++ else if ((sub_desc_p[j].
++ subj_type == ACLS_ROLE)
++ && (sub_desc_p[j].
++ subj_id >
++ RC_role_max_value)
++ ) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "dev_item %c%02u:%02u, has invalid role %u in ACL -> removing entry!\n",
++ 'B' + dev_desc_p[i].type,
++ dev_desc_p[i].major,
++ dev_desc_p[i].minor,
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (dev_handle,
++ &dev_desc_p
++ [i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "dev_item %c%02u:%02u, has invalid role %u in ACL!\n",
++ 'B' + dev_desc_p[i].type,
++ dev_desc_p[i].major,
++ dev_desc_p[i].minor,
++ sub_desc_p[j].subj_id);
++ }
++#endif
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++ }
++ rsbac_kfree(dev_desc_p);
++ f_sum += desc_count;
++ }
++ rsbac_printk(KERN_INFO "rsbac_check_acl(): %li device items\n",
++ desc_count);
++ tmp_count = 0;
++ desc_count =
++ rsbac_list_lol_get_all_desc(dev_major_handle,
++ (void **) &dev_desc_p);
++ if (desc_count > 0) {
++ for (i = 0; i < desc_count; i++) {
++ /* check for group existence of all ACL entries for groups */
++ sub_desc_count =
++ rsbac_list_lol_get_all_subdesc
++ (dev_major_handle, &dev_desc_p[i],
++ (void **) &sub_desc_p);
++ if (sub_desc_count > 0) {
++ for (j = 0; j < sub_desc_count; j++) {
++ if ((sub_desc_p[j].subj_type ==
++ ACLS_GROUP)
++ && sub_desc_p[j].subj_id
++ &&
++ !rsbac_list_exist(group_handle,
++ &sub_desc_p
++ [j].
++ subj_id)) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "dev_item %c%02u:%02u, has invalid group %u in ACL -> removing entry!\n",
++ 'B' + dev_desc_p[i].type,
++ dev_desc_p[i].major,
++ dev_desc_p[i].minor,
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (dev_major_handle,
++ &dev_desc_p
++ [i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "dev_item %c%02u:%02u, has invalid group %u in ACL!\n",
++ 'B' + dev_desc_p[i].type,
++ dev_desc_p[i].major,
++ dev_desc_p[i].minor,
++ sub_desc_p[j].subj_id);
++ }
++#if defined(CONFIG_RSBAC_RC)
++ else if ((sub_desc_p[j].
++ subj_type == ACLS_ROLE)
++ && (sub_desc_p[j].
++ subj_id >
++ RC_role_max_value)
++ ) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "dev_item %c%02u:%02u, has invalid role %u in ACL -> removing entry!\n",
++ 'B' + dev_desc_p[i].type,
++ dev_desc_p[i].major,
++ dev_desc_p[i].minor,
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (dev_major_handle,
++ &dev_desc_p
++ [i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "dev_item %c%02u:%02u, has invalid role %u in ACL!\n",
++ 'B' + dev_desc_p[i].type,
++ dev_desc_p[i].major,
++ dev_desc_p[i].minor,
++ sub_desc_p[j].subj_id);
++ }
++#endif
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++ }
++ rsbac_kfree(dev_desc_p);
++ f_sum += desc_count;
++ }
++	rsbac_printk(KERN_INFO "rsbac_check_acl(): %li device major items\n",
++ desc_count);
++
++ /* SCD list */
++ tmp_count = 0;
++ desc_count =
++ rsbac_list_lol_get_all_desc(scd_handle, (void **) &scd_desc_p);
++ if (desc_count > 0) {
++ for (i = 0; i < desc_count; i++) {
++ /* check for group existence of all ACL entries for groups */
++ sub_desc_count =
++ rsbac_list_lol_get_all_subdesc(scd_handle,
++ &scd_desc_p[i],
++ (void **)
++ &sub_desc_p);
++ if (sub_desc_count > 0) {
++ for (j = 0; j < sub_desc_count; j++) {
++ if ((sub_desc_p[j].subj_type ==
++ ACLS_GROUP)
++ && sub_desc_p[j].subj_id
++ &&
++ !rsbac_list_exist(group_handle,
++ &sub_desc_p
++ [j].
++ subj_id)) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "scd_item %u has invalid group %u in ACL -> removing entry!\n",
++ scd_desc_p[i],
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (scd_handle,
++ &scd_desc_p
++ [i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "scd_item %u has invalid group %u in ACL!\n",
++ scd_desc_p[i],
++ sub_desc_p[j].subj_id);
++ }
++#if defined(CONFIG_RSBAC_RC)
++ else if ((sub_desc_p[j].
++ subj_type == ACLS_ROLE)
++ && (sub_desc_p[j].
++ subj_id >
++ RC_role_max_value)
++ ) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "scd_item %u has invalid role %u in ACL -> removing entry!\n",
++ scd_desc_p[i],
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (scd_handle,
++ &scd_desc_p
++ [i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "scd_item %u has invalid role %u in ACL!\n",
++ scd_desc_p[i],
++ sub_desc_p[j].subj_id);
++ }
++#endif
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++ }
++ rsbac_kfree(scd_desc_p);
++ f_sum += desc_count;
++ }
++ rsbac_printk(KERN_INFO "rsbac_check_acl(): %li SCD items\n",
++ desc_count);
++
++ /* User list */
++ tmp_count = 0;
++ desc_count =
++ rsbac_list_lol_get_all_desc(u_handle, (void **) &u_desc_p);
++ if (desc_count > 0) {
++ for (i = 0; i < desc_count; i++) {
++ /* check for group existence of all ACL entries for groups */
++ sub_desc_count =
++ rsbac_list_lol_get_all_subdesc(u_handle,
++ &u_desc_p[i],
++ (void **)
++ &sub_desc_p);
++ if (sub_desc_count > 0) {
++ for (j = 0; j < sub_desc_count; j++) {
++ if ((sub_desc_p[j].subj_type ==
++ ACLS_GROUP)
++ && sub_desc_p[j].subj_id
++ &&
++ !rsbac_list_exist(group_handle,
++ &sub_desc_p
++ [j].
++ subj_id)) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "u_item %u has invalid group %u in ACL -> removing entry!\n",
++ u_desc_p[i],
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (u_handle,
++ &u_desc_p[i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "u_item %u has invalid group %u in ACL!\n",
++ u_desc_p[i],
++ sub_desc_p[j].subj_id);
++ }
++#if defined(CONFIG_RSBAC_RC)
++ else if ((sub_desc_p[j].
++ subj_type == ACLS_ROLE)
++ && (sub_desc_p[j].
++ subj_id >
++ RC_role_max_value)
++ ) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "u_item %u has invalid role %u in ACL -> removing entry!\n",
++ u_desc_p[i],
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (u_handle,
++ &u_desc_p[i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "u_item %u has invalid role %u in ACL!\n",
++ u_desc_p[i],
++ sub_desc_p[j].subj_id);
++ }
++#endif
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++ }
++ rsbac_kfree(u_desc_p);
++ f_sum += desc_count;
++ }
++ rsbac_printk(KERN_INFO "rsbac_check_acl(): %li user items\n",
++ desc_count);
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++	/* Linux group list */
++ tmp_count = 0;
++ desc_count =
++ rsbac_list_lol_get_all_desc(g_handle, (void **) &g_desc_p);
++ if (desc_count > 0) {
++ for (i = 0; i < desc_count; i++) {
++ /* check for group existence of all ACL entries for groups */
++ sub_desc_count =
++ rsbac_list_lol_get_all_subdesc(g_handle,
++ &g_desc_p[i],
++ (void **)
++ &sub_desc_p);
++ if (sub_desc_count > 0) {
++ for (j = 0; j < sub_desc_count; j++) {
++ if ((sub_desc_p[j].subj_type ==
++ ACLS_GROUP)
++ && sub_desc_p[j].subj_id
++ &&
++ !rsbac_list_exist(group_handle,
++ &sub_desc_p
++ [j].
++ subj_id)) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "g_item %u has invalid group %u in ACL -> removing entry!\n",
++ g_desc_p[i],
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (g_handle,
++ &g_desc_p[i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "g_item %u has invalid group %u in ACL!\n",
++ g_desc_p[i],
++ sub_desc_p[j].subj_id);
++ }
++#if defined(CONFIG_RSBAC_RC)
++ else if ((sub_desc_p[j].
++ subj_type == ACLS_ROLE)
++ && (sub_desc_p[j].
++ subj_id >
++ RC_role_max_value)
++ ) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "g_item %u has invalid role %u in ACL -> removing entry!\n",
++ g_desc_p[i],
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (g_handle,
++ &g_desc_p[i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "g_item %u has invalid role %u in ACL!\n",
++ g_desc_p[i],
++ sub_desc_p[j].subj_id);
++ }
++#endif
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++ }
++ rsbac_kfree(g_desc_p);
++ f_sum += desc_count;
++ }
++ rsbac_printk(KERN_INFO "rsbac_check_acl(): %li Linux group items\n",
++ desc_count);
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ /* netdev list */
++ tmp_count = 0;
++ desc_count =
++ rsbac_list_lol_get_all_desc(netdev_handle,
++ (void **) &netdev_desc_p);
++ if (desc_count > 0) {
++ for (i = 0; i < desc_count; i++) {
++ /* check for group existence of all ACL entries for groups */
++ sub_desc_count =
++ rsbac_list_lol_get_all_subdesc(netdev_handle,
++ &netdev_desc_p
++ [i],
++ (void **)
++ &sub_desc_p);
++ if (sub_desc_count > 0) {
++ for (j = 0; j < sub_desc_count; j++) {
++ if ((sub_desc_p[j].subj_type ==
++ ACLS_GROUP)
++ && sub_desc_p[j].subj_id
++ &&
++ !rsbac_list_exist(group_handle,
++ &sub_desc_p
++ [j].
++ subj_id)) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "netdev_item %s has invalid group %u in ACL -> removing entry!\n",
++ netdev_desc_p[i],
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (netdev_handle,
++ &netdev_desc_p
++ [i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "netdev_item %s has invalid group %u in ACL!\n",
++ netdev_desc_p[i],
++ sub_desc_p[j].subj_id);
++ }
++#if defined(CONFIG_RSBAC_RC)
++ else if ((sub_desc_p[j].
++ subj_type == ACLS_ROLE)
++ && (sub_desc_p[j].
++ subj_id >
++ RC_role_max_value)
++ ) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "netdev_item %s has invalid role %u in ACL -> removing entry!\n",
++ netdev_desc_p[i],
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (netdev_handle,
++ &netdev_desc_p
++ [i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "netdev_item %s has invalid role %u in ACL!\n",
++ netdev_desc_p[i],
++ sub_desc_p[j].subj_id);
++ }
++#endif
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++ }
++ rsbac_kfree(netdev_desc_p);
++ f_sum += desc_count;
++ }
++ rsbac_printk(KERN_INFO "rsbac_check_acl(): %li network device items\n",
++ desc_count);
++#endif /* NET_DEV_PROT */
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ /* nettemp_nt list */
++ tmp_count = 0;
++ desc_count =
++ rsbac_list_lol_get_all_desc(nettemp_nt_handle,
++ (void **) &nettemp_desc_p);
++ if (desc_count > 0) {
++ for (i = 0; i < desc_count; i++) {
++ /* check for group existence of all ACL entries for groups */
++ sub_desc_count =
++ rsbac_list_lol_get_all_subdesc
++ (nettemp_nt_handle, &nettemp_desc_p[i],
++ (void **) &sub_desc_p);
++ if (sub_desc_count > 0) {
++ for (j = 0; j < sub_desc_count; j++) {
++ if ((sub_desc_p[j].subj_type ==
++ ACLS_GROUP)
++ && sub_desc_p[j].subj_id
++ &&
++ !rsbac_list_exist(group_handle,
++ &sub_desc_p
++ [j].
++ subj_id)) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "nettemp_nt_item %u has invalid group %u in ACL -> removing entry!\n",
++ nettemp_desc_p[i],
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (nettemp_nt_handle,
++ &nettemp_desc_p
++ [i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "nettemp_nt_item %u has invalid group %u in ACL!\n",
++ nettemp_desc_p[i],
++ sub_desc_p[j].subj_id);
++ }
++#if defined(CONFIG_RSBAC_RC)
++ else if ((sub_desc_p[j].
++ subj_type == ACLS_ROLE)
++ && (sub_desc_p[j].
++ subj_id >
++ RC_role_max_value)
++ ) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "nettemp_nt_item %u has invalid role %u in ACL -> removing entry!\n",
++ nettemp_desc_p[i],
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (nettemp_nt_handle,
++ &nettemp_desc_p
++ [i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "nettemp_nt_item %u has invalid role %u in ACL!\n",
++ nettemp_desc_p[i],
++ sub_desc_p[j].subj_id);
++ }
++#endif
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++ }
++ rsbac_kfree(nettemp_desc_p);
++ f_sum += desc_count;
++ }
++ rsbac_printk(KERN_INFO "rsbac_check_acl(): %li network template NT items\n",
++ desc_count);
++
++ /* nettemp list */
++ tmp_count = 0;
++ desc_count =
++ rsbac_list_lol_get_all_desc(nettemp_handle,
++ (void **) &nettemp_desc_p);
++ if (desc_count > 0) {
++ for (i = 0; i < desc_count; i++) {
++ /* check for group existence of all ACL entries for groups */
++ sub_desc_count =
++ rsbac_list_lol_get_all_subdesc(nettemp_handle,
++ &nettemp_desc_p
++ [i],
++ (void **)
++ &sub_desc_p);
++ if (sub_desc_count > 0) {
++ for (j = 0; j < sub_desc_count; j++) {
++ if ((sub_desc_p[j].subj_type ==
++ ACLS_GROUP)
++ && sub_desc_p[j].subj_id
++ &&
++ !rsbac_list_exist(group_handle,
++ &sub_desc_p
++ [j].
++ subj_id)) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "nettemp_item %u has invalid group %u in ACL -> removing entry!\n",
++ nettemp_desc_p[i],
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (nettemp_handle,
++ &nettemp_desc_p
++ [i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "nettemp_item %u has invalid group %u in ACL!\n",
++ nettemp_desc_p[i],
++ sub_desc_p[j].subj_id);
++ }
++#if defined(CONFIG_RSBAC_RC)
++ else if ((sub_desc_p[j].
++ subj_type == ACLS_ROLE)
++ && (sub_desc_p[j].
++ subj_id >
++ RC_role_max_value)
++ ) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "nettemp_item %u has invalid role %u in ACL -> removing entry!\n",
++ nettemp_desc_p[i],
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (nettemp_handle,
++ &nettemp_desc_p
++ [i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "nettemp_item %u has invalid role %u in ACL!\n",
++ nettemp_desc_p[i],
++ sub_desc_p[j].subj_id);
++ }
++#endif
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++ }
++ rsbac_kfree(nettemp_desc_p);
++ f_sum += desc_count;
++ }
++ rsbac_printk(KERN_INFO "rsbac_check_acl(): %li network template items\n",
++ desc_count);
++
++ /* netobj list */
++ tmp_count = 0;
++ desc_count =
++ rsbac_list_lol_get_all_desc(netobj_handle,
++ (void **) &netobj_desc_p);
++ if (desc_count > 0) {
++ for (i = 0; i < desc_count; i++) {
++ /* check for group existence of all ACL entries for groups */
++ sub_desc_count =
++ rsbac_list_lol_get_all_subdesc(netobj_handle,
++ &netobj_desc_p
++ [i],
++ (void **)
++ &sub_desc_p);
++ if (sub_desc_count > 0) {
++ for (j = 0; j < sub_desc_count; j++) {
++ if ((sub_desc_p[j].subj_type ==
++ ACLS_GROUP)
++ && sub_desc_p[j].subj_id
++ &&
++ !rsbac_list_exist(group_handle,
++ &sub_desc_p
++ [j].
++ subj_id)) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "netobj_item %p has invalid group %u in ACL -> removing entry!\n",
++ netobj_desc_p[i],
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (netobj_handle,
++ &netobj_desc_p
++ [i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "netobj_item %p has invalid group %u in ACL!\n",
++ netobj_desc_p[i],
++ sub_desc_p[j].subj_id);
++ }
++#if defined(CONFIG_RSBAC_RC)
++ else if ((sub_desc_p[j].
++ subj_type == ACLS_ROLE)
++ && (sub_desc_p[j].
++ subj_id >
++ RC_role_max_value)
++ ) {
++ if (correct) {
++ /* remove sub item and complain */
++ rsbac_pr_debug(ds, "netobj_item %p has invalid role %u in ACL -> removing entry!\n",
++ netobj_desc_p[i],
++ sub_desc_p[j].subj_id);
++ rsbac_list_lol_subremove
++ (netobj_handle,
++ &netobj_desc_p
++ [i],
++ &sub_desc_p
++ [j]);
++ } else /* complain */
++ rsbac_pr_debug(ds, "netobj_item %p has invalid role %u in ACL!\n",
++ netobj_desc_p[i],
++ sub_desc_p[j].subj_id);
++ }
++#endif
++ }
++ rsbac_kfree(sub_desc_p);
++ }
++ }
++ rsbac_kfree(netobj_desc_p);
++ f_sum += desc_count;
++ }
++ rsbac_printk(KERN_INFO "rsbac_check_acl(): %li network object items\n",
++ desc_count);
++#endif /* NET_OBJ_PROT */
++
++ rsbac_printk(KERN_INFO "rsbac_check_acl(): Total of %lu registered ACLs\n",
++ f_sum);
++
++ return 0;
++}
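
rsbac_check_acl() applies the same validation loop to every list: fetch all descriptors, fetch each entry's sub-descriptors, and for every subject of type group (or RC role) verify that the referenced group (or role) still exists; with correct set, the stale sub-entry is removed, otherwise it is only reported. A minimal sketch of that loop using plain arrays; group_exists() and the sample data are invented for illustration and stand in for rsbac_list_exist() on the group list.

#include <stdio.h>
#include <stdbool.h>

#define NGROUPS 3
static const unsigned int existing_groups[NGROUPS] = { 1, 2, 5 };

/* invented stand-in for rsbac_list_exist(group_handle, &id) */
static bool group_exists(unsigned int id)
{
	int i;

	for (i = 0; i < NGROUPS; i++)
		if (existing_groups[i] == id)
			return true;
	return false;
}

int main(void)
{
	/* group ids referenced by one ACL entry list */
	unsigned int acl_groups[] = { 1, 4, 5 };
	int n = sizeof(acl_groups) / sizeof(acl_groups[0]);
	int correct = 1;	/* 1 = remove stale entries, 0 = only report */
	int i;

	for (i = 0; i < n; i++) {
		if (group_exists(acl_groups[i]))
			continue;
		if (correct)
			printf("removing entry for non-existent group %u\n",
			       acl_groups[i]);
		else
			printf("entry references non-existent group %u\n",
			       acl_groups[i]);
	}
	return 0;
}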
++
++/************************************************* */
++/* Access functions */
++/************************************************* */
++
++/* All these procedures handle the spinlocks to protect the targets during */
++/* access. */
++
++/* rsbac_acl_set_acl_entry
++ * Set ACL entry for given target and subject to given rights. If entry does
++ * not exist, it is created, thus cutting the inheritance from default/parent.
++ */
++
++int rsbac_acl_set_acl_entry(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id,
++ rsbac_acl_rights_vector_t rights,
++ rsbac_time_t ttl)
++{
++ int err = 0;
++ struct rsbac_acl_device_list_item_t *device_p;
++ struct rsbac_acl_entry_desc_t desc;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_set_acl_entry(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (subj_type >= ACLS_NONE)
++ return -RSBAC_EINVALIDVALUE;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_set_acl_entry(): called from interrupt!\n");
++ }
++#endif
++ desc.subj_type = subj_type;
++ desc.subj_id = subj_id;
++
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ rsbac_pr_debug(ds_acl, "Setting file/dir/fifo/symlink ACL for device %02u:%02u, inode %u\n",
++ RSBAC_MAJOR(tid.file.device),
++ RSBAC_MINOR(tid.file.device), tid.file.inode);
++ /* default entry? */
++ if (RSBAC_IS_ZERO_DEV(tid.file.device) && !tid.file.inode
++ && !tid.file.dentry_p)
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_fd_handle,
++ ttl, &desc, &rights);
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ /* lookup device */
++ device_p = acl_lookup_device(tid.file.device);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_set_acl_entry(): Could not lookup device!\n");
++ /* free read lock */
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ if (!rsbac_ta_list_lol_exist
++ (ta_number, device_p->handle,
++ &tid.file.inode)) {
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_FD_MASK;
++
++ err = rsbac_ta_list_lol_add_ttl(ta_number,
++ device_p->handle,
++ 0, &tid.file.inode,
++ &mask);
++ if (err) {
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return err;
++ }
++ }
++ err =
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ device_p->handle, ttl,
++ &tid.file.inode, &desc,
++ &rights);
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ /* ready. */
++ return err;
++
++ case T_DEV:
++ rsbac_pr_debug(ds_acl, "Setting device ACL for dev %c %02u:%02u\n",
++ 'B' + tid.dev.type, tid.dev.major,
++ tid.dev.minor);
++ /* default entry? */
++ if (RSBAC_IS_ZERO_DEV_DESC(tid.dev))
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_dev_handle,
++ ttl, &desc, &rights);
++
++ {
++ switch (tid.dev.type) {
++ case D_char:
++ case D_block:
++ if (!rsbac_ta_list_lol_exist
++ (ta_number, dev_handle, &tid.dev)) {
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_DEV_MASK;
++
++ err =
++ rsbac_ta_list_lol_add_ttl
++ (ta_number, dev_handle, 0,
++ &tid.dev, &mask);
++ if (err)
++ return err;
++ }
++ return
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ dev_handle,
++ ttl,
++ &tid.dev,
++ &desc,
++ &rights);
++
++ case D_char_major:
++ case D_block_major:
++ tid.dev.type -= (D_block_major - D_block);
++ if (!rsbac_ta_list_lol_exist
++ (ta_number, dev_major_handle,
++ &tid.dev)) {
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_DEV_MASK;
++
++ err =
++ rsbac_ta_list_lol_add_ttl
++ (ta_number, dev_major_handle,
++ 0, &tid.dev, &mask);
++ if (err)
++ return err;
++ }
++ return
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ dev_major_handle,
++ ttl,
++ &tid.dev,
++ &desc,
++ &rights);
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++ }
++
++ case T_IPC:
++ /* default entry? */
++ if (tid.ipc.type == I_none)
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_ipc_handle,
++ ttl, &desc, &rights);
++ else
++ return -RSBAC_EINVALIDTARGET;
++
++ case T_SCD:
++ /* default entry? */
++ if (tid.scd == AST_none)
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_scd_handle,
++ ttl, &desc, &rights);
++
++ if (!rsbac_ta_list_lol_exist
++ (ta_number, scd_handle, &tid.scd)) {
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_SCD_MASK;
++
++ err = rsbac_ta_list_lol_add_ttl(ta_number,
++ scd_handle,
++ 0,
++ &tid.scd, &mask);
++ if (err)
++ return err;
++ }
++ return rsbac_ta_list_lol_subadd_ttl(ta_number, scd_handle,
++ ttl, &tid.scd, &desc,
++ &rights);
++
++ case T_USER:
++ /* default entry? */
++ if (tid.user == RSBAC_NO_USER)
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_u_handle, ttl,
++ &desc, &rights);
++ if (!rsbac_ta_list_lol_exist
++ (ta_number, u_handle, &tid.user)) {
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_U_MASK;
++
++ err = rsbac_ta_list_lol_add_ttl(ta_number,
++ u_handle,
++ 0,
++ &tid.user, &mask);
++ if (err)
++ return err;
++ }
++ return rsbac_ta_list_lol_subadd_ttl(ta_number, u_handle,
++ ttl, &tid.user, &desc,
++ &rights);
++
++
++ case T_PROCESS:
++ /* default entry? */
++ if (!tid.process)
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_p_handle, ttl,
++ &desc, &rights);
++ else
++ return -RSBAC_EINVALIDTARGET;
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ case T_GROUP:
++ /* default entry? */
++ if (tid.group == RSBAC_NO_GROUP)
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_g_handle, ttl,
++ &desc, &rights);
++ if (!rsbac_ta_list_lol_exist
++ (ta_number, g_handle, &tid.group)) {
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_G_MASK;
++
++ err = rsbac_ta_list_lol_add_ttl(ta_number,
++ g_handle,
++ 0,
++ &tid.group, &mask);
++ if (err)
++ return err;
++ }
++ return rsbac_ta_list_lol_subadd_ttl(ta_number, g_handle,
++ ttl, &tid.group, &desc,
++ &rights);
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ case T_NETDEV:
++ rsbac_pr_debug(ds_acl, "Setting network device ACL for netdev %s\n",
++ tid.netdev);
++ /* default entry? */
++ if (!tid.netdev[0])
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_netdev_handle,
++ ttl, &desc, &rights);
++
++ if (!rsbac_ta_list_lol_exist
++ (ta_number, netdev_handle, &tid.netdev)) {
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_NETDEV_MASK;
++
++ err = rsbac_ta_list_lol_add_ttl(ta_number,
++ netdev_handle,
++ 0,
++ &tid.netdev,
++ &mask);
++ if (err)
++ return err;
++ }
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ netdev_handle, ttl,
++ &tid.netdev, &desc,
++ &rights);
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ case T_NETTEMP_NT:
++ rsbac_pr_debug(ds_acl, "Setting network template NT ACL for "
++ "nettemp_nt %u\n", tid.nettemp);
++ /* default entry? */
++ if (!tid.nettemp)
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_nettemp_nt_handle,
++ ttl, &desc, &rights);
++
++ if (!rsbac_ta_list_lol_exist
++ (ta_number, nettemp_nt_handle, &tid.nettemp)) {
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_NETTEMP_MASK;
++
++ err = rsbac_ta_list_lol_add_ttl(ta_number,
++ nettemp_nt_handle,
++ 0,
++ &tid.nettemp,
++ &mask);
++ if (err)
++ return err;
++ }
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ nettemp_nt_handle, ttl,
++ &tid.nettemp, &desc,
++ &rights);
++
++ case T_NETTEMP:
++ rsbac_pr_debug(ds_acl, "Setting network template ACL for nettemp %u\n",
++ tid.nettemp);
++ /* default entry? */
++ if (!tid.nettemp)
++ return -RSBAC_EINVALIDTARGET;
++ if (!rsbac_ta_net_template_exist(ta_number, tid.nettemp))
++ return -RSBAC_EINVALIDTARGET;
++
++ if (!rsbac_ta_list_lol_exist
++ (ta_number, nettemp_handle, &tid.nettemp)) {
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_NETOBJ_MASK;
++
++ err = rsbac_ta_list_lol_add_ttl(ta_number,
++ nettemp_handle,
++ 0,
++ &tid.nettemp,
++ &mask);
++ if (err)
++ return err;
++ }
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ nettemp_handle, ttl,
++ &tid.nettemp, &desc,
++ &rights);
++
++ case T_NETOBJ:
++ rsbac_pr_debug(ds_acl, "Setting network object ACL for netobj %p\n",
++ tid.netobj.sock_p);
++ /* default entry? */
++ if (!tid.netobj.sock_p)
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_netobj_handle,
++ ttl, &desc, &rights);
++
++ if (!rsbac_ta_list_lol_exist
++ (ta_number, netobj_handle, &tid.netobj.sock_p)) {
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_NETOBJ_MASK;
++
++ err = rsbac_ta_list_lol_add_ttl(ta_number,
++ netobj_handle,
++ 0,
++ &tid.netobj.sock_p,
++ &mask);
++ if (err)
++ return err;
++ }
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ netobj_handle, ttl,
++ &tid.netobj.sock_p,
++ &desc, &rights);
++#endif /* NET_OBJ_PROT */
++
++
++ default:
++ err = -RSBAC_EINVALIDTARGET;
++ }
++ return err;
++}
++
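++/*
++ * Illustrative sketch only, not part of the patch: granting one right on
++ * the FD default ACL with rsbac_acl_set_acl_entry().  The all-zero target
++ * id selects the default entry; ACLS_USER, uid 400 and the R_READ bit are
++ * assumed example values, not taken from this file.
++ */
++#if 0
++static int acl_set_entry_example(void)
++{
++	union rsbac_target_id_t tid;
++	rsbac_acl_rights_vector_t rights =
++	    ((rsbac_acl_rights_vector_t) 1) << R_READ;	/* assumed right bit */
++
++	memset(&tid, 0, sizeof(tid));	/* zero dev/inode selects the default FD entry */
++	return rsbac_acl_set_acl_entry(0 /* no transaction */, T_FILE, tid,
++				       ACLS_USER, 400 /* example uid */,
++				       rights, 0 /* no ttl */);
++}
++#endif
++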
++/* rsbac_acl_remove_acl_entry
++ * Remove ACL entry for given target and subject. This reactivates the
++ * inheritance from default/parent.
++ */
++
++int rsbac_acl_remove_acl_entry(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id)
++{
++ int err = 0;
++ struct rsbac_acl_device_list_item_t *device_p;
++ struct rsbac_acl_entry_desc_t desc;
++#ifdef CONFIG_RSBAC_DEBUG
++ char tmp[RSBAC_MAXNAMELEN];
++#endif
++ rsbac_acl_rights_vector_t mask;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_remove_acl_entry(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (subj_type >= ACLS_NONE)
++ return -RSBAC_EINVALIDVALUE;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_remove_acl_entry(): called from interrupt!\n");
++ }
++#endif
++ desc.subj_type = subj_type;
++ desc.subj_id = subj_id;
++
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ rsbac_pr_debug(ds_acl, "Removing file/dir/fifo/symlink ACL entry %s %u for device %02u:%02u, inode %u\n",
++ get_acl_subject_type_name(tmp, desc.subj_type),
++ desc.subj_id,
++ RSBAC_MAJOR(tid.file.device),
++ RSBAC_MINOR(tid.file.device), tid.file.inode);
++ /* default entry? */
++ if (RSBAC_IS_ZERO_DEV(tid.file.device) && !tid.file.inode
++ && !tid.file.dentry_p)
++ return rsbac_ta_list_remove(ta_number,
++ default_fd_handle,
++ &desc);
++
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ /* lookup device */
++ device_p = acl_lookup_device(tid.file.device);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_remove_acl_entry(): Could not lookup device!\n");
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ err = rsbac_ta_list_lol_subremove(ta_number,
++ device_p->handle,
++ &tid.file.inode, &desc);
++ /* if ACL is empty, remove it */
++ if (!err
++ && !rsbac_ta_list_lol_subcount(ta_number,
++ device_p->handle,
++ &tid.file.inode)
++ && !rsbac_ta_list_lol_get_data_ttl(ta_number,
++ device_p->handle,
++ NULL,
++ &tid.file.inode,
++ &mask)
++ && (mask == RSBAC_ACL_DEFAULT_FD_MASK)
++ ) {
++ err = rsbac_ta_list_lol_remove(ta_number,
++ device_p->handle,
++ &tid.file.inode);
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return err;
++
++ case T_DEV:
++ rsbac_pr_debug(ds_acl, "Removing device ACL entry for dev %c %02u:%02u\n",
++ 'B' + tid.dev.type, tid.dev.major,
++ tid.dev.minor);
++ /* default entry? */
++ if (RSBAC_IS_ZERO_DEV_DESC(tid.dev))
++ return rsbac_ta_list_remove(ta_number,
++ default_dev_handle,
++ &desc);
++
++ {
++ switch (tid.dev.type) {
++ case D_char:
++ case D_block:
++ err =
++ rsbac_ta_list_lol_subremove(ta_number,
++ dev_handle,
++ &tid.dev,
++ &desc);
++ /* if ACL is empty, remove it */
++ if (!err
++ &&
++ !rsbac_ta_list_lol_subcount(ta_number,
++ dev_handle,
++ &tid.dev)
++ &&
++ !rsbac_ta_list_lol_get_data_ttl
++ (ta_number, dev_handle, NULL, &tid.dev,
++ &mask)
++ && (mask == RSBAC_ACL_DEFAULT_DEV_MASK)
++ ) {
++ err =
++ rsbac_ta_list_lol_remove
++ (ta_number, dev_handle,
++ &tid.dev);
++ }
++ return err;
++
++ case D_char_major:
++ case D_block_major:
++ tid.dev.type -= (D_block_major - D_block);
++ err =
++ rsbac_ta_list_lol_subremove(ta_number,
++ dev_major_handle,
++ &tid.dev,
++ &desc);
++ /* if ACL is empty, remove it */
++ if (!err
++ &&
++ !rsbac_ta_list_lol_subcount(ta_number,
++ dev_major_handle,
++ &tid.dev)
++ &&
++ !rsbac_ta_list_lol_get_data_ttl
++ (ta_number, dev_major_handle, NULL,
++ &tid.dev, &mask)
++ && (mask == RSBAC_ACL_DEFAULT_DEV_MASK)
++ ) {
++ err =
++ rsbac_ta_list_lol_remove
++ (ta_number, dev_major_handle,
++ &tid.dev);
++ }
++ return err;
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++ }
++
++ case T_IPC:
++ rsbac_pr_debug(ds_acl, "Removing IPC ACL for type %u\n", tid.ipc.type);
++ /* default entry? */
++ if (tid.ipc.type == I_none)
++ return rsbac_ta_list_remove(ta_number,
++ default_ipc_handle,
++ &desc);
++ else
++ return -RSBAC_EINVALIDTARGET;
++
++ case T_SCD:
++ rsbac_pr_debug(ds_acl, "Removing SCD ACL entry for %s\n",
++ get_acl_scd_type_name(tmp, tid.scd));
++ /* default entry? */
++ if (tid.scd == AST_none)
++ return rsbac_ta_list_remove(ta_number,
++ default_scd_handle,
++ &desc);
++ err =
++ rsbac_ta_list_lol_subremove(ta_number, scd_handle,
++ &tid.scd, &desc);
++ /* if ACL is empty, remove it */
++ if (!err
++ && !rsbac_ta_list_lol_subcount(ta_number, scd_handle,
++ &tid.scd)
++ && !rsbac_ta_list_lol_get_data_ttl(ta_number,
++ scd_handle, NULL,
++ &tid.scd, &mask)
++ && (mask == RSBAC_ACL_DEFAULT_SCD_MASK)
++ ) {
++ err =
++ rsbac_ta_list_lol_remove(ta_number, scd_handle,
++ &tid.scd);
++ }
++ return err;
++
++ case T_USER:
++ rsbac_pr_debug(ds_acl, "Removing user ACL for user %u\n",
++ tid.user);
++ /* default entry? */
++ if (tid.user == RSBAC_NO_USER)
++ return rsbac_ta_list_remove(ta_number,
++ default_u_handle,
++ &desc);
++ err =
++ rsbac_ta_list_lol_subremove(ta_number, u_handle,
++ &tid.user, &desc);
++ /* if ACL is empty, remove it */
++ if (!err
++ && !rsbac_ta_list_lol_subcount(ta_number, u_handle,
++ &tid.user)
++ && !rsbac_ta_list_lol_get_data_ttl(ta_number, u_handle,
++ NULL, &tid.user,
++ &mask)
++ && (mask == RSBAC_ACL_DEFAULT_U_MASK)
++ ) {
++ err =
++ rsbac_ta_list_lol_remove(ta_number, u_handle,
++ &tid.user);
++ }
++ return err;
++
++ case T_PROCESS:
++ rsbac_pr_debug(ds_acl, "Removing process ACL for pid %u\n",
++ tid.process);
++ /* default entry? */
++ if (!tid.process)
++ return rsbac_ta_list_remove(ta_number,
++ default_p_handle,
++ &desc);
++ else
++ return -RSBAC_EINVALIDTARGET;
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ case T_GROUP:
++ rsbac_pr_debug(ds_acl, "Removing Linux group ACL for group %u\n",
++ tid.group);
++ /* default entry? */
++ if (tid.group == RSBAC_NO_GROUP)
++ return rsbac_ta_list_remove(ta_number,
++ default_g_handle,
++ &desc);
++ err =
++ rsbac_ta_list_lol_subremove(ta_number, g_handle,
++ &tid.group, &desc);
++ /* if ACL is empty, remove it */
++ if (!err
++ && !rsbac_ta_list_lol_subcount(ta_number, g_handle,
++ &tid.group)
++ && !rsbac_ta_list_lol_get_data_ttl(ta_number, g_handle,
++ NULL, &tid.group,
++ &mask)
++ && (mask == RSBAC_ACL_DEFAULT_G_MASK)
++ ) {
++ err =
++ rsbac_ta_list_lol_remove(ta_number, g_handle,
++ &tid.group);
++ }
++ return err;
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ case T_NETDEV:
++ rsbac_pr_debug(ds_acl, "Removing network device ACL entry for netdev %s\n",
++ tid.netdev);
++ /* default entry? */
++ if (!tid.netdev[0])
++ return rsbac_ta_list_remove(ta_number,
++ default_netdev_handle,
++ &desc);
++
++ err =
++ rsbac_ta_list_lol_subremove(ta_number, netdev_handle,
++ &tid.netdev, &desc);
++ /* if ACL is empty, remove it */
++ if (!err
++ && !rsbac_ta_list_lol_subcount(ta_number,
++ netdev_handle,
++ &tid.netdev)
++ && !rsbac_ta_list_lol_get_data_ttl(ta_number,
++ netdev_handle, NULL,
++ &tid.netdev, &mask)
++ && (mask == RSBAC_ACL_DEFAULT_NETDEV_MASK)
++ ) {
++ err =
++ rsbac_ta_list_lol_remove(ta_number,
++ netdev_handle,
++ &tid.netdev);
++ }
++ return err;
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ case T_NETTEMP_NT:
++ rsbac_pr_debug(ds_acl, "Removing network template NT ACL entry for "
++ "nettemp_nt %u\n", tid.nettemp);
++ /* default entry? */
++ if (!tid.nettemp)
++ return rsbac_ta_list_remove(ta_number,
++ default_nettemp_nt_handle,
++ &desc);
++ if (!rsbac_ta_net_template_exist(ta_number, tid.nettemp))
++ return -RSBAC_EINVALIDTARGET;
++
++ err =
++ rsbac_ta_list_lol_subremove(ta_number,
++ nettemp_nt_handle,
++ &tid.nettemp, &desc);
++ /* if ACL is empty, remove it */
++ if (!err
++ && !rsbac_ta_list_lol_subcount(ta_number,
++ nettemp_nt_handle,
++ &tid.nettemp)
++ && !rsbac_ta_list_lol_get_data_ttl(ta_number,
++ nettemp_nt_handle,
++ NULL, &tid.nettemp,
++ &mask)
++ && (mask == RSBAC_ACL_DEFAULT_NETTEMP_MASK)
++ ) {
++ err =
++ rsbac_ta_list_lol_remove(ta_number,
++ nettemp_nt_handle,
++ &tid.nettemp);
++ }
++ return err;
++
++ case T_NETTEMP:
++		rsbac_pr_debug(ds_acl, "Removing network template ACL entry for nettemp %u\n",
++ tid.nettemp);
++ /* default entry? */
++ if (!tid.nettemp)
++ return -RSBAC_EINVALIDTARGET;
++ if (!rsbac_ta_net_template_exist(ta_number, tid.nettemp))
++ return -RSBAC_EINVALIDTARGET;
++
++ err =
++ rsbac_ta_list_lol_subremove(ta_number, nettemp_handle,
++ &tid.nettemp, &desc);
++ /* if ACL is empty, remove it */
++ if (!err
++ && !rsbac_ta_list_lol_subcount(ta_number,
++ nettemp_handle,
++ &tid.nettemp)
++ && !rsbac_ta_list_lol_get_data_ttl(ta_number,
++ nettemp_handle,
++ NULL, &tid.nettemp,
++ &mask)
++ && (mask == RSBAC_ACL_DEFAULT_NETOBJ_MASK)
++ ) {
++ err =
++ rsbac_ta_list_lol_remove(ta_number,
++ nettemp_handle,
++ &tid.nettemp);
++ }
++ return err;
++
++ case T_NETOBJ:
++ rsbac_pr_debug(ds_acl, "Removing network object ACL entry for netobj %p\n",
++ tid.netobj.sock_p);
++ /* default entry? */
++ if (!tid.netobj.sock_p)
++ return rsbac_ta_list_remove(ta_number,
++ default_netobj_handle,
++ &desc);
++
++ err =
++ rsbac_ta_list_lol_subremove(ta_number, netobj_handle,
++ &tid.netobj.sock_p, &desc);
++ /* if ACL is empty, remove it */
++ if (!err
++ && !rsbac_ta_list_lol_subcount(ta_number,
++ netobj_handle,
++ &tid.netobj.sock_p)
++ && !rsbac_ta_list_lol_get_data_ttl(ta_number,
++ netobj_handle, NULL,
++ &tid.netobj, &mask)
++ && (mask == RSBAC_ACL_DEFAULT_NETOBJ_MASK)
++ ) {
++ err =
++ rsbac_ta_list_lol_remove(ta_number,
++ netobj_handle,
++ &tid.netobj.sock_p);
++ }
++ return err;
++#endif /* NET_OBJ_PROT */
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++}
++
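++/*
++ * Illustrative sketch only, not part of the patch: dropping one subject's
++ * entry from a regular FILE target id.  Once the last subject entry is
++ * gone and the mask is back at its default, the per-object list is removed
++ * as well, so the object inherits from its parent/default ACL again.  The
++ * ACLS_USER subject and uid 400 are assumed example values.
++ */
++#if 0
++static int acl_remove_entry_example(union rsbac_target_id_t file_tid)
++{
++	return rsbac_acl_remove_acl_entry(0 /* no transaction */, T_FILE,
++					  file_tid, ACLS_USER,
++					  400 /* example uid */);
++}
++#endif
++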
++/* rsbac_acl_remove_acl
++ * Remove ACL for given target. For cleanup on delete.
++ */
++
++int rsbac_acl_remove_acl(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid)
++{
++ int err = 0;
++#ifdef CONFIG_RSBAC_DEBUG
++ char tmp[RSBAC_MAXNAMELEN];
++#endif
++ struct rsbac_acl_device_list_item_t *device_p;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_remove_acl(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++#ifdef CONFIG_RSBAC_DEBUG
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_remove_acl(): called from interrupt!\n");
++ }
++#endif
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ rsbac_pr_debug(ds_acl, "Removing file/dir/fifo/symlink ACL for device %02u:%02u, inode %u\n",
++ RSBAC_MAJOR(tid.file.device),
++ RSBAC_MINOR(tid.file.device), tid.file.inode);
++ /* default entry? */
++ if (RSBAC_IS_ZERO_DEV(tid.file.device) && !tid.file.inode
++ && !tid.file.dentry_p)
++ return -RSBAC_EINVALIDTARGET;
++
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ /* lookup device */
++ device_p = acl_lookup_device(tid.file.device);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_remove_acl(): Could not lookup device!\n");
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ err = rsbac_ta_list_lol_remove(ta_number,
++ device_p->handle,
++ &tid.file.inode);
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return err;
++
++ case T_DEV:
++ rsbac_pr_debug(ds_acl, "Removing device ACL for dev %c %02u:%02u\n",
++ 'B' + tid.dev.type, tid.dev.major,
++ tid.dev.minor);
++ /* default entry? */
++ if (RSBAC_IS_ZERO_DEV_DESC(tid.dev))
++ return -RSBAC_EINVALIDTARGET;
++ switch (tid.dev.type) {
++ case D_char:
++ case D_block:
++ return rsbac_ta_list_lol_remove(ta_number,
++ dev_handle,
++ &tid.dev);
++
++ case D_char_major:
++ case D_block_major:
++ tid.dev.type -= (D_block_major - D_block);
++ return rsbac_ta_list_lol_remove(ta_number,
++ dev_major_handle,
++ &tid.dev);
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ case T_SCD:
++ rsbac_pr_debug(ds_acl, "Removing SCD ACL for %s\n",
++ get_acl_scd_type_name(tmp, tid.scd));
++ /* default entry? */
++ if (tid.scd == AST_none)
++ return -RSBAC_EINVALIDTARGET;
++ else
++ return rsbac_ta_list_lol_remove(ta_number,
++ scd_handle,
++ &tid.scd);
++
++ case T_USER:
++ rsbac_pr_debug(ds_acl, "Removing user ACL for user %u\n",
++ tid.user);
++ /* default entry? */
++ if (tid.user == RSBAC_NO_USER)
++ return -RSBAC_EINVALIDTARGET;
++ else
++ return rsbac_ta_list_lol_remove(ta_number,
++ u_handle,
++ &tid.user);
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ case T_GROUP:
++ rsbac_pr_debug(ds_acl, "Removing Linux group ACL for group %u\n",
++ tid.group);
++ /* default entry? */
++ if (tid.group == RSBAC_NO_GROUP)
++ return -RSBAC_EINVALIDTARGET;
++ else
++ return rsbac_ta_list_lol_remove(ta_number,
++ g_handle,
++ &tid.group);
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ case T_NETDEV:
++ rsbac_pr_debug(ds_acl, "Removing network device ACL for netdev %s\n",
++ tid.netdev);
++ /* default entry? */
++ if (!tid.netdev[0])
++ return -RSBAC_EINVALIDTARGET;
++ else
++ return rsbac_ta_list_lol_remove(ta_number,
++ netdev_handle,
++ &tid.netdev);
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ case T_NETTEMP_NT:
++ rsbac_pr_debug(ds_acl, "Removing network template NT ACL for nettemp_nt %u\n",
++ tid.nettemp);
++ /* default entry? */
++ if (!tid.nettemp)
++ return -RSBAC_EINVALIDTARGET;
++ else
++ return rsbac_ta_list_lol_remove(ta_number,
++ nettemp_nt_handle,
++ &tid.nettemp);
++ case T_NETTEMP:
++ rsbac_pr_debug(ds_acl, "Removing network template ACL for nettemp %u\n",
++ tid.nettemp);
++ /* default entry? */
++ if (!tid.nettemp)
++ return -RSBAC_EINVALIDTARGET;
++ else
++ return rsbac_ta_list_lol_remove(ta_number,
++ nettemp_handle,
++ &tid.nettemp);
++ case T_NETOBJ:
++ rsbac_pr_debug(ds_acl, "Removing network object ACL for netobj %p\n",
++ tid.netobj.sock_p);
++ /* default entry? */
++ if (!tid.netobj.sock_p)
++ return -RSBAC_EINVALIDTARGET;
++ else
++ return rsbac_ta_list_lol_remove(ta_number,
++ netobj_handle,
++							&tid.netobj.sock_p);
++#endif
++
++ default:
++ err = -RSBAC_EINVALIDTARGET;
++ }
++ return err;
++}
++
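++/*
++ * Illustrative sketch only, not part of the patch: dropping the whole ACL
++ * of a FILE target, as done for cleanup when the underlying object is
++ * deleted.  The caller passes the target id of the object going away.
++ */
++#if 0
++static void acl_cleanup_on_delete_example(union rsbac_target_id_t file_tid)
++{
++	if (rsbac_acl_remove_acl(0 /* no transaction */, T_FILE, file_tid))
++		rsbac_printk(KERN_WARNING "acl_cleanup_on_delete_example(): remove failed\n");
++}
++#endif
++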
++/* rsbac_acl_add_to_acl_entry
++ * Add given rights to ACL entry for given target and subject. If entry does
++ * not exist, behaviour is exactly like rsbac_acl_set_acl_entry.
++ */
++
++int rsbac_acl_add_to_acl_entry(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id,
++ rsbac_acl_rights_vector_t rights,
++ rsbac_time_t ttl)
++{
++ int err = 0;
++ struct rsbac_acl_device_list_item_t *device_p;
++ rsbac_acl_rights_vector_t old_rights;
++ struct rsbac_acl_entry_desc_t desc;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_add_to_acl_entry(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (subj_type >= ACLS_NONE)
++ return -RSBAC_EINVALIDVALUE;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_add_to_acl_entry(): called from interrupt!\n");
++ }
++#endif
++ desc.subj_type = subj_type;
++ desc.subj_id = subj_id;
++
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ /* default entry? */
++ if (RSBAC_IS_ZERO_DEV(tid.file.device) && !tid.file.inode
++ && !tid.file.dentry_p) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_fd_handle, NULL, &desc,
++ &old_rights))
++ rights |= old_rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_fd_handle,
++ ttl, &desc, &rights);
++ }
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ /* lookup device */
++ device_p = acl_lookup_device(tid.file.device);
++ if (!device_p) {
++			rsbac_printk(KERN_WARNING "rsbac_acl_add_to_acl_entry(): Could not lookup device!\n");
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ /* protect this list */
++ if (!rsbac_ta_list_lol_exist(ta_number, device_p->handle, &tid.file.inode)) { /* new acl */
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_FD_MASK;
++
++ err =
++ rsbac_ta_list_lol_add_ttl(ta_number,
++ device_p->handle, 0,
++ &tid.file.inode,
++ &mask);
++ if (err) {
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return err;
++ }
++ } else { /* old entry? */
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ device_p->handle,
++ NULL,
++							       &tid.file.inode,
++ &desc,
++ &old_rights))
++ rights |= old_rights;
++ }
++ err = rsbac_ta_list_lol_subadd_ttl(ta_number,
++ device_p->handle, ttl,
++ &tid.file.inode, &desc,
++ &rights);
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return err;
++
++ case T_DEV:
++ /* default entry? */
++ if (RSBAC_IS_ZERO_DEV_DESC(tid.dev)) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_dev_handle, NULL, &desc,
++ &old_rights))
++ rights |= old_rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_dev_handle,
++ ttl, &desc, &rights);
++ }
++ switch (tid.dev.type) {
++ case D_char:
++ case D_block:
++ if (!rsbac_ta_list_lol_exist(ta_number, dev_handle, &tid.dev)) { /* new acl */
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_DEV_MASK;
++
++ err =
++ rsbac_ta_list_lol_add_ttl(ta_number,
++ dev_handle,
++ 0, &tid.dev,
++ &mask);
++ if (err)
++ return err;
++ } else { /* old entry? */
++ if (!rsbac_ta_list_lol_get_subdata_ttl
++ (ta_number, dev_handle, NULL, &tid.dev,
++ &desc, &old_rights))
++ rights |= old_rights;
++ }
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ dev_handle,
++ ttl, &tid.dev,
++ &desc,
++ &rights);
++
++ case D_char_major:
++ case D_block_major:
++ tid.dev.type -= (D_block_major - D_block);
++ if (!rsbac_ta_list_lol_exist(ta_number, dev_major_handle, &tid.dev)) { /* new acl */
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_DEV_MASK;
++
++ err =
++ rsbac_ta_list_lol_add_ttl(ta_number,
++ dev_major_handle,
++ 0, &tid.dev,
++ &mask);
++ if (err)
++ return err;
++ } else { /* old entry? */
++ if (!rsbac_ta_list_lol_get_subdata_ttl
++ (ta_number, dev_major_handle, NULL,
++ &tid.dev, &desc, &old_rights))
++ rights |= old_rights;
++ }
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ dev_major_handle,
++ ttl, &tid.dev,
++ &desc,
++ &rights);
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ case T_IPC:
++ /* default entry? */
++ if (tid.ipc.type == I_none) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_ipc_handle, NULL, &desc,
++ &old_rights))
++ rights |= old_rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_ipc_handle,
++ ttl, &desc, &rights);
++ } else
++ return -RSBAC_EINVALIDTARGET;
++
++ case T_SCD:
++ /* default entry? */
++ if (tid.scd == AST_none) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_scd_handle, NULL, &desc,
++ &old_rights))
++ rights |= old_rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_scd_handle,
++ ttl, &desc, &rights);
++ }
++ if (!rsbac_ta_list_lol_exist(ta_number, scd_handle, &tid.scd)) { /* new acl */
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_SCD_MASK;
++
++ err =
++ rsbac_ta_list_lol_add_ttl(ta_number,
++ scd_handle, 0,
++ &tid.scd, &mask);
++ if (err)
++ return err;
++ } else { /* old entry? */
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ scd_handle,
++ NULL,
++ &tid.scd,
++ &desc,
++ &old_rights))
++ rights |= old_rights;
++ }
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ scd_handle,
++ ttl,
++ &tid.scd,
++ &desc, &rights);
++
++ case T_USER:
++ /* default entry? */
++ if (tid.user == RSBAC_NO_USER) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_u_handle, NULL, &desc,
++ &old_rights))
++ rights |= old_rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_u_handle, ttl,
++ &desc, &rights);
++ }
++ if (!rsbac_ta_list_lol_exist(ta_number, u_handle, &tid.user)) { /* new acl */
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_U_MASK;
++
++ err =
++ rsbac_ta_list_lol_add_ttl(ta_number, u_handle,
++ 0, &tid.user, &mask);
++ if (err)
++ return err;
++ } else { /* old subentry? */
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ u_handle,
++ NULL,
++ &tid.user,
++ &desc,
++ &old_rights))
++ rights |= old_rights;
++ }
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ u_handle,
++ ttl,
++ &tid.user,
++ &desc, &rights);
++
++ case T_PROCESS:
++ /* default entry? */
++ if (!tid.process) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_p_handle, NULL, &desc,
++ &old_rights))
++ rights |= old_rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_p_handle, ttl,
++ &desc, &rights);
++ } else
++ return -RSBAC_EINVALIDTARGET;
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ case T_GROUP:
++ /* default entry? */
++ if (tid.group == RSBAC_NO_GROUP) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_g_handle, NULL, &desc,
++ &old_rights))
++ rights |= old_rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_g_handle, ttl,
++ &desc, &rights);
++ }
++ if (!rsbac_ta_list_lol_exist(ta_number, g_handle, &tid.group)) { /* new acl */
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_G_MASK;
++
++ err =
++ rsbac_ta_list_lol_add_ttl(ta_number, g_handle,
++ 0, &tid.group,
++ &mask);
++ if (err)
++ return err;
++ } else { /* old subentry? */
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ g_handle,
++ NULL,
++ &tid.group,
++ &desc,
++ &old_rights))
++ rights |= old_rights;
++ }
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ g_handle,
++ ttl,
++ &tid.group,
++ &desc, &rights);
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ case T_NETDEV:
++ /* default entry? */
++ if (!tid.netdev[0]) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_netdev_handle, NULL, &desc,
++ &old_rights))
++ rights |= old_rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_netdev_handle,
++ ttl, &desc, &rights);
++ }
++ if (!rsbac_ta_list_lol_exist(ta_number, netdev_handle, &tid.netdev)) { /* new acl */
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_NETDEV_MASK;
++
++ err =
++ rsbac_ta_list_lol_add_ttl(ta_number,
++ netdev_handle, 0,
++ &tid.netdev, &mask);
++ if (err)
++ return err;
++ } else { /* old entry? */
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ netdev_handle,
++ NULL,
++ &tid.netdev,
++ &desc,
++ &old_rights))
++ rights |= old_rights;
++ }
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ netdev_handle,
++ ttl,
++ &tid.netdev,
++ &desc, &rights);
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ case T_NETTEMP_NT:
++ /* default entry? */
++ if (!tid.nettemp) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_nettemp_nt_handle, NULL,
++ &desc, &old_rights))
++ rights |= old_rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_nettemp_nt_handle,
++ ttl, &desc, &rights);
++ }
++ if (!rsbac_ta_net_template_exist(ta_number, tid.nettemp))
++ return -RSBAC_EINVALIDTARGET;
++ if (!rsbac_ta_list_lol_exist(ta_number, nettemp_nt_handle, &tid.nettemp)) { /* new acl */
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_NETTEMP_MASK;
++
++ err =
++ rsbac_ta_list_lol_add_ttl(ta_number,
++ nettemp_nt_handle, 0,
++ &tid.nettemp, &mask);
++ if (err)
++ return err;
++ } else { /* old entry? */
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ nettemp_nt_handle,
++ NULL,
++							       &tid.nettemp,
++ &desc,
++ &old_rights))
++ rights |= old_rights;
++ }
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ nettemp_nt_handle,
++ ttl,
++ &tid.nettemp,
++ &desc, &rights);
++ case T_NETTEMP:
++ /* default entry? */
++ if (!tid.nettemp) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++ if (!rsbac_ta_net_template_exist(ta_number, tid.nettemp))
++ return -RSBAC_EINVALIDTARGET;
++ if (!rsbac_ta_list_lol_exist(ta_number, nettemp_handle, &tid.nettemp)) { /* new acl */
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_NETOBJ_MASK;
++
++ err =
++ rsbac_ta_list_lol_add_ttl(ta_number,
++ nettemp_handle, 0,
++ &tid.nettemp, &mask);
++ if (err)
++ return err;
++ } else { /* old entry? */
++ if (!rsbac_ta_list_lol_get_subdata_ttl
++ (ta_number, nettemp_handle, NULL, &tid.nettemp,
++ &desc, &old_rights))
++ rights |= old_rights;
++ }
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ nettemp_handle, ttl,
++ &tid.nettemp, &desc,
++ &rights);
++ case T_NETOBJ:
++ /* default entry? */
++ if (!tid.netobj.sock_p) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_netobj_handle, NULL, &desc,
++ &old_rights))
++ rights |= old_rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_netobj_handle,
++ ttl, &desc, &rights);
++ }
++ if (!rsbac_ta_list_lol_exist(ta_number, netobj_handle, &tid.netobj.sock_p)) { /* new acl */
++ rsbac_acl_rights_vector_t mask =
++ RSBAC_ACL_DEFAULT_NETOBJ_MASK;
++
++ err =
++ rsbac_ta_list_lol_add_ttl(ta_number,
++ netobj_handle, 0,
++ &tid.netobj.sock_p,
++ &mask);
++ if (err)
++ return err;
++ } else { /* old entry? */
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ netobj_handle,
++ NULL,
++							       &tid.netobj.sock_p,
++ &desc,
++ &old_rights))
++ rights |= old_rights;
++ }
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ netobj_handle,
++ ttl,
++ &tid.netobj.sock_p,
++ &desc, &rights);
++#endif /* NET_OBJ_PROT */
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++}
++
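++/*
++ * Illustrative sketch only, not part of the patch: rsbac_acl_add_to_acl_entry()
++ * ORs the new rights into whatever the subject already has, so two calls
++ * with single bits end up as the union of both.  ACLS_GROUP id 17 and the
++ * R_READ / R_WRITE bit numbers are assumed example values.
++ */
++#if 0
++static int acl_add_rights_example(union rsbac_target_id_t dir_tid)
++{
++	int err;
++
++	err = rsbac_acl_add_to_acl_entry(0, T_DIR, dir_tid, ACLS_GROUP,
++					 17 /* example group */,
++					 ((rsbac_acl_rights_vector_t) 1) << R_READ,
++					 0);
++	if (err)
++		return err;
++	/* the R_READ bit set above is kept, R_WRITE is merged in */
++	return rsbac_acl_add_to_acl_entry(0, T_DIR, dir_tid, ACLS_GROUP,
++					  17 /* example group */,
++					  ((rsbac_acl_rights_vector_t) 1) << R_WRITE,
++					  0);
++}
++#endif
++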
++/* rsbac_acl_remove_from_acl_entry
++ * Remove given rights from ACL entry for given target and subject. If entry does
++ * not exist, nothing happens.
++ * This function does NOT remove the ACL entry, so removing all rights results in
++ * NO rights for this subject/target combination!
++ */
++
++int rsbac_acl_remove_from_acl_entry(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t
++ subj_type,
++ rsbac_acl_subject_id_t subj_id,
++ rsbac_acl_rights_vector_t rights)
++{
++ int err = 0;
++ struct rsbac_acl_device_list_item_t *device_p;
++ rsbac_acl_rights_vector_t old_rights;
++ struct rsbac_acl_entry_desc_t desc;
++ rsbac_time_t ttl;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_remove_from_acl_entry(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (subj_type >= ACLS_NONE)
++ return -RSBAC_EINVALIDVALUE;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_remove_from_acl_entry(): called from interrupt!\n");
++ }
++#endif
++ desc.subj_type = subj_type;
++ desc.subj_id = subj_id;
++
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ /* default entry? */
++ if (RSBAC_IS_ZERO_DEV(tid.file.device) && !tid.file.inode
++ && !tid.file.dentry_p) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_fd_handle, &ttl, &desc,
++ &old_rights)) {
++ old_rights &= ~rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_fd_handle,
++ ttl, &desc,
++ &old_rights);
++ } else
++ return 0;
++ }
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ /* lookup device */
++ device_p = acl_lookup_device(tid.file.device);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_remove_from_acl_entry(): Could not lookup device!\n");
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ device_p->handle,
++ &ttl,
++ &tid.file.inode,
++ &desc,
++ &old_rights)) {
++ old_rights &= ~rights;
++ err = rsbac_ta_list_lol_subadd_ttl(ta_number,
++ device_p->handle,
++ ttl,
++ &tid.file.inode,
++ &desc,
++ &old_rights);
++ } else
++ err = 0;
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return err;
++
++ case T_DEV:
++ /* default entry? */
++ if (RSBAC_IS_ZERO_DEV_DESC(tid.dev)) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_dev_handle, &ttl, &desc,
++ &old_rights)) {
++ old_rights &= ~rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_dev_handle,
++ ttl, &desc,
++ &old_rights);
++ } else
++ return 0;
++ }
++ switch (tid.dev.type) {
++ case D_char:
++ case D_block:
++ if (!rsbac_ta_list_lol_get_subdata_ttl
++ (ta_number, dev_handle, &ttl, &tid.dev, &desc,
++ &old_rights)) {
++ old_rights &= ~rights;
++ return
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ dev_handle,
++ ttl,
++ &tid.dev,
++ &desc,
++ &old_rights);
++ } else
++ return 0;
++
++ case D_char_major:
++ case D_block_major:
++ tid.dev.type -= (D_block_major - D_block);
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ dev_major_handle,
++ &ttl,
++ &tid.dev,
++ &desc,
++ &old_rights))
++ {
++ old_rights &= ~rights;
++ return
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ dev_major_handle,
++ ttl,
++ &tid.dev,
++ &desc,
++ &old_rights);
++ } else
++ return 0;
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ case T_IPC:
++ /* default entry? */
++ if (tid.ipc.type == I_none) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_ipc_handle, &ttl, &desc,
++ &old_rights)) {
++ old_rights &= ~rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_ipc_handle,
++ ttl, &desc,
++ &old_rights);
++ } else
++ return 0;
++ } else
++ return -RSBAC_EINVALIDTARGET;
++
++ case T_SCD:
++ /* default entry? */
++ if (tid.scd == AST_none) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_scd_handle, &ttl, &desc,
++ &old_rights)) {
++ old_rights &= ~rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_scd_handle,
++ ttl, &desc,
++ &old_rights);
++ } else
++ return 0;
++ }
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ scd_handle,
++ &ttl,
++ &tid.scd,
++ &desc,
++ &old_rights)) {
++ old_rights &= ~rights;
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ scd_handle,
++ ttl,
++ &tid.scd,
++ &desc,
++ &old_rights);
++ } else
++ return 0;
++
++ case T_USER:
++ /* default entry? */
++ if (tid.user == RSBAC_NO_USER) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_u_handle, &ttl, &desc,
++ &old_rights)) {
++ old_rights &= ~rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_u_handle,
++ ttl, &desc,
++ &old_rights);
++ } else
++ return 0;
++ }
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ u_handle,
++ &ttl,
++ &tid.user,
++ &desc,
++ &old_rights)) {
++ old_rights &= ~rights;
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ u_handle,
++ ttl,
++ &tid.user,
++ &desc,
++ &old_rights);
++ } else
++ return 0;
++
++ case T_PROCESS:
++ /* default entry? */
++ if (!tid.process) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_p_handle, &ttl, &desc,
++ &old_rights)) {
++ old_rights &= ~rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_p_handle,
++ ttl, &desc,
++ &old_rights);
++ } else
++ return 0;
++ } else
++ return -RSBAC_EINVALIDTARGET;
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ case T_GROUP:
++ /* default entry? */
++ if (tid.group == RSBAC_NO_GROUP) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_g_handle, &ttl, &desc,
++ &old_rights)) {
++ old_rights &= ~rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_g_handle,
++ ttl, &desc,
++ &old_rights);
++ } else
++ return 0;
++ }
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ g_handle,
++ &ttl,
++ &tid.group,
++ &desc,
++ &old_rights)) {
++ old_rights &= ~rights;
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ g_handle,
++ ttl,
++ &tid.group,
++ &desc,
++ &old_rights);
++ } else
++ return 0;
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ case T_NETDEV:
++ /* default entry? */
++ if (!tid.netdev[0]) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_netdev_handle, &ttl, &desc,
++ &old_rights)) {
++ old_rights &= ~rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_netdev_handle,
++ ttl, &desc,
++ &old_rights);
++ } else
++ return 0;
++ }
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ netdev_handle,
++ &ttl,
++ &tid.netdev,
++ &desc,
++ &old_rights)) {
++ old_rights &= ~rights;
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ netdev_handle,
++ ttl,
++ &tid.netdev,
++ &desc,
++ &old_rights);
++ } else
++ return 0;
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ case T_NETTEMP_NT:
++ /* default entry? */
++ if (!tid.nettemp) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_nettemp_nt_handle, &ttl,
++ &desc, &old_rights)) {
++ old_rights &= ~rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_nettemp_nt_handle,
++ ttl, &desc,
++ &old_rights);
++ } else
++ return 0;
++ }
++ if (!rsbac_ta_net_template_exist(ta_number, tid.nettemp))
++ return -RSBAC_EINVALIDTARGET;
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ nettemp_nt_handle,
++ &ttl,
++ &tid.nettemp,
++ &desc,
++ &old_rights)) {
++ old_rights &= ~rights;
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ nettemp_nt_handle,
++ ttl,
++ &tid.nettemp,
++ &desc,
++ &old_rights);
++ } else
++ return 0;
++ case T_NETTEMP:
++ /* default entry? */
++ if (!tid.nettemp) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++ if (!rsbac_ta_net_template_exist(ta_number, tid.nettemp))
++ return -RSBAC_EINVALIDTARGET;
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ nettemp_handle,
++ &ttl,
++ &tid.nettemp,
++ &desc,
++ &old_rights)) {
++ old_rights &= ~rights;
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ nettemp_handle,
++ ttl,
++ &tid.nettemp,
++ &desc,
++ &old_rights);
++ } else
++ return 0;
++ case T_NETOBJ:
++ /* default entry? */
++ if (!tid.netobj.sock_p) {
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_netobj_handle, &ttl, &desc,
++ &old_rights)) {
++ old_rights &= ~rights;
++ return rsbac_ta_list_add_ttl(ta_number,
++ default_netobj_handle,
++ ttl, &desc,
++ &old_rights);
++ } else
++ return 0;
++ }
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ netobj_handle,
++ &ttl,
++ &tid.netobj.sock_p,
++ &desc,
++ &old_rights)) {
++ old_rights &= ~rights;
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ netobj_handle,
++ ttl,
++							    &tid.netobj.sock_p, &desc,
++ &old_rights);
++ } else
++ return 0;
++#endif /* NET_OBJ_PROT */
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++}
++
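++/*
++ * Worked example for the rights arithmetic above (illustrative only, not
++ * part of the patch): with old_rights = 0x06 and rights = 0x02 to drop,
++ * old_rights &= ~rights leaves 0x04.  The entry itself stays in place, so
++ * a fully cleared entry means "no rights" for that subject and does not
++ * fall back to inheritance from the default/parent ACL.
++ */
++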
++/* rsbac_acl_set_mask
++ * Set inheritance mask for given target to given rights. If item does
++ * not exist, it is created.
++ */
++
++int rsbac_acl_set_mask(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ rsbac_acl_rights_vector_t mask)
++{
++ int err = 0;
++#ifdef CONFIG_RSBAC_DEBUG
++ char tmp[80];
++#endif
++ struct rsbac_acl_device_list_item_t *device_p;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_set_mask(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (target >= T_NONE)
++ return -RSBAC_EINVALIDTARGET;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_set_mask(): called from interrupt!\n");
++ }
++#endif
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ /* default entry? */
++		if (RSBAC_IS_ZERO_DEV(tid.file.device) && !tid.file.inode
++		    && !tid.file.dentry_p) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++ rsbac_pr_debug(ds_acl, "Setting file/dir/fifo/symlink inheritance mask for device %02u:%02u, inode %u\n",
++ RSBAC_MAJOR(tid.file.device),
++ RSBAC_MINOR(tid.file.device), tid.file.inode);
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = acl_lookup_device(tid.file.device);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_set_mask(): Could not lookup device!\n");
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ err = rsbac_ta_list_lol_add_ttl(ta_number,
++ device_p->handle,
++ 0, &tid.file.inode, &mask);
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return err;
++
++ case T_DEV:
++ /* default entry? */
++ if (tid.dev.type == D_none) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++ rsbac_pr_debug(ds_acl, "Setting device inheritance mask for dev %c %02u:%02u\n",
++ 'B' + tid.dev.type,
++ tid.dev.major, tid.dev.minor);
++ switch (tid.dev.type) {
++ case D_char:
++ case D_block:
++ return rsbac_ta_list_lol_add_ttl(ta_number,
++ dev_handle, 0,
++ &tid.dev, &mask);
++
++ case D_char_major:
++ case D_block_major:
++ tid.dev.type -= (D_block_major - D_block);
++ return rsbac_ta_list_lol_add_ttl(ta_number,
++ dev_major_handle,
++ 0, &tid.dev,
++ &mask);
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ case T_SCD:
++ /* default entry? */
++ if (tid.scd == AST_none) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++ rsbac_pr_debug(ds_acl, "Setting SCD inheritance mask for %s\n",
++ get_acl_scd_type_name(tmp, tid.scd));
++ return rsbac_ta_list_lol_add_ttl(ta_number, scd_handle, 0,
++ &tid.scd, &mask);
++
++ case T_USER:
++ /* default entry? */
++ if (tid.user == RSBAC_NO_USER) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++ rsbac_pr_debug(ds_acl, "Setting user inheritance mask for user %u\n",
++ tid.user);
++ return rsbac_ta_list_lol_add_ttl(ta_number, u_handle, 0,
++ &tid.user, &mask);
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ case T_GROUP:
++ /* default entry? */
++ if (tid.group == RSBAC_NO_GROUP) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++ rsbac_pr_debug(ds_acl, "Setting Linux group inheritance mask for group %u\n",
++ tid.group);
++ return rsbac_ta_list_lol_add_ttl(ta_number, g_handle, 0,
++ &tid.group, &mask);
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ case T_NETDEV:
++ /* default entry? */
++ if (!tid.netdev[0]) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++ rsbac_pr_debug(ds_acl, "Setting network device inheritance mask for netdev %s\n",
++ tid.netdev);
++ return rsbac_ta_list_lol_add_ttl(ta_number, netdev_handle,
++ 0, &tid.netdev, &mask);
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ case T_NETTEMP_NT:
++ /* default entry? */
++ if (!tid.nettemp) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++ if (!rsbac_ta_net_template_exist(ta_number, tid.nettemp))
++ return -RSBAC_EINVALIDTARGET;
++ rsbac_pr_debug(ds_acl, "Setting network template NT inheritance mask for nettemp %u\n",
++ tid.nettemp);
++ return rsbac_ta_list_lol_add_ttl(ta_number,
++ nettemp_nt_handle, 0,
++ &tid.nettemp, &mask);
++
++ case T_NETTEMP:
++ /* default entry? */
++ if (!tid.nettemp) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++ if (!rsbac_ta_net_template_exist(ta_number, tid.nettemp))
++ return -RSBAC_EINVALIDTARGET;
++ rsbac_pr_debug(ds_acl, "Setting network template inheritance mask for nettemp %u\n",
++ tid.nettemp);
++ return rsbac_ta_list_lol_add_ttl(ta_number, nettemp_handle,
++ 0, &tid.nettemp, &mask);
++
++ case T_NETOBJ:
++ /* default entry? */
++ if (!tid.netobj.sock_p) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++ rsbac_pr_debug(ds_acl, "Setting network object inheritance mask for netobj %p\n",
++ tid.netobj.sock_p);
++ return rsbac_ta_list_lol_add_ttl(ta_number, netobj_handle,
++ 0, &tid.netobj.sock_p,
++ &mask);
++#endif /* NET_OBJ_PROT */
++
++ default:
++ err = -RSBAC_EINVALIDTARGET;
++ }
++ return err;
++}
++
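++/*
++ * Illustrative sketch only, not part of the patch: restricting what a DIR
++ * passes down to its children by clearing one bit in its inheritance
++ * mask.  R_DELETE is an assumed example right number; the target id must
++ * name an existing directory.
++ */
++#if 0
++static int acl_set_mask_example(union rsbac_target_id_t dir_tid)
++{
++	rsbac_acl_rights_vector_t mask = RSBAC_ACL_DEFAULT_FD_MASK;
++
++	mask &= ~(((rsbac_acl_rights_vector_t) 1) << R_DELETE);
++	return rsbac_acl_set_mask(0 /* no transaction */, T_DIR, dir_tid, mask);
++}
++#endif
++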
++/* rsbac_acl_get_mask
++ * Get inheritance mask for given target. If item does
++ * not exist, default mask is returned.
++ */
++
++int rsbac_acl_get_mask(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ rsbac_acl_rights_vector_t * mask_p)
++{
++ int err = 0;
++ struct rsbac_acl_device_list_item_t *device_p;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_get_mask(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (target >= T_NONE)
++ return -RSBAC_EINVALIDTARGET;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_get_mask(): called from interrupt!\n");
++ }
++#endif
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ /* default entry? */
++ if (RSBAC_IS_ZERO_DEV(tid.file.device) && !tid.file.inode
++ && !tid.file.dentry_p) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ /* lookup device */
++ device_p = acl_lookup_device(tid.file.device);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_get_mask(): Could not lookup device!\n");
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ err = rsbac_ta_list_lol_get_data_ttl(ta_number,
++ device_p->handle, NULL,
++ &tid.file.inode,
++ mask_p);
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ if (err == -RSBAC_ENOTFOUND) {
++ *mask_p = RSBAC_ACL_DEFAULT_FD_MASK;
++ err = 0;
++ }
++ /* ready. */
++ return err;
++
++ case T_DEV:
++ /* default entry? */
++ if (tid.dev.type == D_none) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ switch (tid.dev.type) {
++ case D_char:
++ case D_block:
++ err =
++ rsbac_ta_list_lol_get_data_ttl(ta_number,
++ dev_handle,
++ NULL, &tid.dev,
++ mask_p);
++ break;
++
++ case D_char_major:
++ case D_block_major:
++ tid.dev.type -= (D_block_major - D_block);
++ err =
++ rsbac_ta_list_lol_get_data_ttl(ta_number,
++ dev_major_handle,
++ NULL, &tid.dev,
++ mask_p);
++ break;
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++ if (err == -RSBAC_ENOTFOUND) {
++ *mask_p = RSBAC_ACL_DEFAULT_DEV_MASK;
++ err = 0;
++ }
++ /* ready. */
++ return err;
++
++ case T_SCD:
++ /* default entry? */
++ if (tid.scd == AST_none) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++ err =
++ rsbac_ta_list_lol_get_data_ttl(ta_number, scd_handle,
++ NULL, &tid.scd, mask_p);
++ if (err == -RSBAC_ENOTFOUND) {
++ *mask_p = RSBAC_ACL_DEFAULT_SCD_MASK;
++ err = 0;
++ }
++ /* ready. */
++ return err;
++
++ case T_USER:
++ /* default entry? */
++ if (tid.user == RSBAC_NO_USER) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++ err =
++ rsbac_ta_list_lol_get_data_ttl(ta_number, u_handle,
++ NULL, &tid.user,
++ mask_p);
++ if (err == -RSBAC_ENOTFOUND) {
++ *mask_p = RSBAC_ACL_DEFAULT_U_MASK;
++ err = 0;
++ }
++ /* ready. */
++ return err;
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ case T_GROUP:
++ /* default entry? */
++ if (tid.group == RSBAC_NO_GROUP) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++ err =
++ rsbac_ta_list_lol_get_data_ttl(ta_number, g_handle,
++ NULL, &tid.group,
++ mask_p);
++ if (err == -RSBAC_ENOTFOUND) {
++ *mask_p = RSBAC_ACL_DEFAULT_G_MASK;
++ err = 0;
++ }
++ /* ready. */
++ return err;
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ case T_NETDEV:
++ /* default entry? */
++ if (!tid.netdev[0]) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ err =
++ rsbac_ta_list_lol_get_data_ttl(ta_number,
++ netdev_handle, NULL,
++ &tid.netdev, mask_p);
++ if (err == -RSBAC_ENOTFOUND) {
++ *mask_p = RSBAC_ACL_DEFAULT_NETDEV_MASK;
++ err = 0;
++ }
++ /* ready. */
++ return err;
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ case T_NETTEMP_NT:
++ /* default entry? */
++ if (!tid.nettemp) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++ if (!rsbac_ta_net_template_exist(ta_number, tid.nettemp))
++ return -RSBAC_EINVALIDTARGET;
++
++ err =
++ rsbac_ta_list_lol_get_data_ttl(ta_number,
++ nettemp_nt_handle, NULL,
++ &tid.nettemp, mask_p);
++ if (err == -RSBAC_ENOTFOUND) {
++ *mask_p = RSBAC_ACL_DEFAULT_NETTEMP_MASK;
++ err = 0;
++ }
++ /* ready. */
++ return err;
++ case T_NETTEMP:
++ /* default entry? */
++ if (!tid.nettemp) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++ if (!rsbac_ta_net_template_exist(ta_number, tid.nettemp))
++ return -RSBAC_EINVALIDTARGET;
++
++ err =
++ rsbac_ta_list_lol_get_data_ttl(ta_number,
++ nettemp_handle, NULL,
++ &tid.nettemp, mask_p);
++ if (err == -RSBAC_ENOTFOUND) {
++ *mask_p = RSBAC_ACL_DEFAULT_NETOBJ_MASK;
++ err = 0;
++ }
++ /* ready. */
++ return err;
++ case T_NETOBJ:
++ /* default entry? */
++ if (!tid.netobj.sock_p) {
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ err =
++ rsbac_ta_list_lol_get_data_ttl(ta_number,
++ netobj_handle, NULL,
++ &tid.netobj.sock_p,
++ mask_p);
++ if (err == -RSBAC_ENOTFOUND) {
++ *mask_p = RSBAC_ACL_DEFAULT_NETOBJ_MASK;
++ err = 0;
++ }
++ /* ready. */
++ return err;
++#endif
++
++ default:
++ err = -RSBAC_EINVALIDTARGET;
++ }
++ return err;
++}
++
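++/*
++ * Illustrative sketch only, not part of the patch: reading a mask back.
++ * If no mask item exists, the call still succeeds and hands out the
++ * per-target default mask, so callers need not special-case ENOTFOUND.
++ */
++#if 0
++static int acl_get_mask_example(union rsbac_target_id_t dir_tid)
++{
++	rsbac_acl_rights_vector_t mask;
++	int err;
++
++	err = rsbac_acl_get_mask(0 /* no transaction */, T_DIR, dir_tid, &mask);
++	if (!err)
++		rsbac_printk(KERN_INFO "acl_get_mask_example(): mask is 0x%llx\n",
++			     (unsigned long long) mask);
++	return err;
++}
++#endif
++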
++/* rsbac_acl_get_rights
++ * Get rights from ACL entry for given target and subject.
++ * If entry does not exist and inherit is on, inherited rights are used.
++ * If there is no parent, the default rights vector for this target type is returned.
++ * This function does NOT add role or group rights to user rights!
++ */
++
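++/*
++ * Worked example of the inheritance loop below (illustrative only, not
++ * part of the patch): walking file -> parent dir with the cumulative mask
++ * starting at all ones.  The file has no entry for the subject but a mask
++ * item of 0x06, so the cumulative mask becomes 0x06; the parent dir then
++ * grants 0x05, so the result is 0x05 & 0x06 = 0x04.  Each level can only
++ * narrow what the levels above it may contribute.
++ */
++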
++int rsbac_acl_get_rights(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id,
++ rsbac_acl_rights_vector_t * rights_p,
++ rsbac_boolean_t inherit)
++{
++ int err = 0;
++ struct rsbac_acl_device_list_item_t *device_p;
++ struct rsbac_acl_entry_desc_t desc;
++ rsbac_acl_rights_vector_t i_rights = 0;
++ rsbac_acl_rights_vector_t mask = -1;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_get_rights(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (!rights_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (subj_type >= ACLS_NONE)
++ return -RSBAC_EINVALIDVALUE;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_get_rights(): called from interrupt!\n");
++ }
++#endif
++ desc.subj_type = subj_type;
++ desc.subj_id = subj_id;
++
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ /* default entry? */
++ if (RSBAC_IS_ZERO_DEV(tid.file.device) && !tid.file.inode
++ && !tid.file.dentry_p) {
++ if (rsbac_ta_list_get_data_ttl
++ (ta_number, default_fd_handle, NULL, &desc,
++ rights_p)) {
++ /* last resort: default rights */
++ *rights_p = default_fd_rights;
++ }
++ return 0;
++ }
++ *rights_p = 0;
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ /* use loop for inheritance - used to be recursive calls */
++ for (;;) {
++ /* lookup device */
++ device_p = acl_lookup_device(tid.file.device);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_get_rights(): Could not lookup device %02u:%02u!\n",
++					     RSBAC_MAJOR(tid.file.device),
++					     RSBAC_MINOR(tid.file.device));
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ device_p->handle,
++ NULL,
++							       &tid.file.inode,
++ &desc,
++ &i_rights))
++ {
++ *rights_p |= (i_rights & mask);
++ /* leave loop */
++ break;
++ } else if (inherit) {
++ enum rsbac_target_t parent_target;
++ union rsbac_target_id_t parent_tid;
++ rsbac_acl_rights_vector_t i_mask;
++
++ /* get mask to filter through in next round */
++ if (rsbac_ta_list_lol_get_data_ttl
++ (ta_number, device_p->handle,
++ NULL, &tid.file.inode, &i_mask)) {
++ /* no mask found, set default */
++ i_mask = RSBAC_ACL_DEFAULT_FD_MASK;
++ }
++ /* mask into cumulative mask */
++ mask &= i_mask;
++
++ /* inheritance possible? */
++ if (!rsbac_get_parent
++ (target, tid, &parent_target,
++ &parent_tid)) {
++ target = parent_target;
++ tid = parent_tid;
++ /* next round */
++ continue;
++ } else {
++ /* no inheritance possible -> try default_fd_acl */
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_fd_handle,
++ NULL, &desc, &i_rights)) {
++ /* found, use it */
++ *rights_p |=
++ (i_rights & mask);
++ } else {
++ /* last resort: default rights */
++ *rights_p |=
++ (default_fd_rights &
++ mask);
++ }
++ }
++ /* leave loop */
++ break;
++ } else { /* do not inherit */
++
++ /* last resort: default rights */
++ *rights_p |= default_fd_rights;
++ /* leave loop */
++ break;
++ }
++ } /* end of for(;;) inheritance loop */
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return err;
++
++ case T_DEV:
++ /* default entry? */
++
++ if (RSBAC_IS_ZERO_DEV_DESC(tid.dev)) {
++ if (rsbac_ta_list_get_data_ttl
++ (ta_number, default_dev_handle, NULL, &desc,
++ rights_p)) {
++ /* last resort: default rights */
++ *rights_p = default_dev_rights;
++ }
++ return 0;
++ }
++ if ((tid.dev.type >= D_char_major)
++ || (tid.dev.type == D_block_major)
++ ) {
++ tid.dev.type -= (D_block_major - D_block);
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ dev_major_handle,
++ NULL,
++ &tid.dev,
++ &desc,
++ &i_rights))
++ {
++ *rights_p |= i_rights;
++ } else {
++ rsbac_acl_rights_vector_t mask2;
++
++ /* get mask to filter through */
++ if (rsbac_ta_list_lol_get_data_ttl
++ (ta_number, dev_major_handle, NULL,
++ &tid.dev, &mask2)) {
++ /* no mask found, set default */
++ mask2 = RSBAC_ACL_DEFAULT_DEV_MASK;
++ }
++ /* try default_dev_acl */
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_dev_handle, NULL,
++ &desc, rights_p)) {
++ *rights_p &= mask2;
++ } else {
++ /* last resort: default rights */
++ *rights_p =
++ default_dev_rights & mask2;
++ }
++ }
++ return 0;
++ }
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ dev_handle,
++ NULL,
++ &tid.dev,
++ &desc, &i_rights)) {
++ *rights_p |= i_rights;
++ } else {
++ rsbac_acl_rights_vector_t mask;
++
++ /* get mask to filter through */
++ if (rsbac_ta_list_lol_get_data_ttl(ta_number,
++ dev_handle,
++ NULL,
++ &tid.dev,
++ &mask)) {
++ /* no mask found, set default */
++ mask = RSBAC_ACL_DEFAULT_DEV_MASK;
++ }
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ dev_major_handle,
++ NULL,
++ &tid.dev,
++ &desc,
++ &i_rights))
++ {
++ i_rights &= mask;
++ *rights_p |= i_rights;
++ } else {
++ rsbac_acl_rights_vector_t mask2;
++
++ /* get mask to filter through */
++ if (rsbac_ta_list_lol_get_data_ttl
++ (ta_number, dev_major_handle, NULL,
++ &tid.dev, &mask2)) {
++ /* no mask found, set default */
++ mask2 = RSBAC_ACL_DEFAULT_DEV_MASK;
++ }
++ /* try default_dev_acl */
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_dev_handle, NULL,
++ &desc, rights_p)) {
++ *rights_p &= mask;
++ *rights_p &= mask2;
++ } else {
++ /* last resort: default rights */
++ *rights_p =
++ default_dev_rights & mask &
++ mask2;
++ }
++ }
++ }
++ return 0;
++
++ case T_IPC:
++
++ /* Use default ACL */
++ if (rsbac_ta_list_get_data_ttl
++ (ta_number, default_ipc_handle, NULL, &desc,
++ rights_p)) {
++ /* last resort: default rights */
++ *rights_p = default_ipc_rights;
++ }
++ return 0;
++
++ case T_SCD:
++ /* default entry? */
++ if ((tid.scd == AST_none)
++ || (tid.scd == ST_none)
++ ) {
++ if (rsbac_ta_list_get_data_ttl
++ (ta_number, default_scd_handle, NULL, &desc,
++ rights_p)) {
++ /* last resort: default rights */
++ *rights_p = default_scd_rights;
++ }
++ return 0;
++ }
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ scd_handle,
++ NULL,
++ &tid.scd,
++ &desc, &i_rights)) {
++ *rights_p |= i_rights;
++ } else {
++ rsbac_acl_rights_vector_t mask;
++
++ /* get mask to filter through */
++ if (rsbac_ta_list_lol_get_data_ttl(ta_number,
++ scd_handle,
++ NULL,
++ &tid.scd,
++ &mask)) {
++ /* no mask found, set default */
++ mask = RSBAC_ACL_DEFAULT_SCD_MASK;
++ }
++			/* try default_scd_acl */
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_scd_handle, NULL, &desc,
++ rights_p)) {
++ *rights_p &= mask;
++ } else {
++ /* last resort: default rights */
++ *rights_p = default_scd_rights & mask;
++ }
++ }
++ return 0;
++
++ case T_USER:
++ /* default entry? */
++ if (tid.user == RSBAC_NO_USER) {
++ if (rsbac_ta_list_get_data_ttl
++ (ta_number, default_u_handle, NULL, &desc,
++ rights_p)) {
++ /* last resort: default rights */
++ *rights_p = default_u_rights;
++ }
++ return 0;
++ }
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ u_handle,
++ NULL,
++ &tid.user,
++ &desc, &i_rights)) {
++ *rights_p |= i_rights;
++ } else {
++ rsbac_acl_rights_vector_t mask;
++
++ /* get mask to filter through */
++ if (rsbac_ta_list_lol_get_data_ttl(ta_number,
++ u_handle,
++ NULL,
++ &tid.user,
++ &mask)) {
++ /* no mask found, set default */
++ mask = RSBAC_ACL_DEFAULT_U_MASK;
++ }
++ /* try default_u_acl */
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_u_handle, NULL, &desc,
++ rights_p)) {
++ *rights_p &= mask;
++ } else {
++ /* last resort: default rights */
++ *rights_p = default_u_rights & mask;
++ }
++ }
++ return 0;
++
++ case T_PROCESS:
++
++ /* Use default entry */
++ if (rsbac_ta_list_get_data_ttl(ta_number, default_p_handle,
++ NULL, &desc, rights_p)) {
++ /* last resort: default rights */
++ *rights_p = default_p_rights;
++ }
++ return 0;
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ case T_GROUP:
++ /* default entry? */
++ if (tid.group == RSBAC_NO_GROUP) {
++ if (rsbac_ta_list_get_data_ttl
++ (ta_number, default_g_handle, NULL, &desc,
++ rights_p)) {
++ /* last resort: default rights */
++ *rights_p = default_g_rights;
++ }
++ return 0;
++ }
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ g_handle,
++ NULL,
++ &tid.group,
++ &desc, &i_rights)) {
++ *rights_p |= i_rights;
++ } else {
++ rsbac_acl_rights_vector_t mask;
++
++ /* get mask to filter through */
++ if (rsbac_ta_list_lol_get_data_ttl(ta_number,
++ g_handle,
++ NULL,
++ &tid.group,
++ &mask)) {
++ /* no mask found, set default */
++ mask = RSBAC_ACL_DEFAULT_G_MASK;
++ }
++			/* try default_g_acl */
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_g_handle, NULL, &desc,
++ rights_p)) {
++ *rights_p &= mask;
++ } else {
++ /* last resort: default rights */
++ *rights_p = default_g_rights & mask;
++ }
++ }
++ return 0;
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ case T_NETDEV:
++ /* default entry? */
++
++ if (!tid.netdev[0]) {
++ if (rsbac_ta_list_get_data_ttl
++ (ta_number, default_netdev_handle, NULL, &desc,
++ rights_p)) {
++ /* last resort: default rights */
++ *rights_p = default_netdev_rights;
++ }
++ return 0;
++ }
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ netdev_handle,
++ NULL,
++ &tid.netdev,
++ &desc, &i_rights)) {
++ *rights_p |= i_rights;
++ } else {
++ rsbac_acl_rights_vector_t mask;
++
++ /* get mask to filter through */
++ if (rsbac_ta_list_lol_get_data_ttl(ta_number,
++ netdev_handle,
++ NULL,
++ &tid.netdev,
++ &mask)) {
++ /* no mask found, set default */
++ mask = RSBAC_ACL_DEFAULT_NETDEV_MASK;
++ }
++			/* try default_netdev_acl */
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_netdev_handle, NULL, &desc,
++ rights_p)) {
++ *rights_p &= mask;
++ } else {
++ /* last resort: default rights */
++ *rights_p = default_netdev_rights & mask;
++ }
++ }
++ return 0;
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ /* rights to template itself */
++ case T_NETTEMP_NT:
++ /* default entry? */
++
++ if (!tid.nettemp) {
++ if (rsbac_ta_list_get_data_ttl
++ (ta_number, default_nettemp_nt_handle, NULL,
++ &desc, rights_p)) {
++ /* last resort: default rights */
++ *rights_p = default_nettemp_nt_rights;
++ }
++ return 0;
++ }
++ if (!rsbac_ta_net_template_exist(ta_number, tid.nettemp))
++ return -RSBAC_EINVALIDTARGET;
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ nettemp_nt_handle,
++ NULL,
++ &tid.nettemp,
++ &desc, &i_rights)) {
++ *rights_p |= i_rights;
++ } else {
++ rsbac_acl_rights_vector_t mask;
++
++ /* get mask to filter through */
++ if (rsbac_ta_list_lol_get_data_ttl(ta_number,
++ nettemp_nt_handle,
++ NULL,
++ &tid.nettemp,
++ &mask)) {
++ /* no mask found, set default */
++ mask = RSBAC_ACL_DEFAULT_NETTEMP_MASK;
++ }
++			/* try default_nettemp_nt_acl */
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_nettemp_nt_handle, NULL,
++ &desc, rights_p)) {
++ *rights_p &= mask;
++ } else {
++ /* last resort: default rights */
++ *rights_p =
++ default_nettemp_nt_rights & mask;
++ }
++ }
++ return 0;
++
++ /* rights to netobjs fitting this template */
++ case T_NETTEMP:
++ /* default entry? */
++
++ if (!tid.nettemp) {
++ if (rsbac_ta_list_get_data_ttl
++ (ta_number, default_netobj_handle, NULL, &desc,
++ rights_p)) {
++ /* last resort: default rights */
++ *rights_p = default_netobj_rights;
++ }
++ return 0;
++ }
++ if (!rsbac_ta_net_template_exist(ta_number, tid.nettemp))
++ return -RSBAC_EINVALIDTARGET;
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ nettemp_handle,
++ NULL,
++ &tid.nettemp,
++ &desc, &i_rights)) {
++ *rights_p |= i_rights;
++ } else {
++ rsbac_acl_rights_vector_t mask;
++
++ /* get mask to filter through */
++ if (rsbac_ta_list_lol_get_data_ttl(ta_number,
++ nettemp_handle,
++ NULL,
++ &tid.nettemp,
++ &mask)) {
++ /* no mask found, set default */
++ mask = RSBAC_ACL_DEFAULT_NETOBJ_MASK;
++ }
++			/* try default_netobj_acl */
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_netobj_handle, NULL, &desc,
++ rights_p)) {
++ *rights_p &= mask;
++ } else {
++ /* last resort: default rights */
++ *rights_p = default_netobj_rights & mask;
++ }
++ }
++ return 0;
++
++ case T_NETOBJ:
++ /* default entry? */
++
++ if (!tid.nettemp) {
++ if (rsbac_ta_list_get_data_ttl
++ (ta_number, default_netobj_handle, NULL, &desc,
++ rights_p)) {
++ /* last resort: default rights */
++ *rights_p = default_netobj_rights;
++ }
++ return 0;
++ }
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ netobj_handle,
++ NULL,
++ &tid.netobj.sock_p,
++ &desc, &i_rights)) {
++ *rights_p |= i_rights;
++ } else {
++ rsbac_acl_rights_vector_t mask;
++ rsbac_net_temp_id_t temp = 0;
++
++ /* get mask to filter through */
++ if (rsbac_ta_list_lol_get_data_ttl(ta_number,
++ nettemp_handle,
++ NULL,
++ &temp, &mask)) {
++ /* no mask found, set default */
++ mask = RSBAC_ACL_DEFAULT_NETOBJ_MASK;
++ }
++ /* try nettemp_acl */
++ if(!ta_number && tid.netobj.local_temp)
++ temp = tid.netobj.local_temp;
++ else
++ rsbac_ta_net_lookup_templates(ta_number,
++ &tid.netobj,
++ &temp, NULL);
++
++ if (temp
++ &&
++ !rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ nettemp_handle,
++ NULL, &temp,
++ &desc,
++ &i_rights))
++ {
++ *rights_p |= i_rights;
++ } else {
++ /* get mask to filter through */
++ if (temp
++ &&
++ rsbac_ta_list_lol_get_data_ttl
++ (ta_number, nettemp_handle, NULL,
++ &temp, &mask)) {
++ /* no mask found, set default */
++ mask =
++ RSBAC_ACL_DEFAULT_NETOBJ_MASK;
++ }
++ /* try default_netobj_acl */
++ if (!rsbac_ta_list_get_data_ttl
++ (ta_number, default_netobj_handle,
++ NULL, &desc, rights_p)) {
++ *rights_p &= mask;
++ } else {
++ /* last resort: default rights */
++ *rights_p =
++ default_netobj_rights & mask;
++ }
++ }
++ }
++ return 0;
++#endif /* NET_OBJ_PROT */
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++}
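++
++/*
++ * Illustrative sketch, not part of the original RSBAC sources: how a caller
++ * could use rsbac_acl_get_rights() to test whether a user subject holds READ
++ * on a file.  The wrapper name and the assumed signature (ta_number, target,
++ * tid, subj_type, subj_id, rights_p, inherit) are inferred from the parameter
++ * uses above, not taken from a header.
++ */
++static inline rsbac_boolean_t example_user_may_read(union rsbac_target_id_t file_tid,
++						    rsbac_uid_t uid)
++{
++	rsbac_acl_rights_vector_t rights = 0;
++
++	/* ta_number 0 = no transaction, inherit = TRUE follows the parents */
++	if (rsbac_acl_get_rights(0, T_FILE, file_tid, ACLS_USER, uid,
++				 &rights, TRUE))
++		return FALSE;	/* be restrictive on lookup errors */
++	return (rights & ((rsbac_acl_rights_vector_t) 1 << R_READ))
++		? TRUE : FALSE;
++}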
++
++/* rsbac_acl_get_single_right
++ * Show whether an individual right is set for the given target and subject.
++ * If the right is not set directly, all parents are checked as well, unless
++ * the right has been masked out.  (Special case SUPERVISOR: it is only
++ * filtered out if CONFIG_RSBAC_ACL_SUPER_FILTER is set *and* supervisor has
++ * been masked out.)
++ */
++
++int rsbac_acl_get_single_right(enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ enum rsbac_acl_subject_type_t subj_type,
++ rsbac_acl_subject_id_t subj_id,
++ enum rsbac_adf_request_t right,
++ rsbac_boolean_t * result)
++{
++ struct rsbac_acl_device_list_item_t *device_p;
++ rsbac_acl_rights_vector_t i_rvec;
++ rsbac_acl_rights_vector_t i_rights;
++ struct rsbac_acl_entry_desc_t desc;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_get_single_right(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (!result)
++ return -RSBAC_EINVALIDPOINTER;
++ if ((subj_type >= ACLS_NONE)
++ || (right >= ACLR_NONE)
++ )
++ return -RSBAC_EINVALIDVALUE;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_get_single_right(): called from interrupt!\n");
++ }
++#endif
++ i_rvec = (rsbac_acl_rights_vector_t) 1 << right;
++
++ desc.subj_type = subj_type;
++ desc.subj_id = subj_id;
++
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ /* default entry? */
++ if (RSBAC_IS_ZERO_DEV(tid.file.device) && !tid.file.inode
++ && !tid.file.dentry_p) {
++ if (!rsbac_ta_list_get_data_ttl
++ (0, default_fd_handle, NULL, &desc,
++ &i_rights)) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_fd_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ return 0;
++ }
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ /* use loop for inheritance - used to be recursive calls */
++ for (;;) {
++ /* lookup device */
++ device_p = acl_lookup_device(tid.file.device);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_get_single_right(): Could not lookup device, blindly granting access!\n");
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ *result = TRUE;
++ return 0;
++ }
++			if (!rsbac_ta_list_lol_get_subdata_ttl(0,
++							       device_p->handle,
++							       NULL,
++							       &tid.file.inode,
++							       &desc,
++							       &i_rights)
++			    ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return 0;
++ }
++
++ {
++ enum rsbac_target_t parent_target;
++ union rsbac_target_id_t parent_tid;
++
++#ifndef CONFIG_RSBAC_ACL_SUPER_FILTER
++ if (right != ACLR_SUPERVISOR)
++#endif
++ {
++ rsbac_acl_rights_vector_t mask;
++
++ /* get mask to filter through */
++ if (!rsbac_ta_list_lol_get_data_ttl
++ (0, device_p->handle,
++ NULL, &tid.file.inode, &mask)
++ && !(mask & i_rvec)
++ ) {
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ *result = FALSE;
++ return 0;
++ }
++ }
++
++ /* inheritance possible? */
++ if (!rsbac_get_parent
++ (target, tid, &parent_target,
++ &parent_tid)) {
++ target = parent_target;
++ tid = parent_tid;
++ continue;
++ } else {
++ /* no inheritance possible -> try default_fd_acl */
++ if (!rsbac_ta_list_get_data_ttl
++ (0, default_fd_handle, NULL,
++ &desc, &i_rights)
++ ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_fd_rights &
++ i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ /* free access to device_list_head - see above */
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return 0;
++ }
++ }
++ } /* end of for(;;) for inheritance */
++
++ case T_DEV:
++ if (RSBAC_IS_ZERO_DEV_DESC(tid.dev)) {
++ if (!rsbac_ta_list_get_data_ttl
++ (0, default_dev_handle, NULL, &desc,
++ &i_rights)) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_dev_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ return 0;
++ }
++
++ if (tid.dev.type >= D_block_major) {
++ tid.dev.type -= (D_block_major - D_block);
++ if (!rsbac_ta_list_lol_get_subdata_ttl
++ (0, dev_major_handle, NULL, &tid.dev, &desc,
++ &i_rights)
++ ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ return 0;
++ }
++#ifndef CONFIG_RSBAC_ACL_SUPER_FILTER
++ if (right != ACLR_SUPERVISOR)
++#endif
++ {
++ rsbac_acl_rights_vector_t mask;
++
++ /* get mask to filter through */
++ if (!rsbac_ta_list_lol_get_data_ttl
++ (0, dev_major_handle, NULL, &tid.dev,
++ &mask)
++ && !(mask & i_rvec)
++ ) {
++ *result = FALSE;
++ return 0;
++ }
++ }
++ /* no inheritance possible -> try default acl */
++ if (!rsbac_ta_list_get_data_ttl
++ (0, default_dev_handle, NULL, &desc, &i_rights)
++ ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_dev_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ return 0;
++ }
++ if (!rsbac_ta_list_lol_get_subdata_ttl(0, dev_handle,
++ NULL,
++ &tid.dev,
++ &desc, &i_rights)
++ ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ return 0;
++ }
++#ifndef CONFIG_RSBAC_ACL_SUPER_FILTER
++ if (right != ACLR_SUPERVISOR)
++#endif
++ {
++ rsbac_acl_rights_vector_t mask;
++
++ /* get mask to filter through */
++ if (!rsbac_ta_list_lol_get_data_ttl(0, dev_handle,
++ NULL,
++ &tid.dev,
++ &mask)
++ && !(mask & i_rvec)
++ ) {
++ *result = FALSE;
++ return 0;
++ }
++ }
++ if (!rsbac_ta_list_lol_get_subdata_ttl(0, dev_major_handle,
++ NULL,
++ &tid.dev,
++ &desc, &i_rights)
++ ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ return 0;
++ }
++#ifndef CONFIG_RSBAC_ACL_SUPER_FILTER
++ if (right != ACLR_SUPERVISOR)
++#endif
++ {
++ rsbac_acl_rights_vector_t mask;
++
++ /* get mask to filter through */
++ if (!rsbac_ta_list_lol_get_data_ttl
++ (0, dev_major_handle, NULL, &tid.dev, &mask)
++ && !(mask & i_rvec)
++ ) {
++ *result = FALSE;
++ return 0;
++ }
++ }
++ /* no inheritance possible -> try default acl */
++ if (!rsbac_ta_list_get_data_ttl(0, default_dev_handle,
++ NULL, &desc, &i_rights)
++ ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_dev_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ return 0;
++
++ case T_IPC:
++ /* Use default entry */
++ if (!rsbac_ta_list_get_data_ttl(0, default_ipc_handle,
++ NULL, &desc, &i_rights)) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_ipc_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ return 0;
++
++ case T_SCD:
++ if (tid.scd == AST_none) {
++ if (!rsbac_ta_list_get_data_ttl
++ (0, default_scd_handle, NULL, &desc,
++ &i_rights)) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_scd_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ return 0;
++ }
++
++ if (!rsbac_ta_list_lol_get_subdata_ttl(0, scd_handle,
++ NULL,
++ &tid.scd,
++ &desc, &i_rights)
++ ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ return 0;
++ }
++#ifndef CONFIG_RSBAC_ACL_SUPER_FILTER
++ if (right != ACLR_SUPERVISOR)
++#endif
++ {
++ rsbac_acl_rights_vector_t mask;
++
++ /* get mask to filter through */
++ if (!rsbac_ta_list_lol_get_data_ttl(0, scd_handle,
++ NULL,
++ &tid.scd,
++ &mask)
++ && !(mask & i_rvec)
++ ) {
++ *result = FALSE;
++ return 0;
++ }
++ }
++
++ /* no inheritance possible -> try default acl */
++ if (!rsbac_ta_list_get_data_ttl(0, default_scd_handle,
++ NULL, &desc, &i_rights)
++ ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_scd_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ return 0;
++
++ case T_USER:
++ if (tid.user == RSBAC_NO_USER) {
++ if (!rsbac_ta_list_get_data_ttl
++ (0, default_u_handle, NULL, &desc,
++ &i_rights)) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_u_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ return 0;
++ }
++
++ if (!rsbac_ta_list_lol_get_subdata_ttl(0, u_handle,
++ NULL,
++ &tid.user,
++ &desc, &i_rights)
++ ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ return 0;
++ }
++#ifndef CONFIG_RSBAC_ACL_SUPER_FILTER
++ if (right != ACLR_SUPERVISOR)
++#endif
++ {
++ rsbac_acl_rights_vector_t mask;
++
++ /* get mask to filter through */
++ if (!rsbac_ta_list_lol_get_data_ttl(0, u_handle,
++ NULL,
++ &tid.user,
++ &mask)
++ && !(mask & i_rvec)
++ ) {
++ *result = FALSE;
++ return 0;
++ }
++ }
++
++ /* no inheritance possible -> try default acl */
++ if (!rsbac_ta_list_get_data_ttl(0, default_u_handle,
++ NULL, &desc, &i_rights)
++ ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_u_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ return 0;
++
++ case T_PROCESS:
++ /* Use default entry */
++ if (!rsbac_ta_list_get_data_ttl(0, default_p_handle,
++ NULL, &desc, &i_rights)) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_p_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ return 0;
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ case T_GROUP:
++ if (tid.group == RSBAC_NO_GROUP) {
++ if (!rsbac_ta_list_get_data_ttl
++ (0, default_g_handle, NULL, &desc,
++ &i_rights)) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_g_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ return 0;
++ }
++
++ if (!rsbac_ta_list_lol_get_subdata_ttl(0, g_handle,
++ NULL,
++ &tid.group,
++ &desc, &i_rights)
++ ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ return 0;
++ }
++#ifndef CONFIG_RSBAC_ACL_SUPER_FILTER
++ if (right != ACLR_SUPERVISOR)
++#endif
++ {
++ rsbac_acl_rights_vector_t mask;
++
++ /* get mask to filter through */
++ if (!rsbac_ta_list_lol_get_data_ttl(0, g_handle,
++ NULL,
++ &tid.group,
++ &mask)
++ && !(mask & i_rvec)
++ ) {
++ *result = FALSE;
++ return 0;
++ }
++ }
++
++ /* no inheritance possible -> try default acl */
++ if (!rsbac_ta_list_get_data_ttl(0, default_g_handle,
++ NULL, &desc, &i_rights)
++ ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_g_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ return 0;
++#endif
++
++#if defined(CONFIG_RSBAC_ACL_NET_DEV_PROT)
++ case T_NETDEV:
++ if (!tid.netdev[0]) {
++ if (!rsbac_ta_list_get_data_ttl
++ (0, default_netdev_handle, NULL, &desc,
++ &i_rights)) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_netdev_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ return 0;
++ }
++
++ if (!rsbac_ta_list_lol_get_subdata_ttl(0, netdev_handle,
++ NULL,
++ &tid.netdev,
++ &desc, &i_rights)
++ ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ return 0;
++ }
++#ifndef CONFIG_RSBAC_ACL_SUPER_FILTER
++ if (right != ACLR_SUPERVISOR)
++#endif
++ {
++ rsbac_acl_rights_vector_t mask;
++
++ /* get mask to filter through */
++ if (!rsbac_ta_list_lol_get_data_ttl
++ (0, netdev_handle, NULL, &tid.netdev, &mask)
++ && !(mask & i_rvec)
++ ) {
++ *result = FALSE;
++ return 0;
++ }
++ }
++
++ /* no inheritance possible -> try default acl */
++ if (!rsbac_ta_list_get_data_ttl(0, default_netdev_handle,
++ NULL, &desc, &i_rights)
++ ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_netdev_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ return 0;
++#endif
++
++#if defined(CONFIG_RSBAC_ACL_NET_OBJ_PROT)
++ case T_NETTEMP_NT:
++ case T_NETTEMP:
++ if (!tid.nettemp) {
++ if (!rsbac_ta_list_get_data_ttl
++ (0, default_nettemp_nt_handle, NULL, &desc,
++ &i_rights)) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_nettemp_nt_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ return 0;
++ }
++
++		/* A template that is about to be created cannot exist yet, so skip the existence check and the nettemp_nt list for CREATE */
++ if (right != R_CREATE) {
++ if (!rsbac_net_template_exist(tid.nettemp))
++				return -RSBAC_EINVALIDTARGET;
++ if (!rsbac_ta_list_lol_get_subdata_ttl
++ (0, nettemp_nt_handle, NULL, &tid.nettemp,
++ &desc, &i_rights)
++ && (i_rights & i_rvec)
++ ) {
++ *result = TRUE;
++ return 0;
++ }
++#ifndef CONFIG_RSBAC_ACL_SUPER_FILTER
++ if (right != ACLR_SUPERVISOR)
++#endif
++ {
++ rsbac_acl_rights_vector_t mask;
++
++ /* get mask to filter through */
++ if (!rsbac_ta_list_lol_get_data_ttl
++ (0, nettemp_nt_handle, NULL,
++ &tid.nettemp, &mask)
++ && !(mask & i_rvec)
++ ) {
++ *result = FALSE;
++ return 0;
++ }
++ }
++ }
++
++ /* no inheritance possible -> try default acl */
++ if (!rsbac_ta_list_get_data_ttl
++ (0, default_nettemp_nt_handle, NULL, &desc, &i_rights)
++ ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_nettemp_nt_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ return 0;
++
++ case T_NETOBJ:
++ if (!tid.netobj.sock_p) {
++ if (!rsbac_ta_list_get_data_ttl
++ (0, default_netobj_handle, NULL, &desc,
++ &i_rights)) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_netobj_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ return 0;
++ }
++
++ if (!rsbac_ta_list_lol_get_subdata_ttl(0, netobj_handle,
++ NULL,
++ &tid.netobj.sock_p,
++ &desc, &i_rights)
++ ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ return 0;
++ }
++#ifndef CONFIG_RSBAC_ACL_SUPER_FILTER
++ if (right != ACLR_SUPERVISOR)
++#endif
++ {
++ rsbac_acl_rights_vector_t mask;
++
++ /* get mask to filter through */
++ if (!rsbac_ta_list_lol_get_data_ttl
++ (0, netobj_handle, NULL, &tid.netobj.sock_p,
++ &mask)
++ && !(mask & i_rvec)
++ ) {
++ *result = FALSE;
++ return 0;
++ }
++ }
++ /* Try net template */
++ {
++ rsbac_net_temp_id_t temp = 0;
++
++ if (rsbac_net_remote_request(right)) {
++ if(tid.netobj.remote_temp)
++ temp = tid.netobj.remote_temp;
++ else
++ rsbac_ta_net_lookup_templates(0,
++ &tid.netobj,
++ NULL, &temp);
++ } else {
++ if(tid.netobj.local_temp)
++ temp = tid.netobj.local_temp;
++ else
++ rsbac_ta_net_lookup_templates(0,
++ &tid.netobj,
++ &temp, NULL);
++ }
++ if (temp
++ && !rsbac_ta_list_lol_get_subdata_ttl(0,
++ nettemp_handle,
++ NULL,
++ &temp,
++ &desc,
++ &i_rights)
++ ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ return 0;
++ }
++#ifndef CONFIG_RSBAC_ACL_SUPER_FILTER
++ if (right != ACLR_SUPERVISOR)
++#endif
++ {
++ rsbac_acl_rights_vector_t mask;
++
++ /* get mask from template to filter through */
++ if (!rsbac_ta_list_lol_get_data_ttl
++ (0, nettemp_handle, NULL, &temp, &mask)
++ && !(mask & i_rvec)
++ ) {
++ *result = FALSE;
++ return 0;
++ }
++ }
++ }
++
++ /* no inheritance possible -> try default acl */
++ if (!rsbac_ta_list_get_data_ttl(0, default_netobj_handle,
++ NULL, &desc, &i_rights)
++ ) {
++ if (i_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ } else {
++ if (default_netobj_rights & i_rvec)
++ *result = TRUE;
++ else
++ *result = FALSE;
++ }
++ return 0;
++#endif /* NET_OBJ_PROT */
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++}
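++
++/*
++ * Illustrative sketch, not part of the original RSBAC sources: checking a
++ * single right with rsbac_acl_get_single_right(), whose signature is given
++ * above.  The wrapper name and the choice of R_SEARCH on a DIR target are
++ * examples only.
++ */
++static inline rsbac_boolean_t example_group_may_search(union rsbac_target_id_t dir_tid,
++						       rsbac_acl_group_id_t group)
++{
++	rsbac_boolean_t allowed = FALSE;
++
++	if (rsbac_acl_get_single_right(T_DIR, dir_tid, ACLS_GROUP, group,
++				       R_SEARCH, &allowed))
++		return FALSE;	/* treat lookup errors as "not granted" */
++	return allowed;
++}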
++
++/*************************************************
++ * rsbac_acl_get_tlist
++ * Get subjects from ACL entries for given target.
++ */
++
++int rsbac_acl_get_tlist(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t tid,
++ struct rsbac_acl_entry_t **entry_pp,
++ rsbac_time_t ** ttl_pp)
++{
++ int count = 0;
++ struct rsbac_acl_device_list_item_t *device_p;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_get_tlist(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (!entry_pp)
++ return -RSBAC_EINVALIDPOINTER;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_get_tlist(): called from interrupt!\n");
++ }
++#endif
++ switch (target) {
++ case T_FD:
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ /* default entry? */
++ if (RSBAC_IS_ZERO_DEV(tid.file.device) && !tid.file.inode
++ && !tid.file.dentry_p)
++ return rsbac_ta_list_get_all_items_ttl(ta_number,
++ default_fd_handle,
++ (void **)
++ entry_pp,
++ ttl_pp);
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ /* lookup device */
++ device_p = acl_lookup_device(tid.file.device);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_acl_get_tlist(): Could not lookup device!\n");
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ /* protect this list */
++		count = rsbac_ta_list_lol_get_all_subitems_ttl(ta_number,
++							       device_p->handle,
++							       &tid.file.inode,
++							       (void **) entry_pp,
++							       ttl_pp);
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return count;
++
++ case T_DEV:
++ if (RSBAC_IS_ZERO_DEV_DESC(tid.dev))
++ return rsbac_ta_list_get_all_items_ttl(ta_number,
++ default_dev_handle,
++ (void **)
++ entry_pp,
++ ttl_pp);
++ else
++ switch (tid.dev.type) {
++ case D_char:
++ case D_block:
++ return
++ rsbac_ta_list_lol_get_all_subitems_ttl
++ (ta_number, dev_handle, &tid.dev,
++ (void **) entry_pp, ttl_pp);
++
++ case D_char_major:
++ case D_block_major:
++ tid.dev.type -= (D_block_major - D_block);
++ return
++ rsbac_ta_list_lol_get_all_subitems_ttl
++ (ta_number, dev_major_handle, &tid.dev,
++ (void **) entry_pp, ttl_pp);
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ case T_IPC:
++ /* default entry */
++ return rsbac_ta_list_get_all_items_ttl(ta_number,
++ default_ipc_handle,
++ (void **) entry_pp,
++ ttl_pp);
++
++ case T_SCD:
++ if ((tid.scd == AST_none)
++ || (tid.scd == ST_none)
++ )
++ return rsbac_ta_list_get_all_items_ttl(ta_number,
++ default_scd_handle,
++ (void **)
++ entry_pp,
++ ttl_pp);
++ else
++ return
++ rsbac_ta_list_lol_get_all_subitems_ttl
++ (ta_number, scd_handle, &tid.scd,
++ (void **) entry_pp, ttl_pp);
++
++ case T_USER:
++ if (tid.user == RSBAC_NO_USER)
++ return rsbac_ta_list_get_all_items_ttl(ta_number,
++ default_u_handle,
++ (void **)
++ entry_pp,
++ ttl_pp);
++ else
++ return
++ rsbac_ta_list_lol_get_all_subitems_ttl
++ (ta_number, u_handle, &tid.user,
++ (void **) entry_pp, ttl_pp);
++
++ case T_PROCESS:
++ return rsbac_ta_list_get_all_items_ttl(ta_number,
++ default_p_handle,
++ (void **) entry_pp,
++ ttl_pp);
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ case T_GROUP:
++ if (tid.group == RSBAC_NO_GROUP)
++ return rsbac_ta_list_get_all_items_ttl(ta_number,
++ default_g_handle,
++ (void **)
++ entry_pp,
++ ttl_pp);
++ else
++ return
++ rsbac_ta_list_lol_get_all_subitems_ttl
++ (ta_number, g_handle, &tid.group,
++ (void **) entry_pp, ttl_pp);
++#endif
++
++#if defined(CONFIG_RSBAC_ACL_NET_DEV_PROT)
++ case T_NETDEV:
++ if (!tid.netdev[0])
++ return rsbac_ta_list_get_all_items_ttl(ta_number,
++ default_netdev_handle,
++ (void **)
++ entry_pp,
++ ttl_pp);
++ else
++ return
++ rsbac_ta_list_lol_get_all_subitems_ttl
++ (ta_number, netdev_handle, &tid.netdev,
++ (void **) entry_pp, ttl_pp);
++#endif
++
++#if defined(CONFIG_RSBAC_ACL_NET_OBJ_PROT)
++ case T_NETTEMP_NT:
++ if (!tid.nettemp)
++ return rsbac_ta_list_get_all_items_ttl(ta_number,
++ default_nettemp_nt_handle,
++ (void **)
++ entry_pp,
++ ttl_pp);
++ if (!rsbac_ta_net_template_exist(ta_number, tid.nettemp))
++ return -RSBAC_EINVALIDTARGET;
++ return rsbac_ta_list_lol_get_all_subitems_ttl(ta_number,
++ nettemp_nt_handle,
++ &tid.nettemp,
++ (void **)
++ entry_pp,
++ ttl_pp);
++
++ case T_NETTEMP:
++ if (!tid.nettemp)
++ return -RSBAC_EINVALIDTARGET;
++ if (!rsbac_ta_net_template_exist(ta_number, tid.nettemp))
++ return -RSBAC_EINVALIDTARGET;
++ return rsbac_ta_list_lol_get_all_subitems_ttl(ta_number,
++ nettemp_handle,
++ &tid.nettemp,
++ (void **)
++ entry_pp,
++ ttl_pp);
++
++ case T_NETOBJ:
++ if (!tid.nettemp)
++ return rsbac_ta_list_get_all_items_ttl(ta_number,
++ default_netobj_handle,
++ (void **)
++ entry_pp,
++ ttl_pp);
++ else
++ return
++ rsbac_ta_list_lol_get_all_subitems_ttl
++ (ta_number, netobj_handle, &tid.netobj.sock_p,
++ (void **) entry_pp, ttl_pp);
++#endif /* NET_OBJ_PROT */
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++}
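++
++/*
++ * Illustrative sketch, not part of the original RSBAC sources: listing the
++ * ACL entries of a file with rsbac_acl_get_tlist().  The assumption that the
++ * result arrays are allocated for the caller and must be freed with
++ * rsbac_kfree() follows the convention documented for the group helpers
++ * below; the wrapper name is an example only.
++ */
++static inline void example_count_acl_entries(union rsbac_target_id_t file_tid)
++{
++	struct rsbac_acl_entry_t *entry_p = NULL;
++	rsbac_time_t *ttl_p = NULL;
++	int count;
++
++	count = rsbac_acl_get_tlist(0, T_FILE, file_tid, &entry_p, &ttl_p);
++	if (count > 0) {
++		rsbac_printk(KERN_DEBUG "example: %i ACL entries found\n",
++			     count);
++		rsbac_kfree(entry_p);
++		if (ttl_p)
++			rsbac_kfree(ttl_p);
++	}
++}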
++
++/* Remove a subject from all acls (but not from group memberships, see remove_user) */
++int rsbac_acl_remove_subject(rsbac_list_ta_number_t ta_number,
++ struct rsbac_acl_entry_desc_t desc)
++{
++ struct rsbac_acl_device_list_item_t *device_p;
++ int srcu_idx;
++
++ if (desc.subj_type >= ACLS_NONE)
++ return -RSBAC_EINVALIDVALUE;
++
++ /* remove from default ACLs */
++ rsbac_ta_list_remove(ta_number, default_fd_handle, &desc);
++ rsbac_ta_list_remove(ta_number, default_dev_handle, &desc);
++ rsbac_ta_list_remove(ta_number, default_ipc_handle, &desc);
++ rsbac_ta_list_remove(ta_number, default_scd_handle, &desc);
++ rsbac_ta_list_remove(ta_number, default_u_handle, &desc);
++ rsbac_ta_list_remove(ta_number, default_p_handle, &desc);
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ rsbac_ta_list_remove(ta_number, default_g_handle, &desc);
++#endif
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ rsbac_ta_list_remove(ta_number, default_netdev_handle, &desc);
++#endif
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ rsbac_ta_list_remove(ta_number, default_nettemp_nt_handle, &desc);
++ rsbac_ta_list_remove(ta_number, default_netobj_handle, &desc);
++#endif
++
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = rcu_dereference(device_list_head_p)->head;
++ while (device_p) {
++ rsbac_ta_list_lol_subremove_from_all(ta_number,
++ device_p->handle,
++ &desc);
++ device_p = device_p->next;
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++
++ /* dev list */
++ rsbac_ta_list_lol_subremove_from_all(ta_number, dev_major_handle,
++ &desc);
++ rsbac_ta_list_lol_subremove_from_all(ta_number, dev_handle, &desc);
++
++ /* scd list */
++ rsbac_ta_list_lol_subremove_from_all(ta_number, scd_handle, &desc);
++
++ /* user list */
++ rsbac_ta_list_lol_subremove_from_all(ta_number, u_handle, &desc);
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ /* Linux group list */
++ rsbac_ta_list_lol_subremove_from_all(ta_number, g_handle, &desc);
++#endif
++
++#ifdef CONFIG_RSBAC_ACL_NET_DEV_PROT
++ /* netdev list */
++ rsbac_ta_list_lol_subremove_from_all(ta_number, netdev_handle,
++ &desc);
++#endif
++#ifdef CONFIG_RSBAC_ACL_NET_OBJ_PROT
++ rsbac_ta_list_lol_subremove_from_all(ta_number, nettemp_nt_handle,
++ &desc);
++ rsbac_ta_list_lol_subremove_from_all(ta_number, nettemp_handle,
++ &desc);
++ rsbac_ta_list_lol_subremove_from_all(ta_number, netobj_handle,
++ &desc);
++#endif
++
++ return 0;
++}
++
++/* Add a group and return its id in *group_id_p. */
++/* If *group_id_p is 0 on entry, a new id is generated; otherwise the given id is tried. */
++int rsbac_acl_add_group(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t owner,
++ enum rsbac_acl_group_type_t type,
++ char *name, rsbac_acl_group_id_t * group_id_p)
++{
++ struct rsbac_acl_group_entry_t entry;
++ int err = 0;
++
++ if (type >= ACLG_NONE)
++ return -RSBAC_EINVALIDVALUE;
++ if (!name || !group_id_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!name[0])
++ return -RSBAC_EINVALIDVALUE;
++ entry.owner = owner;
++ entry.type = type;
++ strncpy(entry.name, name, RSBAC_ACL_GROUP_NAMELEN - 1);
++ entry.name[RSBAC_ACL_GROUP_NAMELEN - 1] = 0;
++ if (!*group_id_p) {
++ /* step new group counter */
++ group_last_new++;
++ /* Just in case the counter has wrapped. It is almost impossible that all IDs are in use. */
++ while (!group_last_new
++ || rsbac_ta_list_exist(ta_number, group_handle,
++ &group_last_new))
++ group_last_new++;
++
++ entry.id = group_last_new;
++ } else {
++ if (rsbac_ta_list_exist
++ (ta_number, group_handle, group_id_p)) {
++ return -RSBAC_EEXISTS;
++ } else
++ entry.id = *group_id_p;
++ }
++ if (rsbac_ta_list_add_ttl
++ (ta_number, group_handle, 0, &entry.id, &entry))
++ err = -RSBAC_ECOULDNOTADDITEM;
++ else {
++ *group_id_p = entry.id;
++ }
++ return err;
++}
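++
++/*
++ * Illustrative sketch, not part of the original RSBAC sources: creating a
++ * group with an automatically chosen id.  Passing *group_p == 0 asks
++ * rsbac_acl_add_group() to pick a free id, as described above.  Group type
++ * ACLG_GLOBAL is used because it appears in this file; the wrapper and the
++ * group name are examples only.
++ */
++static inline int example_make_global_group(rsbac_uid_t owner,
++					    rsbac_acl_group_id_t *group_p)
++{
++	char name[] = "example-group";
++
++	*group_p = 0;	/* 0 = let rsbac_acl_add_group() generate a free id */
++	return rsbac_acl_add_group(0, owner, ACLG_GLOBAL, name, group_p);
++}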
++
++int rsbac_acl_change_group(rsbac_list_ta_number_t ta_number,
++ rsbac_acl_group_id_t id,
++ rsbac_uid_t owner,
++ enum rsbac_acl_group_type_t type, char *name)
++{
++ struct rsbac_acl_group_entry_t entry;
++
++ if (!id)
++ return -RSBAC_EINVALIDVALUE;
++ if (!rsbac_ta_list_exist(ta_number, group_handle, &id))
++ return -RSBAC_ENOTFOUND;
++ if (!name)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!name[0])
++ return -RSBAC_EINVALIDVALUE;
++ entry.id = id;
++ entry.owner = owner;
++ entry.type = type;
++ strncpy(entry.name, name, RSBAC_ACL_GROUP_NAMELEN);
++ entry.name[RSBAC_ACL_GROUP_NAMELEN - 1] = 0;
++ return rsbac_ta_list_add_ttl(ta_number, group_handle, 0, &entry.id,
++ &entry);
++}
++
++int rsbac_acl_remove_group(rsbac_list_ta_number_t ta_number,
++ rsbac_acl_group_id_t id)
++{
++ int err = 0;
++
++ if (!id)
++ return -RSBAC_EINVALIDVALUE;
++
++ err = rsbac_ta_list_remove(ta_number, group_handle, &id);
++ if (!err) {
++ struct rsbac_acl_entry_desc_t desc;
++
++ /* cleanup group memberships */
++ rsbac_ta_list_lol_subremove_from_all(ta_number, gm_handle,
++ &id);
++ desc.subj_type = ACLS_GROUP;
++ desc.subj_id = id;
++ err = rsbac_acl_remove_subject(ta_number, desc);
++ }
++ return err;
++}
++
++int rsbac_acl_get_group_entry(rsbac_list_ta_number_t ta_number,
++ rsbac_acl_group_id_t group,
++ struct rsbac_acl_group_entry_t *entry_p)
++{
++ if (!group)
++ return -RSBAC_EINVALIDVALUE;
++ if (!entry_p)
++ return -RSBAC_EINVALIDPOINTER;
++ return rsbac_ta_list_get_data_ttl(ta_number, group_handle, NULL,
++ &group, entry_p);
++}
++
++int rsbac_acl_list_groups(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t owner,
++ rsbac_boolean_t include_global,
++ struct rsbac_acl_group_entry_t **entry_pp)
++{
++ long count;
++ struct rsbac_acl_group_entry_t *local_entry_p;
++
++ if (!entry_pp)
++ return -RSBAC_EINVALIDPOINTER;
++ count =
++ rsbac_ta_list_get_all_data(ta_number, group_handle,
++ (void **) &local_entry_p);
++ if (count > 0) {
++ long i;
++ long rescount = 0;
++
++ *entry_pp = rsbac_kmalloc(count * sizeof(**entry_pp));
++ if (!*entry_pp) {
++ rsbac_kfree(local_entry_p);
++ return -RSBAC_ENOMEM;
++ }
++ for (i = 0; i < count; i++) {
++ if ((local_entry_p[i].owner == owner)
++ || (include_global
++ && (local_entry_p[i].type == ACLG_GLOBAL)
++ )
++ ) {
++ memcpy(&(*entry_pp)[rescount],
++ &local_entry_p[i],
++ sizeof(local_entry_p[i]));
++ rescount++;
++ }
++ }
++ rsbac_kfree(local_entry_p);
++ count = rescount;
++ }
++ return count;
++}
++
++/* check group existence */
++rsbac_boolean_t rsbac_acl_group_exist(rsbac_acl_group_id_t group)
++{
++ if (!group)
++ return TRUE;
++ return rsbac_ta_list_exist(0, group_handle, &group);
++}
++
++int rsbac_acl_add_group_member(rsbac_list_ta_number_t ta_number,
++ rsbac_acl_group_id_t group,
++ rsbac_uid_t user, rsbac_time_t ttl)
++{
++ int err = 0;
++
++ if (!group)
++ return -RSBAC_EINVALIDVALUE;
++ if (!rsbac_ta_list_exist(ta_number, group_handle, &group))
++ return -RSBAC_EINVALIDVALUE;
++
++ if (!rsbac_ta_list_lol_exist(ta_number, gm_handle, &user)) {
++ err =
++ rsbac_ta_list_lol_add_ttl(ta_number, gm_handle, 0,
++ &user, NULL);
++ if (err)
++ return err;
++ }
++ return rsbac_ta_list_lol_subadd_ttl(ta_number, gm_handle, ttl,
++ &user, &group, NULL);
++}
++
++int rsbac_acl_remove_group_member(rsbac_list_ta_number_t ta_number,
++ rsbac_acl_group_id_t group,
++ rsbac_uid_t user)
++{
++ int err;
++
++ if (!group)
++ return -RSBAC_EINVALIDVALUE;
++ if (!rsbac_ta_list_exist(ta_number, group_handle, &group))
++ return -RSBAC_EINVALIDVALUE;
++
++ err =
++ rsbac_ta_list_lol_subremove(ta_number, gm_handle, &user,
++ &group);
++ /* cleanup empty gm items */
++ if (!err
++ && !rsbac_ta_list_lol_subcount(ta_number, gm_handle, &user)
++ )
++ err =
++ rsbac_ta_list_lol_remove(ta_number, gm_handle, &user);
++
++ return err;
++}
++
++/* check membership */
++rsbac_boolean_t rsbac_acl_group_member(rsbac_acl_group_id_t group,
++ rsbac_uid_t user)
++{
++ return rsbac_ta_list_lol_subexist(0, gm_handle, &user, &group);
++}
++
++/* Build an array of all ACL group memberships of the given user. */
++/* Returns the number of groups or a negative error code. */
++/* Attention: if the result is > 0, the returned memory must be freed by the caller with rsbac_kfree()! */
++int rsbac_acl_get_user_groups(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user,
++ rsbac_acl_group_id_t ** group_pp,
++ rsbac_time_t ** ttl_pp)
++{
++ return rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ gm_handle,
++ &user,
++ (void **) group_pp,
++ ttl_pp);
++}
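++
++/*
++ * Illustrative sketch, not part of the original RSBAC sources: querying the
++ * group memberships of a user and releasing the result arrays with
++ * rsbac_kfree(), as required by the comment above.  The wrapper name is an
++ * example only.
++ */
++static inline void example_show_group_count(rsbac_uid_t user)
++{
++	rsbac_acl_group_id_t *group_p = NULL;
++	rsbac_time_t *ttl_p = NULL;
++	int count;
++
++	count = rsbac_acl_get_user_groups(0, user, &group_p, &ttl_p);
++	if (count > 0) {
++		rsbac_printk(KERN_DEBUG "example: user is member of %i ACL groups\n",
++			     count);
++		rsbac_kfree(group_p);
++		if (ttl_p)
++			rsbac_kfree(ttl_p);
++	}
++}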
++
++/* Returns number of members or negative error */
++int rsbac_acl_get_group_members(rsbac_list_ta_number_t ta_number,
++ rsbac_acl_group_id_t group,
++ rsbac_uid_t user_array[],
++ rsbac_time_t ttl_array[], int maxnum)
++{
++ long desc_count;
++ long i;
++ rsbac_uid_t *user_p;
++ int err = 0;
++
++ if (!group || (maxnum <= 0))
++ return -RSBAC_EINVALIDVALUE;
++ if (!rsbac_ta_list_exist(ta_number, group_handle, &group))
++ return -RSBAC_EINVALIDVALUE;
++ if (!user_array)
++ return -RSBAC_EINVALIDPOINTER;
++
++ /* traverse group memberships */
++ desc_count =
++ rsbac_ta_list_lol_get_all_desc(ta_number, gm_handle,
++ (void **) &user_p);
++ if (desc_count > 0) {
++ rsbac_time_t ttl;
++
++ for (i = 0; i < desc_count; i++) {
++ if (!rsbac_ta_list_lol_get_subdata_ttl
++ (ta_number, gm_handle, &ttl, &user_p[i],
++ &group, NULL)) {
++ user_array[err] = user_p[i];
++ if (ttl_array)
++ ttl_array[err] = ttl;
++ err++;
++ if (err >= maxnum)
++ break;
++ }
++ }
++ rsbac_kfree(user_p);
++ }
++ return err;
++}
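++
++/*
++ * Illustrative sketch, not part of the original RSBAC sources: calling
++ * rsbac_acl_get_group_members() with caller-provided arrays.  The fixed
++ * bound of 16 entries and the wrapper name are arbitrary choices for this
++ * example.
++ */
++static inline rsbac_boolean_t example_group_has_members(rsbac_acl_group_id_t group)
++{
++	rsbac_uid_t users[16];
++	rsbac_time_t ttls[16];
++
++	return (rsbac_acl_get_group_members(0, group, users, ttls, 16) > 0)
++		? TRUE : FALSE;
++}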
++
++int rsbac_acl_list_all_dev(rsbac_list_ta_number_t ta_number,
++ struct rsbac_dev_desc_t **id_pp)
++{
++ if (id_pp)
++ return rsbac_ta_list_lol_get_all_desc(ta_number,
++ dev_handle,
++ (void **) id_pp);
++ else
++ return rsbac_ta_list_lol_count(ta_number, dev_handle);
++}
++
++int rsbac_acl_list_all_major_dev(rsbac_list_ta_number_t ta_number,
++ struct rsbac_dev_desc_t **id_pp)
++{
++ if (id_pp) {
++ int count;
++
++ count =
++ rsbac_ta_list_lol_get_all_desc(ta_number,
++ dev_major_handle,
++ (void **) id_pp);
++ if (count > 0) {
++ u_int i;
++ struct rsbac_dev_desc_t *tmp_p;
++
++ tmp_p = *id_pp;
++ for (i = 0; i < count; i++)
++ tmp_p[i].type += (D_block_major - D_block);
++ }
++ return count;
++ } else
++ return rsbac_ta_list_lol_count(ta_number,
++ dev_major_handle);
++}
++
++int rsbac_acl_list_all_user(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t ** id_pp)
++{
++ if (id_pp)
++ return rsbac_ta_list_lol_get_all_desc(ta_number, u_handle,
++ (void **) id_pp);
++ else
++ return rsbac_ta_list_lol_count(ta_number, u_handle);
++}
++
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++int rsbac_acl_list_all_group(rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t ** id_pp)
++{
++ if (id_pp)
++ return rsbac_ta_list_lol_get_all_desc(ta_number, g_handle,
++ (void **) id_pp);
++ else
++ return rsbac_ta_list_lol_count(ta_number, g_handle);
++}
++#endif
++
++/********************************************/
++/* remove user from all groups and all ACLs */
++int rsbac_acl_remove_user(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user)
++{
++ u_long i;
++ struct rsbac_acl_group_entry_t *entry_p;
++ long desc_count;
++ struct rsbac_acl_entry_desc_t desc;
++
++ rsbac_ta_list_lol_remove(ta_number, gm_handle, &user);
++ /* traverse groups for this owner */
++ desc_count =
++ rsbac_ta_list_get_all_data(ta_number, group_handle,
++ (void **) &entry_p);
++ if (desc_count > 0) {
++ for (i = 0; i < desc_count; i++) {
++ if (entry_p[i].owner == user) {
++ rsbac_ta_list_remove(ta_number,
++ group_handle,
++ &entry_p[i].id);
++ /* cleanup group memberships */
++ rsbac_ta_list_lol_subremove_from_all
++ (ta_number, gm_handle, &entry_p[i].id);
++ }
++ }
++ rsbac_kfree(entry_p);
++ }
++
++ desc.subj_type = ACLS_USER;
++ desc.subj_id = user;
++
++ return rsbac_acl_remove_subject(ta_number, desc);
++}
+diff --git a/rsbac/data_structures/auth_data_structures.c b/rsbac/data_structures/auth_data_structures.c
+new file mode 100644
+index 0000000..c910c80
+--- /dev/null
++++ b/rsbac/data_structures/auth_data_structures.c
+@@ -0,0 +1,4034 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of AUTH data structures */
++/* Author and (c) 1999-2011: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 12/Jul/2011 */
++/*************************************************** */
++
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <asm/uaccess.h>
++#include <rsbac/types.h>
++#include <rsbac/aci_data_structures.h>
++#include <rsbac/auth_data_structures.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/adf.h>
++#include <rsbac/aci.h>
++#include <rsbac/auth.h>
++#include <rsbac/lists.h>
++#include <rsbac/proc_fs.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/getname.h>
++#include <linux/string.h>
++#include <linux/srcu.h>
++#include <linux/seq_file.h>
++#include <linux/module.h>
++
++/************************************************************************** */
++/* Global Variables */
++/************************************************************************** */
++
++/* The following global variables are needed for access to PM data. */
++
++static struct rsbac_auth_device_list_head_t * device_list_head_p;
++static spinlock_t device_list_lock;
++static struct srcu_struct device_list_srcu;
++static struct lock_class_key device_list_lock_class;
++
++static rsbac_list_handle_t process_handle = NULL;
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++static rsbac_list_handle_t process_eff_handle = NULL;
++static rsbac_list_handle_t process_fs_handle = NULL;
++#endif
++
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++static rsbac_list_handle_t process_group_handle = NULL;
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++static rsbac_list_handle_t process_group_eff_handle = NULL;
++static rsbac_list_handle_t process_group_fs_handle = NULL;
++#endif
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH_LEARN)
++#ifdef CONFIG_RSBAC_AUTH_LEARN_TA
++rsbac_list_ta_number_t auth_learn_ta = CONFIG_RSBAC_AUTH_LEARN_TA;
++#else
++rsbac_list_ta_number_t auth_learn_ta = 0;
++#endif
++#endif
++
++static struct kmem_cache * auth_device_item_slab = NULL;
++
++/**************************************************/
++/* Declarations of external functions */
++/**************************************************/
++
++rsbac_boolean_t writable(struct super_block *sb_p);
++
++/**************************************************/
++/* Declarations of internal functions */
++/**************************************************/
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++static u_int nr_fd_hashes = RSBAC_AUTH_NR_CAP_FD_LISTS;
++
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++static u_int nr_eff_fd_hashes = RSBAC_AUTH_NR_CAP_EFF_FD_LISTS;
++static u_int nr_fs_fd_hashes = RSBAC_AUTH_NR_CAP_FS_FD_LISTS;
++#endif
++
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++static u_int nr_group_fd_hashes = RSBAC_AUTH_NR_CAP_GROUP_FD_LISTS;
++
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++static u_int nr_group_eff_fd_hashes = RSBAC_AUTH_NR_CAP_GROUP_EFF_FD_LISTS;
++static u_int nr_group_fs_fd_hashes = RSBAC_AUTH_NR_CAP_GROUP_FS_FD_LISTS;
++#endif
++#endif
++
++static int cap_compare(void *desc1, void *desc2)
++{
++ struct rsbac_auth_cap_range_t *range1 = desc1;
++ struct rsbac_auth_cap_range_t *range2 = desc2;
++
++ if (!desc1 || !desc2)
++ return 0;
++ if (range1->first < range2->first)
++ return -1;
++ if (range1->first > range2->first)
++ return 1;
++ if (range1->last < range2->last)
++ return -1;
++ if (range1->last > range2->last)
++ return 1;
++ return 0;
++}
++
++static int single_cap_compare(void *desc1, void *desc2)
++{
++ struct rsbac_auth_cap_range_t *range = desc1;
++ rsbac_uid_t *uid = desc2;
++
++ if (!desc1 || !desc2)
++ return 0;
++ if ((*uid < range->first)
++ || (*uid > range->last)
++ )
++ return 1;
++ else
++ return 0;
++}
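++
++/*
++ * Illustrative sketch, not part of the original RSBAC sources: with the
++ * compare helpers above, a capability range {100, 199} sorts before
++ * {200, 200}, and single_cap_compare() returns 0 exactly when the uid lies
++ * inside the range.  The wrapper name and the range values are examples
++ * only.
++ */
++static inline rsbac_boolean_t example_uid_in_cap_range(rsbac_uid_t uid)
++{
++	struct rsbac_auth_cap_range_t range;
++
++	range.first = 100;
++	range.last = 199;
++	return single_cap_compare(&range, &uid) ? FALSE : TRUE;
++}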
++
++static int auth_subconv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_auth_cap_range_t *tmp_new_desc = new_desc;
++ struct rsbac_auth_old_cap_range_t *tmp_old_desc = old_desc;
++
++ tmp_new_desc->first = tmp_old_desc->first;
++ tmp_new_desc->last = tmp_old_desc->last;
++ return 0;
++}
++
++static rsbac_list_conv_function_t *auth_get_subconv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_AUTH_FD_OLD_LIST_VERSION:
++ return auth_subconv;
++ default:
++ return NULL;
++ }
++}
++
++static int auth_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ memcpy(new_desc, old_desc, sizeof(rsbac_inode_nr_t));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *auth_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_AUTH_FD_OLD_LIST_VERSION:
++ return auth_conv;
++ default:
++ return NULL;
++ }
++}
++
++
++/* auth_register_fd_lists() */
++/* register fd ACL lists for device */
++
++static int auth_register_fd_lists(struct rsbac_auth_device_list_item_t
++ *device_p, kdev_t kdev)
++{
++ int err = 0;
++ int tmperr;
++ struct rsbac_list_lol_info_t lol_info;
++
++ if (!device_p)
++ return -RSBAC_EINVALIDPOINTER;
++
++ lol_info.version = RSBAC_AUTH_FD_LIST_VERSION;
++ lol_info.key = RSBAC_AUTH_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_inode_nr_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size =
++ sizeof(struct rsbac_auth_cap_range_t);
++ lol_info.subdata_size = 0; /* rights */
++ lol_info.max_age = 0;
++ tmperr = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->handle,
++ &lol_info,
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ cap_compare,
++ auth_get_conv, auth_get_subconv,
++ NULL, NULL,
++ RSBAC_AUTH_FD_FILENAME, kdev,
++ nr_fd_hashes,
++ rsbac_list_hash_fd,
++ RSBAC_AUTH_FD_OLD_FILENAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "auth_register_fd_lists(): registering list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_AUTH_FD_FILENAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ /* register all the AUTH DAC lists of lists */
++ lol_info.version = RSBAC_AUTH_FD_EFF_LIST_VERSION;
++ lol_info.key = RSBAC_AUTH_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_inode_nr_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size =
++ sizeof(struct rsbac_auth_cap_range_t);
++ lol_info.subdata_size = 0; /* rights */
++ lol_info.max_age = 0;
++ tmperr = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->eff_handle,
++ &lol_info,
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ cap_compare,
++ auth_get_conv, auth_get_subconv,
++ NULL, NULL,
++ RSBAC_AUTH_FD_EFF_FILENAME, kdev,
++ nr_eff_fd_hashes,
++ rsbac_list_hash_fd,
++ RSBAC_AUTH_FD_OLD_EFF_FILENAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "auth_register_fd_lists(): registering list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_AUTH_FD_EFF_FILENAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++ lol_info.version = RSBAC_AUTH_FD_FS_LIST_VERSION;
++ lol_info.key = RSBAC_AUTH_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_inode_nr_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size =
++ sizeof(struct rsbac_auth_cap_range_t);
++ lol_info.subdata_size = 0; /* rights */
++ lol_info.max_age = 0;
++ tmperr = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->fs_handle,
++ &lol_info,
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ cap_compare,
++ auth_get_conv, auth_get_subconv,
++ NULL, NULL,
++ RSBAC_AUTH_FD_FS_FILENAME, kdev,
++ nr_fs_fd_hashes,
++ rsbac_list_hash_fd,
++ RSBAC_AUTH_FD_OLD_FS_FILENAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "auth_register_fd_lists(): registering list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_AUTH_FD_FS_FILENAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ lol_info.version = RSBAC_AUTH_FD_GROUP_LIST_VERSION;
++ lol_info.key = RSBAC_AUTH_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_inode_nr_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size =
++ sizeof(struct rsbac_auth_cap_range_t);
++ lol_info.subdata_size = 0; /* rights */
++ lol_info.max_age = 0;
++ tmperr = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->group_handle,
++ &lol_info,
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ cap_compare,
++ auth_get_conv, auth_get_subconv,
++ NULL, NULL,
++ RSBAC_AUTH_FD_GROUP_FILENAME, kdev,
++ nr_group_fd_hashes,
++ rsbac_list_hash_fd,
++ RSBAC_AUTH_FD_OLD_GROUP_FILENAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "auth_register_fd_lists(): registering list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_AUTH_FD_GROUP_FILENAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ lol_info.version = RSBAC_AUTH_FD_GROUP_EFF_LIST_VERSION;
++ lol_info.key = RSBAC_AUTH_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_inode_nr_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size =
++ sizeof(struct rsbac_auth_cap_range_t);
++ lol_info.subdata_size = 0; /* rights */
++ lol_info.max_age = 0;
++ tmperr = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->group_eff_handle,
++ &lol_info,
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ cap_compare,
++ auth_get_conv, auth_get_subconv,
++ NULL, NULL,
++ RSBAC_AUTH_FD_GROUP_EFF_FILENAME, kdev,
++ nr_group_eff_fd_hashes,
++ rsbac_list_hash_fd,
++ RSBAC_AUTH_FD_OLD_GROUP_EFF_FILENAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "auth_register_fd_lists(): registering list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_AUTH_FD_GROUP_EFF_FILENAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++ lol_info.version = RSBAC_AUTH_FD_GROUP_FS_LIST_VERSION;
++ lol_info.key = RSBAC_AUTH_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_inode_nr_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size =
++ sizeof(struct rsbac_auth_cap_range_t);
++ lol_info.subdata_size = 0; /* rights */
++ lol_info.max_age = 0;
++ tmperr = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->group_fs_handle,
++ &lol_info,
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ cap_compare,
++ auth_get_conv, auth_get_subconv,
++ NULL, NULL,
++ RSBAC_AUTH_FD_GROUP_FS_FILENAME, kdev,
++ nr_group_fs_fd_hashes,
++ rsbac_list_hash_fd,
++ RSBAC_AUTH_FD_OLD_GROUP_FS_FILENAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "auth_register_fd_lists(): registering list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_AUTH_FD_GROUP_FS_FILENAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++#endif
++#endif /* AUTH_GROUP */
++
++ return err;
++}
++
++/* auth_detach_fd_lists() */
++/* detach from fd AUTH lists for device */
++
++static int auth_detach_fd_lists(struct rsbac_auth_device_list_item_t
++ *device_p)
++{
++ int err = 0;
++ int tmperr;
++
++ if (!device_p)
++ return -RSBAC_EINVALIDPOINTER;
++
++ /* detach all the AUTH lists of lists */
++ tmperr = rsbac_list_lol_detach(&device_p->handle,
++ RSBAC_AUTH_LIST_KEY);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "auth_detach_fd_lists(): detaching from list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_AUTH_FD_FILENAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ tmperr = rsbac_list_lol_detach(&device_p->eff_handle,
++ RSBAC_AUTH_LIST_KEY);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "auth_detach_fd_lists(): detaching from list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_AUTH_FD_EFF_FILENAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++ tmperr = rsbac_list_lol_detach(&device_p->fs_handle,
++ RSBAC_AUTH_LIST_KEY);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "auth_detach_fd_lists(): detaching from list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_AUTH_FD_FS_FILENAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ tmperr = rsbac_list_lol_detach(&device_p->group_handle,
++ RSBAC_AUTH_LIST_KEY);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "auth_detach_fd_lists(): detaching from list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_AUTH_FD_GROUP_FILENAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ tmperr = rsbac_list_lol_detach(&device_p->group_eff_handle,
++ RSBAC_AUTH_LIST_KEY);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "auth_detach_fd_lists(): detaching from list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_AUTH_FD_GROUP_EFF_FILENAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++ tmperr = rsbac_list_lol_detach(&device_p->group_fs_handle,
++ RSBAC_AUTH_LIST_KEY);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "auth_detach_fd_lists(): detaching from list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_AUTH_FD_GROUP_FS_FILENAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++#endif
++#endif /* AUTH_GROUP */
++
++ return err;
++}
++
++/************************************************************************** */
++/* The lookup functions return NULL if the item is not found, and a */
++/* pointer to the item otherwise. */
++
++/* first the device item lookup */
++static struct rsbac_auth_device_list_item_t *lookup_device(kdev_t kdev)
++{
++ struct rsbac_auth_device_list_item_t *curr = rcu_dereference(device_list_head_p)->curr;
++
++ /* if there is no current item or it is not the right one, search... */
++ if (!(curr && (RSBAC_MAJOR(curr->id) == RSBAC_MAJOR(kdev))
++ && (RSBAC_MINOR(curr->id) == RSBAC_MINOR(kdev))
++ )
++ ) {
++ curr = rcu_dereference(device_list_head_p)->head;
++ while (curr
++ && ((RSBAC_MAJOR(curr->id) != RSBAC_MAJOR(kdev))
++ || (RSBAC_MINOR(curr->id) != RSBAC_MINOR(kdev))
++ )
++ ) {
++ curr = curr->next;
++ }
++ if (curr)
++ rcu_dereference(device_list_head_p)->curr = curr;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
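++
++/* Illustrative sketch only (#if 0, not compiled): lookup_device() returns a
++ * pointer that only stays valid while the SRCU read lock is held, so callers
++ * bracket the lookup as below (compare rsbac_mount_auth() further down).
++ * The function name is hypothetical. */
++#if 0
++static void lookup_device_example(kdev_t kdev)
++{
++	struct rsbac_auth_device_list_item_t *device_p;
++	int srcu_idx;
++
++	srcu_idx = srcu_read_lock(&device_list_srcu);
++	device_p = lookup_device(kdev);
++	if (device_p)
++		rsbac_printk(KERN_DEBUG "device %02u:%02u mounted %u time(s)\n",
++			     RSBAC_MAJOR(device_p->id),
++			     RSBAC_MINOR(device_p->id),
++			     device_p->mount_count);
++	srcu_read_unlock(&device_list_srcu, srcu_idx);
++}
++#endif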
++
++/************************************************************************** */
++/* The add_item() functions add an item to the list, set head.curr to it, */
++/* and return a pointer to the item. */
++/* These functions will NOT check whether there is already an item with the */
++/* same ID! If this happens, the lookup functions will return the old item! */
++/* All list manipulation is protected by the device list spinlock (writers) */
++/* and SRCU (readers) to prevent inconsistency and undefined behaviour in */
++/* concurrent functions. */
++
++/* Create a device item without adding to list. No locking needed. */
++static struct rsbac_auth_device_list_item_t
++*create_device_item(kdev_t kdev)
++{
++ struct rsbac_auth_device_list_item_t *new_item_p;
++
++ /* allocate memory for new device, return NULL, if failed */
++ if (!(new_item_p = rsbac_smalloc_clear_unlocked(auth_device_item_slab)))
++ return NULL;
++
++ new_item_p->id = kdev;
++ new_item_p->mount_count = 1;
++ return new_item_p;
++}
++
++/* Add an existing device item to list. Locking needed. */
++static struct rsbac_auth_device_list_item_t
++*add_device_item(struct rsbac_auth_device_list_item_t *device_p)
++{
++ struct rsbac_auth_device_list_head_t * new_p;
++ struct rsbac_auth_device_list_head_t * old_p;
++
++ if (!device_p)
++ return NULL;
++
++	spin_lock(&device_list_lock);
++	old_p = device_list_head_p;
++	new_p = rsbac_kmalloc(sizeof(*new_p));
++	if (!new_p) {
++		/* allocation failed: leave the old list head in place */
++		spin_unlock(&device_list_lock);
++		return NULL;
++	}
++	*new_p = *old_p;
++ /* add new device to device list */
++ if (!new_p->head) { /* first device */
++ new_p->head = device_p;
++ new_p->tail = device_p;
++ new_p->curr = device_p;
++ new_p->count = 1;
++ device_p->prev = NULL;
++ device_p->next = NULL;
++ } else { /* there is another device -> hang to tail */
++ device_p->prev = new_p->tail;
++ device_p->next = NULL;
++ new_p->tail->next = device_p;
++ new_p->tail = device_p;
++ new_p->curr = device_p;
++ new_p->count++;
++ }
++ rcu_assign_pointer(device_list_head_p, new_p);
++ spin_unlock(&device_list_lock);
++ synchronize_srcu(&device_list_srcu);
++ rsbac_kfree(old_p);
++ return device_p;
++}
++
++/************************************************************************** */
++/* The remove_item() functions remove an item from the list. If this item */
++/* is head, tail or curr, these pointers are set accordingly. */
++/* To speed up removing several subsequent items, curr is set to the next */
++/* item, if possible. */
++/* If the item is not found, nothing is done. */
++
++static void clear_device_item(struct rsbac_auth_device_list_item_t *item_p)
++{
++ if (!item_p)
++ return;
++
++ auth_detach_fd_lists(item_p);
++ rsbac_sfree(auth_device_item_slab, item_p);
++}
++
++static void remove_device_item(kdev_t kdev)
++{
++ struct rsbac_auth_device_list_item_t *item_p;
++ struct rsbac_auth_device_list_head_t * new_p;
++ struct rsbac_auth_device_list_head_t * old_p;
++
++	old_p = device_list_head_p;
++	new_p = rsbac_kmalloc(sizeof(*new_p));
++	if (!new_p) {
++		/* allocation failed: keep the item and just drop the lock,
++		 * which the caller took before calling us */
++		spin_unlock(&device_list_lock);
++		return;
++	}
++	*new_p = *old_p;
++ /* first we must locate the item. */
++ if ((item_p = lookup_device(kdev))) { /* ok, item was found */
++ if (new_p->head == item_p) { /* item is head */
++ if (new_p->tail == item_p) { /* item is head and tail = only item -> list will be empty */
++ new_p->head = NULL;
++ new_p->tail = NULL;
++ } else { /* item is head, but not tail -> next item becomes head */
++ item_p->next->prev = NULL;
++ new_p->head = item_p->next;
++ }
++ } else { /* item is not head */
++ if (new_p->tail == item_p) { /*item is not head, but tail -> previous item becomes tail */
++ item_p->prev->next = NULL;
++ new_p->tail = item_p->prev;
++ } else { /* item is neither head nor tail -> item is cut out */
++ item_p->prev->next = item_p->next;
++ item_p->next->prev = item_p->prev;
++ }
++ }
++
++ /* curr is no longer valid -> reset. */
++ new_p->curr = NULL;
++ /* adjust counter */
++ new_p->count--;
++ rcu_assign_pointer(device_list_head_p, new_p);
++ spin_unlock(&device_list_lock);
++ synchronize_srcu(&device_list_srcu);
++ rsbac_kfree(old_p);
++
++ /* now we can remove the item from memory. This means cleaning up */
++ /* everything below. */
++ clear_device_item(item_p);
++ } /* end of if: item was found */
++ else
++ spin_unlock(&device_list_lock);
++} /* end of remove_device_item() */
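++
++/* Note on locking (derived from the code above and from rsbac_umount_auth()
++ * below): remove_device_item() expects device_list_lock to be held by the
++ * caller and releases it on every path itself.  Illustrative caller sketch,
++ * kept out of the build with #if 0; the function name is hypothetical. */
++#if 0
++static void remove_device_example(kdev_t kdev)
++{
++	spin_lock(&device_list_lock);
++	remove_device_item(kdev);	/* unlocks device_list_lock */
++}
++#endif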
++
++/************************************************************************** */
++/* The copy_fp_cap_set_item() function copies a file cap set to a process */
++/* cap set */
++
++static int copy_fp_cap_set_item(struct rsbac_auth_device_list_item_t
++ *device_p, rsbac_auth_file_t file,
++ rsbac_pid_t pid)
++{
++ struct rsbac_auth_cap_range_t *cap_item_p;
++ rsbac_time_t *ttl_p;
++ int i;
++ long count;
++ enum rsbac_target_t target = T_FILE;
++ union rsbac_target_id_t tid;
++
++ rsbac_list_lol_remove(process_handle, &pid);
++ count =
++ rsbac_list_lol_get_all_subdesc_ttl(device_p->handle,
++ &file.inode,
++ (void **) &cap_item_p,
++ &ttl_p);
++ if (!count || (count == -RSBAC_ENOTFOUND)
++ ) {
++ tid.file = file;
++ if (!rsbac_get_parent(target, tid, &target, &tid))
++ count =
++ rsbac_list_lol_get_all_subdesc_ttl(device_p->handle,
++ &tid.file.
++ inode,
++ (void **)
++ &cap_item_p,
++ &ttl_p);
++ }
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ rsbac_list_lol_subadd_ttl(process_handle,
++ ttl_p[i],
++ &pid,
++ &cap_item_p[i], NULL);
++ }
++ rsbac_kfree(cap_item_p);
++ rsbac_kfree(ttl_p);
++ } else {
++ if ((count < 0)
++ && (count != -RSBAC_ENOTFOUND)
++ )
++ return count;
++ }
++
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ rsbac_list_lol_remove(process_eff_handle, &pid);
++ count =
++ rsbac_list_lol_get_all_subdesc_ttl(device_p->eff_handle,
++ &file.inode,
++ (void **) &cap_item_p,
++ &ttl_p);
++ if (!count || (count == -RSBAC_ENOTFOUND)
++ ) {
++ tid.file = file;
++ if (!rsbac_get_parent(target, tid, &target, &tid))
++ count =
++ rsbac_list_lol_get_all_subdesc_ttl(device_p->eff_handle,
++ &tid.file.
++ inode,
++ (void **)
++ &cap_item_p,
++ &ttl_p);
++ }
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ rsbac_list_lol_subadd_ttl(process_eff_handle,
++ ttl_p[i],
++ &pid,
++ &cap_item_p[i], NULL);
++ }
++ rsbac_kfree(cap_item_p);
++ rsbac_kfree(ttl_p);
++ } else {
++ if ((count < 0)
++ && (count != -RSBAC_ENOTFOUND)
++ )
++ return count;
++ }
++ rsbac_list_lol_remove(process_fs_handle, &pid);
++ count =
++ rsbac_list_lol_get_all_subdesc_ttl(device_p->fs_handle,
++ &file.inode,
++ (void **) &cap_item_p,
++ &ttl_p);
++ if (!count || (count == -RSBAC_ENOTFOUND)
++ ) {
++ tid.file = file;
++ if (!rsbac_get_parent(target, tid, &target, &tid))
++ count =
++ rsbac_list_lol_get_all_subdesc_ttl(device_p->fs_handle,
++ &tid.file.
++ inode,
++ (void **)
++ &cap_item_p,
++ &ttl_p);
++ }
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ rsbac_list_lol_subadd_ttl(process_fs_handle,
++ ttl_p[i],
++ &pid,
++ &cap_item_p[i], NULL);
++ }
++ rsbac_kfree(cap_item_p);
++ rsbac_kfree(ttl_p);
++ } else {
++ if ((count < 0)
++ && (count != -RSBAC_ENOTFOUND)
++ )
++ return count;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ rsbac_list_lol_remove(process_group_handle, &pid);
++ count =
++ rsbac_list_lol_get_all_subdesc_ttl(device_p->
++ group_handle,
++ &file.inode,
++ (void **) &cap_item_p,
++ &ttl_p);
++ if (!count || (count == -RSBAC_ENOTFOUND)
++ ) {
++ tid.file = file;
++ if (!rsbac_get_parent(target, tid, &target, &tid))
++ count =
++ rsbac_list_lol_get_all_subdesc_ttl(device_p->group_handle,
++ &tid.file.
++ inode,
++ (void **)
++ &cap_item_p,
++ &ttl_p);
++ }
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ rsbac_list_lol_subadd_ttl(process_group_handle,
++ ttl_p[i],
++ &pid,
++ &cap_item_p[i], NULL);
++ }
++ rsbac_kfree(cap_item_p);
++ rsbac_kfree(ttl_p);
++ } else {
++ if ((count < 0)
++ && (count != -RSBAC_ENOTFOUND)
++ )
++ return count;
++ }
++
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ rsbac_list_lol_remove(process_group_eff_handle, &pid);
++ count =
++ rsbac_list_lol_get_all_subdesc_ttl(device_p->group_eff_handle,
++ &file.inode,
++ (void **) &cap_item_p,
++ &ttl_p);
++ if (!count || (count == -RSBAC_ENOTFOUND)
++ ) {
++ tid.file = file;
++ if (!rsbac_get_parent(target, tid, &target, &tid))
++ count =
++ rsbac_list_lol_get_all_subdesc_ttl(device_p->group_eff_handle,
++ &tid.file.
++ inode,
++ (void **)
++ &cap_item_p,
++ &ttl_p);
++ }
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ rsbac_list_lol_subadd_ttl(process_group_eff_handle,
++ ttl_p[i],
++ &pid,
++ &cap_item_p[i], NULL);
++ }
++ rsbac_kfree(cap_item_p);
++ rsbac_kfree(ttl_p);
++ } else {
++ if ((count < 0)
++ && (count != -RSBAC_ENOTFOUND)
++ )
++ return count;
++ }
++ rsbac_list_lol_remove(process_group_fs_handle, &pid);
++ count =
++ rsbac_list_lol_get_all_subdesc_ttl(device_p->group_fs_handle,
++ &file.inode,
++ (void **) &cap_item_p,
++ &ttl_p);
++ if (!count || (count == -RSBAC_ENOTFOUND)
++ ) {
++ tid.file = file;
++ if (!rsbac_get_parent(target, tid, &target, &tid))
++ count =
++ rsbac_list_lol_get_all_subdesc_ttl(device_p->group_fs_handle,
++ &tid.file.
++ inode,
++ (void **)
++ &cap_item_p,
++ &ttl_p);
++ }
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ rsbac_list_lol_subadd_ttl(process_group_fs_handle,
++ ttl_p[i],
++ &pid,
++ &cap_item_p[i], NULL);
++ }
++ rsbac_kfree(cap_item_p);
++ rsbac_kfree(ttl_p);
++ } else {
++ if ((count < 0)
++ && (count != -RSBAC_ENOTFOUND)
++ )
++ return count;
++ }
++#endif
++#endif /* AUTH_GROUP */
++
++ return 0;
++} /* end of copy_fp_cap_set_item() */
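++
++/* Illustrative sketch only (#if 0, not compiled): every block in
++ * copy_fp_cap_set_item() above follows the same pattern - look up the cap
++ * ranges of the file itself and, if none are set, fall back to the parent
++ * directory once.  The helper name and its use are assumptions made for
++ * illustration, not existing RSBAC code. */
++#if 0
++static long get_file_or_parent_caps(rsbac_list_handle_t handle,
++				    rsbac_auth_file_t file,
++				    struct rsbac_auth_cap_range_t **caps_pp,
++				    rsbac_time_t **ttls_pp)
++{
++	enum rsbac_target_t target = T_FILE;
++	union rsbac_target_id_t tid;
++	long count;
++
++	count = rsbac_list_lol_get_all_subdesc_ttl(handle, &file.inode,
++						   (void **) caps_pp, ttls_pp);
++	if (!count || (count == -RSBAC_ENOTFOUND)) {
++		/* nothing set on the file itself: try its parent directory */
++		tid.file = file;
++		if (!rsbac_get_parent(target, tid, &target, &tid))
++			count = rsbac_list_lol_get_all_subdesc_ttl(handle,
++								   &tid.file.inode,
++								   (void **) caps_pp,
++								   ttls_pp);
++	}
++	return count;
++}
++#endif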
++
++/************************************************************************** */
++/* The copy_pp_cap_set_item() function copies a process cap set to another */
++
++static int copy_pp_cap_set_item_handle(rsbac_list_handle_t handle,
++ rsbac_pid_t old_pid,
++ rsbac_pid_t new_pid)
++{
++ struct rsbac_auth_cap_range_t *cap_item_p;
++ rsbac_time_t *ttl_p;
++ int i;
++ long count;
++
++ rsbac_list_lol_remove(handle, &new_pid);
++ count = rsbac_list_lol_get_all_subdesc_ttl(handle,
++ &old_pid,
++ (void **) &cap_item_p,
++ &ttl_p);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ rsbac_list_lol_subadd_ttl(handle,
++ ttl_p[i],
++ &new_pid,
++ &cap_item_p[i], NULL);
++ }
++ rsbac_kfree(cap_item_p);
++ rsbac_kfree(ttl_p);
++ } else {
++ if (count < 0)
++ return count;
++ }
++ return 0;
++}
++
++static int copy_pp_cap_set_item(rsbac_pid_t old_pid, rsbac_pid_t new_pid)
++{
++ int res;
++
++ res =
++ copy_pp_cap_set_item_handle(process_handle, old_pid, new_pid);
++
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ if (res)
++ return res;
++ res =
++ copy_pp_cap_set_item_handle(process_eff_handle, old_pid,
++ new_pid);
++ if (res)
++ return res;
++ res =
++ copy_pp_cap_set_item_handle(process_fs_handle, old_pid,
++ new_pid);
++#endif
++
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++	if (res)
++		return res;
++	res =
++	    copy_pp_cap_set_item_handle(process_group_handle, old_pid,
++ new_pid);
++
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ if (res)
++ return res;
++ res =
++ copy_pp_cap_set_item_handle(process_group_eff_handle, old_pid,
++ new_pid);
++ if (res)
++ return res;
++ res =
++ copy_pp_cap_set_item_handle(process_group_fs_handle, old_pid,
++ new_pid);
++#endif
++#endif
++
++ return res;
++} /* end of copy_pp_cap_set_item() */
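++
++/* Usage note (hedged): copy_pp_cap_set_item() is used when one process'
++ * AUTH cap sets have to be handed on to another process, e.g. when a new
++ * task is created from an existing one.  Illustrative call only, kept out
++ * of the build with #if 0; the wrapper name is hypothetical. */
++#if 0
++static void copy_caps_example(rsbac_pid_t old_pid, rsbac_pid_t new_pid)
++{
++	if (copy_pp_cap_set_item(old_pid, new_pid))
++		rsbac_printk(KERN_WARNING "copying AUTH cap sets failed\n");
++}
++#endif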
++
++/************************************************* */
++/* proc functions */
++/************************************************* */
++
++#if defined(CONFIG_RSBAC_PROC) && defined(CONFIG_PROC_FS)
++static int
++auth_devices_proc_show(struct seq_file *m, void *v)
++{
++ struct rsbac_auth_device_list_item_t *device_p;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized())
++ return -ENOSYS;
++
++ seq_printf(m,
++ "%u RSBAC AUTH Devices\n--------------------\n",
++ rcu_dereference(device_list_head_p)->count);
++
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ for (device_p = rcu_dereference(device_list_head_p)->head; device_p;
++ device_p = device_p->next) {
++ seq_printf(m,
++ "%02u:%02u with mount_count = %u\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ device_p->mount_count);
++ }
++
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++
++ return 0;
++}
++
++static int auth_devices_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, auth_devices_proc_show, NULL);
++}
++
++static const struct file_operations auth_devices_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = auth_devices_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *auth_devices;
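++
++/* Usage note: once registered in rsbac_init_auth() below, this entry can be
++ * read from user space (the RSBAC proc directory is typically mounted as
++ * /proc/rsbac-info - path assumed here).  Based on the seq_printf() calls
++ * above, the output looks roughly like:
++ *
++ *   $ cat /proc/rsbac-info/auth_devices
++ *   1 RSBAC AUTH Devices
++ *   --------------------
++ *   08:01 with mount_count = 1
++ */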
++
++static int
++stats_auth_proc_show(struct seq_file *m, void *v)
++{
++ u_int cap_set_count = 0;
++ u_int member_count = 0;
++ struct rsbac_auth_device_list_item_t *device_p;
++ int srcu_idx;
++
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_is_initialized()) {
++		rsbac_printk(KERN_WARNING "stats_auth_proc_show(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ rsbac_pr_debug(aef_auth, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ seq_printf(m, "AUTH Status\n-----------\n");
++
++ seq_printf(m,
++ "%lu process cap set items, sum of %lu members\n",
++ rsbac_list_lol_count(process_handle),
++ rsbac_list_lol_all_subcount(process_handle));
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ seq_printf(m,
++ "%lu process eff cap set items, sum of %lu members\n",
++ rsbac_list_lol_count(process_eff_handle),
++ rsbac_list_lol_all_subcount(process_eff_handle));
++ seq_printf(m,
++ "%lu process fs cap set items, sum of %lu members\n",
++ rsbac_list_lol_count(process_fs_handle),
++ rsbac_list_lol_all_subcount(process_fs_handle));
++#endif
++
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ seq_printf(m,
++ "%lu process group cap set items, sum of %lu members\n",
++ rsbac_list_lol_count(process_group_handle),
++ rsbac_list_lol_all_subcount(process_group_handle));
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ seq_printf(m,
++ "%lu process group eff cap set items, sum of %lu members\n",
++ rsbac_list_lol_count(process_group_eff_handle),
++ rsbac_list_lol_all_subcount(process_group_eff_handle));
++ seq_printf(m,
++ "%lu process group fs cap set items, sum of %lu members\n",
++ rsbac_list_lol_count(process_group_fs_handle),
++ rsbac_list_lol_all_subcount(process_group_fs_handle));
++#endif
++#endif /* AUTH_GROUP */
++
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = rcu_dereference(device_list_head_p)->head;
++ while (device_p) {
++ /* reset counters */
++ cap_set_count = rsbac_list_lol_count(device_p->handle);
++ member_count = rsbac_list_lol_all_subcount(device_p->handle);
++ seq_printf(m,
++ "device %02u:%02u has %u file cap set items, sum of %u members\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id), cap_set_count,
++ member_count);
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ cap_set_count = rsbac_list_lol_count(device_p->eff_handle);
++ member_count = rsbac_list_lol_all_subcount(device_p->eff_handle);
++ seq_printf(m,
++ "device %02u:%02u has %u file eff cap set items, sum of %u members\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id), cap_set_count,
++ member_count);
++ cap_set_count = rsbac_list_lol_count(device_p->fs_handle);
++ member_count = rsbac_list_lol_all_subcount(device_p->fs_handle);
++ seq_printf(m,
++ "device %02u:%02u has %u file fs cap set items, sum of %u members\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id), cap_set_count,
++ member_count);
++#endif
++
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ cap_set_count = rsbac_list_lol_count(device_p->group_handle);
++ member_count = rsbac_list_lol_all_subcount(device_p->group_handle);
++ seq_printf(m,
++ "device %02u:%02u has %u file group cap set items, sum of %u members\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id), cap_set_count,
++ member_count);
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ cap_set_count = rsbac_list_lol_count(device_p->group_eff_handle);
++ member_count = rsbac_list_lol_all_subcount(device_p->group_eff_handle);
++ seq_printf(m,
++ "device %02u:%02u has %u file group eff cap set items, sum of %u members\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id), cap_set_count,
++ member_count);
++ cap_set_count = rsbac_list_lol_count(device_p->group_fs_handle);
++ member_count = rsbac_list_lol_all_subcount(device_p->group_fs_handle);
++ seq_printf(m,
++ "device %02u:%02u has %u file group fs cap set items, sum of %u members\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id), cap_set_count,
++ member_count);
++#endif
++#endif /* AUTH_GROUP */
++
++ device_p = device_p->next;
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++
++ return 0;
++}
++
++static int stats_auth_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, stats_auth_proc_show, NULL);
++}
++
++static const struct file_operations stats_auth_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = stats_auth_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *stats_auth;
++
++static int
++auth_caplist_proc_show(struct seq_file *m, void *v)
++{
++ u_int count = 0;
++ u_int member_count = 0;
++ u_long all_count;
++ u_long all_member_count;
++ int i, j;
++ struct rsbac_auth_device_list_item_t *device_p;
++ rsbac_pid_t *p_list;
++ rsbac_inode_nr_t *f_list;
++ struct rsbac_auth_cap_range_t *cap_list;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++		rsbac_printk(KERN_WARNING "auth_caplist_proc_show(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ rsbac_pr_debug(aef_auth, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ seq_printf(m, "AUTH Cap Lists\n--------------\n");
++
++ seq_printf(m,
++ "Process capabilities:\nset-id count cap-members");
++ all_member_count = 0;
++ count = rsbac_list_lol_get_all_desc(process_handle,
++ (void **) &p_list);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ member_count =
++ rsbac_list_lol_get_all_subdesc(process_handle,
++ &p_list[i],
++ (void **)
++ &cap_list);
++ seq_printf(m, "\n %u\t%u\t", pid_vnr(p_list[i]),
++ member_count);
++ if (member_count > 0) {
++ for (j = 0; j < member_count; j++) {
++ if (cap_list[j].first !=
++ cap_list[j].last) {
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first)
++ || RSBAC_UID_SET(cap_list[j].last)
++ )
++ seq_printf(m,
++ "%u/%u:%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_SET(cap_list[j].last),
++ RSBAC_UID_NUM(cap_list[j].last));
++ else
++#endif
++ seq_printf(m,
++ "%u:%u ",
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].last));
++ } else {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first))
++ seq_printf(m,
++ "%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first));
++ else
++#endif
++ seq_printf(m,
++ "%u ",
++ RSBAC_UID_NUM(cap_list[j].first));
++ }
++ }
++ rsbac_kfree(cap_list);
++ all_member_count += member_count;
++ }
++ }
++ rsbac_kfree(p_list);
++ }
++ seq_printf(m,
++ "\n%u process cap set items, sum of %lu members\n",
++ count, all_member_count);
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ seq_printf(m,
++ "\nProcess eff capabilities:\nset-id count cap-members");
++
++ all_member_count = 0;
++ count = rsbac_list_lol_get_all_desc(process_eff_handle,
++ (void **) &p_list);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ member_count =
++ rsbac_list_lol_get_all_subdesc
++ (process_eff_handle, &p_list[i],
++ (void **) &cap_list);
++ seq_printf(m, "\n %u\t%u\t", pid_vnr(p_list[i]),
++ member_count);
++ if (member_count > 0) {
++ for (j = 0; j < member_count; j++) {
++ if (cap_list[j].first !=
++ cap_list[j].last) {
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first)
++ || RSBAC_UID_SET(cap_list[j].last)
++ )
++ seq_printf(m,
++ "%u/%u:%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_SET(cap_list[j].last),
++ RSBAC_UID_NUM(cap_list[j].last));
++ else
++#endif
++ seq_printf(m,
++ "%u:%u ",
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].last));
++ } else {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first))
++ seq_printf(m,
++ "%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first));
++ else
++#endif
++ seq_printf(m,
++ "%u ",
++ RSBAC_UID_NUM(cap_list[j].first));
++ }
++ }
++ rsbac_kfree(cap_list);
++ all_member_count += member_count;
++ }
++ }
++ rsbac_kfree(p_list);
++ }
++ seq_printf(m,
++ "\n%u process eff cap set items, sum of %lu members\n",
++ count, all_member_count);
++ seq_printf(m,
++ "\nProcess fs capabilities:\nset-id count cap-members");
++
++ all_member_count = 0;
++ count = rsbac_list_lol_get_all_desc(process_fs_handle,
++ (void **) &p_list);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ member_count =
++ rsbac_list_lol_get_all_subdesc
++ (process_fs_handle, &p_list[i],
++ (void **) &cap_list);
++ seq_printf(m, "\n %u\t%u\t", pid_vnr(p_list[i]),
++ member_count);
++ if (member_count > 0) {
++ for (j = 0; j < member_count; j++) {
++ if (cap_list[j].first !=
++ cap_list[j].last) {
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first)
++ || RSBAC_UID_SET(cap_list[j].last)
++ )
++ seq_printf(m,
++ "%u/%u:%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_SET(cap_list[j].last),
++ RSBAC_UID_NUM(cap_list[j].last));
++ else
++#endif
++ seq_printf(m,
++ "%u:%u ",
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].last));
++ } else {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first))
++ seq_printf(m,
++ "%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first));
++ else
++#endif
++ seq_printf(m,
++ "%u ",
++ RSBAC_UID_NUM(cap_list[j].first));
++ }
++ }
++ rsbac_kfree(cap_list);
++ all_member_count += member_count;
++ }
++ }
++ rsbac_kfree(p_list);
++ }
++ seq_printf(m,
++ "\n\n%u process fs cap set items, sum of %lu members\n",
++ count, all_member_count);
++#endif
++
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ seq_printf(m,
++ "\nProcess group capabilities:\nset-id count cap-members");
++ all_member_count = 0;
++ count = rsbac_list_lol_get_all_desc(process_group_handle,
++ (void **) &p_list);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ member_count =
++ rsbac_list_lol_get_all_subdesc
++ (process_group_handle, &p_list[i],
++ (void **) &cap_list);
++ seq_printf(m, "\n %u\t%u\t", pid_vnr(p_list[i]),
++ member_count);
++ if (member_count > 0) {
++ for (j = 0; j < member_count; j++) {
++ if (cap_list[j].first !=
++ cap_list[j].last) {
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first)
++ || RSBAC_UID_SET(cap_list[j].last)
++ )
++ seq_printf(m,
++ "%u/%u:%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_SET(cap_list[j].last),
++ RSBAC_UID_NUM(cap_list[j].last));
++ else
++#endif
++ seq_printf(m,
++ "%u:%u ",
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].last));
++ } else {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first))
++ seq_printf(m,
++ "%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first));
++ else
++#endif
++ seq_printf(m,
++ "%u ",
++ RSBAC_UID_NUM(cap_list[j].first));
++ }
++ }
++ rsbac_kfree(cap_list);
++ all_member_count += member_count;
++ }
++ }
++ rsbac_kfree(p_list);
++ }
++ seq_printf(m,
++ "\n%u process group cap set items, sum of %lu members\n",
++ count, all_member_count);
++
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ seq_printf(m,
++ "\nProcess group eff capabilities:\nset-id count cap-members");
++ all_member_count = 0;
++ count = rsbac_list_lol_get_all_desc(process_group_eff_handle,
++ (void **) &p_list);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ member_count =
++ rsbac_list_lol_get_all_subdesc
++ (process_group_eff_handle, &p_list[i],
++ (void **) &cap_list);
++ seq_printf(m, "\n %u\t%u\t", pid_vnr(p_list[i]),
++ member_count);
++ if (member_count > 0) {
++ for (j = 0; j < member_count; j++) {
++ if (cap_list[j].first !=
++ cap_list[j].last) {
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first)
++ || RSBAC_UID_SET(cap_list[j].last)
++ )
++ seq_printf(m,
++ "%u/%u:%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_SET(cap_list[j].last),
++ RSBAC_UID_NUM(cap_list[j].last));
++ else
++#endif
++ seq_printf(m,
++ "%u:%u ",
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].last));
++ } else {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first))
++ seq_printf(m,
++ "%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first));
++ else
++#endif
++ seq_printf(m,
++ "%u ",
++ RSBAC_UID_NUM(cap_list[j].first));
++ }
++ }
++ rsbac_kfree(cap_list);
++ all_member_count += member_count;
++ }
++ }
++ rsbac_kfree(p_list);
++ }
++ seq_printf(m,
++ "\n%u process group eff cap set items, sum of %lu members\n",
++ count, all_member_count);
++ seq_printf(m,
++ "\nProcess group fs capabilities:\nset-id count cap-members");
++
++ all_member_count = 0;
++ count = rsbac_list_lol_get_all_desc(process_group_fs_handle,
++ (void **) &p_list);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ member_count =
++ rsbac_list_lol_get_all_subdesc
++ (process_group_fs_handle, &p_list[i],
++ (void **) &cap_list);
++ seq_printf(m, "\n %u\t%u\t", pid_vnr(p_list[i]),
++ member_count);
++ if (member_count > 0) {
++ for (j = 0; j < member_count; j++) {
++ if (cap_list[j].first !=
++ cap_list[j].last) {
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first)
++ || RSBAC_UID_SET(cap_list[j].last)
++ )
++ seq_printf(m,
++ "%u/%u:%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_SET(cap_list[j].last),
++ RSBAC_UID_NUM(cap_list[j].last));
++ else
++#endif
++ seq_printf(m,
++ "%u:%u ",
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].last));
++ } else {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first))
++ seq_printf(m,
++ "%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first));
++ else
++#endif
++ seq_printf(m,
++ "%u ",
++ RSBAC_UID_NUM(cap_list[j].first));
++ }
++ }
++ rsbac_kfree(cap_list);
++ all_member_count += member_count;
++ }
++ }
++ rsbac_kfree(p_list);
++ }
++ seq_printf(m,
++ "\n\n%u process group fs cap set items, sum of %lu members\n",
++ count, all_member_count);
++#endif
++#endif /* AUTH_GROUP */
++
++ seq_printf(m,
++ "\nFile capabilities:\nset-id count cap-members");
++
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = rcu_dereference(device_list_head_p)->head;
++ while (device_p) {
++ /* reset counters */
++ all_member_count = 0;
++ all_count = 0;
++ count = rsbac_list_lol_get_all_desc(device_p->handle,
++ (void **) &f_list);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ member_count =
++ rsbac_list_lol_get_all_subdesc
++ (device_p->handle,
++ &f_list[i],
++ (void **) &cap_list);
++ seq_printf(m,
++ "\n %u\t%u\t",
++ f_list[i],
++ member_count);
++ if (member_count > 0) {
++ for (j = 0;
++ j < member_count;
++ j++) {
++					if (cap_list[j].first !=
++					    cap_list[j].last) {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first)
++ || RSBAC_UID_SET(cap_list[j].last)
++ )
++ seq_printf(m,
++ "%u/%u:%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_SET(cap_list[j].last),
++ RSBAC_UID_NUM(cap_list[j].last));
++ else
++#endif
++						seq_printf(m,
++							   "%u:%u ",
++							   RSBAC_UID_NUM(cap_list[j].first),
++							   RSBAC_UID_NUM(cap_list[j].last));
++ } else {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first))
++ seq_printf(m,
++ "%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first));
++ else
++#endif
++ seq_printf(m,
++ "%u ",
++ RSBAC_UID_NUM(cap_list[j].first));
++ }
++ }
++ rsbac_kfree(cap_list);
++ all_member_count +=
++ member_count;
++ }
++ }
++ rsbac_kfree(f_list);
++ all_count += count;
++ }
++ seq_printf(m,
++ "\ndevice %02u:%02u has %lu file cap set items, sum of %lu members, list is clean\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id), all_count,
++ all_member_count);
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ all_member_count = 0;
++ all_count = 0;
++ count =
++ rsbac_list_lol_get_all_desc(device_p->eff_handle,
++ (void **) &f_list);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ member_count =
++ rsbac_list_lol_get_all_subdesc
++ (device_p->eff_handle,
++ &f_list[i],
++ (void **) &cap_list);
++ seq_printf(m,
++ "\n %u\t%u\t",
++ f_list[i],
++ member_count);
++ if (member_count > 0) {
++ for (j = 0;
++ j < member_count;
++ j++) {
++ if (cap_list[j].first !=
++ cap_list[j].last) {
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first)
++ || RSBAC_UID_SET(cap_list[j].last)
++ )
++ seq_printf(m,
++ "%u/%u:%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_SET(cap_list[j].last),
++ RSBAC_UID_NUM(cap_list[j].last));
++ else
++#endif
++ seq_printf(m,
++ "%u:%u ",
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].last));
++ } else {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first))
++ seq_printf(m,
++ "%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first));
++ else
++#endif
++ seq_printf(m,
++ "%u ",
++ RSBAC_UID_NUM(cap_list[j].first));
++ }
++ }
++ rsbac_kfree(cap_list);
++ all_member_count +=
++ member_count;
++ }
++ }
++ rsbac_kfree(f_list);
++ all_count += count;
++ }
++ seq_printf(m,
++ "\ndevice %02u:%02u has %lu file eff cap set items, sum of %lu members, list is clean\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id), all_count,
++ all_member_count);
++ all_member_count = 0;
++ all_count = 0;
++ count =
++ rsbac_list_lol_get_all_desc(device_p->
++ fs_handle,
++ (void **) &f_list);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ member_count =
++ rsbac_list_lol_get_all_subdesc
++ (device_p->fs_handle,
++ &f_list[i],
++ (void **) &cap_list);
++ seq_printf(m,
++ "\n %u\t%u\t",
++ f_list[i],
++ member_count);
++ if (member_count > 0) {
++ for (j = 0;
++ j < member_count;
++ j++) {
++ if (cap_list[j].first !=
++ cap_list[j].last) {
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first)
++ || RSBAC_UID_SET(cap_list[j].last)
++ )
++ seq_printf(m,
++ "%u/%u:%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_SET(cap_list[j].last),
++ RSBAC_UID_NUM(cap_list[j].last));
++ else
++#endif
++ seq_printf(m,
++ "%u:%u ",
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].last));
++ } else {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first))
++ seq_printf(m,
++ "%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first));
++ else
++#endif
++ seq_printf(m,
++ "%u ",
++ RSBAC_UID_NUM(cap_list[j].first));
++ }
++ }
++ rsbac_kfree(cap_list);
++ all_member_count +=
++ member_count;
++ }
++ }
++ rsbac_kfree(f_list);
++ all_count += count;
++ }
++ seq_printf(m,
++ "\ndevice %02u:%02u has %lu file fs cap set items, sum of %lu members, list is clean\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id), all_count,
++ all_member_count);
++#endif
++
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ all_member_count = 0;
++ all_count = 0;
++ count =
++ rsbac_list_lol_get_all_desc(device_p->
++ group_handle,
++ (void **) &f_list);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ member_count =
++ rsbac_list_lol_get_all_subdesc
++ (device_p->group_handle,
++ &f_list[i],
++ (void **) &cap_list);
++ seq_printf(m,
++ "\n %u\t%u\t",
++ f_list[i],
++ member_count);
++ if (member_count > 0) {
++ for (j = 0;
++ j < member_count;
++ j++) {
++ if (cap_list[j].first !=
++ cap_list[j].last) {
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first)
++ || RSBAC_UID_SET(cap_list[j].last)
++ )
++ seq_printf(m,
++ "%u/%u:%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_SET(cap_list[j].last),
++ RSBAC_UID_NUM(cap_list[j].last));
++ else
++#endif
++ seq_printf(m,
++ "%u:%u ",
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].last));
++ } else {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first))
++ seq_printf(m,
++ "%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first));
++ else
++#endif
++ seq_printf(m,
++ "%u ",
++ RSBAC_UID_NUM(cap_list[j].first));
++ }
++ }
++ rsbac_kfree(cap_list);
++ all_member_count +=
++ member_count;
++ }
++ }
++ rsbac_kfree(f_list);
++ all_count += count;
++ }
++ seq_printf(m,
++ "\ndevice %02u:%02u has %lu file group cap set items, sum of %lu members, list is clean\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id), all_count,
++ all_member_count);
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ all_member_count = 0;
++ all_count = 0;
++ count = rsbac_list_lol_get_all_desc(device_p->
++ group_eff_handle,
++ (void **) &f_list);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ member_count =
++ rsbac_list_lol_get_all_subdesc
++ (device_p->group_eff_handle,
++ &f_list[i],
++ (void **) &cap_list);
++ seq_printf(m,
++ "\n %u\t%u\t",
++ f_list[i],
++ member_count);
++ if (member_count > 0) {
++ for (j = 0;
++ j < member_count;
++ j++) {
++ if (cap_list[j].first !=
++ cap_list[j].last) {
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first)
++ || RSBAC_UID_SET(cap_list[j].last)
++ )
++ seq_printf(m,
++ "%u/%u:%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_SET(cap_list[j].last),
++ RSBAC_UID_NUM(cap_list[j].last));
++ else
++#endif
++ seq_printf(m,
++ "%u:%u ",
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].last));
++ } else {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first))
++ seq_printf(m,
++ "%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first));
++ else
++#endif
++ seq_printf(m,
++ "%u ",
++ RSBAC_UID_NUM(cap_list[j].first));
++ }
++ }
++ rsbac_kfree(cap_list);
++ all_member_count +=
++ member_count;
++ }
++ }
++ rsbac_kfree(f_list);
++ all_count += count;
++ }
++ seq_printf(m,
++ "\ndevice %02u:%02u has %lu file group eff cap set items, sum of %lu members, list is clean\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id), all_count,
++ all_member_count);
++ all_member_count = 0;
++ all_count = 0;
++ count = rsbac_list_lol_get_all_desc(device_p->
++ group_fs_handle,
++ (void **) &f_list);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ member_count =
++ rsbac_list_lol_get_all_subdesc
++ (device_p->group_fs_handle,
++ &f_list[i],
++ (void **) &cap_list);
++ seq_printf(m,
++ "\n %u\t%u\t",
++ f_list[i],
++ member_count);
++ if (member_count > 0) {
++ for (j = 0;
++ j < member_count;
++ j++) {
++ if (cap_list[j].first !=
++ cap_list[j].last) {
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first)
++ || RSBAC_UID_SET(cap_list[j].last)
++ )
++ seq_printf(m,
++ "%u/%u:%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_SET(cap_list[j].last),
++ RSBAC_UID_NUM(cap_list[j].last));
++ else
++#endif
++ seq_printf(m,
++ "%u:%u ",
++ RSBAC_UID_NUM(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].last));
++ } else {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_list[j].first))
++ seq_printf(m,
++ "%u/%u ",
++ RSBAC_UID_SET(cap_list[j].first),
++ RSBAC_UID_NUM(cap_list[j].first));
++ else
++#endif
++ seq_printf(m,
++ "%u ",
++ RSBAC_UID_NUM(cap_list[j].first));
++ }
++ }
++ rsbac_kfree(cap_list);
++ all_member_count +=
++ member_count;
++ }
++ }
++ rsbac_kfree(f_list);
++ all_count += count;
++ }
++ seq_printf(m,
++ "\ndevice %02u:%02u has %lu file group fs cap set items, sum of %lu members, list is clean\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id), all_count,
++ all_member_count);
++#endif
++#endif /* AUTH_GROUP */
++
++ device_p = device_p->next;
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++
++ return 0;
++}
++
++static int auth_caplist_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, auth_caplist_proc_show, NULL);
++}
++
++static const struct file_operations auth_caplist_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = auth_caplist_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *auth_caplist;
++#endif /* CONFIG_PROC_FS && CONFIG_RSBAC_PROC */
++
++/************************************************* */
++/* Init functions */
++/************************************************* */
++
++/* All functions return 0 if no error occurred, and a negative error code */
++/* otherwise. The error codes are defined in rsbac/error.h. */
++
++/************************************************************************** */
++/* Initialization of all AUTH data structures. After this call, all AUTH */
++/* data is kept in memory for performance reasons, but is written to disk */
++/* on every change. */
++
++/* Because there can be no access to the ACI data structures before init, */
++/* rsbac_init_auth() initializes the device list spinlock and the SRCU */
++/* structure here, before any list is registered. */
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++int rsbac_init_auth(void)
++#else
++int __init rsbac_init_auth(void)
++#endif
++{
++ int err = 0;
++ struct rsbac_auth_device_list_item_t *device_p = NULL;
++ struct rsbac_list_lol_info_t lol_info;
++
++ if (rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_init_auth(): RSBAC already initialized\n");
++ return -RSBAC_EREINIT;
++ }
++
++ rsbac_printk(KERN_INFO "rsbac_init_auth(): Initializing RSBAC: AUTH subsystem\n");
++
++ lol_info.version = RSBAC_AUTH_P_LIST_VERSION;
++ lol_info.key = RSBAC_AUTH_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_pid_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(struct rsbac_auth_cap_range_t);
++ lol_info.subdata_size = 0;
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &process_handle,
++ &lol_info,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ cap_compare,
++ NULL,
++ NULL,
++ NULL,
++ NULL,
++ RSBAC_AUTH_P_LIST_NAME,
++ RSBAC_AUTO_DEV,
++ RSBAC_LIST_MIN_MAX_HASHES,
++ rsbac_list_hash_pid,
++ NULL);
++ if (err) {
++ char *tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_init_auth(): Registering AUTH process cap list failed with error %s\n",
++ get_error_name(tmp, err));
++ rsbac_kfree(tmp);
++ }
++ }
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ lol_info.version = RSBAC_AUTH_P_LIST_VERSION;
++ lol_info.key = RSBAC_AUTH_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_pid_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(struct rsbac_auth_cap_range_t);
++ lol_info.subdata_size = 0;
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &process_eff_handle,
++ &lol_info,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ cap_compare,
++ NULL,
++ NULL,
++ NULL,
++ NULL,
++ RSBAC_AUTH_P_EFF_LIST_NAME,
++ RSBAC_AUTO_DEV,
++ RSBAC_LIST_MIN_MAX_HASHES,
++ rsbac_list_hash_pid,
++ NULL);
++ if (err) {
++ char *tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_init_auth(): Registering AUTH process eff cap list failed with error %s\n",
++ get_error_name(tmp, err));
++ rsbac_kfree(tmp);
++ }
++ }
++ lol_info.version = RSBAC_AUTH_P_LIST_VERSION;
++ lol_info.key = RSBAC_AUTH_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_pid_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(struct rsbac_auth_cap_range_t);
++ lol_info.subdata_size = 0;
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &process_fs_handle,
++ &lol_info,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ cap_compare,
++ NULL,
++ NULL,
++ NULL,
++ NULL,
++ RSBAC_AUTH_P_FS_LIST_NAME,
++ RSBAC_AUTO_DEV,
++ RSBAC_LIST_MIN_MAX_HASHES,
++ rsbac_list_hash_pid,
++ NULL);
++ if (err) {
++ char *tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_init_auth(): Registering AUTH process fs cap list failed with error %s\n",
++ get_error_name(tmp, err));
++ rsbac_kfree(tmp);
++ }
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ lol_info.version = RSBAC_AUTH_P_LIST_VERSION;
++ lol_info.key = RSBAC_AUTH_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_pid_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(struct rsbac_auth_cap_range_t);
++ lol_info.subdata_size = 0;
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &process_group_handle,
++ &lol_info,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ cap_compare,
++ NULL,
++ NULL,
++ NULL,
++ NULL,
++ RSBAC_AUTH_P_GROUP_LIST_NAME,
++ RSBAC_AUTO_DEV,
++ RSBAC_LIST_MIN_MAX_HASHES,
++ rsbac_list_hash_pid,
++ NULL);
++ if (err) {
++ char *tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_init_auth(): Registering AUTH process group cap list failed with error %s\n",
++ get_error_name(tmp, err));
++ rsbac_kfree(tmp);
++ }
++ }
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ lol_info.version = RSBAC_AUTH_P_LIST_VERSION;
++ lol_info.key = RSBAC_AUTH_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_pid_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(struct rsbac_auth_cap_range_t);
++ lol_info.subdata_size = 0;
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &process_group_eff_handle,
++ &lol_info,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ cap_compare,
++ NULL,
++ NULL,
++ NULL,
++ NULL,
++ RSBAC_AUTH_P_GROUP_EFF_LIST_NAME,
++ RSBAC_AUTO_DEV,
++ RSBAC_LIST_MIN_MAX_HASHES,
++ rsbac_list_hash_pid,
++ NULL);
++ if (err) {
++ char *tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_init_auth(): Registering AUTH process group eff cap list failed with error %s\n",
++ get_error_name(tmp, err));
++ rsbac_kfree(tmp);
++ }
++ }
++ lol_info.version = RSBAC_AUTH_P_LIST_VERSION;
++ lol_info.key = RSBAC_AUTH_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_pid_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(struct rsbac_auth_cap_range_t);
++ lol_info.subdata_size = 0;
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &process_group_fs_handle,
++ &lol_info,
++ RSBAC_LIST_DEF_DATA | RSBAC_LIST_AUTO_HASH_RESIZE | RSBAC_LIST_OWN_SLAB,
++ NULL,
++ cap_compare,
++ NULL,
++ NULL,
++ NULL,
++ NULL,
++ RSBAC_AUTH_P_GROUP_FS_LIST_NAME,
++ RSBAC_AUTO_DEV,
++ RSBAC_LIST_MIN_MAX_HASHES,
++ rsbac_list_hash_pid,
++ NULL);
++ if (err) {
++ char *tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_init_auth(): Registering AUTH process group fs cap list failed with error %s\n",
++ get_error_name(tmp, err));
++ rsbac_kfree(tmp);
++ }
++ }
++#endif
++#endif /* AUTH_GROUP */
++
++ auth_device_item_slab = rsbac_slab_create("rsbac_auth_device_item",
++ sizeof(struct rsbac_auth_device_list_item_t));
++
++ /* Init FD lists */
++ device_list_head_p = kmalloc(sizeof(*device_list_head_p), GFP_KERNEL);
++ if (!device_list_head_p) {
++ rsbac_printk(KERN_WARNING
++ "rsbac_init_auth(): Failed to allocate device_list_head\n");
++ return -ENOMEM;
++ }
++ spin_lock_init(&device_list_lock);
++ init_srcu_struct(&device_list_srcu);
++ lockdep_set_class(&device_list_lock, &device_list_lock_class);
++ device_list_head_p->head = NULL;
++ device_list_head_p->tail = NULL;
++ device_list_head_p->curr = NULL;
++ device_list_head_p->count = 0;
++
++ /* read all data */
++ rsbac_pr_debug(ds_auth, "rsbac_init_auth(): Registering FD lists\n");
++ device_p = create_device_item(rsbac_root_dev);
++ if (!device_p) {
++ rsbac_printk(KERN_CRIT
++ "rsbac_init_auth(): Could not add device!\n");
++ return -RSBAC_ECOULDNOTADDDEVICE;
++ }
++ if ((err = auth_register_fd_lists(device_p, rsbac_root_dev))) {
++ char tmp[RSBAC_MAXNAMELEN];
++
++ rsbac_printk(KERN_WARNING "rsbac_init_auth(): File/Dir cap set registration failed for dev %02u:%02u, err %s!\n",
++ RSBAC_MAJOR(rsbac_root_dev),
++ RSBAC_MINOR(rsbac_root_dev),
++ get_error_name(tmp, err));
++ }
++ device_p = add_device_item(device_p);
++ if (!device_p) {
++ rsbac_printk(KERN_CRIT
++ "rsbac_init_auth(): Could not add device!\n");
++ return -RSBAC_ECOULDNOTADDDEVICE;
++ }
++#if defined(CONFIG_RSBAC_PROC) && defined(CONFIG_PROC_FS)
++ auth_devices = proc_create("auth_devices",
++ S_IFREG | S_IRUGO,
++ proc_rsbac_root_p,
++ &auth_devices_proc_fops);
++ stats_auth = proc_create("stats_auth",
++ S_IFREG | S_IRUGO,
++ proc_rsbac_root_p,
++ &stats_auth_proc_fops);
++ auth_caplist = proc_create("auth_caplist",
++ S_IFREG | S_IRUGO,
++ proc_rsbac_root_p,
++ &auth_caplist_proc_fops);
++#endif
++
++ rsbac_pr_debug(ds_auth, "Ready.\n");
++ return err;
++}
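++
++/* Note on the list layout registered above: every process cap set is a
++ * list-of-lists entry whose descriptor is the rsbac_pid_t and whose
++ * sub-descriptors are rsbac_auth_cap_range_t ranges; there is no data
++ * payload (data_size/subdata_size are 0), only the descriptors and
++ * optional per-entry TTLs. */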
++
++int rsbac_mount_auth(kdev_t kdev)
++{
++ int err = 0;
++ struct rsbac_auth_device_list_item_t *device_p;
++ struct rsbac_auth_device_list_item_t *new_device_p;
++ int srcu_idx;
++
++ rsbac_pr_debug(ds_auth, "mounting device %02u:%02u\n",
++ RSBAC_MAJOR(kdev), RSBAC_MINOR(kdev));
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = lookup_device(kdev);
++ /* repeated mount? */
++ if (device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_mount_auth: repeated mount %u of device %02u:%02u\n",
++ device_p->mount_count, RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev));
++ device_p->mount_count++;
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return 0;
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++
++ new_device_p = create_device_item(kdev);
++ if (!new_device_p)
++ return -RSBAC_ECOULDNOTADDDEVICE;
++
++ /* register lists */
++ if ((err = auth_register_fd_lists(new_device_p, kdev))) {
++ char tmp[RSBAC_MAXNAMELEN];
++
++		rsbac_printk(KERN_WARNING "rsbac_mount_auth(): File/Dir cap set registration failed for dev %02u:%02u, err %s!\n",
++ RSBAC_MAJOR(kdev), RSBAC_MINOR(kdev),
++ get_error_name(tmp, err));
++ }
++
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ /* make sure to only add, if this device item has not been added in the meantime */
++ device_p = lookup_device(kdev);
++ if (device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_mount_auth(): mount race for device %02u:%02u detected!\n",
++ RSBAC_MAJOR(kdev), RSBAC_MINOR(kdev));
++ device_p->mount_count++;
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ clear_device_item(new_device_p);
++ } else {
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ device_p = add_device_item(new_device_p);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_mount_auth: adding device %02u:%02u failed!\n",
++ RSBAC_MAJOR(kdev), RSBAC_MINOR(kdev));
++ clear_device_item(new_device_p);
++ err = -RSBAC_ECOULDNOTADDDEVICE;
++ }
++ }
++ return err;
++}
++
++/* When umounting a device, its file cap set list must be removed. */
++
++int rsbac_umount_auth(kdev_t kdev)
++{
++ struct rsbac_auth_device_list_item_t *device_p;
++
++ if (!rsbac_is_initialized()) {
++		rsbac_printk(KERN_WARNING "rsbac_umount_auth(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ rsbac_pr_debug(ds_auth, "umounting device %02u:%02u\n",
++ RSBAC_MAJOR(kdev), RSBAC_MINOR(kdev));
++ /* sync of attribute lists was done in rsbac_umount */
++ spin_lock(&device_list_lock);
++ device_p = lookup_device(kdev);
++ if (device_p) {
++ if (device_p->mount_count == 1)
++ remove_device_item(kdev);
++ else {
++ if (device_p->mount_count > 1) {
++ device_p->mount_count--;
++ spin_unlock(&device_list_lock);
++ } else {
++ spin_unlock(&device_list_lock);
++				rsbac_printk(KERN_WARNING "rsbac_umount_auth(): device %02u:%02u has mount_count < 1!\n",
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev));
++ }
++ }
++ }
++ else
++ spin_unlock(&device_list_lock);
++ return 0;
++}
++
++/***************************************************/
++/* We also need some status information... */
++
++int rsbac_stats_auth(void)
++{
++ u_int cap_set_count = 0;
++ u_int member_count = 0;
++ struct rsbac_auth_device_list_item_t *device_p;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_stats_auth(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ rsbac_pr_debug(aef_auth, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ rsbac_printk(KERN_INFO "AUTH Status\n-----------\n");
++
++ rsbac_printk(KERN_INFO "%lu process cap set items, sum of %lu members\n",
++ rsbac_list_lol_count(process_handle),
++ rsbac_list_lol_all_subcount(process_handle));
++
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = rcu_dereference(device_list_head_p)->head;
++ while (device_p) {
++ /* reset counters */
++ cap_set_count = rsbac_list_lol_count(device_p->handle);
++ member_count = rsbac_list_lol_all_subcount(device_p->handle);
++ rsbac_printk(KERN_INFO "device %02u:%02u has %u file cap set items, sum of %u members\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id), cap_set_count,
++ member_count);
++ device_p = device_p->next;
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return 0;
++}
++
++/************************************************* */
++/* Access functions */
++/************************************************* */
++
++/* All these procedures take the SRCU read lock to protect the targets */
++/* during access. */
++/* Trying to access a set that was never created or has already been */
++/* removed returns an error! */
++
++/* rsbac_auth_add_to_capset */
++/* Add a set member to a set sublist. Set behaviour: also returns success */
++/* if the member was already in the set! */
++
++int rsbac_auth_add_to_p_capset(rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range,
++ rsbac_time_t ttl)
++{
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_add_to_p_capset(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_add_to_p_capset(): called from interrupt!\n");
++ }
++ if (cap_range.first > cap_range.last)
++ return -RSBAC_EINVALIDVALUE;
++ switch (cap_type) {
++ case ACT_real:
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ process_handle, ttl,
++ &pid, &cap_range,
++ NULL);
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ case ACT_eff:
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ process_eff_handle,
++ ttl, &pid, &cap_range,
++ NULL);
++ case ACT_fs:
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ process_fs_handle, ttl,
++ &pid, &cap_range,
++ NULL);
++#endif
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ case ACT_group_real:
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ process_group_handle,
++ ttl, &pid, &cap_range,
++ NULL);
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ case ACT_group_eff:
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ process_group_eff_handle,
++ ttl, &pid, &cap_range,
++ NULL);
++ case ACT_group_fs:
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ process_group_fs_handle,
++ ttl, &pid, &cap_range,
++ NULL);
++#endif
++#endif /* AUTH_GROUP */
++
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++}
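++
++/* Illustrative sketch only (#if 0, not compiled): granting a process the
++ * right to change its owner to any uid in the range 1000-1999 via the
++ * access function above.  The function name and the uid values are example
++ * assumptions; ta_number 0 (no transaction) and ttl 0 (no time limit) are
++ * assumed defaults. */
++#if 0
++static int allow_setuid_range_example(rsbac_pid_t pid)
++{
++	struct rsbac_auth_cap_range_t range;
++
++	range.first = 1000;
++	range.last = 1999;
++	return rsbac_auth_add_to_p_capset(0, pid, ACT_real, range, 0);
++}
++#endif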
++
++int rsbac_auth_add_to_f_capset(rsbac_list_ta_number_t ta_number,
++ rsbac_auth_file_t file,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range,
++ rsbac_time_t ttl)
++{
++ int err = 0;
++ struct rsbac_auth_device_list_item_t *device_p;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_add_to_f_capset(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_add_to_f_capset(): called from interrupt!\n");
++ }
++ if (cap_range.first > cap_range.last)
++ return -RSBAC_EINVALIDVALUE;
++
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = lookup_device(file.device);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_add_to_f_capset(): invalid device %02u:%02u!\n",
++ RSBAC_MAJOR(file.device),
++ RSBAC_MINOR(file.device));
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++
++ switch (cap_type) {
++ case ACT_real:
++ err = rsbac_ta_list_lol_subadd_ttl(ta_number,
++ device_p->handle,
++ ttl, &file.inode,
++ &cap_range, NULL);
++ break;
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ case ACT_eff:
++ err = rsbac_ta_list_lol_subadd_ttl(ta_number,
++ device_p->eff_handle,
++ ttl, &file.inode,
++ &cap_range, NULL);
++ break;
++ case ACT_fs:
++ err = rsbac_ta_list_lol_subadd_ttl(ta_number,
++ device_p->fs_handle,
++ ttl, &file.inode,
++ &cap_range, NULL);
++ break;
++#endif
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ case ACT_group_real:
++ err = rsbac_ta_list_lol_subadd_ttl(ta_number,
++ device_p->group_handle,
++ ttl,
++ &file.inode, &cap_range,
++ NULL);
++ break;
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ case ACT_group_eff:
++ err = rsbac_ta_list_lol_subadd_ttl(ta_number,
++ device_p->group_eff_handle,
++ ttl,
++ &file.inode, &cap_range,
++ NULL);
++ break;
++ case ACT_group_fs:
++ err = rsbac_ta_list_lol_subadd_ttl(ta_number,
++ device_p->group_fs_handle,
++ ttl,
++ &file.inode, &cap_range,
++ NULL);
++ break;
++#endif
++#endif /* AUTH_GROUP */
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return err;
++}
++
++/* rsbac_auth_remove_from_capset */
++/* Remove a set member from a sublist. Set behaviour: returns no error if */
++/* the member is not in the list. */
++
++int rsbac_auth_remove_from_p_capset(rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t
++ cap_range)
++{
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_remove_from_p_capset(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_remove_from_p_capset(): called from interrupt!\n");
++ }
++ if (cap_range.first > cap_range.last)
++ return -RSBAC_EINVALIDVALUE;
++ switch (cap_type) {
++ case ACT_real:
++ return rsbac_ta_list_lol_subremove(ta_number,
++ process_handle, &pid,
++ &cap_range);
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ case ACT_eff:
++ return rsbac_ta_list_lol_subremove(ta_number,
++ process_eff_handle,
++ &pid, &cap_range);
++ case ACT_fs:
++ return rsbac_ta_list_lol_subremove(ta_number,
++ process_fs_handle, &pid,
++ &cap_range);
++#endif
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ case ACT_group_real:
++ return rsbac_ta_list_lol_subremove(ta_number,
++ process_group_handle,
++ &pid, &cap_range);
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ case ACT_group_eff:
++ return rsbac_ta_list_lol_subremove(ta_number,
++ process_group_eff_handle,
++ &pid, &cap_range);
++ case ACT_group_fs:
++ return rsbac_ta_list_lol_subremove(ta_number,
++ process_group_fs_handle,
++ &pid, &cap_range);
++#endif
++#endif /* AUTH_GROUP */
++
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++}
++
++int rsbac_auth_remove_from_f_capset(rsbac_list_ta_number_t ta_number,
++ rsbac_auth_file_t file,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t
++ cap_range)
++{
++ int err = 0;
++ struct rsbac_auth_device_list_item_t *device_p;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_remove_from_f_capset(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_remove_from_f_capset(): called from interrupt!\n");
++ }
++ if (cap_range.first > cap_range.last)
++ return -RSBAC_EINVALIDVALUE;
++
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = lookup_device(file.device);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_remove_from_f_capset(): invalid device %02u:%02u!\n",
++ RSBAC_MAJOR(file.device),
++ RSBAC_MINOR(file.device));
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ switch (cap_type) {
++ case ACT_real:
++ err = rsbac_ta_list_lol_subremove(ta_number,
++ device_p->handle,
++ &file.inode, &cap_range);
++ break;
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ case ACT_eff:
++ err = rsbac_ta_list_lol_subremove(ta_number,
++ device_p->eff_handle,
++ &file.inode, &cap_range);
++ break;
++ case ACT_fs:
++ err = rsbac_ta_list_lol_subremove(ta_number,
++ device_p->fs_handle,
++ &file.inode, &cap_range);
++ break;
++#endif
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ case ACT_group_real:
++ err = rsbac_ta_list_lol_subremove(ta_number,
++ device_p->group_handle,
++ &file.inode, &cap_range);
++ break;
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ case ACT_group_eff:
++ err = rsbac_ta_list_lol_subremove(ta_number,
++ device_p->group_eff_handle,
++ &file.inode, &cap_range);
++ break;
++ case ACT_group_fs:
++ err = rsbac_ta_list_lol_subremove(ta_number,
++ device_p->group_fs_handle,
++ &file.inode, &cap_range);
++ break;
++#endif
++#endif /* AUTH_GROUP */
++
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return err;
++}
++
++/* rsbac_auth_clear_capset */
++/* Remove all set members from a sublist. Set behaviour: returns no error */
++/* if the list is empty. */
++
++int rsbac_auth_clear_p_capset(rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ enum rsbac_auth_cap_type_t cap_type)
++{
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_clear_p_capset(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_clear_p_capset(): called from interrupt!\n");
++ }
++ switch (cap_type) {
++ case ACT_real:
++ return rsbac_ta_list_lol_remove(ta_number, process_handle,
++ &pid);
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ case ACT_eff:
++ return rsbac_ta_list_lol_remove(ta_number,
++ process_eff_handle, &pid);
++ case ACT_fs:
++ return rsbac_ta_list_lol_remove(ta_number,
++ process_fs_handle, &pid);
++#endif
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ case ACT_group_real:
++ return rsbac_ta_list_lol_remove(ta_number,
++ process_group_handle,
++ &pid);
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ case ACT_group_eff:
++ return rsbac_ta_list_lol_remove(ta_number,
++ process_group_eff_handle,
++ &pid);
++ case ACT_group_fs:
++ return rsbac_ta_list_lol_remove(ta_number,
++ process_group_fs_handle,
++ &pid);
++#endif
++#endif /* AUTH_GROUP */
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++}
++
++int rsbac_auth_clear_f_capset(rsbac_list_ta_number_t ta_number,
++ rsbac_auth_file_t file,
++ enum rsbac_auth_cap_type_t cap_type)
++{
++ int err = 0;
++ struct rsbac_auth_device_list_item_t *device_p;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_clear_f_capset(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_clear_f_capset(): called from interrupt!\n");
++ }
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = lookup_device(file.device);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_clear_f_capset(): invalid device %02u:%02u!\n",
++ RSBAC_MAJOR(file.device),
++ RSBAC_MINOR(file.device));
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ switch (cap_type) {
++ case ACT_real:
++ err = rsbac_ta_list_lol_remove(ta_number,
++ device_p->handle,
++ &file.inode);
++ break;
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ case ACT_eff:
++ err = rsbac_ta_list_lol_remove(ta_number,
++ device_p->eff_handle,
++ &file.inode);
++ break;
++ case ACT_fs:
++ err = rsbac_ta_list_lol_remove(ta_number,
++ device_p->fs_handle,
++ &file.inode);
++ break;
++#endif
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ case ACT_group_real:
++ err = rsbac_ta_list_lol_remove(ta_number,
++ device_p->group_handle,
++ &file.inode);
++ break;
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ case ACT_group_eff:
++ err = rsbac_ta_list_lol_remove(ta_number,
++ device_p->group_eff_handle,
++ &file.inode);
++ break;
++ case ACT_group_fs:
++ err = rsbac_ta_list_lol_remove(ta_number,
++					       device_p->group_fs_handle,
++ &file.inode);
++ break;
++#endif
++#endif /* AUTH_GROUP */
++
++ default:
++ err = -RSBAC_EINVALIDTARGET;
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return err;
++}
++
++/* rsbac_auth_capset_member */
++/* Return truth value indicating whether the member is in the set */
++
++rsbac_boolean_t rsbac_auth_p_capset_member(rsbac_pid_t pid,
++ enum rsbac_auth_cap_type_t
++ cap_type, rsbac_uid_t member)
++{
++ rsbac_boolean_t result;
++#if defined(CONFIG_RSBAC_AUTH_LEARN)
++ int srcu_idx;
++#endif
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_p_capset_member(): RSBAC not initialized\n");
++ return FALSE;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_p_capset_member(): called from interrupt!\n");
++ }
++ switch (cap_type) {
++ case ACT_real:
++ result = rsbac_list_lol_subexist_compare(process_handle, &pid,
++ &member,
++ single_cap_compare);
++#if defined(CONFIG_RSBAC_UM_VIRTUAL)
++ /* check for pseudo set "all" */
++ if (!result) {
++ rsbac_uid_t amember;
++
++ amember = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_ALL, member);
++ result = rsbac_list_lol_subexist_compare(process_handle,
++ &pid,
++ &amember,
++ single_cap_compare);
++ }
++#endif
++#if defined(CONFIG_RSBAC_AUTH_LEARN)
++ if (!result && (RSBAC_UID_NUM(member) <= RSBAC_AUTH_MAX_RANGE_UID)
++ ) {
++ union rsbac_target_id_t tid;
++ union rsbac_attribute_value_t attr_val;
++ rsbac_boolean_t learn;
++
++ learn = rsbac_auth_learn;
++ if (!learn) {
++ tid.process = pid;
++ /* check learn on process */
++ if (!rsbac_get_attr
++ (SW_AUTH, T_PROCESS, tid, A_auth_learn,
++ &attr_val, FALSE))
++ learn = attr_val.auth_learn;
++ }
++ if (learn) {
++ struct rsbac_auth_cap_range_t range;
++ int err;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(member))
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH capability for uid %u/%u to process %u (%.15s) to transaction %u!\n",
++ RSBAC_UID_SET(member),
++ RSBAC_UID_NUM(member),
++ pid_nr(pid),
++ current->comm,
++ auth_learn_ta);
++ else
++#endif
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH capability for uid %u to process %u (%.15s) to transaction %u!\n",
++ RSBAC_UID_NUM(member), pid_nr(pid), current->comm, auth_learn_ta);
++ range.first = member;
++ range.last = member;
++#ifdef CONFIG_RSBAC_AUTH_LEARN_TA
++ if (!rsbac_list_ta_exist(auth_learn_ta))
++ rsbac_list_ta_begin(CONFIG_RSBAC_LIST_TRANS_MAX_TTL,
++ &auth_learn_ta,
++ RSBAC_ALL_USERS,
++ RSBAC_AUTH_LEARN_TA_NAME,
++ NULL);
++#endif
++ rsbac_ta_list_lol_subadd_ttl(auth_learn_ta,
++ process_handle,
++ RSBAC_LIST_TTL_KEEP,
++ &pid,
++ &range,
++ NULL);
++
++ tid.process = pid;
++ if (!(err = rsbac_get_attr
++ (SW_GEN, T_PROCESS, tid,
++ A_program_file, &attr_val,
++ FALSE))) {
++				struct rsbac_auth_device_list_item_t *device_p;
++				union rsbac_attribute_value_t attr_val2;
++ char * target_id_name;
++
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ target_id_name
++ = rsbac_kmalloc_unlocked(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++#else
++ target_id_name = rsbac_kmalloc_unlocked(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++#endif
++ if (!rsbac_get_attr
++ (SW_AUTH, T_PROCESS, tid,
++ A_auth_start_uid, &attr_val2,
++ FALSE)
++ && (range.first ==
++ attr_val2.auth_start_uid)
++ ) {
++ range.first =
++ RSBAC_AUTH_OWNER_F_CAP;
++ range.last = range.first;
++ }
++ tid.file = attr_val.program_file;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(range.first))
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH capability for uid %u/%u to FILE %s to transaction %u!\n",
++ RSBAC_UID_SET(range.first),
++ RSBAC_UID_NUM(range.first),
++ get_target_name(NULL, T_FILE, target_id_name, tid),
++ auth_learn_ta);
++ else
++#endif
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH capability for uid %u to FILE %s to transaction %u!\n",
++ RSBAC_UID_NUM(range.first),
++ get_target_name(NULL, T_FILE, target_id_name, tid),
++ auth_learn_ta);
++ rsbac_kfree(target_id_name);
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++					device_p = lookup_device(attr_val.program_file.device);
++ if (device_p) {
++ rsbac_ta_list_lol_subadd_ttl(
++ auth_learn_ta,
++ device_p->handle,
++ RSBAC_LIST_TTL_KEEP,
++ &attr_val.program_file.inode,
++ &range,
++ NULL);
++ } else {
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): unknown device %02u:%02u!\n",
++					     MAJOR(attr_val.program_file.device),
++					     MINOR(attr_val.program_file.device));
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ } else
++ rsbac_pr_get_error_num(A_program_file, err);
++ result = TRUE;
++ }
++ }
++#endif
++ break;
++
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ case ACT_eff:
++ result =
++ rsbac_list_lol_subexist_compare(process_eff_handle,
++ &pid, &member,
++ single_cap_compare);
++#if defined(CONFIG_RSBAC_UM_VIRTUAL)
++ /* check for pseudo set "all" */
++ if (!result) {
++ rsbac_uid_t amember;
++
++ amember = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_ALL, member);
++ result = rsbac_list_lol_subexist_compare(process_eff_handle,
++ &pid,
++ &amember,
++ single_cap_compare);
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH_LEARN)
++ if (!result && (RSBAC_UID_NUM(member) <= RSBAC_AUTH_MAX_RANGE_UID)
++ ) {
++ union rsbac_target_id_t tid;
++ union rsbac_attribute_value_t attr_val;
++ rsbac_boolean_t learn;
++
++ learn = rsbac_auth_learn;
++ if (!learn) {
++ tid.process = pid;
++ /* check learn on process */
++ if (!rsbac_get_attr
++ (SW_AUTH, T_PROCESS, tid, A_auth_learn,
++ &attr_val, FALSE))
++ learn = attr_val.auth_learn;
++ }
++ if (learn) {
++ struct rsbac_auth_cap_range_t range;
++ int err;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(member))
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH eff capability for uid %u/%u to process %u (%.15s) to transaction %u!\n",
++ RSBAC_UID_SET(member),
++ RSBAC_UID_NUM(member),
++ pid_nr(pid),
++ current->comm,
++ auth_learn_ta);
++ else
++#endif
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH eff capability for uid %u to process %u (%.15s) to transaction %u!\n",
++ RSBAC_UID_NUM(member), pid_nr(pid), current->comm, auth_learn_ta);
++ range.first = member;
++ range.last = member;
++#ifdef CONFIG_RSBAC_AUTH_LEARN_TA
++ if (!rsbac_list_ta_exist(auth_learn_ta))
++ rsbac_list_ta_begin(CONFIG_RSBAC_LIST_TRANS_MAX_TTL,
++ &auth_learn_ta,
++ RSBAC_ALL_USERS,
++ RSBAC_AUTH_LEARN_TA_NAME,
++ NULL);
++#endif
++ rsbac_ta_list_lol_subadd_ttl(auth_learn_ta,
++ process_eff_handle,
++ RSBAC_LIST_TTL_KEEP,
++ &pid,
++ &range,
++ NULL);
++
++ tid.process = pid;
++ if (!(err = rsbac_get_attr
++ (SW_GEN, T_PROCESS, tid,
++ A_program_file, &attr_val,
++ FALSE))) {
++				struct rsbac_auth_device_list_item_t *device_p;
++				union rsbac_attribute_value_t attr_val2;
++ char * target_id_name;
++
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ target_id_name
++ = rsbac_kmalloc_unlocked(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++#else
++ target_id_name = rsbac_kmalloc_unlocked(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++#endif
++ if (!rsbac_get_attr
++ (SW_AUTH, T_PROCESS, tid,
++ A_auth_start_uid, &attr_val2,
++ FALSE)
++ && (range.first ==
++ attr_val2.auth_start_uid)
++ ) {
++ range.first =
++ RSBAC_AUTH_OWNER_F_CAP;
++ range.last = range.first;
++ } else
++ if (!rsbac_get_attr
++ (SW_AUTH, T_PROCESS, tid,
++ A_auth_start_euid,
++ &attr_val2, FALSE)
++					&& (range.first == attr_val2.auth_start_euid)
++ ) {
++ range.first =
++ RSBAC_AUTH_DAC_OWNER_F_CAP;
++ range.last = range.first;
++ }
++ tid.file = attr_val.program_file;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(range.first))
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH eff capability for uid %u/%u to FILE %s to transaction %u!\n",
++ RSBAC_UID_SET(range.first),
++ RSBAC_UID_NUM(range.first),
++ get_target_name(NULL, T_FILE, target_id_name, tid),
++ auth_learn_ta);
++ else
++#endif
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH eff capability for uid %u to FILE %s to transaction %u!\n",
++ RSBAC_UID_NUM(range.first),
++ get_target_name(NULL, T_FILE, target_id_name, tid),
++ auth_learn_ta);
++ rsbac_kfree(target_id_name);
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++					device_p = lookup_device(attr_val.program_file.device);
++ if (device_p) {
++ rsbac_ta_list_lol_subadd_ttl(
++ auth_learn_ta,
++ device_p->eff_handle,
++ RSBAC_LIST_TTL_KEEP,
++ &attr_val.program_file.inode,
++ &range,
++ NULL);
++ } else {
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): unknown device %02u:%02u!\n",
++					     MAJOR(attr_val.program_file.device),
++					     MINOR(attr_val.program_file.device));
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ } else
++ rsbac_pr_get_error_num(A_program_file, err);
++ result = TRUE;
++ }
++ }
++#endif
++ break;
++
++ case ACT_fs:
++ result =
++ rsbac_list_lol_subexist_compare(process_fs_handle,
++ &pid, &member,
++ single_cap_compare);
++#if defined(CONFIG_RSBAC_UM_VIRTUAL)
++ /* check for pseudo set "all" */
++ if (!result) {
++ rsbac_uid_t amember;
++
++ amember = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_ALL, member);
++ result = rsbac_list_lol_subexist_compare(process_fs_handle,
++ &pid,
++ &amember,
++ single_cap_compare);
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH_LEARN)
++ if (!result && (RSBAC_UID_NUM(member) <= RSBAC_AUTH_MAX_RANGE_UID)
++ ) {
++ union rsbac_target_id_t tid;
++ union rsbac_attribute_value_t attr_val;
++ rsbac_boolean_t learn;
++
++ learn = rsbac_auth_learn;
++ if (!learn) {
++ tid.process = pid;
++ /* check learn on process */
++ if (!rsbac_get_attr
++ (SW_AUTH, T_PROCESS, tid, A_auth_learn,
++ &attr_val, FALSE))
++ learn = attr_val.auth_learn;
++ }
++ if (learn) {
++ struct rsbac_auth_cap_range_t range;
++ int err;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(member))
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH fs capability for uid %u/%u to process %u (%.15s) to transaction %u!\n",
++ RSBAC_UID_SET(member),
++ RSBAC_UID_NUM(member),
++ pid_nr(pid),
++ current->comm,
++ auth_learn_ta);
++ else
++#endif
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH fs capability for uid %u to process %u (%.15s) to transaction %u!\n",
++ RSBAC_UID_NUM(member), pid_nr(pid), current->comm, auth_learn_ta);
++ range.first = member;
++ range.last = member;
++#ifdef CONFIG_RSBAC_AUTH_LEARN_TA
++ if (!rsbac_list_ta_exist(auth_learn_ta))
++ rsbac_list_ta_begin(CONFIG_RSBAC_LIST_TRANS_MAX_TTL,
++ &auth_learn_ta,
++ RSBAC_ALL_USERS,
++ RSBAC_AUTH_LEARN_TA_NAME,
++ NULL);
++#endif
++ rsbac_ta_list_lol_subadd_ttl(auth_learn_ta,
++ process_fs_handle,
++ RSBAC_LIST_TTL_KEEP,
++ &pid,
++ &range,
++ NULL);
++
++ tid.process = pid;
++ if (!(err = rsbac_get_attr
++ (SW_GEN, T_PROCESS, tid,
++ A_program_file, &attr_val,
++ FALSE))) {
++				struct rsbac_auth_device_list_item_t *device_p;
++				union rsbac_attribute_value_t attr_val2;
++ char * target_id_name;
++
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ target_id_name
++ = rsbac_kmalloc_unlocked(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++#else
++ target_id_name = rsbac_kmalloc_unlocked(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++#endif
++ if (!rsbac_get_attr
++ (SW_AUTH, T_PROCESS, tid,
++ A_auth_start_uid, &attr_val2,
++ FALSE)
++ && (range.first ==
++ attr_val2.auth_start_uid)
++ ) {
++ range.first =
++ RSBAC_AUTH_OWNER_F_CAP;
++ range.last = range.first;
++ } else
++ if (!rsbac_get_attr
++ (SW_AUTH, T_PROCESS, tid,
++ A_auth_start_euid,
++ &attr_val2, FALSE)
++					&& (range.first == attr_val2.auth_start_euid)
++ ) {
++ range.first =
++ RSBAC_AUTH_DAC_OWNER_F_CAP;
++ range.last = range.first;
++ }
++ tid.file = attr_val.program_file;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(range.first))
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH fs capability for uid %u/%u to FILE %s to transaction %u!\n",
++ RSBAC_UID_SET(range.first),
++ RSBAC_UID_NUM(range.first),
++ get_target_name(NULL, T_FILE, target_id_name, tid),
++ auth_learn_ta);
++ else
++#endif
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH fs capability for uid %u to FILE %s to transaction %u!\n",
++ RSBAC_UID_NUM(range.first),
++ get_target_name(NULL, T_FILE, target_id_name, tid),
++ auth_learn_ta);
++ rsbac_kfree(target_id_name);
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++					device_p = lookup_device(attr_val.program_file.device);
++ if (device_p) {
++ rsbac_ta_list_lol_subadd_ttl(
++ auth_learn_ta,
++ device_p->fs_handle,
++ RSBAC_LIST_TTL_KEEP,
++ &attr_val.program_file.inode,
++ &range,
++ NULL);
++ } else {
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): unknown device %02u:%02u!\n",
++					     MAJOR(attr_val.program_file.device),
++					     MINOR(attr_val.program_file.device));
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ } else
++ rsbac_pr_get_error_num(A_program_file, err);
++ result = TRUE;
++ }
++ }
++#endif
++ break;
++#endif /* AUTH_DAC_OWNER */
++
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ case ACT_group_real:
++ result =
++ rsbac_list_lol_subexist_compare(process_group_handle,
++ &pid, &member,
++ single_cap_compare);
++#if defined(CONFIG_RSBAC_UM_VIRTUAL)
++ /* check for pseudo set "all" */
++ if (!result) {
++ rsbac_uid_t amember;
++
++ amember = RSBAC_GEN_GID(RSBAC_UM_VIRTUAL_ALL, member);
++ result = rsbac_list_lol_subexist_compare(process_group_handle,
++ &pid,
++ &amember,
++ single_cap_compare);
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH_LEARN)
++ if (!result && (RSBAC_GID_NUM(member) <= RSBAC_AUTH_MAX_RANGE_GID)
++ ) {
++ union rsbac_target_id_t tid;
++ union rsbac_attribute_value_t attr_val;
++ rsbac_boolean_t learn;
++
++ learn = rsbac_auth_learn;
++ if (!learn) {
++ tid.process = pid;
++ /* check learn on process */
++ if (!rsbac_get_attr
++ (SW_AUTH, T_PROCESS, tid, A_auth_learn,
++ &attr_val, FALSE))
++ learn = attr_val.auth_learn;
++ }
++ if (learn) {
++ struct rsbac_auth_cap_range_t range;
++ int err;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_GID_SET(member))
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH group capability for gid %u/%u to process %u (%.15s) to transaction %u!\n",
++ RSBAC_GID_SET(member),
++ RSBAC_GID_NUM(member),
++ pid_nr(pid),
++ current->comm,
++ auth_learn_ta);
++ else
++#endif
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH group capability for gid %u to process %u (%.15s) to transaction %u!\n",
++ RSBAC_GID_NUM(member), pid_nr(pid), current->comm, auth_learn_ta);
++ range.first = member;
++ range.last = member;
++#ifdef CONFIG_RSBAC_AUTH_LEARN_TA
++ if (!rsbac_list_ta_exist(auth_learn_ta))
++ rsbac_list_ta_begin(CONFIG_RSBAC_LIST_TRANS_MAX_TTL,
++ &auth_learn_ta,
++ RSBAC_ALL_USERS,
++ RSBAC_AUTH_LEARN_TA_NAME,
++ NULL);
++#endif
++ rsbac_ta_list_lol_subadd_ttl(auth_learn_ta,
++ process_group_handle,
++ RSBAC_LIST_TTL_KEEP,
++ &pid,
++ &range,
++ NULL);
++
++ tid.process = pid;
++ if (!(err = rsbac_get_attr
++ (SW_GEN, T_PROCESS, tid,
++ A_program_file, &attr_val,
++ FALSE))) {
++				struct rsbac_auth_device_list_item_t *device_p;
++				union rsbac_attribute_value_t attr_val2;
++ char * target_id_name;
++
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ target_id_name
++ = rsbac_kmalloc_unlocked(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++#else
++ target_id_name = rsbac_kmalloc_unlocked(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++#endif
++ if (!rsbac_get_attr
++ (SW_AUTH, T_PROCESS, tid,
++ A_auth_start_gid, &attr_val2,
++ FALSE)
++ && (range.first ==
++ attr_val2.auth_start_gid)
++ ) {
++ range.first =
++ RSBAC_AUTH_GROUP_F_CAP;
++ range.last = range.first;
++ }
++ tid.file = attr_val.program_file;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_GID_SET(range.first))
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH group capability for gid %u/%u to FILE %s to transaction %u!\n",
++ RSBAC_GID_SET(range.first),
++ RSBAC_GID_NUM(range.first),
++ get_target_name(NULL, T_FILE, target_id_name, tid),
++ auth_learn_ta);
++ else
++#endif
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH group capability for gid %u to FILE %s to transaction %u!\n",
++ RSBAC_GID_NUM(range.first),
++ get_target_name(NULL, T_FILE, target_id_name, tid),
++ auth_learn_ta);
++ rsbac_kfree(target_id_name);
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++					device_p = lookup_device(attr_val.program_file.device);
++ if (device_p) {
++ rsbac_ta_list_lol_subadd_ttl(
++ auth_learn_ta,
++ device_p->group_handle,
++ RSBAC_LIST_TTL_KEEP,
++ &attr_val.program_file.inode,
++ &range,
++ NULL);
++ } else {
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): unknown device %02u:%02u!\n",
++					     MAJOR(attr_val.program_file.device),
++					     MINOR(attr_val.program_file.device));
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ } else
++ rsbac_pr_get_error_num(A_program_file, err);
++ result = TRUE;
++ }
++ }
++#endif
++ break;
++
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ case ACT_group_eff:
++ result =
++ rsbac_list_lol_subexist_compare
++ (process_group_eff_handle, &pid, &member,
++ single_cap_compare);
++#if defined(CONFIG_RSBAC_UM_VIRTUAL)
++ /* check for pseudo set "all" */
++ if (!result) {
++ rsbac_uid_t amember;
++
++ amember = RSBAC_GEN_GID(RSBAC_UM_VIRTUAL_ALL, member);
++ result = rsbac_list_lol_subexist_compare(process_group_eff_handle,
++ &pid,
++ &amember,
++ single_cap_compare);
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH_LEARN)
++ if (!result && (RSBAC_GID_NUM(member) <= RSBAC_AUTH_MAX_RANGE_GID)
++ ) {
++ union rsbac_target_id_t tid;
++ union rsbac_attribute_value_t attr_val;
++ rsbac_boolean_t learn;
++
++ learn = rsbac_auth_learn;
++ if (!learn) {
++ tid.process = pid;
++ /* check learn on process */
++ if (!rsbac_get_attr
++ (SW_AUTH, T_PROCESS, tid, A_auth_learn,
++ &attr_val, FALSE))
++ learn = attr_val.auth_learn;
++ }
++ if (learn) {
++ struct rsbac_auth_cap_range_t range;
++ int err;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_GID_SET(member))
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH group eff capability for gid %u/%u to process %u (%.15s) to transaction %u!\n",
++ RSBAC_GID_SET(member),
++ RSBAC_GID_NUM(member),
++ pid_nr(pid),
++ current->comm,
++ auth_learn_ta);
++ else
++#endif
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH group eff capability for gid %u to process %u (%.15s) to transaction %u!\n",
++ RSBAC_GID_NUM(member), pid_nr(pid), current->comm, auth_learn_ta);
++ range.first = member;
++ range.last = member;
++#ifdef CONFIG_RSBAC_AUTH_LEARN_TA
++ if (!rsbac_list_ta_exist(auth_learn_ta))
++ rsbac_list_ta_begin(CONFIG_RSBAC_LIST_TRANS_MAX_TTL,
++ &auth_learn_ta,
++ RSBAC_ALL_USERS,
++ RSBAC_AUTH_LEARN_TA_NAME,
++ NULL);
++#endif
++ rsbac_ta_list_lol_subadd_ttl(auth_learn_ta,
++ process_group_eff_handle,
++ RSBAC_LIST_TTL_KEEP,
++ &pid,
++ &range,
++ NULL);
++
++ tid.process = pid;
++ if (!(err = rsbac_get_attr
++ (SW_GEN, T_PROCESS, tid,
++ A_program_file, &attr_val,
++ FALSE))) {
++				struct rsbac_auth_device_list_item_t *device_p;
++				union rsbac_attribute_value_t attr_val2;
++ char * target_id_name;
++
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ target_id_name
++ = rsbac_kmalloc_unlocked(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++#else
++ target_id_name = rsbac_kmalloc_unlocked(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++#endif
++ if (!rsbac_get_attr
++ (SW_AUTH, T_PROCESS, tid,
++ A_auth_start_gid, &attr_val2,
++ FALSE)
++ && (range.first ==
++ attr_val2.auth_start_gid)
++ ) {
++ range.first =
++ RSBAC_AUTH_GROUP_F_CAP;
++ range.last = range.first;
++ } else
++ if (!rsbac_get_attr
++ (SW_AUTH, T_PROCESS, tid,
++ A_auth_start_egid,
++ &attr_val2, FALSE)
++					&& (range.first == attr_val2.auth_start_egid)
++ ) {
++ range.first =
++ RSBAC_AUTH_DAC_GROUP_F_CAP;
++ range.last = range.first;
++ }
++ tid.file = attr_val.program_file;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_GID_SET(range.first))
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH group eff capability for gid %u/%u to FILE %s to transaction %u!\n",
++ RSBAC_GID_SET(range.first),
++ RSBAC_GID_NUM(range.first),
++ get_target_name(NULL, T_FILE, target_id_name, tid),
++ auth_learn_ta);
++ else
++#endif
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH group eff capability for gid %u to FILE %s to transaction %u!\n",
++ RSBAC_GID_NUM(range.first),
++ get_target_name(NULL, T_FILE, target_id_name, tid),
++ auth_learn_ta);
++ rsbac_kfree(target_id_name);
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++					device_p = lookup_device(attr_val.program_file.device);
++ if (device_p) {
++ rsbac_ta_list_lol_subadd_ttl(
++ auth_learn_ta,
++ device_p->group_eff_handle,
++ RSBAC_LIST_TTL_KEEP,
++ &attr_val.program_file.inode,
++ &range,
++ NULL);
++ } else {
++					     MAJOR(attr_val.program_file.device),
++					     MINOR(attr_val.program_file.device));
++ device));
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ } else
++ rsbac_pr_get_error_num(A_program_file, err);
++ result = TRUE;
++ }
++ }
++#endif
++ break;
++
++ case ACT_group_fs:
++ result =
++ rsbac_list_lol_subexist_compare
++ (process_group_fs_handle, &pid, &member,
++ single_cap_compare);
++#if defined(CONFIG_RSBAC_UM_VIRTUAL)
++ /* check for pseudo set "all" */
++ if (!result) {
++ rsbac_uid_t amember;
++
++ amember = RSBAC_GEN_GID(RSBAC_UM_VIRTUAL_ALL, member);
++ result = rsbac_list_lol_subexist_compare(process_group_fs_handle,
++ &pid,
++ &amember,
++ single_cap_compare);
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH_LEARN)
++ if (!result && (RSBAC_GID_NUM(member) <= RSBAC_AUTH_MAX_RANGE_GID)
++ ) {
++ union rsbac_target_id_t tid;
++ union rsbac_attribute_value_t attr_val;
++ rsbac_boolean_t learn;
++
++ learn = rsbac_auth_learn;
++ if (!learn) {
++ tid.process = pid;
++ /* check learn on process */
++ if (!rsbac_get_attr
++ (SW_AUTH, T_PROCESS, tid, A_auth_learn,
++ &attr_val, FALSE))
++ learn = attr_val.auth_learn;
++ }
++ if (learn) {
++ struct rsbac_auth_cap_range_t range;
++ int err;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_GID_SET(member))
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH group fs capability for gid %u/%u to process %u (%.15s) to transaction %u!\n",
++ RSBAC_GID_SET(member),
++ RSBAC_GID_NUM(member),
++ pid_nr(pid),
++ current->comm,
++ auth_learn_ta);
++ else
++#endif
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH group fs capability for gid %u to process %u (%.15s) to transaction %u!\n",
++ RSBAC_GID_NUM(member), pid_nr(pid), current->comm, auth_learn_ta);
++ range.first = member;
++ range.last = member;
++#ifdef CONFIG_RSBAC_AUTH_LEARN_TA
++ if (!rsbac_list_ta_exist(auth_learn_ta))
++ rsbac_list_ta_begin(CONFIG_RSBAC_LIST_TRANS_MAX_TTL,
++ &auth_learn_ta,
++ RSBAC_ALL_USERS,
++ RSBAC_AUTH_LEARN_TA_NAME,
++ NULL);
++#endif
++ rsbac_ta_list_lol_subadd_ttl(auth_learn_ta,
++ process_group_fs_handle,
++ RSBAC_LIST_TTL_KEEP,
++ &pid,
++ &range,
++ NULL);
++
++ tid.process = pid;
++ if (!(err = rsbac_get_attr
++ (SW_GEN, T_PROCESS, tid,
++ A_program_file, &attr_val,
++ FALSE))) {
++				struct rsbac_auth_device_list_item_t *device_p;
++				union rsbac_attribute_value_t attr_val2;
++ char * target_id_name;
++
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ target_id_name
++ = rsbac_kmalloc_unlocked(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++ /* max. path name len + some extra */
++#else
++ target_id_name = rsbac_kmalloc_unlocked(2 * RSBAC_MAXNAMELEN);
++ /* max. file name len + some extra */
++#endif
++ if (!rsbac_get_attr
++ (SW_AUTH, T_PROCESS, tid,
++ A_auth_start_gid, &attr_val2,
++ FALSE)
++ && (range.first ==
++ attr_val2.auth_start_gid)
++ ) {
++ range.first =
++ RSBAC_AUTH_GROUP_F_CAP;
++ range.last = range.first;
++ } else
++ if (!rsbac_get_attr
++ (SW_AUTH, T_PROCESS, tid,
++ A_auth_start_egid,
++ &attr_val2, FALSE)
++					&& (range.first == attr_val2.auth_start_egid)
++ ) {
++ range.first =
++ RSBAC_AUTH_DAC_GROUP_F_CAP;
++ range.last = range.first;
++ }
++ tid.file = attr_val.program_file;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_GID_SET(range.first))
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH group fs capability for gid %u/%u to FILE %s to transaction %u!\n",
++ RSBAC_GID_SET(range.first),
++ RSBAC_GID_NUM(range.first),
++ get_target_name(NULL, T_FILE, target_id_name, tid),
++ auth_learn_ta);
++ else
++#endif
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): adding AUTH group fs capability for gid %u to FILE %s to transaction %u!\n",
++ RSBAC_GID_NUM(range.first),
++ get_target_name(NULL, T_FILE, target_id_name, tid),
++ auth_learn_ta);
++ rsbac_kfree(target_id_name);
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++					device_p = lookup_device(attr_val.program_file.device);
++ if (device_p) {
++ rsbac_ta_list_lol_subadd_ttl(
++ auth_learn_ta,
++ device_p->group_fs_handle,
++ RSBAC_LIST_TTL_KEEP,
++ &attr_val.program_file.inode,
++ &range,
++ NULL);
++ } else {
++ rsbac_printk(KERN_INFO "rsbac_auth_p_capset_member(): unknown device %02u:%02u!\n",
++					     MAJOR(attr_val.program_file.device),
++					     MINOR(attr_val.program_file.device));
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ } else
++ rsbac_pr_get_error_num(A_program_file, err);
++ result = TRUE;
++ }
++ }
++#endif
++ break;
++#endif /* AUTH_DAC_GROUP */
++#endif /* AUTH_GROUP */
++
++ default:
++ return FALSE;
++ }
++ return result;
++}
++
++/* rsbac_auth_remove_capset */
++/* Remove a full set. For cleanup when the object is deleted. */
++/* To empty an existing set use rsbac_auth_clear_capset. */
++
++int rsbac_auth_remove_p_capsets(rsbac_pid_t pid)
++{
++ int err;
++
++ err = rsbac_auth_clear_p_capset(0, pid, ACT_real);
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ err = rsbac_auth_clear_p_capset(0, pid, ACT_eff);
++ err = rsbac_auth_clear_p_capset(0, pid, ACT_fs);
++#endif
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ err = rsbac_auth_clear_p_capset(0, pid, ACT_group_real);
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ err = rsbac_auth_clear_p_capset(0, pid, ACT_group_eff);
++ err = rsbac_auth_clear_p_capset(0, pid, ACT_group_fs);
++#endif
++#endif /* AUTH_GROUP */
++
++ return err;
++}
++
++int rsbac_auth_remove_f_capsets(rsbac_auth_file_t file)
++{
++ int err;
++
++ err = rsbac_auth_clear_f_capset(0, file, ACT_real);
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ if (!err)
++ err = rsbac_auth_clear_f_capset(0, file, ACT_eff);
++ if (!err)
++ err = rsbac_auth_clear_f_capset(0, file, ACT_fs);
++#endif
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ err = rsbac_auth_clear_f_capset(0, file, ACT_group_real);
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ if (!err)
++ err = rsbac_auth_clear_f_capset(0, file, ACT_group_eff);
++ if (!err)
++ err = rsbac_auth_clear_f_capset(0, file, ACT_group_fs);
++#endif
++#endif /* AUTH_GROUP */
++
++ return err;
++}
++
++int rsbac_auth_copy_fp_capset(rsbac_auth_file_t file,
++ rsbac_pid_t p_cap_set_id)
++{
++ struct rsbac_auth_device_list_item_t *device_p;
++ int err = 0;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_copy_fp_capset(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_copy_fp_capset(): called from interrupt!\n");
++ }
++/*
++ rsbac_pr_debug(ds_auth, "Copying file cap set data to process cap set\n");
++*/
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = lookup_device(file.device);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_copy_fp_capset(): invalid device %02u:%02u!\n",
++ RSBAC_MAJOR(file.device),
++ RSBAC_MINOR(file.device));
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ /* call the copy function */
++ err = copy_fp_cap_set_item(device_p, file, p_cap_set_id);
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return err;
++}
++
++int rsbac_auth_copy_pp_capset(rsbac_pid_t old_p_set_id,
++ rsbac_pid_t new_p_set_id)
++{
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_copy_pp_capset(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_copy_pp_capset(): called from interrupt!\n");
++ }
++/*
++ rsbac_pr_debug(ds_auth, "Copying process cap set data to process cap set\n");
++*/
++ /* call the copy function */
++ return copy_pp_cap_set_item(old_p_set_id, new_p_set_id);
++}
++
++int rsbac_auth_get_f_caplist(rsbac_list_ta_number_t ta_number,
++ rsbac_auth_file_t file,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t **caplist_p,
++ rsbac_time_t ** ttllist_p)
++{
++ struct rsbac_auth_device_list_item_t *device_p;
++ long count;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_get_f_caplist(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_get_f_caplist(): called from interrupt!\n");
++ }
++/*
++ rsbac_pr_debug(ds_auth, "Getting file/dir cap set list\n");
++*/
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = lookup_device(file.device);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_get_f_caplist(): invalid device %02u:%02u!\n",
++ RSBAC_MAJOR(file.device),
++ RSBAC_MINOR(file.device));
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ switch (cap_type) {
++ case ACT_real:
++ count = rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ device_p->handle,
++ &file.inode,
++ (void **)
++ caplist_p,
++ ttllist_p);
++ break;
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ case ACT_eff:
++ count = rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ device_p->eff_handle,
++ &file.inode,
++ (void **)
++ caplist_p,
++ ttllist_p);
++ break;
++ case ACT_fs:
++ count = rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ device_p->fs_handle,
++ &file.inode,
++ (void **)
++ caplist_p,
++ ttllist_p);
++ break;
++#endif
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ case ACT_group_real:
++ count = rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ device_p->group_handle,
++ &file.inode,
++ (void **)
++ caplist_p,
++ ttllist_p);
++ break;
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ case ACT_group_eff:
++ count = rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ device_p->group_eff_handle,
++ &file.inode,
++ (void **)
++ caplist_p,
++ ttllist_p);
++ break;
++ case ACT_group_fs:
++ count = rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ device_p->group_fs_handle,
++ &file.inode,
++ (void **)
++ caplist_p,
++ ttllist_p);
++ break;
++#endif
++#endif /* AUTH_GROUP */
++
++ default:
++ count = -RSBAC_EINVALIDTARGET;
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return count;
++}
++
++int rsbac_auth_get_p_caplist(rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t **caplist_p,
++ rsbac_time_t ** ttllist_p)
++{
++ long count;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_get_p_caplist(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_auth_get_p_caplist(): called from interrupt!\n");
++ }
++/*
++ rsbac_pr_debug(ds_auth, "Getting process cap set list\n");
++*/
++ switch (cap_type) {
++ case ACT_real:
++ count = rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ process_handle,
++ &pid,
++ (void **)
++ caplist_p,
++ ttllist_p);
++ break;
++#ifdef CONFIG_RSBAC_AUTH_DAC_OWNER
++ case ACT_eff:
++ count = rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ process_eff_handle,
++ &pid,
++ (void **)
++ caplist_p,
++ ttllist_p);
++ break;
++ case ACT_fs:
++ count = rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ process_fs_handle,
++ &pid,
++ (void **)
++ caplist_p,
++ ttllist_p);
++ break;
++#endif
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ case ACT_group_real:
++ count = rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ process_group_handle,
++ &pid,
++ (void **)
++ caplist_p,
++ ttllist_p);
++ break;
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ case ACT_group_eff:
++ count = rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ process_group_eff_handle,
++ &pid,
++ (void **)
++ caplist_p,
++ ttllist_p);
++ break;
++ case ACT_group_fs:
++ count = rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ process_group_fs_handle,
++ &pid,
++ (void **)
++ caplist_p,
++ ttllist_p);
++ break;
++#endif
++#endif /* AUTH_GROUP */
++
++ default:
++ count = -RSBAC_EINVALIDTARGET;
++ }
++ return count;
++}
+diff --git a/rsbac/data_structures/gen_lists.c b/rsbac/data_structures/gen_lists.c
+new file mode 100644
+index 0000000..ee9acfd
+--- /dev/null
++++ b/rsbac/data_structures/gen_lists.c
+@@ -0,0 +1,13005 @@
++/************************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2011: */
++/* Amon Ott <ao@rsbac.org> */
++/* Generic lists for all parts */
++/* Last modified: 19/Dec/2011 */
++/************************************* */
++
++#include <linux/sched.h>
++#include <linux/module.h>
++#ifdef CONFIG_RSBAC_LIST_TRANS_RANDOM_TA
++#include <linux/random.h>
++#endif
++#include <linux/seq_file.h>
++#include <asm/uaccess.h>
++#ifndef CONFIG_RSBAC_NO_WRITE
++#include <linux/mount.h>
++#endif
++#include <linux/srcu.h>
++
++#include <rsbac/types.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++#include <rsbac/debug.h>
++#include <rsbac/adf.h>
++#include <rsbac/aci_data_structures.h>
++#include <rsbac/proc_fs.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/lists.h>
++#include <rsbac/gen_lists.h>
++
++/********************/
++/* Global Variables */
++/********************/
++
++static struct rsbac_list_reg_head_t reg_head;
++static struct rsbac_list_lol_reg_head_t lol_reg_head;
++static rsbac_boolean_t list_initialized = FALSE;
++static struct srcu_struct reg_list_srcu;
++static struct srcu_struct lol_reg_list_srcu;
++
++static struct kmem_cache * reg_item_slab = NULL;
++static struct kmem_cache * lol_reg_item_slab = NULL;
++
++static struct lock_class_key list_lock_class;
++
++static u_int rsbac_list_max_hashes = RSBAC_LIST_MIN_MAX_HASHES;
++static u_int rsbac_list_lol_max_hashes = RSBAC_LIST_MIN_MAX_HASHES;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++static struct rsbac_list_reg_item_t *ta_handle = NULL;
++static DEFINE_SPINLOCK(ta_lock);
++static rsbac_boolean_t ta_committing = FALSE;
++DECLARE_WAIT_QUEUE_HEAD(ta_wait);
++#ifndef CONFIG_RSBAC_LIST_TRANS_RANDOM_TA
++rsbac_list_ta_number_t ta_next = 1;
++#endif
++#endif
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++static int do_forget(rsbac_list_ta_number_t ta_number);
++#endif
++
++#ifdef CONFIG_RSBAC_AUTO_WRITE
++static rsbac_time_t next_rehash = 0;
++#endif
++
++static u_int rsbac_list_read_errors = 0;
++
++#ifdef CONFIG_RSBAC_LIST_STATS
++static __u64 rcu_free_calls = 0;
++static __u64 rcu_free_item_chain_calls = 0;
++static __u64 rcu_free_lol_calls = 0;
++static __u64 rcu_free_lol_sub_calls = 0;
++static __u64 rcu_free_lol_item_chain_calls = 0;
++static __u64 rcu_free_lol_subitem_chain_calls = 0;
++static __u64 rcu_free_do_cleanup_calls = 0;
++static __u64 rcu_free_do_cleanup_lol_calls = 0;
++static __u64 rcu_free_callback_calls = 0;
++static __u64 rcu_free_callback_lol_calls = 0;
++#endif
++
++/* Limit RCU callback calls to RCURATE per second, switch to sync when exceeded */
++#if CONFIG_RSBAC_RCU_RATE < 1
++#define RCURATE 1
++#else
++#if CONFIG_RSBAC_RCU_RATE > 100000
++#define RCURATE 100000
++#else
++#define RCURATE CONFIG_RSBAC_RCU_RATE
++#endif
++#endif
++u_int rsbac_list_rcu_rate = RCURATE;
++static u_int rcu_callback_count = 0;
++static struct timer_list rcu_rate_timer;
++
++static struct kmem_cache * rcu_free_item_slab = NULL;
++static struct kmem_cache * rcu_free_head_slab = NULL;
++static struct kmem_cache * rcu_free_head_lol_slab = NULL;
++
++/*********************************/
++/* Data Structures */
++/*********************************/
++
++/* RCU garbage collector */
++
++/* Call spinlocked */
++static inline struct rsbac_list_rcu_free_head_t *
++ get_rcu_free(struct rsbac_list_reg_item_t * list)
++{
++ if (list->rcu_free) {
++ struct rsbac_list_rcu_free_head_t * rcu_free;
++
++ rcu_free = list->rcu_free;
++ list->rcu_free = NULL;
++ return rcu_free;
++ } else
++ return NULL;
++}
++
++/* Call spinlocked */
++static inline struct rsbac_list_rcu_free_head_lol_t *
++ get_rcu_free_lol(struct rsbac_list_lol_reg_item_t * list)
++{
++ if (list->rcu_free) {
++ struct rsbac_list_rcu_free_head_lol_t * rcu_free;
++
++ rcu_free = list->rcu_free;
++ list->rcu_free = NULL;
++ return rcu_free;
++ } else
++ return NULL;
++}
++
++/* Call locked or unlocked */
++static struct rsbac_list_rcu_free_head_t *
++ create_rcu_free(struct rsbac_list_reg_item_t * list)
++{
++ /* Just to be sure */
++ if (!list)
++ return NULL;
++ /* Exists, all fine */
++ if (list->rcu_free)
++ return list->rcu_free;
++
++ list->rcu_free = rsbac_smalloc_clear(rcu_free_head_slab);
++ if (!list->rcu_free)
++ return NULL;
++ list->rcu_free->slab = list->slab;
++ return list->rcu_free;
++}
++
++static struct rsbac_list_rcu_free_head_lol_t *
++ create_rcu_free_lol(struct rsbac_list_lol_reg_item_t * list)
++{
++ /* Just to be sure */
++ if (!list)
++ return NULL;
++ /* Exists, all fine */
++ if (list->rcu_free)
++ return list->rcu_free;
++
++ list->rcu_free = rsbac_smalloc_clear(rcu_free_head_lol_slab);
++ if (!list->rcu_free)
++ return NULL;
++ list->rcu_free->slab = list->slab;
++ list->rcu_free->subslab = list->subslab;
++ return list->rcu_free;
++}
++
++/* Call spinlocked */
++static void rcu_free(struct rsbac_list_reg_item_t * list, void * mem)
++{
++ struct rsbac_list_rcu_free_item_t * rcu_item;
++
++ if (!create_rcu_free(list)) {
++		rsbac_printk(KERN_WARNING "rcu_free(): cannot allocate rcu_free_head for list %s, losing item %p!\n",
++ list->name, mem);
++ return;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ rcu_free_calls++;
++#endif
++#if 0
++ /* Sanity check for dupes - to be removed after test phase */
++ rcu_item = list->rcu_free->head;
++ while (rcu_item) {
++ if (rcu_item->mem == mem) {
++ BUG();
++ return;
++ }
++ rcu_item = rcu_item->next;
++ }
++#endif
++ rcu_item = rsbac_smalloc(rcu_free_item_slab);
++ if (rcu_item) {
++ rcu_item->mem = mem;
++ rcu_item->next = list->rcu_free->head;
++ list->rcu_free->head = rcu_item;
++ } else {
++		rsbac_printk(KERN_WARNING "rcu_free(): cannot allocate rcu_free for list %s, losing item %p!\n",
++ list->name, mem);
++ rcu_callback_count = rsbac_list_rcu_rate;
++ }
++}
++
++/* Call spinlocked */
++static void rcu_free_lol(struct rsbac_list_lol_reg_item_t * list, void * mem)
++{
++ struct rsbac_list_rcu_free_item_t * rcu_item;
++
++ if (!create_rcu_free_lol(list)) {
++		rsbac_printk(KERN_WARNING "rcu_free_lol(): cannot allocate rcu_free_head for list of lists %s, losing item %p!\n",
++ list->name, mem);
++ return;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ rcu_free_lol_calls++;
++#endif
++#if 0
++ /* Sanity check for dupes - to be removed after test phase */
++ rcu_item = list->rcu_free->head;
++ while (rcu_item) {
++ if (rcu_item->mem == mem) {
++ BUG();
++ return;
++ }
++ rcu_item = rcu_item->next;
++ }
++#endif
++ rcu_item = rsbac_smalloc(rcu_free_item_slab);
++ if (rcu_item) {
++ rcu_item->mem = mem;
++ rcu_item->next = list->rcu_free->head;
++ list->rcu_free->head = rcu_item;
++ } else {
++		rsbac_printk(KERN_WARNING "rcu_free_lol(): cannot allocate rcu_free for list of lists %s, losing item %p!\n",
++ list->name, mem);
++ rcu_callback_count = rsbac_list_rcu_rate;
++ }
++}
++
++static void rcu_free_lol_sub(struct rsbac_list_lol_reg_item_t * list, void * mem)
++{
++ struct rsbac_list_rcu_free_item_t * rcu_item;
++
++ if (!create_rcu_free_lol(list)) {
++		rsbac_printk(KERN_WARNING "rcu_free_lol_sub(): cannot allocate rcu_free_head for list of lists %s, losing subitem %p!\n",
++ list->name, mem);
++ return;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ rcu_free_lol_calls++;
++#endif
++#if 0
++ /* Sanity check for dupes - to be removed after test phase */
++ rcu_item = list->rcu_free->subhead;
++ while (rcu_item) {
++ if (rcu_item->mem == mem) {
++ BUG();
++ return;
++ }
++ rcu_item = rcu_item->next;
++ }
++#endif
++ rcu_item = rsbac_smalloc(rcu_free_item_slab);
++ if (rcu_item) {
++ rcu_item->mem = mem;
++ rcu_item->next = list->rcu_free->subhead;
++ list->rcu_free->subhead = rcu_item;
++ } else {
++		rsbac_printk(KERN_WARNING "rcu_free_lol_sub(): cannot allocate rcu_free for list of lists %s, losing subitem %p!\n",
++ list->name, mem);
++ rcu_callback_count = rsbac_list_rcu_rate;
++ }
++}
++
++/* Call spinlocked */
++static void rcu_free_item_chain(struct rsbac_list_reg_item_t * list,
++ struct rsbac_list_item_t * item_chain)
++{
++ if (!item_chain)
++ return;
++ if (!create_rcu_free(list)) {
++		rsbac_printk(KERN_WARNING "rcu_free_item_chain(): cannot allocate rcu_free_head for list %s, losing chain %p!\n",
++ list->name, item_chain);
++ return;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ rcu_free_item_chain_calls++;
++#endif
++ if (!list->rcu_free->item_chain) {
++ list->rcu_free->item_chain = item_chain;
++ } else {
++ while (item_chain) {
++ rcu_free(list, item_chain);
++ item_chain = item_chain->next;
++ }
++ }
++}
++
++/* Call spinlocked */
++static void rcu_free_lol_subitem_chain(struct rsbac_list_lol_reg_item_t * list,
++ struct rsbac_list_item_t * subitem_chain)
++{
++ if (!subitem_chain)
++ return;
++ if (!create_rcu_free_lol(list)) {
++		rsbac_printk(KERN_WARNING "rcu_free_lol_subitem_chain(): cannot allocate rcu_free_head for list of lists %s, losing subchain %p!\n",
++ list->name, subitem_chain);
++ return;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ rcu_free_lol_subitem_chain_calls++;
++#endif
++ if (!list->rcu_free->lol_item_subchain) {
++ list->rcu_free->lol_item_subchain = subitem_chain;
++ } else {
++ while (subitem_chain) {
++ rcu_free_lol_sub(list, subitem_chain);
++ subitem_chain = subitem_chain->next;
++ }
++ }
++}
++
++/* Call spinlocked */
++static void rcu_free_lol_item_chain(struct rsbac_list_lol_reg_item_t * list,
++ struct rsbac_list_lol_item_t * lol_item_chain)
++{
++ if (!lol_item_chain)
++ return;
++ if (!create_rcu_free_lol(list)) {
++		rsbac_printk(KERN_WARNING "rcu_free_lol_item_chain(): cannot allocate rcu_free_head for list of lists %s, losing chain %p!\n",
++ list->name, lol_item_chain);
++ return;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ rcu_free_lol_item_chain_calls++;
++#endif
++ if (!list->rcu_free->lol_item_chain) {
++ list->rcu_free->lol_item_chain = lol_item_chain;
++ } else {
++ struct rsbac_list_item_t * sub_item;
++
++ while (lol_item_chain) {
++ sub_item = lol_item_chain->head;
++ while (sub_item) {
++ rcu_free_lol_sub(list, sub_item);
++ sub_item = sub_item->next;
++ }
++ rcu_free_lol(list, lol_item_chain);
++ lol_item_chain = lol_item_chain->next;
++ }
++ }
++}
++
++/* Call unlocked */
++static void rcu_free_do_cleanup(struct rsbac_list_rcu_free_head_t * rcu_head)
++{
++ struct rsbac_list_rcu_free_item_t * rcu_item;
++ struct rsbac_list_rcu_free_item_t * rcu_next_item;
++ struct rsbac_list_item_t * item_chain;
++ struct rsbac_list_item_t * item_chain_next;
++
++ if (!rcu_head)
++ return;
++#ifdef CONFIG_RSBAC_LIST_STATS
++ rcu_free_do_cleanup_calls++;
++#endif
++ rcu_item = rcu_head->head;
++ if (rcu_head->slab) {
++ while (rcu_item) {
++ rsbac_sfree(rcu_head->slab, rcu_item->mem);
++ rcu_next_item = rcu_item->next;
++ rsbac_sfree(rcu_free_item_slab, rcu_item);
++ rcu_item = rcu_next_item;
++ }
++ item_chain = rcu_head->item_chain;
++ while (item_chain) {
++ item_chain_next = item_chain->next;
++ rsbac_sfree(rcu_head->slab, item_chain);
++ item_chain = item_chain_next;
++ }
++ } else {
++ while (rcu_item) {
++ rsbac_kfree(rcu_item->mem);
++ rcu_next_item = rcu_item->next;
++ rsbac_sfree(rcu_free_item_slab, rcu_item);
++ rcu_item = rcu_next_item;
++ }
++ item_chain = rcu_head->item_chain;
++ while (item_chain) {
++ item_chain_next = item_chain->next;
++ rsbac_kfree(item_chain);
++ item_chain = item_chain_next;
++ }
++ }
++ rsbac_sfree(rcu_free_head_slab, rcu_head);
++}
++
++static void rcu_free_do_cleanup_lol(struct rsbac_list_rcu_free_head_lol_t * rcu_head)
++{
++ struct rsbac_list_rcu_free_item_t * rcu_item;
++ struct rsbac_list_rcu_free_item_t * rcu_next_item;
++ struct rsbac_list_lol_item_t * lol_item_chain;
++ struct rsbac_list_lol_item_t * lol_item_chain_next;
++ struct rsbac_list_item_t * lol_item_subchain;
++ struct rsbac_list_item_t * lol_item_subchain_next;
++
++ if (!rcu_head)
++ return;
++#ifdef CONFIG_RSBAC_LIST_STATS
++ rcu_free_do_cleanup_lol_calls++;
++#endif
++ if (rcu_head->slab) {
++ rcu_item = rcu_head->head;
++ while (rcu_item) {
++ rsbac_sfree(rcu_head->slab, rcu_item->mem);
++ rcu_next_item = rcu_item->next;
++ rsbac_sfree(rcu_free_item_slab, rcu_item);
++ rcu_item = rcu_next_item;
++ }
++ rcu_item = rcu_head->subhead;
++ while (rcu_item) {
++ rsbac_sfree(rcu_head->subslab, rcu_item->mem);
++ rcu_next_item = rcu_item->next;
++ rsbac_sfree(rcu_free_item_slab, rcu_item);
++ rcu_item = rcu_next_item;
++ }
++ lol_item_chain = rcu_head->lol_item_chain;
++ while (lol_item_chain) {
++ lol_item_subchain = lol_item_chain->head;
++ while (lol_item_subchain) {
++ lol_item_subchain_next = lol_item_subchain->next;
++ rsbac_sfree(rcu_head->subslab, lol_item_subchain);
++ lol_item_subchain = lol_item_subchain_next;
++ }
++ lol_item_chain_next = lol_item_chain->next;
++ rsbac_sfree(rcu_head->slab, lol_item_chain);
++ lol_item_chain = lol_item_chain_next;
++ }
++ lol_item_subchain = rcu_head->lol_item_subchain;
++ while (lol_item_subchain) {
++ lol_item_subchain_next = lol_item_subchain->next;
++ rsbac_sfree(rcu_head->subslab, lol_item_subchain);
++ lol_item_subchain = lol_item_subchain_next;
++ }
++ } else {
++ rcu_item = rcu_head->head;
++ while (rcu_item) {
++ rsbac_kfree(rcu_item->mem);
++ rcu_next_item = rcu_item->next;
++ rsbac_sfree(rcu_free_item_slab, rcu_item);
++ rcu_item = rcu_next_item;
++ }
++ lol_item_chain = rcu_head->lol_item_chain;
++ while (lol_item_chain) {
++ lol_item_subchain = lol_item_chain->head;
++ while (lol_item_subchain) {
++ lol_item_subchain_next = lol_item_subchain->next;
++ rsbac_kfree(lol_item_subchain);
++ lol_item_subchain = lol_item_subchain_next;
++ }
++ lol_item_chain_next = lol_item_chain->next;
++ rsbac_kfree(lol_item_chain);
++ lol_item_chain = lol_item_chain_next;
++ }
++ lol_item_subchain = rcu_head->lol_item_subchain;
++ while (lol_item_subchain) {
++ lol_item_subchain_next = lol_item_subchain->next;
++ rsbac_kfree(lol_item_subchain);
++ lol_item_subchain = lol_item_subchain_next;
++ }
++ }
++ rsbac_sfree(rcu_free_head_lol_slab, rcu_head);
++}
++
++/* RCU callback, do not call directly. Called unlocked by RCU. */
++static void rcu_free_callback(struct rcu_head *rp)
++{
++ if (!rp)
++ return;
++#ifdef CONFIG_RSBAC_LIST_STATS
++ rcu_free_callback_calls++;
++#endif
++ rcu_free_do_cleanup((struct rsbac_list_rcu_free_head_t *) rp);
++}
++
++static void rcu_free_callback_lol(struct rcu_head *rp)
++{
++ if (!rp)
++ return;
++#ifdef CONFIG_RSBAC_LIST_STATS
++ rcu_free_callback_lol_calls++;
++#endif
++ rcu_free_do_cleanup_lol((struct rsbac_list_rcu_free_head_lol_t *) rp);
++}
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++/* Call unlocked */
++static void do_call_rcu(struct rsbac_list_rcu_free_head_t * rcu_head)
++{
++ if (rcu_head) {
++ rcu_callback_count++;
++ call_rcu(&rcu_head->rcu, rcu_free_callback);
++ }
++}
++static void do_call_rcu_lol(struct rsbac_list_rcu_free_head_lol_t * rcu_head)
++{
++ if (rcu_head) {
++ rcu_callback_count++;
++ call_rcu(&rcu_head->rcu, rcu_free_callback_lol);
++ }
++}
++#endif
++
++/* Call unlocked */
++static void do_sync_rcu(struct rsbac_list_rcu_free_head_t * rcu_head)
++{
++ if (rcu_head) {
++ rcu_callback_count++;
++ if (rcu_callback_count < rsbac_list_rcu_rate)
++ call_rcu(&rcu_head->rcu, rcu_free_callback);
++ else {
++ synchronize_rcu();
++ rcu_free_do_cleanup(rcu_head);
++ }
++ }
++}
++
++static void do_sync_rcu_lol(struct rsbac_list_rcu_free_head_lol_t * rcu_head)
++{
++ if (rcu_head) {
++ rcu_callback_count++;
++ if (rcu_callback_count < rsbac_list_rcu_rate)
++ call_rcu(&rcu_head->rcu, rcu_free_callback_lol);
++ else {
++ synchronize_rcu();
++ rcu_free_do_cleanup_lol(rcu_head);
++ }
++ }
++}
++
++/* List handling */
++/* Call RCU locked */
++static inline struct rsbac_list_item_t *lookup_item_compare(
++ struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_hashed_t * hashed,
++ u_int hash,
++ void *desc)
++{
++ struct rsbac_list_item_t *curr;
++ int compres;
++
++ curr = rcu_dereference(hashed[hash].curr);
++ if (!curr) {
++ curr = rcu_dereference(hashed[hash].head);
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = list->compare(desc, &curr[1]);
++ if (compres) {
++ if (compres > 0) {
++ curr = rcu_dereference(curr->next);
++ while (curr && (list->compare(desc, &curr[1]) > 0)
++ )
++ curr = rcu_dereference(curr->next);
++ } else {
++ curr = rcu_dereference(curr->prev);
++ while (curr && (list->compare(desc, &curr[1]) < 0)
++ )
++ curr = rcu_dereference(curr->prev);
++ }
++ if (curr) {
++ if (!list->compare(desc, &curr[1]))
++ return curr;
++ }
++ /* NULL or not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++/* Call RCU locked */
++static inline struct rsbac_list_item_t *lookup_item_memcmp(
++ struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_hashed_t * hashed,
++ u_int hash,
++ void *desc)
++{
++ struct rsbac_list_item_t *curr;
++ int compres;
++
++ curr = rcu_dereference(hashed[hash].curr);
++ if (!curr) {
++ curr = rcu_dereference(hashed[hash].head);
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = memcmp(desc, &curr[1], list->info.desc_size);
++ if (compres) {
++ if (compres > 0) {
++ curr = rcu_dereference(curr->next);
++ while (curr
++ && (memcmp(desc,
++ &curr[1],
++ list->info.desc_size) > 0)
++ )
++ curr = rcu_dereference(curr->next);
++ } else {
++ curr = rcu_dereference(curr->prev);
++ while (curr
++ && (memcmp(desc,
++ &curr[1],
++ list->info.desc_size) < 0)
++ )
++ curr = rcu_dereference(curr->prev);
++ }
++ if (curr) {
++ if (!memcmp(desc, &curr[1], list->info.desc_size))
++ return curr;
++ }
++ /* not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++/* Call RCU locked */
++static struct rsbac_list_item_t *lookup_item(
++ struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_hashed_t * hashed,
++ u_int hash,
++ void *desc)
++{
++ if (!list || !desc || !hashed)
++ return NULL;
++
++ if (list->compare)
++ return lookup_item_compare(list, hashed, hash, desc);
++ else
++ return lookup_item_memcmp(list, hashed, hash, desc);
++}
++
++static inline struct rsbac_list_item_t *lookup_item_compare_locked(
++ struct rsbac_list_reg_item_t *list, void *desc)
++{
++ struct rsbac_list_item_t *curr;
++ u_int hash = 0;
++ int compres;
++
++ if (!list || !desc || !list->compare)
++ return NULL;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ curr = list->hashed[hash].curr;
++ if (!curr) {
++ curr = list->hashed[hash].head;
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = list->compare(desc, &curr[1]);
++ if (compres) {
++ if (compres > 0) {
++ curr = curr->next;
++ while (curr && (list->compare(desc, &curr[1]) > 0)
++ )
++ curr = curr->next;
++ } else {
++ curr = curr->prev;
++ while (curr && (list->compare(desc, &curr[1]) < 0)
++ )
++ curr = curr->prev;
++ }
++ if (curr) {
++ rcu_assign_pointer(list->hashed[hash].curr, curr);
++ if (!list->compare(desc, &curr[1]))
++ return curr;
++ }
++ /* NULL or not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++static inline struct rsbac_list_item_t *lookup_item_memcmp_locked(
++ struct rsbac_list_reg_item_t *list, void *desc)
++{
++ struct rsbac_list_item_t *curr;
++ u_int hash = 0;
++ int compres;
++
++ if (!list || !desc)
++ return NULL;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ curr = list->hashed[hash].curr;
++ if (!curr) {
++ curr = list->hashed[hash].head;
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = memcmp(desc, &curr[1], list->info.desc_size);
++ if (compres) {
++ if (compres > 0) {
++ curr = curr->next;
++ while (curr
++ && (memcmp(desc,
++ &curr[1],
++ list->info.desc_size) > 0)
++ )
++ curr = curr->next;
++ } else {
++ curr = curr->prev;
++ while (curr
++ && (memcmp(desc,
++ &curr[1],
++ list->info.desc_size) < 0)
++ )
++ curr = curr->prev;
++ }
++ if (curr) {
++ rcu_assign_pointer(list->hashed[hash].curr, curr);
++ if (!memcmp(desc, &curr[1], list->info.desc_size))
++ return curr;
++ }
++ /* not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++static struct rsbac_list_item_t *lookup_item_locked(struct rsbac_list_reg_item_t
++ *list, void *desc)
++{
++ if (!list || !desc)
++ return NULL;
++
++ if (list->compare)
++ return lookup_item_compare_locked(list, desc);
++ else
++ return lookup_item_memcmp_locked(list, desc);
++}
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
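++/* Transaction-aware lookups: if the bucket has been copied for this
++ * transaction (ta_copied == ta_number), search the ta_head/ta_curr shadow
++ * chain; if it is not copied at all, fall back to the regular lookup; if it
++ * is copied for a different transaction, report the item as not found. */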
++/* Call RCU locked */
++static inline struct rsbac_list_item_t *ta_lookup_item_compare(
++ struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_hashed_t * hashed,
++ u_int hash,
++ void *desc)
++{
++ struct rsbac_list_item_t *curr;
++ int compres;
++
++ curr = rcu_dereference(hashed[hash].ta_curr);
++ if (!curr) {
++ curr = rcu_dereference(hashed[hash].ta_head);
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = list->compare(desc, &curr[1]);
++ if (compres) {
++ if (compres > 0) {
++ curr = rcu_dereference(curr->next);
++ while (curr && (list->compare(desc, &curr[1]) > 0)
++ )
++ curr = rcu_dereference(curr->next);
++ } else {
++ curr = rcu_dereference(curr->prev);
++ while (curr && (list->compare(desc, &curr[1]) < 0)
++ )
++ curr = rcu_dereference(curr->prev);
++ }
++ if (curr) {
++ if (!list->compare(desc, &curr[1]))
++ return curr;
++ }
++ /* NULL or not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++/* Call RCU locked */
++static inline struct rsbac_list_item_t *ta_lookup_item_memcmp(
++ struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_hashed_t * hashed,
++ u_int hash,
++ void *desc)
++{
++ struct rsbac_list_item_t *curr;
++ int compres;
++
++ curr = rcu_dereference(hashed[hash].ta_curr);
++ if (!curr) {
++ curr = rcu_dereference(hashed[hash].ta_head);
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = memcmp(desc, &curr[1], list->info.desc_size);
++ if (compres) {
++ if (compres > 0) {
++ curr = rcu_dereference(curr->next);
++ while (curr
++ && (memcmp(desc,
++ &curr[1],
++ list->info.desc_size) > 0)
++ )
++ curr = rcu_dereference(curr->next);
++ } else {
++ curr = rcu_dereference(curr->prev);
++ while (curr
++ && (memcmp(desc,
++ &curr[1],
++ list->info.desc_size) < 0)
++ )
++ curr = rcu_dereference(curr->prev);
++ }
++ if (curr) {
++ if (!memcmp(desc, &curr[1], list->info.desc_size))
++ return curr;
++ }
++ /* not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++/* Call RCU locked */
++static struct rsbac_list_item_t *ta_lookup_item(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_hashed_t * hashed,
++ u_int hash,
++ void *desc)
++{
++ if (!list || !desc)
++ return NULL;
++
++ if (!hashed[hash].ta_copied)
++ return lookup_item(list, hashed, hash, desc);
++ if (hashed[hash].ta_copied != ta_number)
++ return NULL;
++
++ if (list->compare)
++ return ta_lookup_item_compare(list, hashed, hash, desc);
++ else
++ return ta_lookup_item_memcmp(list, hashed, hash, desc);
++}
++
++static inline struct rsbac_list_item_t *ta_lookup_item_compare_locked(
++ struct rsbac_list_reg_item_t *list, void *desc)
++{
++ struct rsbac_list_item_t *curr;
++ u_int hash = 0;
++ int compres;
++
++ if (!list || !desc || !list->compare)
++ return NULL;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ curr = list->hashed[hash].ta_curr;
++ if (!curr) {
++ curr = list->hashed[hash].ta_head;
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = list->compare(desc, &curr[1]);
++ if (compres) {
++ if (compres > 0) {
++ curr = curr->next;
++ while (curr && (list->compare(desc, &curr[1]) > 0)
++ )
++ curr = curr->next;
++ } else {
++ curr = curr->prev;
++ while (curr && (list->compare(desc, &curr[1]) < 0)
++ )
++ curr = curr->prev;
++ }
++ if (curr) {
++ rcu_assign_pointer(list->hashed[hash].ta_curr, curr);
++ if (!list->compare(desc, &curr[1]))
++ return curr;
++ }
++ /* NULL or not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++static inline struct rsbac_list_item_t *ta_lookup_item_memcmp_locked(
++ struct rsbac_list_reg_item_t *list, void *desc)
++{
++ struct rsbac_list_item_t *curr;
++ u_int hash = 0;
++ int compres;
++
++ if (!list || !desc)
++ return NULL;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ curr = list->hashed[hash].ta_curr;
++ if (!curr) {
++ curr = list->hashed[hash].ta_head;
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = memcmp(desc, &curr[1], list->info.desc_size);
++ if (compres) {
++ if (compres > 0) {
++ curr = curr->next;
++ while (curr
++ && (memcmp(desc,
++ &curr[1],
++ list->info.desc_size) > 0)
++ )
++ curr = curr->next;
++ } else {
++ curr = curr->prev;
++ while (curr
++ && (memcmp(desc,
++ &curr[1],
++ list->info.desc_size) < 0)
++ )
++ curr = curr->prev;
++ }
++ if (curr) {
++ rcu_assign_pointer(list->hashed[hash].ta_curr, curr);
++ if (!memcmp(desc, &curr[1], list->info.desc_size))
++ return curr;
++ }
++ /* not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++static struct rsbac_list_item_t *ta_lookup_item_locked(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_reg_item_t *list, void *desc)
++{
++ u_int hash = 0;
++
++ if (!list || !desc)
++ return NULL;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ if (!list->hashed[hash].ta_copied)
++ return lookup_item_locked(list, desc);
++ if (list->hashed[hash].ta_copied != ta_number)
++ return NULL;
++
++ if (list->compare)
++ return ta_lookup_item_compare_locked(list, desc);
++ else
++ return ta_lookup_item_memcmp_locked(list, desc);
++}
++#endif
++
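++/* Data-based lookups: scan all hash buckets linearly, skip items whose
++ * max_age has expired and match on the data stored behind the descriptor,
++ * either via the supplied data compare function or with memcmp(). The
++ * *_selector variants additionally filter descriptors through a
++ * caller-supplied selector function. */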
++/* Call RCU locked */
++static inline struct rsbac_list_item_t *lookup_item_data_compare(
++ struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_data_compare_function_t compare)
++{
++ struct rsbac_list_item_t *curr;
++ int i;
++
++ for(i=0; i<nr_hashes; i++) {
++ curr = rcu_dereference(hashed[i].head);
++
++ /* note: item desc is behind official struct */
++ while (curr
++ && ((curr->max_age && (curr->max_age <= RSBAC_CURRENT_TIME))
++ || compare((char *) curr + sizeof(*curr) +
++ list->info.desc_size, data)
++ )
++ )
++ curr = rcu_dereference(curr->next);
++ if(curr)
++ return curr;
++ }
++ return NULL;
++}
++
++/* Call RCU locked */
++static inline struct rsbac_list_item_t *lookup_item_data_memcmp(
++ struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data)
++{
++ struct rsbac_list_item_t *curr;
++ int i;
++
++ for(i=0; i<nr_hashes; i++) {
++ curr = rcu_dereference(hashed[i].head);
++
++ /* note: item desc is behind official struct */
++ while (curr
++ && ((curr->max_age && (curr->max_age <= RSBAC_CURRENT_TIME))
++ || memcmp(data,
++ (char *) curr + sizeof(*curr) + list->info.desc_size,
++ list->info.data_size)
++ )
++ )
++ curr = rcu_dereference(curr->next);
++ if(curr)
++ return curr;
++ }
++ return NULL;
++}
++
++/* Call RCU locked */
++static struct rsbac_list_item_t *lookup_item_data(
++ struct rsbac_list_reg_item_t * list,
++ struct rsbac_list_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_data_compare_function_t compare)
++{
++ if (!list || !data || !hashed)
++ return NULL;
++
++ if (compare)
++ return lookup_item_data_compare(list, hashed, nr_hashes, data, compare);
++ else
++ return lookup_item_data_memcmp(list, hashed, nr_hashes, data);
++}
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++/* Call RCU locked */
++static inline struct rsbac_list_item_t *ta_lookup_item_data_compare(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_reg_item_t * list,
++ struct rsbac_list_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_data_compare_function_t compare)
++{
++ struct rsbac_list_item_t *curr;
++ int i;
++
++ for(i=0; i<nr_hashes; i++) {
++ if (!hashed[i].ta_copied || hashed[i].ta_copied != ta_number)
++ curr = rcu_dereference(hashed[i].head);
++ else
++ curr = rcu_dereference(hashed[i].ta_head);
++
++ /* note: item desc is behind official struct */
++ while (curr
++ && ((curr->max_age && (curr->max_age <= RSBAC_CURRENT_TIME))
++ || compare((char *) curr + sizeof(*curr) +
++ list->info.desc_size, data)
++ )
++ )
++ curr = rcu_dereference(curr->next);
++ if(curr)
++ return curr;
++ }
++ return NULL;
++}
++
++/* Call RCU locked */
++static inline struct rsbac_list_item_t *ta_lookup_item_data_memcmp(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data)
++{
++ struct rsbac_list_item_t *curr;
++ int i;
++
++ for(i=0; i<nr_hashes; i++) {
++ if (!hashed[i].ta_copied || hashed[i].ta_copied != ta_number)
++ curr = rcu_dereference(hashed[i].head);
++ else
++ curr = rcu_dereference(hashed[i].ta_head);
++
++ /* note: item desc is behind official struct */
++ while (curr
++ && ((curr->max_age && (curr->max_age <= RSBAC_CURRENT_TIME))
++ || memcmp(data,
++ (char *) curr + sizeof(*curr) + list->info.desc_size,
++ list->info.data_size)
++ )
++ )
++ curr = rcu_dereference(curr->next);
++ if(curr)
++ return curr;
++ }
++ return NULL;
++}
++
++/* Call RCU locked */
++static struct rsbac_list_item_t *ta_lookup_item_data(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_data_compare_function_t compare)
++{
++ if (!list || !data || !hashed)
++ return NULL;
++
++ if(!ta_number)
++ return lookup_item_data(list, hashed, nr_hashes, data, compare);
++ if (compare)
++ return ta_lookup_item_data_compare(ta_number, list, hashed, nr_hashes, data, compare);
++ else
++ return ta_lookup_item_data_memcmp(ta_number, list, hashed, nr_hashes, data);
++}
++#endif
++
++/* Call RCU locked */
++static inline struct rsbac_list_item_t *lookup_item_data_compare_selector(
++ struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_data_compare_function_t compare,
++ rsbac_list_desc_selector_function_t selector,
++ void * param)
++{
++ struct rsbac_list_item_t *curr;
++ int i;
++
++ for(i=0; i<nr_hashes; i++) {
++ curr = rcu_dereference(hashed[i].head);
++
++ /* note: item desc is behind official struct */
++ while (curr
++ && ((curr->max_age && (curr->max_age <= RSBAC_CURRENT_TIME))
++ || compare((char *) curr + sizeof(*curr) +
++ list->info.desc_size, data)
++ || !selector((char *) curr + sizeof(*curr), param)
++ )
++ )
++ curr = rcu_dereference(curr->next);
++ if(curr)
++ return curr;
++ }
++ return NULL;
++}
++
++/* Call RCU locked */
++static inline struct rsbac_list_item_t *lookup_item_data_memcmp_selector(
++ struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_desc_selector_function_t selector,
++ void * param)
++{
++ struct rsbac_list_item_t *curr;
++ int i;
++
++ for(i=0; i<nr_hashes; i++) {
++ curr = rcu_dereference(hashed[i].head);
++
++ /* note: item desc is behind official struct */
++ while (curr
++ && ((curr->max_age && (curr->max_age <= RSBAC_CURRENT_TIME))
++ || memcmp(data,
++ (char *) curr + sizeof(*curr) + list->info.desc_size,
++ list->info.data_size)
++ || !selector((char *) curr + sizeof(*curr), param)
++ )
++ )
++ curr = rcu_dereference(curr->next);
++ if(curr)
++ return curr;
++ }
++ return NULL;
++}
++
++/* Call RCU locked */
++static struct rsbac_list_item_t *lookup_item_data_selector(
++ struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_data_compare_function_t compare,
++ rsbac_list_desc_selector_function_t selector,
++ void * param)
++{
++ if (!list || !data || !hashed)
++ return NULL;
++
++ if (compare)
++ return lookup_item_data_compare_selector(list,
++ hashed, nr_hashes,
++ data, compare,
++ selector,
++ param);
++ else
++ return lookup_item_data_memcmp_selector(list,
++ hashed, nr_hashes,
++ data,
++ selector,
++ param);
++}
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++/* Call RCU locked */
++static inline struct rsbac_list_item_t *ta_lookup_item_data_compare_selector(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_data_compare_function_t compare,
++ rsbac_list_desc_selector_function_t selector,
++ void * param)
++{
++ struct rsbac_list_item_t *curr;
++ int i;
++
++ for(i=0; i<nr_hashes; i++) {
++ if (!hashed[i].ta_copied || hashed[i].ta_copied != ta_number)
++ curr = rcu_dereference(hashed[i].head);
++ else
++ curr = rcu_dereference(hashed[i].ta_head);
++
++ /* note: item desc is behind official struct */
++ while (curr
++ && ((curr->max_age && (curr->max_age <= RSBAC_CURRENT_TIME))
++ || compare((char *) curr + sizeof(*curr) +
++ list->info.desc_size, data)
++ || !selector((char *) curr + sizeof(*curr), param)
++ )
++ )
++ curr = rcu_dereference(curr->next);
++ if(curr)
++ return curr;
++ }
++ return NULL;
++}
++
++/* Call RCU locked */
++static inline struct rsbac_list_item_t *ta_lookup_item_data_memcmp_selector(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_desc_selector_function_t selector,
++ void * param)
++{
++ struct rsbac_list_item_t *curr;
++ int i;
++
++ for(i=0; i<nr_hashes; i++) {
++ if (!hashed[i].ta_copied || hashed[i].ta_copied != ta_number)
++ curr = rcu_dereference(hashed[i].head);
++ else
++ curr = rcu_dereference(hashed[i].ta_head);
++
++ /* note: item desc is behind official struct */
++ while (curr
++ && ((curr->max_age && (curr->max_age <= RSBAC_CURRENT_TIME))
++ || memcmp(data,
++ (char *) curr + sizeof(*curr) + list->info.desc_size,
++ list->info.data_size)
++ || !selector((char *) curr + sizeof(*curr), param)
++ )
++ )
++ curr = rcu_dereference(curr->next);
++ if(curr)
++ return curr;
++ }
++ return NULL;
++}
++
++/* Call RCU locked */
++static struct rsbac_list_item_t *ta_lookup_item_data_selector(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_data_compare_function_t compare,
++ rsbac_list_desc_selector_function_t selector,
++ void * param)
++{
++ if (!list || !data || !hashed)
++ return NULL;
++
++ if(!ta_number)
++ return lookup_item_data_selector(
++ list, hashed, nr_hashes,
++ data, compare,
++ selector, param);
++ if (compare)
++ return ta_lookup_item_data_compare_selector(
++ ta_number, list,
++ hashed, nr_hashes,
++ data, compare,
++ selector, param);
++ else
++ return ta_lookup_item_data_memcmp_selector(
++ ta_number, list,
++ hashed, nr_hashes,
++ data,
++ selector, param);
++}
++#endif
++
++/* list of lists - subitems */
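++/* Sublist lookups mirror the item lookups above, but walk the sorted
++ * sublist attached to a single list-of-lists item (sublist->head /
++ * sublist->curr) and compare against the subitem descriptor. */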
++
++/* Call RCU locked */
++static inline struct rsbac_list_item_t *lookup_lol_subitem_compare(
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_item_t *sublist,
++ void *subdesc,
++ rsbac_list_compare_function_t compare)
++{
++ struct rsbac_list_item_t *curr;
++ int compres;
++
++ if (!list || !sublist || !subdesc || !compare)
++ return NULL;
++
++ curr = rcu_dereference(sublist->curr);
++ if (!curr) {
++ curr = rcu_dereference(sublist->head);
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = compare(&curr[1], subdesc);
++ if (compres) {
++ if (compres < 0) {
++ curr = rcu_dereference(curr->next);
++ while (curr && (compare(&curr[1], subdesc) < 0)
++ )
++ curr = rcu_dereference(curr->next);
++ } else {
++ curr = rcu_dereference(curr->prev);
++ while (curr && (compare(&curr[1], subdesc) > 0)
++ )
++ curr = rcu_dereference(curr->prev);
++ }
++ if (curr) {
++ if (!compare(&curr[1], subdesc))
++ return curr;
++ }
++ /* not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++/* Call RCU locked */
++static inline struct rsbac_list_item_t *lookup_lol_subitem_memcmp(
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_item_t *sublist,
++ void *subdesc)
++{
++ struct rsbac_list_item_t *curr;
++ int compres;
++
++ if (!list || !sublist || !subdesc)
++ return NULL;
++
++ curr = rcu_dereference(sublist->curr);
++ if (!curr) {
++ curr = rcu_dereference(sublist->head);
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = memcmp(subdesc, &curr[1], list->info.subdesc_size);
++ if (compres) {
++ if (compres > 0) {
++ curr = rcu_dereference(curr->next);
++ while (curr
++ && (memcmp(subdesc,
++ &curr[1],
++ list->info.subdesc_size) > 0)
++ )
++ curr = rcu_dereference(curr->next);
++ } else {
++ curr = rcu_dereference(curr->prev);
++ while (curr
++ && (memcmp(subdesc,
++ &curr[1],
++ list->info.subdesc_size) < 0)
++ )
++ curr = rcu_dereference(curr->prev);
++ }
++ if (curr) {
++ if (!memcmp(subdesc,
++ &curr[1], list->info.subdesc_size))
++ return curr;
++ }
++ /* not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++/* Call RCU locked */
++static struct rsbac_list_item_t *lookup_lol_subitem(
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_item_t *sublist,
++ void *subdesc)
++{
++ if (!list || !sublist || !subdesc)
++ return NULL;
++
++ if (list->subcompare)
++ return lookup_lol_subitem_compare(list, sublist, subdesc,
++ list->subcompare);
++ else
++ return lookup_lol_subitem_memcmp(list, sublist, subdesc);
++}
++
++static inline struct rsbac_list_item_t *lookup_lol_subitem_compare_locked(
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_item_t *sublist,
++ void *subdesc,
++ rsbac_list_compare_function_t compare)
++{
++ struct rsbac_list_item_t *curr;
++ int compres;
++
++ if (!list || !sublist || !subdesc || !compare)
++ return NULL;
++
++ curr = sublist->curr;
++ if (!curr) {
++ curr = sublist->head;
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = compare(&curr[1], subdesc);
++ if (compres) {
++ if (compres < 0) {
++ curr = curr->next;
++ while (curr && (compare(&curr[1], subdesc) < 0)
++ )
++ curr = curr->next;
++ } else {
++ curr = curr->prev;
++ while (curr && (compare(&curr[1], subdesc) > 0)
++ )
++ curr = curr->prev;
++ }
++ if (curr) {
++ rcu_assign_pointer(sublist->curr, curr);
++ if (!compare(&curr[1], subdesc))
++ return curr;
++ }
++ /* not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++static inline struct rsbac_list_item_t *lookup_lol_subitem_memcmp_locked(
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_item_t *sublist,
++ void *subdesc)
++{
++ struct rsbac_list_item_t *curr;
++ int compres;
++
++ if (!list || !sublist || !subdesc)
++ return NULL;
++
++ curr = sublist->curr;
++ if (!curr) {
++ curr = sublist->head;
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = memcmp(subdesc, &curr[1], list->info.subdesc_size);
++ if (compres) {
++ if (compres > 0) {
++ curr = curr->next;
++ while (curr
++ && (memcmp(subdesc,
++ &curr[1],
++ list->info.subdesc_size) > 0)
++ )
++ curr = curr->next;
++ } else {
++ curr = curr->prev;
++ while (curr
++ && (memcmp(subdesc,
++ &curr[1],
++ list->info.subdesc_size) < 0)
++ )
++ curr = curr->prev;
++ }
++ if (curr) {
++ rcu_assign_pointer(sublist->curr, curr);
++ if (!memcmp(subdesc,
++ &curr[1], list->info.subdesc_size))
++ return curr;
++ }
++ /* not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++static struct rsbac_list_item_t *lookup_lol_subitem_locked(
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_item_t *sublist,
++ void *subdesc)
++{
++ if (!list || !sublist || !subdesc)
++ return NULL;
++
++ if (list->subcompare)
++ return lookup_lol_subitem_compare_locked(list, sublist, subdesc,
++ list->subcompare);
++ else
++ return lookup_lol_subitem_memcmp_locked(list, sublist, subdesc);
++}
++
++/* Call RCU locked */
++static inline struct rsbac_list_item_t *lookup_lol_subitem_user_compare(
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_item_t *sublist,
++ void *subdesc,
++ rsbac_list_compare_function_t compare)
++{
++ struct rsbac_list_item_t *curr;
++
++ if (!list || !sublist || !subdesc || !compare)
++ return NULL;
++
++ curr = rcu_dereference(sublist->head);
++ /* note: item desc is behind official struct */
++ while (curr) {
++ if (!compare(&curr[1], subdesc))
++ return curr;
++ curr = rcu_dereference(curr->next);
++ }
++ return curr;
++}
++
++/* list of lists - items */
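++/* Top-level list-of-lists item lookups: the same sorted bucket walk as for
++ * plain lists, but returning rsbac_list_lol_item_t entries, each of which
++ * carries its own sublist. */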
++
++/* Call RCU locked */
++static inline struct rsbac_list_lol_item_t *lookup_lol_item_compare(
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_hashed_t * hashed,
++ u_int hash,
++ void *desc)
++{
++ struct rsbac_list_lol_item_t *curr;
++ int compres;
++
++ curr = rcu_dereference(hashed[hash].curr);
++ if (!curr) {
++ curr = rcu_dereference(hashed[hash].head);
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = list->compare(desc, &curr[1]);
++ if (compres) {
++ if (compres > 0) {
++ curr = rcu_dereference(curr->next);
++ while (curr && (list->compare(desc, &curr[1]) > 0)
++ )
++ curr = rcu_dereference(curr->next);
++ } else {
++ curr = rcu_dereference(curr->prev);
++ while (curr && (list->compare(desc, &curr[1]) < 0)
++ )
++ curr = rcu_dereference(curr->prev);
++ }
++ if (curr) {
++ if (!list->compare(desc, &curr[1]))
++ return curr;
++ }
++ /* not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++/* Call RCU locked */
++static inline struct rsbac_list_lol_item_t *lookup_lol_item_memcmp(
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_hashed_t * hashed,
++ u_int hash,
++ void *desc)
++{
++ struct rsbac_list_lol_item_t *curr;
++ int compres;
++
++ curr = rcu_dereference(hashed[hash].curr);
++ if (!curr) {
++ curr = rcu_dereference(hashed[hash].head);
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = memcmp(desc, &curr[1], list->info.desc_size);
++ if (compres) {
++ if (compres > 0) {
++ curr = rcu_dereference(curr->next);
++ while (curr
++ && (memcmp(desc,
++ &curr[1],
++ list->info.desc_size) > 0)
++ )
++ curr = rcu_dereference(curr->next);
++ } else {
++ curr = rcu_dereference(curr->prev);
++ while (curr
++ && (memcmp(desc,
++ &curr[1],
++ list->info.desc_size) < 0)
++ )
++ curr = rcu_dereference(curr->prev);
++ }
++ if (curr) {
++ if (!memcmp(desc, &curr[1], list->info.desc_size))
++ return curr;
++ }
++ /* not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++/* Call RCU locked */
++static struct rsbac_list_lol_item_t *lookup_lol_item(
++ struct rsbac_list_lol_reg_item_t * list,
++ struct rsbac_list_lol_hashed_t * hashed,
++ u_int hash,
++ void *desc)
++{
++ if (!list || !desc || !hashed)
++ return NULL;
++
++ if (list->compare)
++ return lookup_lol_item_compare(list, hashed, hash, desc);
++ else
++ return lookup_lol_item_memcmp(list, hashed, hash, desc);
++}
++
++static inline struct rsbac_list_lol_item_t *lookup_lol_item_compare_locked(
++ struct rsbac_list_lol_reg_item_t *list, void *desc)
++{
++ struct rsbac_list_lol_item_t *curr;
++ u_int hash = 0;
++ int compres;
++
++ if (!list || !desc || !list->compare)
++ return NULL;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ curr = list->hashed[hash].curr;
++ if (!curr) {
++ curr = list->hashed[hash].head;
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = list->compare(desc, &curr[1]);
++ if (compres) {
++ if (compres > 0) {
++ curr = curr->next;
++ while (curr && (list->compare(desc, &curr[1]) > 0)
++ )
++ curr = curr->next;
++ } else {
++ curr = curr->prev;
++ while (curr && (list->compare(desc, &curr[1]) < 0)
++ )
++ curr = curr->prev;
++ }
++ if (curr) {
++ rcu_assign_pointer(list->hashed[hash].curr, curr);
++ if (!list->compare(desc, &curr[1]))
++ return curr;
++ }
++ /* not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++static inline struct rsbac_list_lol_item_t *lookup_lol_item_memcmp_locked(
++ struct rsbac_list_lol_reg_item_t *list, void *desc)
++{
++ struct rsbac_list_lol_item_t *curr;
++ u_int hash = 0;
++ int compres;
++
++ if (!list || !desc)
++ return NULL;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ curr = list->hashed[hash].curr;
++ if (!curr) {
++ curr = list->hashed[hash].head;
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = memcmp(desc, &curr[1], list->info.desc_size);
++ if (compres) {
++ if (compres > 0) {
++ curr = curr->next;
++ while (curr
++ && (memcmp(desc,
++ &curr[1],
++ list->info.desc_size) > 0)
++ )
++ curr = curr->next;
++ } else {
++ curr = curr->prev;
++ while (curr
++ && (memcmp(desc,
++ &curr[1],
++ list->info.desc_size) < 0)
++ )
++ curr = curr->prev;
++ }
++ if (curr) {
++ rcu_assign_pointer(list->hashed[hash].curr, curr);
++ if (!memcmp(desc, &curr[1], list->info.desc_size))
++ return curr;
++ }
++ /* not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++static struct rsbac_list_lol_item_t *lookup_lol_item_locked(
++ struct rsbac_list_lol_reg_item_t *list, void *desc)
++{
++ if (!list || !desc)
++ return NULL;
++
++ if (list->compare)
++ return lookup_lol_item_compare_locked(list, desc);
++ else
++ return lookup_lol_item_memcmp_locked(list, desc);
++}
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++/* Call RCU locked */
++static inline struct rsbac_list_lol_item_t *ta_lookup_lol_item_compare(
++ struct rsbac_list_lol_reg_item_t * list,
++ struct rsbac_list_lol_hashed_t * hashed,
++ u_int hash,
++ void *desc)
++{
++ struct rsbac_list_lol_item_t *curr;
++ int compres;
++
++ curr = rcu_dereference(hashed[hash].ta_curr);
++ if (!curr) {
++ curr = rcu_dereference(hashed[hash].ta_head);
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = list->compare(desc, &curr[1]);
++ if (compres) {
++ if (compres > 0) {
++ curr = rcu_dereference(curr->next);
++ while (curr && (list->compare(desc, &curr[1]) > 0)
++ )
++ curr = rcu_dereference(curr->next);
++ } else {
++ curr = rcu_dereference(curr->prev);
++ while (curr && (list->compare(desc, &curr[1]) < 0)
++ )
++ curr = rcu_dereference(curr->prev);
++ }
++ if (curr) {
++ if (!list->compare(desc, &curr[1]))
++ return curr;
++ }
++ /* not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++/* Call RCU locked */
++static inline struct rsbac_list_lol_item_t *ta_lookup_lol_item_memcmp(
++ struct rsbac_list_lol_reg_item_t * list,
++ struct rsbac_list_lol_hashed_t * hashed,
++ u_int hash,
++ void *desc)
++{
++ struct rsbac_list_lol_item_t *curr;
++ int compres;
++
++ curr = rcu_dereference(hashed[hash].ta_curr);
++ if (!curr) {
++ curr = rcu_dereference(hashed[hash].ta_head);
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = memcmp(desc, &curr[1], list->info.desc_size);
++ if (compres) {
++ if (compres > 0) {
++ curr = rcu_dereference(curr->next);
++ while (curr
++ && (memcmp(desc,
++ &curr[1],
++ list->info.desc_size) > 0)
++ )
++ curr = rcu_dereference(curr->next);
++ } else {
++ curr = rcu_dereference(curr->prev);
++ while (curr
++ && (memcmp(desc,
++ &curr[1],
++ list->info.desc_size) < 0)
++ )
++ curr = rcu_dereference(curr->prev);
++ }
++ if (curr) {
++ if (!memcmp(desc, &curr[1], list->info.desc_size))
++ return curr;
++ }
++ /* not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++/* Call RCU locked */
++static struct rsbac_list_lol_item_t *ta_lookup_lol_item(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_hashed_t * hashed,
++ u_int hash,
++ void *desc)
++{
++ if (!list || !desc || !hashed)
++ return NULL;
++
++ if (!hashed[hash].ta_copied)
++ return lookup_lol_item(list, hashed, hash, desc);
++ if (hashed[hash].ta_copied != ta_number)
++ return NULL;
++
++ if (list->compare)
++ return ta_lookup_lol_item_compare(list, hashed, hash, desc);
++ else
++ return ta_lookup_lol_item_memcmp(list, hashed, hash, desc);
++}
++
++static inline struct rsbac_list_lol_item_t *ta_lookup_lol_item_compare_locked(
++ struct rsbac_list_lol_reg_item_t *list, void *desc)
++{
++ struct rsbac_list_lol_item_t *curr;
++ u_int hash = 0;
++ int compres;
++
++ if (!list || !desc || !list->compare)
++ return NULL;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ curr = list->hashed[hash].ta_curr;
++ if (!curr) {
++ curr = list->hashed[hash].ta_head;
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = list->compare(desc, &curr[1]);
++ if (compres) {
++ if (compres > 0) {
++ curr = curr->next;
++ while (curr && (list->compare(desc, &curr[1]) > 0)
++ )
++ curr = curr->next;
++ } else {
++ curr = curr->prev;
++ while (curr && (list->compare(desc, &curr[1]) < 0)
++ )
++ curr = curr->prev;
++ }
++ if (curr) {
++ rcu_assign_pointer(list->hashed[hash].ta_curr, curr);
++ if (!list->compare(desc, &curr[1]))
++ return curr;
++ }
++ /* not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++static inline struct rsbac_list_lol_item_t *ta_lookup_lol_item_memcmp_locked(
++ struct rsbac_list_lol_reg_item_t *list, void *desc)
++{
++ struct rsbac_list_lol_item_t *curr;
++ u_int hash = 0;
++ int compres;
++
++ if (!list || !desc)
++ return NULL;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ curr = list->hashed[hash].ta_curr;
++ if (!curr) {
++ curr = list->hashed[hash].ta_head;
++ if (!curr)
++ return NULL;
++ }
++ /* if current item is not the right one, search... */
++ /* note: item desc is behind official struct */
++ compres = memcmp(desc, &curr[1], list->info.desc_size);
++ if (compres) {
++ if (compres > 0) {
++ curr = curr->next;
++ while (curr
++ && (memcmp(desc,
++ &curr[1],
++ list->info.desc_size) > 0)
++ )
++ curr = curr->next;
++ } else {
++ curr = curr->prev;
++ while (curr
++ && (memcmp(desc,
++ &curr[1],
++ list->info.desc_size) < 0)
++ )
++ curr = curr->prev;
++ }
++ if (curr) {
++ rcu_assign_pointer(list->hashed[hash].ta_curr, curr);
++ if (!memcmp(desc, &curr[1], list->info.desc_size))
++ return curr;
++ }
++ /* not found */
++ return NULL;
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++static struct rsbac_list_lol_item_t
++ *ta_lookup_lol_item_locked(rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_lol_reg_item_t *list, void *desc)
++{
++ u_int hash = 0;
++
++ if (!list || !desc)
++ return NULL;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ if (!list->hashed[hash].ta_copied)
++ return lookup_lol_item_locked(list, desc);
++ if (list->hashed[hash].ta_copied != ta_number)
++ return NULL;
++
++ if (list->compare)
++ return ta_lookup_lol_item_compare_locked(list, desc);
++ else
++ return ta_lookup_lol_item_memcmp_locked(list, desc);
++}
++#endif
++
++/* Call RCU locked */
++static inline struct rsbac_list_lol_item_t *lookup_lol_item_data_compare(
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_data_compare_function_t compare)
++{
++ struct rsbac_list_lol_item_t *curr;
++ int i;
++
++ for(i=0; i<nr_hashes; i++) {
++ curr = rcu_dereference(hashed[i].head);
++
++ /* note: item desc is behind official struct */
++ while (curr
++ && ((curr->max_age && (curr->max_age <= RSBAC_CURRENT_TIME))
++ || compare((char *) curr + sizeof(*curr) +
++ list->info.desc_size, data)
++ )
++ )
++ curr = rcu_dereference(curr->next);
++ if(curr)
++ return curr;
++ }
++ return NULL;
++}
++
++/* Call RCU locked */
++static inline struct rsbac_list_lol_item_t *lookup_lol_item_data_memcmp(
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data)
++{
++ struct rsbac_list_lol_item_t *curr;
++ int i;
++
++ for(i=0; i<nr_hashes; i++) {
++ curr = rcu_dereference(hashed[i].head);
++
++ /* note: item desc is behind official struct */
++ while (curr
++ && ((curr->max_age && (curr->max_age <= RSBAC_CURRENT_TIME))
++ || memcmp(data,
++ (char *) curr + sizeof(*curr) + list->info.desc_size,
++ list->info.data_size)
++ )
++ )
++ curr = rcu_dereference(curr->next);
++ if(curr)
++ return curr;
++ }
++ return NULL;
++}
++
++/* Call RCU locked */
++static struct rsbac_list_lol_item_t *lookup_lol_item_data(
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_data_compare_function_t compare)
++{
++ if (!list || !data || !hashed)
++ return NULL;
++
++ if (compare)
++ return lookup_lol_item_data_compare(list, hashed, nr_hashes, data, compare);
++ else
++ return lookup_lol_item_data_memcmp(list, hashed, nr_hashes, data);
++}
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++/* Call RCU locked */
++static inline struct rsbac_list_lol_item_t *ta_lookup_lol_item_data_compare(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_data_compare_function_t compare)
++{
++ struct rsbac_list_lol_item_t *curr;
++ int i;
++
++ for(i=0; i<nr_hashes; i++) {
++ if (!hashed[i].ta_copied || hashed[i].ta_copied != ta_number)
++ curr = rcu_dereference(hashed[i].head);
++ else
++ curr = rcu_dereference(hashed[i].ta_head);
++
++ /* note: item desc is behind official struct */
++ while (curr
++ && ((curr->max_age && (curr->max_age <= RSBAC_CURRENT_TIME))
++ || compare((char *) curr + sizeof(*curr) +
++ list->info.desc_size, data)
++ )
++ )
++ curr = rcu_dereference(curr->next);
++ if(curr)
++ return curr;
++ }
++ return NULL;
++}
++
++/* Call RCU locked */
++static inline struct rsbac_list_lol_item_t *ta_lookup_lol_item_data_memcmp(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data)
++{
++ struct rsbac_list_lol_item_t *curr;
++ int i;
++
++ for(i=0; i<nr_hashes; i++) {
++ if (!hashed[i].ta_copied || hashed[i].ta_copied != ta_number)
++ curr = rcu_dereference(hashed[i].head);
++ else
++ curr = rcu_dereference(hashed[i].ta_head);
++
++ /* note: item desc is behind official struct */
++ while (curr
++ && ((curr->max_age && (curr->max_age <= RSBAC_CURRENT_TIME))
++ || memcmp(data,
++ (char *) curr + sizeof(*curr) + list->info.desc_size,
++ list->info.data_size)
++ )
++ )
++ curr = rcu_dereference(curr->next);
++ if(curr)
++ return curr;
++ }
++ return NULL;
++}
++
++/* Call RCU locked */
++static struct rsbac_list_lol_item_t *ta_lookup_lol_item_data(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_data_compare_function_t compare)
++{
++ if (!list || !data || !hashed)
++ return NULL;
++
++ if(!ta_number)
++ return lookup_lol_item_data(list, hashed, nr_hashes, data, compare);
++ if (compare)
++ return ta_lookup_lol_item_data_compare(ta_number, list,
++ hashed, nr_hashes,
++ data,
++ compare);
++ else
++ return ta_lookup_lol_item_data_memcmp(ta_number, list,
++ hashed, nr_hashes,
++ data);
++}
++#endif
++
++/* Call RCU locked */
++static inline struct rsbac_list_lol_item_t *lookup_lol_item_data_compare_selector(
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_data_compare_function_t compare,
++ rsbac_list_desc_selector_function_t selector,
++ void * param)
++{
++ struct rsbac_list_lol_item_t *curr;
++ int i;
++
++ for(i=0; i<nr_hashes; i++) {
++ curr = rcu_dereference(hashed[i].head);
++
++ /* note: item desc is behind official struct */
++ while (curr
++ && ((curr->max_age && (curr->max_age <= RSBAC_CURRENT_TIME))
++ || compare((char *) curr + sizeof(*curr) +
++ list->info.desc_size, data)
++ || !selector((char *) curr + sizeof(*curr), param)
++ )
++ )
++ curr = rcu_dereference(curr->next);
++ if(curr)
++ return curr;
++ }
++ return NULL;
++}
++
++/* Call RCU locked */
++static inline struct rsbac_list_lol_item_t *lookup_lol_item_data_memcmp_selector(
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_desc_selector_function_t selector,
++ void * param)
++{
++ struct rsbac_list_lol_item_t *curr;
++ int i;
++
++ if (!list || !data)
++ return NULL;
++
++ for(i=0; i<nr_hashes; i++) {
++ curr = rcu_dereference(hashed[i].head);
++
++ /* note: item desc is behind official struct */
++ while (curr
++ && ((curr->max_age && (curr->max_age <= RSBAC_CURRENT_TIME))
++ || memcmp(data,
++ (char *) curr + sizeof(*curr) + list->info.desc_size,
++ list->info.data_size)
++ || !selector((char *) curr + sizeof(*curr), param)
++ )
++ )
++ curr = rcu_dereference(curr->next);
++ if(curr)
++ return curr;
++ }
++ return NULL;
++}
++
++/* Call RCU locked */
++static struct rsbac_list_lol_item_t *lookup_lol_item_data_selector(
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_data_compare_function_t
++ compare,
++ rsbac_list_desc_selector_function_t selector,
++ void * param)
++{
++ if (!list || !data || !hashed)
++ return NULL;
++
++ if (compare)
++ return lookup_lol_item_data_compare_selector(list, hashed, nr_hashes, data, compare, selector, param);
++ else
++ return lookup_lol_item_data_memcmp_selector(list, hashed, nr_hashes, data, selector, param);
++}
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++/* Call RCU locked */
++static inline struct rsbac_list_lol_item_t *ta_lookup_lol_item_data_compare_selector(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_data_compare_function_t compare,
++ rsbac_list_desc_selector_function_t selector,
++ void * param)
++{
++ struct rsbac_list_lol_item_t *curr;
++ int i;
++
++ for(i=0; i<nr_hashes; i++) {
++ if (!hashed[i].ta_copied || hashed[i].ta_copied != ta_number)
++ curr = rcu_dereference(hashed[i].head);
++ else
++ curr = rcu_dereference(hashed[i].ta_head);
++
++ /* note: item desc is behind official struct */
++ while (curr
++ && ((curr->max_age && (curr->max_age <= RSBAC_CURRENT_TIME))
++ || compare((char *) curr + sizeof(*curr) +
++ list->info.desc_size, data)
++ || !selector((char *) curr + sizeof(*curr), param)
++ )
++ )
++ curr = rcu_dereference(curr->next);
++ if(curr)
++ return curr;
++ }
++ return NULL;
++}
++
++/* Call RCU locked */
++static inline struct rsbac_list_lol_item_t *ta_lookup_lol_item_data_memcmp_selector(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_desc_selector_function_t selector,
++ void * param)
++{
++ struct rsbac_list_lol_item_t *curr;
++ int i;
++
++ for(i=0; i<nr_hashes; i++) {
++ if (!hashed[i].ta_copied || hashed[i].ta_copied != ta_number)
++ curr = rcu_dereference(hashed[i].head);
++ else
++ curr = rcu_dereference(hashed[i].ta_head);
++
++ /* note: item desc is behind official struct */
++ while (curr
++ && ((curr->max_age && (curr->max_age <= RSBAC_CURRENT_TIME))
++ || memcmp(data,
++ (char *) curr + sizeof(*curr) + list->info.desc_size,
++ list->info.data_size)
++ || !selector((char *) curr + sizeof(*curr), param)
++ )
++ )
++ curr = rcu_dereference(curr->next);
++ if(curr)
++ return curr;
++ }
++ return NULL;
++}
++
++/* Call RCU locked */
++static struct rsbac_list_lol_item_t
++ *ta_lookup_lol_item_data_selector(rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_hashed_t * hashed,
++ u_int nr_hashes,
++ void *data,
++ rsbac_list_data_compare_function_t compare,
++ rsbac_list_desc_selector_function_t selector,
++ void * param)
++{
++ if (!list || !data || !hashed)
++ return NULL;
++
++ if(!ta_number)
++ return lookup_lol_item_data_selector(list, hashed, nr_hashes, data, compare,
++ selector, param);
++ if (compare)
++ return ta_lookup_lol_item_data_compare_selector(ta_number, list,
++ hashed, nr_hashes, data,
++ compare, selector, param);
++ else
++ return ta_lookup_lol_item_data_memcmp_selector(ta_number,
++ list, hashed, nr_hashes,
++ data, selector, param);
++}
++#endif
++
++/* Registration lookup */
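++/* Registration lookups resolve a list handle or a (name, device) pair to
++ * the registered list descriptor by walking the registration list, using
++ * the cached curr pointer of the registration head as a fast path. */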
++
++static struct rsbac_list_reg_item_t *lookup_reg(
++ struct rsbac_list_reg_item_t *handle)
++{
++ struct rsbac_list_reg_item_t *curr = reg_head.curr;
++
++ if (!handle)
++ return NULL;
++ /* if there is no current item or it is not the right one, search... */
++ if (curr != handle) {
++ curr = reg_head.head;
++ while (curr && curr != handle)
++ curr = curr->next;
++ if (!curr)
++ rsbac_pr_debug(lists,
++ "Lookup of unknown list handle %p\n",
++ handle);
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++static struct rsbac_list_reg_item_t *lookup_reg_name(char *name,
++ kdev_t device)
++{
++ struct rsbac_list_reg_item_t *curr = reg_head.curr;
++
++ if (!name)
++ return NULL;
++ /* if there is no current item or it is not the right one, search... */
++ if (!curr || (strncmp(curr->name, name, RSBAC_LIST_MAX_FILENAME)
++ || (RSBAC_MAJOR(curr->device) != RSBAC_MAJOR(device))
++ || (RSBAC_MINOR(curr->device) != RSBAC_MINOR(device))
++ )
++ ) {
++ curr = reg_head.head;
++ while (curr
++ &&
++ (strncmp(curr->name, name, RSBAC_LIST_MAX_FILENAME)
++ || (RSBAC_MAJOR(curr->device) !=
++ RSBAC_MAJOR(device))
++ || (RSBAC_MINOR(curr->device) !=
++ RSBAC_MINOR(device))
++ )
++ )
++ curr = curr->next;
++ if (!curr)
++ rsbac_pr_debug(lists, "Lookup of unknown list name %s "
++ "on device %02u:%02u\n", name,
++ RSBAC_MAJOR(device),
++ RSBAC_MINOR(device));
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++/* List of lists registration lookup */
++
++static struct rsbac_list_lol_reg_item_t *lookup_lol_reg(
++ struct rsbac_list_lol_reg_item_t *handle)
++{
++ struct rsbac_list_lol_reg_item_t *curr = lol_reg_head.curr;
++
++ if (!handle)
++ return NULL;
++ /* if there is no current item or it is not the right one, search... */
++ if (curr != handle) {
++ curr = lol_reg_head.head;
++ while (curr && curr != handle)
++ curr = curr->next;
++ if (!curr)
++ rsbac_pr_debug(lists, "Lookup of unknown list handle %p\n",
++ handle);
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++static struct rsbac_list_lol_reg_item_t *lookup_lol_reg_name(char *name,
++ kdev_t device)
++{
++ struct rsbac_list_lol_reg_item_t *curr = lol_reg_head.curr;
++
++ if (!name)
++ return NULL;
++ /* if there is no current item or it is not the right one, search... */
++ if (!curr || (strncmp(curr->name, name, RSBAC_LIST_MAX_FILENAME)
++ || (RSBAC_MAJOR(curr->device) != RSBAC_MAJOR(device))
++ || (RSBAC_MINOR(curr->device) != RSBAC_MINOR(device))
++ )
++ ) {
++ curr = lol_reg_head.head;
++ while (curr
++ &&
++ (strncmp(curr->name, name, RSBAC_LIST_MAX_FILENAME)
++ || (RSBAC_MAJOR(curr->device) !=
++ RSBAC_MAJOR(device))
++ || (RSBAC_MINOR(curr->device) !=
++ RSBAC_MINOR(device))
++ )
++ )
++ curr = curr->next;
++ if (!curr)
++ rsbac_pr_debug(lists, "Lookup of unknown list name %s "
++ "on device %02u:%02u\n", name,
++ RSBAC_MAJOR(device),
++ RSBAC_MINOR(device));
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
++
++/*************/
++/* Add items */
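++/* Item insertion: the new item is allocated from the list's slab (or via
++ * rsbac_kmalloc), descriptor and data are copied in directly behind the
++ * item struct, and the item is linked into its hash bucket at the position
++ * that keeps the bucket sorted, subject to the max_items_per_hash limit. */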
++
++/* Call spinlocked */
++static inline struct rsbac_list_item_t *insert_item_compare(
++ struct rsbac_list_reg_item_t * list,
++ void *desc,
++ struct rsbac_list_item_t * new_item_p)
++{
++ struct rsbac_list_item_t *curr;
++ u_int hash = 0;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ curr = list->hashed[hash].curr;
++ if (!curr)
++ curr = list->hashed[hash].head;
++ if ((list->compare(desc, &curr[1]) > 0)) {
++ curr = curr->next;
++ while (curr && (list->compare(desc, &curr[1]) > 0)
++ )
++ curr = curr->next;
++ if (curr) {
++ /* insert before curr */
++ new_item_p->prev = curr->prev;
++ new_item_p->next = curr;
++ rcu_assign_pointer(curr->prev->next, new_item_p);
++ rcu_assign_pointer(curr->prev, new_item_p);
++ } else {
++ /* insert as last item */
++ new_item_p->prev = list->hashed[hash].tail;
++ new_item_p->next = NULL;
++ rcu_assign_pointer(list->hashed[hash].tail->next, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].tail, new_item_p);
++ }
++ } else {
++ curr = curr->prev;
++ while (curr && (list->compare(desc, &curr[1]) < 0)
++ )
++ curr = curr->prev;
++ if (curr) {
++ /* insert after curr */
++ new_item_p->prev = curr;
++ new_item_p->next = curr->next;
++ rcu_assign_pointer(curr->next->prev, new_item_p);
++ rcu_assign_pointer(curr->next, new_item_p);
++ } else {
++ /* insert as first item */
++ new_item_p->prev = NULL;
++ new_item_p->next = list->hashed[hash].head;
++ rcu_assign_pointer(list->hashed[hash].head->prev, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].head, new_item_p);
++ }
++ }
++ list->hashed[hash].count++;
++ rcu_assign_pointer(list->hashed[hash].curr, new_item_p);
++ return new_item_p;
++}
++
++/* Call spinlocked */
++static inline struct rsbac_list_item_t *insert_item_memcmp(
++ struct rsbac_list_reg_item_t *list,
++ void *desc,
++ struct rsbac_list_item_t *new_item_p)
++{
++ struct rsbac_list_item_t *curr;
++ u_int hash = 0;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ curr = list->hashed[hash].curr;
++ if (!curr)
++ curr = list->hashed[hash].head;
++ if (memcmp(desc, &curr[1], list->info.desc_size) > 0) {
++ curr = curr->next;
++ while (curr
++ && (memcmp(desc,
++ &curr[1], list->info.desc_size) > 0)
++ )
++ curr = curr->next;
++ if (curr) {
++ /* insert before curr */
++ new_item_p->prev = curr->prev;
++ new_item_p->next = curr;
++ rcu_assign_pointer(curr->prev->next, new_item_p);
++ rcu_assign_pointer(curr->prev, new_item_p);
++ } else {
++ /* insert as last item */
++ new_item_p->prev = list->hashed[hash].tail;
++ new_item_p->next = NULL;
++ rcu_assign_pointer(list->hashed[hash].tail->next, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].tail, new_item_p);
++ }
++ } else {
++ curr = curr->prev;
++ while (curr
++ && (memcmp(desc,
++ &curr[1], list->info.desc_size) < 0)
++ )
++ curr = curr->prev;
++ if (curr) {
++ /* insert after curr */
++ new_item_p->prev = curr;
++ new_item_p->next = curr->next;
++ rcu_assign_pointer(curr->next->prev, new_item_p);
++ rcu_assign_pointer(curr->next, new_item_p);
++ } else {
++ /* insert as first item */
++ new_item_p->prev = NULL;
++ new_item_p->next = list->hashed[hash].head;
++ rcu_assign_pointer(list->hashed[hash].head->prev, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].head, new_item_p);
++ }
++ }
++ list->hashed[hash].count++;
++ rcu_assign_pointer(list->hashed[hash].curr, new_item_p);
++ return new_item_p;
++}
++
++/* Call spinlocked */
++static struct rsbac_list_item_t *add_item(struct rsbac_list_reg_item_t
++ *list, rsbac_time_t max_age,
++ void *desc, void *data)
++{
++ struct rsbac_list_item_t *new_item_p = NULL;
++ u_int hash = 0;
++
++ if (!list || !desc)
++ return NULL;
++ if (list->info.data_size && !data)
++ return NULL;
++
++ /* item desc and data are behind official struct */
++ if (list->slab)
++ new_item_p = rsbac_smalloc(list->slab);
++ else
++ new_item_p = rsbac_kmalloc(sizeof(*new_item_p)
++ + list->info.desc_size
++ + list->info.data_size);
++ if (!new_item_p)
++ return NULL;
++
++ new_item_p->max_age = max_age;
++ /* item desc is behind official struct */
++ memcpy(&new_item_p[1], desc, list->info.desc_size);
++ /* item data is behind official struct and desc */
++ /* data might be empty! */
++ if (data && list->info.data_size)
++ memcpy(((__u8 *) new_item_p) + sizeof(*new_item_p) +
++ list->info.desc_size, data, list->info.data_size);
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ if (!list->hashed[hash].head) {
++ new_item_p->prev = NULL;
++ new_item_p->next = NULL;
++ rcu_assign_pointer(list->hashed[hash].head, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].tail, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].curr, new_item_p);
++ list->hashed[hash].count = 1;
++ return new_item_p;
++ }
++ if(list->hashed[hash].count >= list->max_items_per_hash) {
++ rsbac_sfree(list->slab, new_item_p);
++ if (!(list->flags & RSBAC_LIST_NO_MAX_WARN))
++ rsbac_printk(KERN_WARNING "add_item(): cannot add item to list %s, hash %u on device %02u:%02u, would be more than %u items!\n",
++ list->name,
++ hash,
++ RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device),
++ list->max_items_per_hash);
++ return NULL;
++ }
++ if (list->compare)
++ return insert_item_compare(list, desc, new_item_p);
++ else
++ return insert_item_memcmp(list, desc, new_item_p);
++}
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++static void ta_remove_all_items(struct rsbac_list_reg_item_t *list, u_int hash);
++
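++/* ta_copy duplicates one hash bucket into its ta_* shadow chain for a
++ * transaction. ta_copied is only set once the whole copy has succeeded, so
++ * readers keep using the original chain until then; on allocation failure
++ * the partial copy is dropped again via ta_remove_all_items(). */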
++/* Call spinlocked */
++static int ta_copy(rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_reg_item_t *list,
++ u_int hash)
++{
++ struct rsbac_list_item_t *curr;
++ struct rsbac_list_item_t *new_item_p;
++ u_int item_size = sizeof(*new_item_p)
++ + list->info.desc_size + list->info.data_size;
++
++ /* write access to ta_* is safe for readers as long as ta_copied is not set */
++ curr = list->hashed[hash].head;
++ if (curr) {
++ if (list->slab)
++ new_item_p = rsbac_smalloc(list->slab);
++ else
++ new_item_p = rsbac_kmalloc(item_size);
++ if (!new_item_p) {
++ ta_remove_all_items(list, hash);
++ return -RSBAC_ENOMEM;
++ }
++ memcpy(new_item_p, curr, item_size);
++ new_item_p->prev = NULL;
++ new_item_p->next = NULL;
++ list->hashed[hash].ta_head = new_item_p;
++ list->hashed[hash].ta_tail = new_item_p;
++ list->hashed[hash].ta_curr = new_item_p;
++ list->hashed[hash].ta_count = 1;
++ curr = curr->next;
++ } else {
++ list->hashed[hash].ta_head = NULL;
++ list->hashed[hash].ta_tail = NULL;
++ list->hashed[hash].ta_curr = NULL;
++ list->hashed[hash].ta_count = 0;
++ list->hashed[hash].ta_copied = ta_number;
++ return 0;
++ }
++ while (curr) {
++ if (list->slab)
++ new_item_p = rsbac_smalloc(list->slab);
++ else
++ new_item_p = rsbac_kmalloc(item_size);
++ if (!new_item_p) {
++ ta_remove_all_items(list, hash);
++ return -RSBAC_ENOMEM;
++ }
++ memcpy(new_item_p, curr, item_size);
++ new_item_p->prev = list->hashed[hash].ta_tail;
++ new_item_p->next = NULL;
++ list->hashed[hash].ta_tail->next = new_item_p;
++ list->hashed[hash].ta_tail = new_item_p;
++ list->hashed[hash].ta_count++;
++ curr = curr->next;
++ }
++ list->hashed[hash].ta_copied = ta_number;
++ return 0;
++}
++
++static void ta_remove_all_lol_items(struct rsbac_list_lol_reg_item_t *list,
++ u_int hash);
++
++/* Call spinlocked */
++static int ta_lol_copy(rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_lol_reg_item_t *list,
++ u_int hash)
++{
++ struct rsbac_list_lol_item_t *curr;
++ struct rsbac_list_lol_item_t *new_item_p;
++ struct rsbac_list_item_t *sub_curr;
++ struct rsbac_list_item_t *new_subitem_p;
++ u_int item_size = sizeof(*new_item_p)
++ + list->info.desc_size + list->info.data_size;
++ u_int subitem_size = sizeof(*new_subitem_p)
++ + list->info.subdesc_size + list->info.subdata_size;
++
++ /* write access to ta_* is safe for readers as long as ta_copied is not set */
++ list->hashed[hash].ta_head = NULL;
++ list->hashed[hash].ta_tail = NULL;
++ list->hashed[hash].ta_curr = NULL;
++ list->hashed[hash].ta_count = 0;
++
++ curr = list->hashed[hash].head;
++ while (curr) {
++ if (list->slab)
++ new_item_p = rsbac_smalloc(list->slab);
++ else
++ new_item_p = rsbac_kmalloc(item_size);
++ if (!new_item_p) {
++ ta_remove_all_lol_items(list, hash);
++ return -RSBAC_ENOMEM;
++ }
++ memcpy(new_item_p, curr, item_size);
++ new_item_p->head = NULL;
++ new_item_p->tail = NULL;
++ new_item_p->curr = NULL;
++ new_item_p->count = 0;
++ new_item_p->prev = NULL;
++ new_item_p->next = NULL;
++ sub_curr = curr->head;
++ while (sub_curr) {
++ if (list->subslab)
++ new_subitem_p = rsbac_smalloc(list->subslab);
++ else
++ new_subitem_p = rsbac_kmalloc(subitem_size);
++ if (!new_subitem_p) {
++ ta_remove_all_lol_items(list, hash);
++ rsbac_sfree(list->slab, new_item_p);
++ return -RSBAC_ENOMEM;
++ }
++ memcpy(new_subitem_p, sub_curr, subitem_size);
++ new_subitem_p->prev = NULL;
++ new_subitem_p->next = NULL;
++ if (new_item_p->tail) {
++ new_subitem_p->prev = new_item_p->tail;
++ new_item_p->tail->next = new_subitem_p;
++ new_item_p->tail = new_subitem_p;
++ new_item_p->count++;
++ } else {
++ new_item_p->head = new_subitem_p;
++ new_item_p->tail = new_subitem_p;
++ new_item_p->count = 1;
++ }
++ sub_curr = sub_curr->next;
++ }
++ if (list->hashed[hash].ta_tail) {
++ new_item_p->prev = list->hashed[hash].ta_tail;
++ list->hashed[hash].ta_tail->next = new_item_p;
++ list->hashed[hash].ta_tail= new_item_p;
++ list->hashed[hash].ta_count++;
++ } else {
++ list->hashed[hash].ta_head = new_item_p;
++ list->hashed[hash].ta_tail = new_item_p;
++ list->hashed[hash].ta_curr = new_item_p;
++ list->hashed[hash].ta_count = 1;
++ }
++ curr = curr->next;
++ }
++ list->hashed[hash].ta_copied = ta_number;
++ return 0;
++}
++
++/* Call spinlocked */
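++/* Sorted insert into the transaction copy of the list, using the registered
++ * compare() callback on the descriptor and ta_curr as a position hint. */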
++static inline struct rsbac_list_item_t *ta_insert_item_compare(struct
++ rsbac_list_reg_item_t
++ *list, void *desc,
++ struct
++ rsbac_list_item_t
++ *new_item_p)
++{
++ struct rsbac_list_item_t *curr;
++ u_int hash = 0;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ curr = list->hashed[hash].ta_curr;
++ if (!curr)
++ curr = list->hashed[hash].ta_head;
++ if ((list->compare(desc, &curr[1]) > 0)) {
++ curr = curr->next;
++ while (curr && (list->compare(desc, &curr[1]) > 0)
++ )
++ curr = curr->next;
++ if (curr) {
++ /* insert before curr */
++ new_item_p->prev = curr->prev;
++ new_item_p->next = curr;
++ rcu_assign_pointer(curr->prev->next, new_item_p);
++ rcu_assign_pointer(curr->prev, new_item_p);
++ } else {
++ /* insert as last item */
++ new_item_p->prev = list->hashed[hash].ta_tail;
++ new_item_p->next = NULL;
++ rcu_assign_pointer(list->hashed[hash].ta_tail->next, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].ta_tail, new_item_p);
++ }
++ } else {
++ curr = curr->prev;
++ while (curr && (list->compare(desc, &curr[1]) < 0)
++ )
++ curr = curr->prev;
++ if (curr) {
++ /* insert after curr */
++ new_item_p->prev = curr;
++ new_item_p->next = curr->next;
++ rcu_assign_pointer(curr->next->prev, new_item_p);
++ rcu_assign_pointer(curr->next, new_item_p);
++ } else {
++ /* insert as first item */
++ new_item_p->prev = NULL;
++ new_item_p->next = list->hashed[hash].ta_head;
++ rcu_assign_pointer(list->hashed[hash].ta_head->prev, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].ta_head, new_item_p);
++ }
++ }
++ list->hashed[hash].ta_count++;
++ rcu_assign_pointer(list->hashed[hash].ta_curr, new_item_p);
++ return new_item_p;
++}
++
++/* Call spinlocked */
++static inline struct rsbac_list_item_t *ta_insert_item_memcmp(struct
++ rsbac_list_reg_item_t
++ *list, void *desc,
++ struct
++ rsbac_list_item_t
++ *new_item_p)
++{
++ struct rsbac_list_item_t *curr;
++ u_int hash = 0;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ curr = list->hashed[hash].ta_curr;
++ if (!curr)
++ curr = list->hashed[hash].ta_head;
++ if (memcmp(desc, &curr[1], list->info.desc_size) > 0) {
++ curr = curr->next;
++ while (curr
++ && (memcmp(desc,
++ &curr[1], list->info.desc_size) > 0)
++ )
++ curr = curr->next;
++ if (curr) {
++ /* insert before curr */
++ new_item_p->prev = curr->prev;
++ new_item_p->next = curr;
++ rcu_assign_pointer(curr->prev->next, new_item_p);
++ rcu_assign_pointer(curr->prev, new_item_p);
++ } else {
++ /* insert as last item */
++ new_item_p->prev = list->hashed[hash].ta_tail;
++ new_item_p->next = NULL;
++ rcu_assign_pointer(list->hashed[hash].ta_tail->next, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].ta_tail, new_item_p);
++ }
++ } else {
++ curr = curr->prev;
++ while (curr
++ && (memcmp(desc,
++ &curr[1], list->info.desc_size) < 0)
++ )
++ curr = curr->prev;
++ if (curr) {
++ /* insert after curr */
++ new_item_p->prev = curr;
++ new_item_p->next = curr->next;
++ rcu_assign_pointer(curr->next->prev, new_item_p);
++ rcu_assign_pointer(curr->next, new_item_p);
++ } else {
++ /* insert as first item */
++ new_item_p->prev = NULL;
++ new_item_p->next = list->hashed[hash].ta_head;
++ rcu_assign_pointer(list->hashed[hash].ta_head->prev, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].ta_head, new_item_p);
++ }
++ }
++ list->hashed[hash].ta_count++;
++ rcu_assign_pointer(list->hashed[hash].ta_curr, new_item_p);
++ return new_item_p;
++}
++
++/* Call spinlocked */
++static struct rsbac_list_item_t *ta_add_item(rsbac_list_ta_number_t
++ ta_number,
++ struct rsbac_list_reg_item_t
++ *list, rsbac_time_t max_age,
++ void *desc, void *data)
++{
++ struct rsbac_list_item_t *new_item_p = NULL;
++ u_int hash = 0;
++
++ if (!list || !desc)
++ return NULL;
++ if (list->info.data_size && !data)
++ return NULL;
++ if (!ta_number)
++ return add_item(list, max_age, desc, data);
++ /* item desc and data are behind official struct */
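++	/* layout: | struct rsbac_list_item_t | desc | data | */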
++ if (list->slab)
++ new_item_p = rsbac_smalloc(list->slab);
++ else
++ new_item_p = rsbac_kmalloc(sizeof(*new_item_p)
++ + list->info.desc_size
++ + list->info.data_size);
++ if (!new_item_p)
++ return NULL;
++ new_item_p->max_age = max_age;
++ /* item desc is behind official struct */
++ memcpy(&new_item_p[1], desc, list->info.desc_size);
++ /* item data is behind official struct and desc */
++ /* data might be empty! */
++ if (data && list->info.data_size)
++ memcpy(((__u8 *) new_item_p) + sizeof(*new_item_p) +
++ list->info.desc_size, data, list->info.data_size);
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ if (!list->hashed[hash].ta_copied) { /* copy list to ta_list */
++ if (ta_copy(ta_number, list, hash)) {
++ rsbac_sfree(list->slab, new_item_p);
++ return NULL;
++ }
++ } else {
++ if (list->hashed[hash].ta_copied != ta_number) {
++ rsbac_sfree(list->slab, new_item_p);
++ return NULL;
++ }
++ }
++
++ if (!list->hashed[hash].ta_head) {
++ new_item_p->prev = NULL;
++ new_item_p->next = NULL;
++ rcu_assign_pointer(list->hashed[hash].ta_head, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].ta_tail, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].ta_curr, new_item_p);
++ list->hashed[hash].ta_count = 1;
++ return new_item_p;
++ }
++ if (list->hashed[hash].ta_count >= list->max_items_per_hash) {
++ rsbac_sfree(list->slab, new_item_p);
++ if (!(list->flags & RSBAC_LIST_NO_MAX_WARN))
++ rsbac_printk(KERN_WARNING "ta_add_item(): cannot add item to list %s, hash %u on device %02u:%02u, would be more than %u items!\n",
++ list->name,
++ hash,
++ RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device),
++ list->max_items_per_hash);
++ return NULL;
++ }
++ if (list->compare)
++ return ta_insert_item_compare(list, desc, new_item_p);
++ else
++ return ta_insert_item_memcmp(list, desc, new_item_p);
++}
++#endif
++
++
++/* Call spinlocked */
++static inline struct rsbac_list_item_t *insert_lol_subitem_compare(struct
++ rsbac_list_lol_reg_item_t
++ *list,
++ struct
++ rsbac_list_lol_item_t
++ *sublist,
++ void *subdesc,
++ struct
++ rsbac_list_item_t
++ *new_item_p)
++{
++ struct rsbac_list_item_t *curr;
++
++ curr = sublist->curr;
++ if (!curr)
++ curr = sublist->head;
++ if ((list->subcompare(subdesc, &curr[1]) > 0)) {
++ curr = curr->next;
++ while (curr && (list->subcompare(subdesc, &curr[1]) > 0)
++ )
++ curr = curr->next;
++ if (curr) {
++ /* insert before curr */
++ new_item_p->prev = curr->prev;
++ new_item_p->next = curr;
++ rcu_assign_pointer(curr->prev->next, new_item_p);
++ rcu_assign_pointer(curr->prev, new_item_p);
++ } else {
++ /* insert as last item */
++ new_item_p->prev = sublist->tail;
++ new_item_p->next = NULL;
++ rcu_assign_pointer(sublist->tail->next, new_item_p);
++ rcu_assign_pointer(sublist->tail, new_item_p);
++ }
++ } else {
++ curr = curr->prev;
++ while (curr && (list->subcompare(subdesc, &curr[1]) < 0)
++ )
++ curr = curr->prev;
++ if (curr) {
++ /* insert after curr */
++ new_item_p->prev = curr;
++ new_item_p->next = curr->next;
++ rcu_assign_pointer(curr->next->prev, new_item_p);
++ rcu_assign_pointer(curr->next, new_item_p);
++ } else {
++ /* insert as first item */
++ new_item_p->prev = NULL;
++ new_item_p->next = sublist->head;
++ rcu_assign_pointer(sublist->head->prev, new_item_p);
++ rcu_assign_pointer(sublist->head, new_item_p);
++ }
++ }
++ sublist->count++;
++ rcu_assign_pointer(sublist->curr, new_item_p);
++ return new_item_p;
++}
++
++/* Call spinlocked */
++static inline struct rsbac_list_item_t *insert_lol_subitem_memcmp(struct
++ rsbac_list_lol_reg_item_t
++ *list,
++ struct
++ rsbac_list_lol_item_t
++ *sublist,
++ void *subdesc,
++ struct
++ rsbac_list_item_t
++ *new_item_p)
++{
++ struct rsbac_list_item_t *curr;
++
++ curr = sublist->curr;
++ if (!curr)
++ curr = sublist->head;
++ if (memcmp(subdesc, &curr[1], list->info.subdesc_size) > 0) {
++ curr = curr->next;
++ while (curr
++ && (memcmp(subdesc,
++ &curr[1], list->info.subdesc_size) > 0)
++ )
++ curr = curr->next;
++ if (curr) {
++ /* insert before curr */
++ new_item_p->prev = curr->prev;
++ new_item_p->next = curr;
++ rcu_assign_pointer(curr->prev->next, new_item_p);
++ rcu_assign_pointer(curr->prev, new_item_p);
++ } else {
++ /* insert as last item */
++ new_item_p->prev = sublist->tail;
++ new_item_p->next = NULL;
++ rcu_assign_pointer(sublist->tail->next, new_item_p);
++ rcu_assign_pointer(sublist->tail, new_item_p);
++ }
++ } else {
++ curr = curr->prev;
++ while (curr
++ && (memcmp(subdesc,
++ &curr[1], list->info.subdesc_size) < 0)
++ )
++ curr = curr->prev;
++ if (curr) {
++ /* insert after curr */
++ new_item_p->prev = curr;
++ new_item_p->next = curr->next;
++ rcu_assign_pointer(curr->next->prev, new_item_p);
++ rcu_assign_pointer(curr->next, new_item_p);
++ } else {
++ /* insert as first item */
++ new_item_p->prev = NULL;
++ new_item_p->next = sublist->head;
++ rcu_assign_pointer(sublist->head->prev, new_item_p);
++ rcu_assign_pointer(sublist->head, new_item_p);
++ }
++ }
++ sublist->count++;
++ rcu_assign_pointer(sublist->curr, new_item_p);
++ return new_item_p;
++}
++
++/* Call spinlocked */
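++/* Add one subitem to the sublist of a list-of-lists entry, keeping the
++ * sublist sorted via subcompare() or, if none is registered, via memcmp()
++ * on the subdescriptor. */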
++static struct rsbac_list_item_t *add_lol_subitem(struct
++ rsbac_list_lol_reg_item_t
++ *list,
++ struct
++ rsbac_list_lol_item_t
++ *sublist,
++ rsbac_time_t max_age,
++ void *subdesc,
++ void *subdata)
++{
++ struct rsbac_list_item_t *new_item_p = NULL;
++
++ if (!list || !sublist || !subdesc)
++ return NULL;
++ if (list->info.subdata_size && !subdata)
++ return NULL;
++ /* item desc and data are behind official struct */
++ if (list->subslab)
++ new_item_p = rsbac_smalloc(list->subslab);
++ else
++ new_item_p = rsbac_kmalloc(sizeof(*new_item_p)
++ + list->info.subdesc_size
++ + list->info.subdata_size);
++ if (!new_item_p)
++ return NULL;
++
++ new_item_p->max_age = max_age;
++ /* item desc is behind official struct */
++ memcpy(&new_item_p[1], subdesc, list->info.subdesc_size);
++ /* item data is behind official struct and desc */
++ /* subdata might be empty! */
++ if (subdata && list->info.subdata_size)
++ memcpy(((__u8 *) new_item_p) + sizeof(*new_item_p) +
++ list->info.subdesc_size, subdata,
++ list->info.subdata_size);
++
++ /* Sublist was empty */
++ if (!sublist->head) {
++ new_item_p->prev = NULL;
++ new_item_p->next = NULL;
++ rcu_assign_pointer(sublist->head, new_item_p);
++ rcu_assign_pointer(sublist->tail, new_item_p);
++ rcu_assign_pointer(sublist->curr, new_item_p);
++ sublist->count = 1;
++ return new_item_p;
++ }
++ if (sublist->count >= list->max_subitems) {
++		rsbac_sfree(list->subslab, new_item_p);
++ if (!(list->flags & RSBAC_LIST_NO_MAX_WARN))
++ rsbac_printk(KERN_WARNING "add_lol_subitem(): cannot add subitem to sublist of %s on device %02u:%02u, would be more than %u subitems!\n",
++ list->name,
++ RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device),
++ list->max_subitems);
++ return NULL;
++ }
++ if (list->subcompare)
++ return insert_lol_subitem_compare(list, sublist, subdesc,
++ new_item_p);
++ else
++ return insert_lol_subitem_memcmp(list, sublist, subdesc,
++ new_item_p);
++}
++
++/* Call spinlocked */
++static inline struct rsbac_list_lol_item_t *insert_lol_item_compare(struct
++ rsbac_list_lol_reg_item_t
++ *list,
++ void *desc,
++ struct
++ rsbac_list_lol_item_t
++ *new_item_p)
++{
++ struct rsbac_list_lol_item_t *curr;
++ u_int hash = 0;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ curr = list->hashed[hash].curr;
++ if (!curr)
++ curr = list->hashed[hash].head;
++ if ((list->compare(desc, &curr[1]) > 0)) {
++ curr = curr->next;
++ while (curr && (list->compare(desc, &curr[1]) > 0)
++ )
++ curr = curr->next;
++ if (curr) {
++ /* insert before curr */
++ new_item_p->prev = curr->prev;
++ new_item_p->next = curr;
++ rcu_assign_pointer(curr->prev->next, new_item_p);
++ rcu_assign_pointer(curr->prev, new_item_p);
++ } else {
++ /* insert as last item */
++ new_item_p->prev = list->hashed[hash].tail;
++ new_item_p->next = NULL;
++ rcu_assign_pointer(list->hashed[hash].tail->next, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].tail, new_item_p);
++ }
++ } else {
++ curr = curr->prev;
++ while (curr && (list->compare(desc, &curr[1]) < 0)
++ )
++ curr = curr->prev;
++ if (curr) {
++ /* insert after curr */
++ new_item_p->prev = curr;
++ new_item_p->next = curr->next;
++ rcu_assign_pointer(curr->next->prev, new_item_p);
++ rcu_assign_pointer(curr->next, new_item_p);
++ } else {
++ /* insert as first item */
++ new_item_p->prev = NULL;
++ new_item_p->next = list->hashed[hash].head;
++ rcu_assign_pointer(list->hashed[hash].head->prev, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].head, new_item_p);
++ }
++ }
++ list->hashed[hash].count++;
++ rcu_assign_pointer(list->hashed[hash].curr, new_item_p);
++ return new_item_p;
++}
++
++/* Call spinlocked */
++static inline struct rsbac_list_lol_item_t *insert_lol_item_memcmp(struct
++ rsbac_list_lol_reg_item_t
++ *list,
++ void *desc,
++ struct
++ rsbac_list_lol_item_t
++ *new_item_p)
++{
++ struct rsbac_list_lol_item_t *curr;
++ u_int hash = 0;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ curr = list->hashed[hash].curr;
++ if (!curr)
++ curr = list->hashed[hash].head;
++ if (memcmp(desc, &curr[1], list->info.desc_size) > 0) {
++ curr = curr->next;
++ while (curr
++ && (memcmp(desc,
++ &curr[1], list->info.desc_size) > 0)
++ )
++ curr = curr->next;
++ if (curr) {
++ /* insert before curr */
++ new_item_p->prev = curr->prev;
++ new_item_p->next = curr;
++ rcu_assign_pointer(curr->prev->next, new_item_p);
++ rcu_assign_pointer(curr->prev, new_item_p);
++ } else {
++ /* insert as last item */
++ new_item_p->prev = list->hashed[hash].tail;
++ new_item_p->next = NULL;
++ rcu_assign_pointer(list->hashed[hash].tail->next, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].tail, new_item_p);
++ }
++ } else {
++ curr = curr->prev;
++ while (curr
++ && (memcmp(desc,
++ &curr[1], list->info.desc_size) < 0)
++ )
++ curr = curr->prev;
++ if (curr) {
++ /* insert after curr */
++ new_item_p->prev = curr;
++ new_item_p->next = curr->next;
++ rcu_assign_pointer(curr->next->prev, new_item_p);
++ rcu_assign_pointer(curr->next, new_item_p);
++ } else {
++ /* insert as first item */
++ new_item_p->prev = NULL;
++ new_item_p->next = list->hashed[hash].head;
++ rcu_assign_pointer(list->hashed[hash].head->prev, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].head, new_item_p);
++ }
++ }
++ list->hashed[hash].count++;
++ rcu_assign_pointer(list->hashed[hash].curr, new_item_p);
++ return new_item_p;
++}
++
++/* Call spinlocked */
++static struct rsbac_list_lol_item_t *add_lol_item(struct
++ rsbac_list_lol_reg_item_t
++ *list,
++ rsbac_time_t max_age,
++ void *desc, void *data)
++{
++ struct rsbac_list_lol_item_t *new_item_p = NULL;
++ u_int hash = 0;
++
++ if (!list || !desc)
++ return NULL;
++ if (list->info.data_size && !data)
++ return NULL;
++ /* item desc and data are behind official struct */
++ if (list->slab)
++ new_item_p = rsbac_smalloc(list->slab);
++ else
++ new_item_p = rsbac_kmalloc(sizeof(*new_item_p)
++ + list->info.desc_size
++ + list->info.data_size);
++ if (!new_item_p)
++ return NULL;
++
++ /* Init sublist */
++ new_item_p->head = NULL;
++ new_item_p->tail = NULL;
++ new_item_p->curr = NULL;
++ new_item_p->count = 0;
++ new_item_p->max_age = max_age;
++ /* item desc is behind official struct */
++ memcpy(&new_item_p[1], desc, list->info.desc_size);
++ /* item data is behind official struct and desc */
++ /* data might be empty! */
++ if (data && list->info.data_size)
++ memcpy(((__u8 *) new_item_p) + sizeof(*new_item_p) +
++ list->info.desc_size, data, list->info.data_size);
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ if (!list->hashed[hash].head) {
++ new_item_p->prev = NULL;
++ new_item_p->next = NULL;
++ rcu_assign_pointer(list->hashed[hash].head, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].tail, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].curr, new_item_p);
++ list->hashed[hash].count = 1;
++ return new_item_p;
++ }
++ if (list->hashed[hash].count >= list->max_items_per_hash) {
++ rsbac_sfree(list->slab, new_item_p);
++ if (!(list->flags & RSBAC_LIST_NO_MAX_WARN))
++ rsbac_printk(KERN_WARNING "add_lol_item(): cannot add item to list %s, hash %u on device %02u:%02u, would be more than %u items!\n",
++ list->name,
++ hash,
++ RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device),
++ list->max_items_per_hash);
++ return NULL;
++ }
++ if (list->compare)
++ return insert_lol_item_compare(list, desc, new_item_p);
++ else
++ return insert_lol_item_memcmp(list, desc, new_item_p);
++}
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++/* Call spinlocked */
++static inline struct rsbac_list_lol_item_t *ta_insert_lol_item_compare(struct
++ rsbac_list_lol_reg_item_t
++ *list,
++ void *desc,
++ struct
++ rsbac_list_lol_item_t
++ *new_item_p)
++{
++ struct rsbac_list_lol_item_t *curr;
++ u_int hash = 0;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ curr = list->hashed[hash].ta_curr;
++ if (!curr)
++ curr = list->hashed[hash].ta_head;
++ if ((list->compare(desc, &curr[1]) > 0)) {
++ curr = curr->next;
++ while (curr && (list->compare(desc, &curr[1]) > 0)
++ )
++ curr = curr->next;
++ if (curr) {
++ /* insert before curr */
++ new_item_p->prev = curr->prev;
++ new_item_p->next = curr;
++ rcu_assign_pointer(curr->prev->next, new_item_p);
++ rcu_assign_pointer(curr->prev, new_item_p);
++ } else {
++ /* insert as last item */
++ new_item_p->prev = list->hashed[hash].ta_tail;
++ new_item_p->next = NULL;
++ rcu_assign_pointer(list->hashed[hash].ta_tail->next, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].ta_tail, new_item_p);
++ }
++ } else {
++ curr = curr->prev;
++ while (curr && (list->compare(desc, &curr[1]) < 0)
++ )
++ curr = curr->prev;
++ if (curr) {
++ /* insert after curr */
++ new_item_p->prev = curr;
++ new_item_p->next = curr->next;
++ rcu_assign_pointer(curr->next->prev, new_item_p);
++ rcu_assign_pointer(curr->next, new_item_p);
++ } else {
++ /* insert as first item */
++ new_item_p->prev = NULL;
++ new_item_p->next = list->hashed[hash].ta_head;
++ rcu_assign_pointer(list->hashed[hash].ta_head->prev, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].ta_head, new_item_p);
++ }
++ }
++ list->hashed[hash].ta_count++;
++ rcu_assign_pointer(list->hashed[hash].ta_curr, new_item_p);
++ return new_item_p;
++}
++
++/* Call spinlocked */
++static inline struct rsbac_list_lol_item_t *ta_insert_lol_item_memcmp(struct
++ rsbac_list_lol_reg_item_t
++ *list,
++ void *desc,
++ struct
++ rsbac_list_lol_item_t
++ *new_item_p)
++{
++ struct rsbac_list_lol_item_t *curr;
++ u_int hash = 0;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ curr = list->hashed[hash].ta_curr;
++ if (!curr)
++ curr = list->hashed[hash].ta_head;
++ if (memcmp(desc, &curr[1], list->info.desc_size) > 0) {
++ curr = curr->next;
++ while (curr
++ && (memcmp(desc,
++ &curr[1], list->info.desc_size) > 0)
++ )
++ curr = curr->next;
++ if (curr) {
++ /* insert before curr */
++ new_item_p->prev = curr->prev;
++ new_item_p->next = curr;
++ rcu_assign_pointer(curr->prev->next, new_item_p);
++ rcu_assign_pointer(curr->prev, new_item_p);
++ } else {
++ /* insert as last item */
++ new_item_p->prev = list->hashed[hash].ta_tail;
++ new_item_p->next = NULL;
++ rcu_assign_pointer(list->hashed[hash].ta_tail->next, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].ta_tail, new_item_p);
++ }
++ } else {
++ curr = curr->prev;
++ while (curr
++ && (memcmp(desc,
++ &curr[1], list->info.desc_size) < 0)
++ )
++ curr = curr->prev;
++ if (curr) {
++ /* insert after curr */
++ new_item_p->prev = curr;
++ new_item_p->next = curr->next;
++ rcu_assign_pointer(curr->next->prev, new_item_p);
++ rcu_assign_pointer(curr->next, new_item_p);
++ } else {
++ /* insert as first item */
++ new_item_p->prev = NULL;
++ new_item_p->next = list->hashed[hash].ta_head;
++ rcu_assign_pointer(list->hashed[hash].ta_head->prev, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].ta_head, new_item_p);
++ }
++ }
++ list->hashed[hash].ta_count++;
++ rcu_assign_pointer(list->hashed[hash].ta_curr, new_item_p);
++ return new_item_p;
++}
++
++/* Call spinlocked */
++static struct rsbac_list_lol_item_t *ta_add_lol_item(rsbac_list_ta_number_t
++ ta_number,
++ struct
++ rsbac_list_lol_reg_item_t
++ *list,
++ rsbac_time_t max_age,
++ void *desc,
++ void *data)
++{
++ struct rsbac_list_lol_item_t *new_item_p = NULL;
++ u_int hash = 0;
++
++ if (!list || !desc)
++ return NULL;
++ if (list->info.data_size && !data)
++ return NULL;
++ if (!ta_number)
++ return add_lol_item(list, max_age, desc, data);
++ /* item desc and data are behind official struct */
++ if (list->slab)
++ new_item_p = rsbac_smalloc(list->slab);
++ else
++ new_item_p = rsbac_kmalloc(sizeof(*new_item_p)
++ + list->info.desc_size
++ + list->info.data_size);
++ if (!new_item_p)
++ return NULL;
++
++ /* Init sublist */
++ new_item_p->head = NULL;
++ new_item_p->tail = NULL;
++ new_item_p->curr = NULL;
++ new_item_p->count = 0;
++ new_item_p->max_age = max_age;
++ new_item_p->prev = NULL;
++ new_item_p->next = NULL;
++ /* item desc is behind official struct */
++ memcpy(&new_item_p[1], desc, list->info.desc_size);
++ /* item data is behind official struct and desc */
++ /* data might be empty! */
++ if (data && list->info.data_size)
++ memcpy(((__u8 *) new_item_p) + sizeof(*new_item_p) +
++ list->info.desc_size, data, list->info.data_size);
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ if (!list->hashed[hash].ta_copied) { /* copy list to ta_list */
++ if (ta_lol_copy(ta_number, list, hash)) {
++ rsbac_sfree(list->slab, new_item_p);
++ return NULL;
++ }
++ } else {
++ if (list->hashed[hash].ta_copied != ta_number) {
++ rsbac_sfree(list->slab, new_item_p);
++ return NULL;
++ }
++ }
++
++ if (!list->hashed[hash].ta_head) {
++ rcu_assign_pointer(list->hashed[hash].ta_head, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].ta_tail, new_item_p);
++ rcu_assign_pointer(list->hashed[hash].ta_curr, new_item_p);
++ list->hashed[hash].ta_count = 1;
++ return (new_item_p);
++ }
++ if (list->hashed[hash].ta_count >= list->max_items_per_hash) {
++ rsbac_sfree(list->slab, new_item_p);
++ if (!(list->flags & RSBAC_LIST_NO_MAX_WARN))
++ rsbac_printk(KERN_WARNING "ta_add_lol_item(): cannot add item to list %s, hash %u on device %02u:%02u, would be more than %u items!\n",
++ list->name,
++ hash,
++ RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device),
++ list->max_items_per_hash);
++ return NULL;
++ }
++ if (list->compare)
++ return ta_insert_lol_item_compare(list, desc, new_item_p);
++ else
++ return ta_insert_lol_item_memcmp(list, desc, new_item_p);
++}
++#endif
++
++/* Add registration items */
++
++/* no locking needed */
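++/* Allocate and fill a registration item for a generic list: hash bucket
++ * array, optional copy of the default data and optional private slab cache.
++ * The item is only linked into reg_head later by add_reg(), which needs
++ * locking by the caller. */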
++static inline struct rsbac_list_reg_item_t *create_reg(
++ struct rsbac_list_info_t *info_p,
++ u_int flags,
++ rsbac_list_compare_function_t * compare,
++ rsbac_list_get_conv_t * get_conv,
++ void *def_data,
++ char *name,
++ kdev_t device,
++ u_int nr_hashes,
++ rsbac_list_hash_function_t hash_function,
++ char * old_name_base)
++{
++ struct rsbac_list_reg_item_t *new_item_p = NULL;
++
++ if (!(new_item_p = rsbac_smalloc_clear_unlocked(reg_item_slab)))
++ return NULL;
++ if (!(new_item_p->hashed = rsbac_kmalloc_clear_unlocked(nr_hashes*sizeof(struct rsbac_list_hashed_t)))) {
++ rsbac_sfree(reg_item_slab, new_item_p);
++ return NULL;
++ }
++ new_item_p->info = *info_p;
++ if (!def_data)
++ flags &= ~RSBAC_LIST_DEF_DATA;
++ new_item_p->flags = flags;
++ new_item_p->compare = compare;
++ new_item_p->get_conv = get_conv;
++ new_item_p->rcu_free = NULL;
++ if (flags & RSBAC_LIST_DEF_DATA) {
++ new_item_p->def_data = rsbac_kmalloc_unlocked(info_p->data_size);
++ if (new_item_p->def_data)
++ memcpy(new_item_p->def_data, def_data,
++ info_p->data_size);
++ else {
++ rsbac_kfree(new_item_p->hashed);
++ rsbac_sfree(reg_item_slab, new_item_p);
++ return NULL;
++ }
++ } else
++ new_item_p->def_data = NULL;
++ if (name) {
++ strncpy(new_item_p->name, name, RSBAC_LIST_MAX_FILENAME);
++ new_item_p->name[RSBAC_LIST_MAX_FILENAME] = 0;
++ } else {
++ strcpy(new_item_p->name, RSBAC_LIST_NONAME);
++ }
++ new_item_p->nr_hashes = nr_hashes;
++ if (flags & RSBAC_LIST_NO_MAX)
++ new_item_p->max_items_per_hash = RSBAC_LIST_MAX_NR_ITEMS_LIMIT;
++ else
++ new_item_p->max_items_per_hash = RSBAC_LIST_MAX_NR_ITEMS;
++ new_item_p->hash_function = hash_function;
++ if (old_name_base) {
++ strncpy(new_item_p->old_name_base, old_name_base, RSBAC_LIST_MAX_FILENAME);
++ new_item_p->old_name_base[RSBAC_LIST_MAX_FILENAME] = 0;
++ } else {
++ new_item_p->old_name_base[0] = 0;
++ }
++ new_item_p->device = device;
++ spin_lock_init(&new_item_p->lock);
++ if (flags & RSBAC_LIST_OWN_SLAB) {
++ new_item_p->slabname = rsbac_kmalloc(RSBAC_MAX_SLABNAME);
++ if (!new_item_p->slabname) {
++			if (new_item_p->def_data)
++				rsbac_kfree(new_item_p->def_data);
++			rsbac_kfree(new_item_p->hashed);
++			rsbac_sfree(reg_item_slab, new_item_p);
++ return NULL;
++ }
++ if (device != RSBAC_AUTO_DEV) {
++ snprintf(new_item_p->slabname,
++ RSBAC_MAX_SLABNAME,
++ "%s-%02u:%02u",
++ name,
++ RSBAC_MAJOR(device), RSBAC_MINOR(device));
++ } else {
++ strncpy(new_item_p->slabname, name, RSBAC_MAX_SLABNAME);
++ }
++ new_item_p->slabname[RSBAC_MAX_SLABNAME - 1] = 0;
++ new_item_p->slab = rsbac_slab_create(new_item_p->slabname,
++ sizeof(struct rsbac_list_item_t) + info_p->desc_size + info_p->data_size);
++ }
++ lockdep_set_class(&new_item_p->lock, &list_lock_class);
++ new_item_p->dirty = FALSE;
++ if (flags & RSBAC_LIST_NO_WRITE)
++ new_item_p->no_write = TRUE;
++ else
++ new_item_p->no_write = FALSE;
++ new_item_p->self = new_item_p;
++ return new_item_p;
++}
++
++/* locking needed */
++static struct rsbac_list_reg_item_t *add_reg(struct rsbac_list_reg_item_t
++ *new_item_p)
++{
++ if (!reg_head.head) {
++ new_item_p->prev = NULL;
++ new_item_p->next = NULL;
++ rcu_assign_pointer(reg_head.head, new_item_p);
++ rcu_assign_pointer(reg_head.tail, new_item_p);
++ rcu_assign_pointer(reg_head.curr, new_item_p);
++ reg_head.count = 1;
++ } else {
++ new_item_p->prev = reg_head.tail;
++ new_item_p->next = NULL;
++ rcu_assign_pointer(reg_head.tail->next, new_item_p);
++ rcu_assign_pointer(reg_head.tail, new_item_p);
++ rcu_assign_pointer(reg_head.curr, new_item_p);
++ reg_head.count++;
++ }
++ return new_item_p;
++}
++
++/* no locking needed */
++static inline struct rsbac_list_lol_reg_item_t *create_lol_reg(
++ struct rsbac_list_lol_info_t *info_p,
++ u_int flags,
++ rsbac_list_compare_function_t *compare,
++ rsbac_list_compare_function_t *subcompare,
++ rsbac_list_get_conv_t *get_conv,
++ rsbac_list_get_conv_t *get_subconv,
++ void *def_data,
++ void *def_subdata,
++ char *name,
++ kdev_t device,
++ u_int nr_hashes,
++ rsbac_list_hash_function_t hash_function,
++ char * old_name_base)
++{
++ struct rsbac_list_lol_reg_item_t *new_item_p = NULL;
++
++ if (!(new_item_p = rsbac_smalloc_clear_unlocked(lol_reg_item_slab)))
++ return NULL;
++ if (!(new_item_p->hashed = rsbac_kmalloc_clear_unlocked(nr_hashes*sizeof(struct rsbac_list_lol_hashed_t)))) {
++ rsbac_sfree(lol_reg_item_slab, new_item_p);
++ return NULL;
++ }
++ new_item_p->info = *info_p;
++ if (info_p->data_size && !def_data)
++ flags &= ~RSBAC_LIST_DEF_DATA;
++ if (!def_subdata)
++ flags &= ~RSBAC_LIST_DEF_SUBDATA;
++ new_item_p->flags = flags;
++ new_item_p->compare = compare;
++ new_item_p->subcompare = subcompare;
++ new_item_p->get_conv = get_conv;
++ new_item_p->get_subconv = get_subconv;
++ new_item_p->rcu_free = NULL;
++ if ((flags & RSBAC_LIST_DEF_DATA)
++ && (info_p->data_size)
++ ) {
++ new_item_p->def_data = rsbac_kmalloc_unlocked(info_p->data_size);
++ if (new_item_p->def_data)
++ memcpy(new_item_p->def_data, def_data,
++ info_p->data_size);
++ else {
++ rsbac_kfree(new_item_p->hashed);
++ rsbac_sfree(lol_reg_item_slab, new_item_p);
++ return NULL;
++ }
++ }
++ if ((flags & RSBAC_LIST_DEF_SUBDATA)
++ && (info_p->subdata_size)
++ ) {
++ new_item_p->def_subdata =
++ rsbac_kmalloc_unlocked(info_p->subdata_size);
++ if (new_item_p->def_subdata)
++ memcpy(new_item_p->def_subdata, def_subdata,
++ info_p->subdata_size);
++ else {
++ if (new_item_p->def_data)
++ rsbac_kfree(new_item_p->def_data);
++ rsbac_kfree(new_item_p->hashed);
++ rsbac_sfree(lol_reg_item_slab, new_item_p);
++ return NULL;
++ }
++ }
++ if (name) {
++ strncpy(new_item_p->name, name, RSBAC_LIST_MAX_FILENAME);
++ new_item_p->name[RSBAC_LIST_MAX_FILENAME] = 0;
++ } else {
++ strcpy(new_item_p->name, RSBAC_LIST_NONAME);
++ }
++ new_item_p->nr_hashes = nr_hashes;
++ if (flags & RSBAC_LIST_NO_MAX) {
++ new_item_p->max_items_per_hash = RSBAC_LIST_MAX_NR_ITEMS_LIMIT;
++ new_item_p->max_subitems = RSBAC_LIST_MAX_NR_ITEMS_LIMIT;
++ } else {
++ new_item_p->max_items_per_hash = RSBAC_LIST_MAX_NR_ITEMS;
++ new_item_p->max_subitems = RSBAC_LIST_MAX_NR_SUBITEMS;
++ }
++ new_item_p->hash_function = hash_function;
++ if (old_name_base) {
++ strncpy(new_item_p->old_name_base, old_name_base, RSBAC_LIST_MAX_FILENAME);
++ new_item_p->old_name_base[RSBAC_LIST_MAX_FILENAME] = 0;
++ } else
++ new_item_p->old_name_base[0] = 0;
++ new_item_p->device = device;
++ spin_lock_init(&new_item_p->lock);
++ if (flags & RSBAC_LIST_OWN_SLAB) {
++ new_item_p->slabname = rsbac_kmalloc(RSBAC_MAX_SLABNAME);
++ if (!new_item_p->slabname) {
++			if (new_item_p->def_data)
++				rsbac_kfree(new_item_p->def_data);
++			if (new_item_p->def_subdata)
++				rsbac_kfree(new_item_p->def_subdata);
++			rsbac_kfree(new_item_p->hashed);
++			rsbac_sfree(lol_reg_item_slab, new_item_p);
++ return NULL;
++ }
++ new_item_p->subslabname = rsbac_kmalloc(RSBAC_MAX_SLABNAME);
++ if (!new_item_p->subslabname) {
++			if (new_item_p->def_data)
++				rsbac_kfree(new_item_p->def_data);
++			if (new_item_p->def_subdata)
++				rsbac_kfree(new_item_p->def_subdata);
++			if (new_item_p->slabname)
++				rsbac_kfree(new_item_p->slabname);
++			rsbac_kfree(new_item_p->hashed);
++			rsbac_sfree(lol_reg_item_slab, new_item_p);
++ return NULL;
++ }
++ if (device != RSBAC_AUTO_DEV) {
++ snprintf(new_item_p->slabname,
++ RSBAC_MAX_SLABNAME,
++ "%s-%02u:%02u",
++ name,
++ RSBAC_MAJOR(device), RSBAC_MINOR(device));
++ snprintf(new_item_p->subslabname,
++ RSBAC_MAX_SLABNAME,
++ "%s-s-%02u:%02u",
++ name,
++ RSBAC_MAJOR(device), RSBAC_MINOR(device));
++ } else {
++ strncpy(new_item_p->slabname, name, RSBAC_MAX_SLABNAME);
++ snprintf(new_item_p->subslabname,
++ RSBAC_MAX_SLABNAME,
++ "%s-s",
++ name);
++ }
++ new_item_p->slabname[RSBAC_MAX_SLABNAME - 1] = 0;
++ new_item_p->subslabname[RSBAC_MAX_SLABNAME - 1] = 0;
++ new_item_p->slab = rsbac_slab_create(new_item_p->slabname,
++ sizeof(struct rsbac_list_lol_item_t) + info_p->desc_size + info_p->data_size);
++ new_item_p->subslab = rsbac_slab_create(new_item_p->subslabname,
++ sizeof(struct rsbac_list_item_t) + info_p->subdesc_size + info_p->subdata_size);
++ }
++ lockdep_set_class(&new_item_p->lock, &list_lock_class);
++ new_item_p->dirty = FALSE;
++ if (flags & RSBAC_LIST_NO_WRITE)
++ new_item_p->no_write = TRUE;
++ else
++ new_item_p->no_write = FALSE;
++ new_item_p->self = new_item_p;
++ return new_item_p;
++}
++
++/* locking needed */
++static struct rsbac_list_lol_reg_item_t *add_lol_reg(struct
++ rsbac_list_lol_reg_item_t
++ *new_item_p)
++{
++ if (!lol_reg_head.head) {
++ new_item_p->prev = NULL;
++ new_item_p->next = NULL;
++ rcu_assign_pointer(lol_reg_head.head, new_item_p);
++ rcu_assign_pointer(lol_reg_head.tail, new_item_p);
++ rcu_assign_pointer(lol_reg_head.curr, new_item_p);
++ lol_reg_head.count = 1;
++ } else {
++ new_item_p->prev = lol_reg_head.tail;
++ new_item_p->next = NULL;
++ rcu_assign_pointer(lol_reg_head.tail->next, new_item_p);
++ rcu_assign_pointer(lol_reg_head.tail, new_item_p);
++ rcu_assign_pointer(lol_reg_head.curr, new_item_p);
++ lol_reg_head.count++;
++ }
++ return new_item_p;
++}
++
++/* Removing items */
++
++/* Call spinlocked */
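++/* Unlink one item from its hash chain with rcu_assign_pointer() so that
++ * lockless readers always see a consistent chain; the memory itself is
++ * released through the rcu_free() helper. */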
++static inline void do_remove_item(struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_item_t *item_p,
++ u_int hash)
++{
++ if (!list || !item_p)
++ return;
++
++ /* curr is no longer valid -> reset */
++ if (list->hashed[hash].curr == item_p)
++ rcu_assign_pointer(list->hashed[hash].curr, NULL);
++ if ((list->hashed[hash].head == item_p)) { /* item is head */
++ if ((list->hashed[hash].tail == item_p)) { /* item is head and tail = only item -> list will be empty */
++ rcu_assign_pointer(list->hashed[hash].head, NULL);
++ rcu_assign_pointer(list->hashed[hash].tail, NULL);
++ } else { /* item is head, but not tail -> next item becomes head */
++ rcu_assign_pointer(item_p->next->prev, NULL);
++ rcu_assign_pointer(list->hashed[hash].head, item_p->next);
++ }
++ } else { /* item is not head */
++ if ((list->hashed[hash].tail == item_p)) { /*item is not head, but tail -> previous item becomes tail */
++ rcu_assign_pointer(item_p->prev->next, NULL);
++ rcu_assign_pointer(list->hashed[hash].tail, item_p->prev);
++ } else { /* item is neither head nor tail -> item is cut out */
++ rcu_assign_pointer(item_p->prev->next, item_p->next);
++ rcu_assign_pointer(item_p->next->prev, item_p->prev);
++ }
++ }
++ /* adjust counter */
++ list->hashed[hash].count--;
++ /* now we can remove the item from memory */
++ rcu_free(list, item_p);
++}
++
++/* Call spinlocked */
++static void remove_item(struct rsbac_list_reg_item_t *list, void *desc)
++{
++ struct rsbac_list_item_t *item_p;
++ u_int hash = 0;
++
++ if (!list || !desc)
++ return;
++ /* first we must locate the item. */
++ if ((item_p = lookup_item_locked(list, desc))) {
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ do_remove_item(list, item_p, hash);
++ }
++}
++
++/* Call spinlocked */
++static void remove_all_items(struct rsbac_list_reg_item_t *list, u_int hash)
++{
++ struct rsbac_list_item_t *item_p;
++
++ if (!list || !list->hashed)
++ return;
++ /* cleanup all items */
++ item_p = list->hashed[hash].head;
++ rcu_assign_pointer(list->hashed[hash].curr, NULL);
++ rcu_assign_pointer(list->hashed[hash].head, NULL);
++ rcu_assign_pointer(list->hashed[hash].tail, NULL);
++ list->hashed[hash].count = 0;
++ rcu_free_item_chain(list, item_p);
++}
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++/* Call spinlocked */
++static void ta_do_remove_item(struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_item_t *item_p,
++ u_int hash)
++{
++ if (!list || !item_p)
++ return;
++
++ /* curr is no longer valid -> reset */
++ if (list->hashed[hash].ta_curr == item_p)
++ rcu_assign_pointer(list->hashed[hash].ta_curr, NULL);
++ if ((list->hashed[hash].ta_head == item_p)) { /* item is head */
++ if ((list->hashed[hash].ta_tail == item_p)) { /* item is head and tail = only item -> list will be empty */
++ rcu_assign_pointer(list->hashed[hash].ta_head, NULL);
++ rcu_assign_pointer(list->hashed[hash].ta_tail, NULL);
++ } else { /* item is head, but not tail -> next item becomes head */
++ rcu_assign_pointer(item_p->next->prev, NULL);
++ rcu_assign_pointer(list->hashed[hash].ta_head, item_p->next);
++ }
++ } else { /* item is not head */
++ if ((list->hashed[hash].ta_tail == item_p)) { /*item is not head, but tail -> previous item becomes tail */
++ rcu_assign_pointer(item_p->prev->next, NULL);
++ rcu_assign_pointer(list->hashed[hash].ta_tail, item_p->prev);
++ } else { /* item is neither head nor tail -> item is cut out */
++ rcu_assign_pointer(item_p->prev->next, item_p->next);
++ rcu_assign_pointer(item_p->next->prev, item_p->prev);
++ }
++ }
++ /* adjust counter */
++ list->hashed[hash].ta_count--;
++ /* now we can remove the item from memory */
++ rcu_free(list, item_p);
++}
++
++/* Call spinlocked */
++static void ta_remove_item(rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_reg_item_t *list, void *desc)
++{
++ struct rsbac_list_item_t *item_p;
++ u_int hash = 0;
++
++ if (!list || !desc)
++ return;
++ if (!ta_number)
++ return remove_item(list, desc);
++ /* first we must locate the item. */
++ if ((item_p = ta_lookup_item_locked(ta_number, list, desc))) {
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ ta_do_remove_item(list, item_p, hash);
++ }
++}
++
++/* Call spinlocked */
++static void ta_remove_all_items(struct rsbac_list_reg_item_t *list, u_int hash)
++{
++ struct rsbac_list_item_t *item_p;
++
++ /* cleanup all items */
++ item_p = list->hashed[hash].ta_head;
++ rcu_assign_pointer(list->hashed[hash].ta_curr, NULL);
++ rcu_assign_pointer(list->hashed[hash].ta_head, NULL);
++ rcu_assign_pointer(list->hashed[hash].ta_tail, NULL);
++ list->hashed[hash].ta_count = 0;
++ rcu_free_item_chain(list, item_p);
++}
++#endif
++
++/* Call spinlocked */
++static void do_remove_lol_subitem(struct rsbac_list_lol_item_t *sublist,
++ struct rsbac_list_item_t *item_p)
++{
++ if (!sublist || !item_p)
++ return;
++
++ /* curr is no longer valid -> reset */
++ if (sublist->curr == item_p)
++ rcu_assign_pointer(sublist->curr, NULL);
++ if ((sublist->head == item_p)) { /* item is head */
++ if ((sublist->tail == item_p)) { /* item is head and tail = only item -> list will be empty */
++ rcu_assign_pointer(sublist->head, NULL);
++ rcu_assign_pointer(sublist->tail, NULL);
++ } else { /* item is head, but not tail -> next item becomes head */
++ rcu_assign_pointer(item_p->next->prev, NULL);
++ rcu_assign_pointer(sublist->head, item_p->next);
++ }
++ } else { /* item is not head */
++ if ((sublist->tail == item_p)) { /*item is not head, but tail -> previous item becomes tail */
++ rcu_assign_pointer(item_p->prev->next, NULL);
++ rcu_assign_pointer(sublist->tail, item_p->prev);
++ } else { /* item is neither head nor tail -> item is cut out */
++ rcu_assign_pointer(item_p->prev->next, item_p->next);
++ rcu_assign_pointer(item_p->next->prev, item_p->prev);
++ }
++ }
++ /* adjust counter */
++ sublist->count--;
++ /* free call is in calling function */
++}
++
++/* Call spinlocked */
++static void remove_lol_subitem(struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_item_t *sublist,
++ void *subdesc)
++{
++ struct rsbac_list_item_t *subitem_p;
++
++ if (!list || !sublist || !subdesc)
++ return;
++
++ /* first we must locate the item. */
++ if ((subitem_p = lookup_lol_subitem_locked(list, sublist, subdesc))) {
++ do_remove_lol_subitem(sublist, subitem_p);
++ rcu_free_lol_sub(list, subitem_p);
++ }
++}
++
++
++/* Call spinlocked */
++static void do_remove_lol_item(struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_item_t *item_p,
++ u_int hash)
++{
++ if (!list || !item_p)
++ return;
++
++ /* curr is no longer valid -> reset */
++ if (list->hashed[hash].curr == item_p)
++ rcu_assign_pointer(list->hashed[hash].curr, NULL);
++ if ((list->hashed[hash].head == item_p)) { /* item is head */
++ if ((list->hashed[hash].tail == item_p)) { /* item is head and tail = only item -> list will be empty */
++ rcu_assign_pointer(list->hashed[hash].head, NULL);
++ rcu_assign_pointer(list->hashed[hash].tail, NULL);
++ } else { /* item is head, but not tail -> next item becomes head */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (!item_p->next) { /* list corrupted! */
++ rsbac_printk(KERN_WARNING "do_remove_lol_item(): list %s corrupted: invalid next!\n",
++ list->name);
++ } else
++#endif
++ {
++ rcu_assign_pointer(item_p->next->prev, NULL);
++ rcu_assign_pointer(list->hashed[hash].head, item_p->next);
++ }
++ }
++ } else { /* item is not head */
++ if ((list->hashed[hash].tail == item_p)) { /*item is not head, but tail -> previous item becomes tail */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (!item_p->prev) { /* list corrupted! */
++ rsbac_printk(KERN_WARNING "do_remove_lol_item(): list %s corrupted: invalid prev!\n",
++ list->name);
++ } else
++#endif
++ {
++ rcu_assign_pointer(item_p->prev->next, NULL);
++ rcu_assign_pointer(list->hashed[hash].tail, item_p->prev);
++ }
++ } else { /* item is neither head nor tail -> item is cut out */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (!item_p->prev) { /* list corrupted! */
++ rsbac_printk(KERN_WARNING "do_remove_lol_item(): list %s corrupted: invalid prev!\n",
++ list->name);
++ } else if (!item_p->next) { /* list corrupted! */
++ rsbac_printk(KERN_WARNING "do_remove_lol_item(): list %s corrupted: invalid next!\n",
++ list->name);
++ } else
++#endif
++ {
++ rcu_assign_pointer(item_p->prev->next, item_p->next);
++ rcu_assign_pointer(item_p->next->prev, item_p->prev);
++ }
++ }
++ }
++ /* adjust counter */
++ list->hashed[hash].count--;
++
++ rcu_free_lol_subitem_chain(list, item_p->head);
++ rcu_free_lol(list, item_p);
++}
++
++/* Call spinlocked */
++static void remove_lol_item(struct rsbac_list_lol_reg_item_t *list,
++ void *desc)
++{
++ struct rsbac_list_lol_item_t *item_p;
++ u_int hash = 0;
++
++ if (!list || !desc)
++ return;
++
++ /* first we must locate the item. */
++ if ((item_p = lookup_lol_item_locked(list, desc))) {
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ do_remove_lol_item(list, item_p, hash);
++ }
++}
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++/* Call spinlocked */
++static void ta_do_remove_lol_item(struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_item_t *item_p,
++ u_int hash)
++{
++ if (!list || !item_p)
++ return;
++
++ /* curr is no longer valid -> reset */
++ if (list->hashed[hash].ta_curr == item_p)
++ rcu_assign_pointer(list->hashed[hash].ta_curr, NULL);
++ if ((list->hashed[hash].ta_head == item_p)) { /* item is head */
++ if ((list->hashed[hash].ta_tail == item_p)) { /* item is head and tail = only item -> list will be empty */
++ rcu_assign_pointer(list->hashed[hash].ta_head, NULL);
++ rcu_assign_pointer(list->hashed[hash].ta_tail, NULL);
++ } else { /* item is head, but not tail -> next item becomes head */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (!item_p->next) { /* list corrupted! */
++ rsbac_printk(KERN_WARNING "do_remove_lol_item(): list %s corrupted: invalid next!\n",
++ list->name);
++ } else
++#endif
++ {
++ rcu_assign_pointer(item_p->next->prev, NULL);
++ rcu_assign_pointer(list->hashed[hash].ta_head, item_p->next);
++ }
++ }
++ } else { /* item is not head */
++ if ((list->hashed[hash].ta_tail == item_p)) { /*item is not head, but tail -> previous item becomes tail */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (!item_p->prev) { /* list corrupted! */
++ rsbac_printk(KERN_WARNING "do_remove_lol_item(): list %s corrupted: invalid prev!\n",
++ list->name);
++ } else
++#endif
++ {
++ rcu_assign_pointer(item_p->prev->next, NULL);
++ rcu_assign_pointer(list->hashed[hash].ta_tail, item_p->prev);
++ }
++ } else { /* item is neither head nor tail -> item is cut out */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (!item_p->prev) { /* list corrupted! */
++ rsbac_printk(KERN_WARNING "do_remove_lol_item(): list %s corrupted: invalid prev!\n",
++ list->name);
++ } else if (!item_p->next) { /* list corrupted! */
++ rsbac_printk(KERN_WARNING "do_remove_lol_item(): list %s corrupted: invalid next!\n",
++ list->name);
++ } else
++#endif
++ {
++ rcu_assign_pointer(item_p->prev->next, item_p->next);
++ rcu_assign_pointer(item_p->next->prev, item_p->prev);
++ }
++ }
++ }
++ /* adjust counter */
++ list->hashed[hash].ta_count--;
++
++ rcu_free_lol_subitem_chain(list, item_p->head);
++ rcu_free_lol(list, item_p);
++}
++
++/* Call spinlocked */
++static void ta_remove_lol_item(rsbac_list_ta_number_t ta_number,
++ struct rsbac_list_lol_reg_item_t *list,
++ void *desc)
++{
++ struct rsbac_list_lol_item_t *item_p;
++ u_int hash = 0;
++
++ if (!list || !desc)
++ return;
++
++ /* first we must locate the item. */
++ if ((item_p = ta_lookup_lol_item_locked(ta_number, list, desc))) {
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ ta_do_remove_lol_item(list, item_p, hash);
++ }
++}
++#endif
++
++/* Call spinlocked */
++static void remove_all_lol_subitems(struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_item_t *sublist)
++{
++ struct rsbac_list_item_t *subitem_p;
++
++ subitem_p = sublist->head;
++ rcu_assign_pointer(sublist->curr, NULL);
++ rcu_assign_pointer(sublist->head, NULL);
++ rcu_assign_pointer(sublist->tail, NULL);
++ sublist->count = 0;
++ rcu_free_lol_subitem_chain(list, subitem_p);
++}
++
++/* Call spinlocked */
++static void remove_all_lol_items(struct rsbac_list_lol_reg_item_t *list, u_int hash)
++{
++ struct rsbac_list_lol_item_t *item_p;
++
++ if (!list || !list->hashed)
++ return;
++ item_p = list->hashed[hash].head;
++ rcu_assign_pointer(list->hashed[hash].curr, NULL);
++ rcu_assign_pointer(list->hashed[hash].head, NULL);
++ rcu_assign_pointer(list->hashed[hash].tail, NULL);
++ list->hashed[hash].count = 0;
++ rcu_free_lol_item_chain(list, item_p);
++}
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++/* Call spinlocked */
++static void ta_remove_all_lol_items(struct rsbac_list_lol_reg_item_t *list,
++ u_int hash)
++{
++ struct rsbac_list_lol_item_t *item_p;
++
++ /* cleanup all items */
++ item_p = list->hashed[hash].ta_head;
++ rcu_assign_pointer(list->hashed[hash].ta_curr, NULL);
++ rcu_assign_pointer(list->hashed[hash].ta_head, NULL);
++ rcu_assign_pointer(list->hashed[hash].ta_tail, NULL);
++ list->hashed[hash].ta_count = 0;
++ rcu_free_lol_item_chain(list, item_p);
++}
++#endif
++
++/* Remove registration items */
++
++/* no locking needed */
++static void clear_reg(struct rsbac_list_reg_item_t *reg_item_p)
++{
++ if (reg_item_p) {
++ int i;
++ struct rsbac_list_item_t *item_p;
++ struct rsbac_list_item_t *new_item_p;
++
++ /* now we can remove the item from memory */
++ synchronize_rcu();
++ for (i=0; i<reg_item_p->nr_hashes; i++) {
++ item_p = reg_item_p->hashed[i].head;
++ while(item_p) {
++ new_item_p = item_p->next;
++ rsbac_sfree(reg_item_p->slab, item_p);
++ item_p = new_item_p;
++ }
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if(reg_item_p->hashed[i].ta_copied) {
++ item_p = reg_item_p->hashed[i].ta_head;
++ while(item_p) {
++ new_item_p = item_p->next;
++ rsbac_sfree(reg_item_p->slab, item_p);
++ item_p = new_item_p;
++ }
++ }
++#endif
++ }
++ if (reg_item_p->def_data)
++ rsbac_kfree(reg_item_p->def_data);
++ if (reg_item_p->slab)
++ rsbac_slab_destroy(reg_item_p->slab);
++ if (reg_item_p->slabname)
++ rsbac_kfree(reg_item_p->slabname);
++ if (reg_item_p->hashed)
++ rsbac_kfree(reg_item_p->hashed);
++ rsbac_sfree(reg_item_slab, reg_item_p);
++ }
++}
++
++/* locking needed */
++static void remove_reg(struct rsbac_list_reg_item_t *reg_item_p)
++{
++ /* first we must locate the item. */
++ if (reg_item_p && (reg_item_p->self == reg_item_p)) {/* item found and valid */
++ /* protect against reuse */
++ reg_item_p->self = NULL;
++ if ((reg_head.head == reg_item_p)) { /* item is head */
++ if ((reg_head.tail == reg_item_p)) { /* item is head and tail = only item -> list will be empty */
++ rcu_assign_pointer(reg_head.head, NULL);
++ rcu_assign_pointer(reg_head.tail, NULL);
++ } else { /* item is head, but not tail -> next item becomes head */
++ reg_item_p->next->prev = NULL;
++ rcu_assign_pointer(reg_head.head, reg_item_p->next);
++ }
++ } else { /* item is not head */
++ if ((reg_head.tail == reg_item_p)) { /*item is not head, but tail -> previous item becomes tail */
++ reg_item_p->prev->next = NULL;
++ rcu_assign_pointer(reg_head.tail, reg_item_p->prev);
++ } else { /* item is neither head nor tail -> item is cut out */
++ reg_item_p->prev->next = reg_item_p->next;
++ reg_item_p->next->prev = reg_item_p->prev;
++ }
++ }
++
++ /* curr is no longer valid -> reset */
++ reg_head.curr = NULL;
++ /* adjust counter */
++ reg_head.count--;
++ } /* end of if: item was found */
++}
++
++/* no locking needed */
++static void clear_lol_reg(struct rsbac_list_lol_reg_item_t *reg_item_p)
++{
++ int i;
++
++ if (reg_item_p) {
++ struct rsbac_list_lol_item_t *lol_item_p;
++ struct rsbac_list_lol_item_t *new_lol_item_p;
++ struct rsbac_list_item_t * lol_subitem_p;
++ struct rsbac_list_item_t * new_lol_subitem_p;
++
++ /* now we can remove the item from memory */
++ synchronize_rcu();
++ for (i=0; i<reg_item_p->nr_hashes; i++) {
++ lol_item_p = reg_item_p->hashed[i].head;
++ while(lol_item_p) {
++ lol_subitem_p = lol_item_p->head;
++ while (lol_subitem_p) {
++ new_lol_subitem_p = lol_subitem_p->next;
++ rsbac_sfree(reg_item_p->subslab, lol_subitem_p);
++ lol_subitem_p = new_lol_subitem_p;
++ }
++ new_lol_item_p = lol_item_p->next;
++ rsbac_sfree(reg_item_p->slab, lol_item_p);
++ lol_item_p = new_lol_item_p;
++ }
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if(reg_item_p->hashed[i].ta_copied) {
++ lol_item_p = reg_item_p->hashed[i].ta_head;
++ while(lol_item_p) {
++ lol_subitem_p = lol_item_p->head;
++ while (lol_subitem_p) {
++ new_lol_subitem_p = lol_subitem_p->next;
++ rsbac_sfree(reg_item_p->subslab, lol_subitem_p);
++ lol_subitem_p = new_lol_subitem_p;
++ }
++ new_lol_item_p = lol_item_p->next;
++ rsbac_sfree(reg_item_p->slab, lol_item_p);
++ lol_item_p = new_lol_item_p;
++ }
++ }
++#endif
++ }
++ if (reg_item_p->def_data)
++ rsbac_kfree(reg_item_p->def_data);
++ if (reg_item_p->def_subdata)
++ rsbac_kfree(reg_item_p->def_subdata);
++ if (reg_item_p->slab)
++ rsbac_slab_destroy(reg_item_p->slab);
++ if (reg_item_p->subslab)
++ rsbac_slab_destroy(reg_item_p->subslab);
++ if (reg_item_p->slabname)
++ rsbac_kfree(reg_item_p->slabname);
++ if (reg_item_p->subslabname)
++ rsbac_kfree(reg_item_p->subslabname);
++ if (reg_item_p->hashed)
++ rsbac_kfree(reg_item_p->hashed);
++ rsbac_sfree(lol_reg_item_slab, reg_item_p);
++ }
++}
++
++/* locking needed */
++static void remove_lol_reg(struct rsbac_list_lol_reg_item_t *reg_item_p)
++{
++ /* first we must locate the item. */
++ if (reg_item_p && (reg_item_p->self == reg_item_p)) {/* found */
++ /* protect against reuse */
++ reg_item_p->self = NULL;
++ if ((lol_reg_head.head == reg_item_p)) { /* item is head */
++ if ((lol_reg_head.tail == reg_item_p)) { /* item is head and tail = only item -> list will be empty */
++ rcu_assign_pointer(lol_reg_head.head, NULL);
++ rcu_assign_pointer(lol_reg_head.tail, NULL);
++ } else { /* item is head, but not tail -> next item becomes head */
++ reg_item_p->next->prev = NULL;
++ rcu_assign_pointer(lol_reg_head.head, reg_item_p->next);
++ }
++ } else { /* item is not head */
++ if ((lol_reg_head.tail == reg_item_p)) { /*item is not head, but tail -> previous item becomes tail */
++ reg_item_p->prev->next = NULL;
++ rcu_assign_pointer(lol_reg_head.tail, reg_item_p->prev);
++ } else { /* item is neither head nor tail -> item is cut out */
++ reg_item_p->prev->next = reg_item_p->next;
++ reg_item_p->next->prev = reg_item_p->prev;
++ }
++ }
++
++ /* curr is no longer valid -> reset */
++ rcu_assign_pointer(lol_reg_head.curr, NULL);
++ /* adjust counter */
++ lol_reg_head.count--;
++ } /* end of if: item was found */
++}
++
++#define touch(x)
++
++#define lol_touch(x)
++
++/********************/
++/* Read/Write */
++/********************/
++
++/* call unlocked */
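++/* Read one saved list from disk. The on-disk order, as read below, is:
++ * list version, timestamp, list info, lastchange (current disk version
++ * only), item count, then per item max_age followed by desc and data.
++ * Items of older list versions are converted through get_conv(); if
++ * reading fails and rsbac_list_recover is set, the backup file (list
++ * name with "b" appended) is tried. */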
++static int do_read_list(struct rsbac_list_reg_item_t *list,
++ char * name,
++ rsbac_boolean_t backup)
++{
++ struct file *file_p;
++ int err = 0;
++ int tmperr;
++ int converr;
++ rsbac_version_t list_version;
++ u_long read_count = 0;
++ char *old_buf = NULL;
++ char *new_buf = NULL;
++ char *old_data;
++ char *new_data;
++ struct rsbac_list_info_t *list_info_p;
++ rsbac_list_count_t list_count;
++ rsbac_time_t timestamp;
++ struct rsbac_nanotime_t lastchange;
++ rsbac_time_t max_age = 0;
++ rsbac_list_conv_function_t *conv = NULL;
++ rsbac_boolean_t timeout = FALSE;
++ mm_segment_t oldfs;
++
++ list_info_p = rsbac_kmalloc_unlocked(sizeof(*list_info_p));
++ if (!list_info_p)
++ return -RSBAC_ENOMEM;
++ /* open file */
++ if ((err = rsbac_read_open(name, &file_p, list->device))) {
++ goto double_free;
++ }
++
++ /* OK, now we can start reading */
++ /* There is a read function for this file, so check info and read as
++ * many items as possible. A positive return value means a read success,
++ * 0 end of file and a negative value an error. */
++
++ /* Set current user space to kernel space, because read() writes */
++ /* to user space */
++ oldfs = get_fs();
++ set_fs(KERNEL_DS);
++
++ /* check gen-list on-disk version */
++ tmperr = file_p->f_path.dentry->d_inode->i_fop->read(file_p,
++ (__u8 *) & list_version,
++ sizeof(list_version), &file_p->f_pos);
++ set_fs(oldfs);
++ /* error? */
++ if (tmperr < sizeof(list_version)) {
++ rsbac_printk(KERN_WARNING "do_read_list(): read error %i from file when reading list version!\n", tmperr);
++ err = -RSBAC_EREADFAILED;
++ goto end_read;
++ }
++ /* if wrong list on-disk version, fail */
++ switch (list_version) {
++ case RSBAC_LIST_DISK_VERSION:
++ case RSBAC_LIST_DISK_OLD_VERSION:
++ break;
++ default:
++ rsbac_printk(KERN_WARNING "do_read_list(): wrong on-disk list version %u in file %s, expected %u - error!\n",
++ list_version,
++ name, RSBAC_LIST_DISK_VERSION);
++ err = -RSBAC_EREADFAILED;
++ goto end_read;
++ }
++
++ /* get timestamp */
++ set_fs(KERNEL_DS);
++ tmperr = file_p->f_op->read(file_p,
++ (__u8 *) & timestamp,
++ sizeof(timestamp), &file_p->f_pos);
++ set_fs(oldfs);
++ /* error? */
++ if (tmperr < sizeof(timestamp)) {
++ rsbac_printk(KERN_WARNING "do_read_list(): timestamp read error %i from file %s!\n",
++ tmperr,
++ name);
++ err = -RSBAC_EREADFAILED;
++ goto end_read;
++ }
++
++ /* get list info */
++ set_fs(KERNEL_DS);
++ tmperr = file_p->f_op->read(file_p,
++ (__u8 *) list_info_p,
++ sizeof(*list_info_p), &file_p->f_pos);
++ set_fs(oldfs);
++ /* error? */
++ if (tmperr < sizeof(*list_info_p)) {
++ rsbac_printk(KERN_WARNING "do_read_list(): list info read error %i from file %s!\n",
++ tmperr,
++ name);
++ err = -RSBAC_EREADFAILED;
++ goto end_read;
++ }
++
++ /* list timed out? System time is measured in seconds. */
++ if (list_info_p->max_age
++ && (timestamp + list_info_p->max_age) <= RSBAC_CURRENT_TIME)
++ timeout = TRUE;
++
++ /* Valid key? */
++ if (list_info_p->key != list->info.key) {
++ if (timeout) {
++ rsbac_printk(KERN_WARNING "do_read_list(): accessing timed out list %s with wrong key, ignoring old contents!\n",
++ name);
++ goto end_read;
++ } else {
++ rsbac_printk(KERN_WARNING "do_read_list(): try to access list %s with wrong key!\n",
++ name);
++ err = -EPERM;
++ goto end_read;
++ }
++ }
++
++ /* skip the rest, if ignore is requested */
++ if (list->flags & RSBAC_LIST_IGNORE_OLD)
++ goto end_read;
++
++ switch (list_version) {
++ case RSBAC_LIST_DISK_VERSION:
++ set_fs(KERNEL_DS);
++ tmperr = file_p->f_op->read(file_p,
++ (char *) &lastchange,
++ sizeof(lastchange),
++ &file_p->f_pos);
++ set_fs(oldfs);
++ /* error? */
++ if (tmperr < sizeof(lastchange)) {
++ rsbac_printk(KERN_WARNING "do_read_list(): lastchange read error %i from file %s!\n",
++ tmperr,
++ name);
++ err = -RSBAC_EREADFAILED;
++ goto end_read;
++ }
++ break;
++ case RSBAC_LIST_DISK_OLD_VERSION:
++ break;
++ default:
++ break;
++ }
++ /* if wrong list version, try to get_conv */
++ if (list_info_p->version != list->info.version) {
++ if (list->get_conv)
++ conv = list->get_conv(list_info_p->version);
++ if (!conv) {
++ if (timeout) {
++ rsbac_printk(KERN_WARNING "do_read_list(): accessing timed out list %s without conversion function, ignoring old contents!\n",
++ name);
++ goto end_read;
++ } else {
++ /* complain and set error, if ignore is not requested */
++ if (!
++ (list->
++ flags &
++ RSBAC_LIST_IGNORE_UNSUPP_VERSION)) {
++ rsbac_printk(KERN_WARNING "do_read_list(): cannot convert list version %u of file %s to version %u!\n",
++ list_info_p->version,
++ name,
++ list->info.version);
++ err = -RSBAC_EINVALIDVERSION;
++ }
++ goto end_read;
++ }
++ } else {
++ rsbac_printk(KERN_WARNING "do_read_list(): converting list version %u of file %s on device %02u:%02u to version %u!\n",
++ list_info_p->version,
++ name,
++ RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device),
++ list->info.version);
++ }
++ } else { /* same version needs same sizes */
++
++ if ((list_info_p->desc_size != list->info.desc_size)
++ || (list_info_p->data_size != list->info.data_size)
++ ) {
++ if (timeout) {
++ rsbac_printk(KERN_WARNING "do_read_list(): accessing timed out list %s with wrong desc or data size, ignoring old contents!\n",
++ name);
++ goto end_read;
++ } else {
++ rsbac_printk(KERN_WARNING "do_read_list(): desc or data size mismatch on list %s!\n",
++ name);
++ err = -RSBAC_EINVALIDLIST;
++ goto end_read;
++ }
++ }
++ }
++
++ /* get list count */
++ set_fs(KERNEL_DS);
++ tmperr = file_p->f_op->read(file_p,
++ (__u8 *) & list_count,
++ sizeof(list_count), &file_p->f_pos);
++ set_fs(oldfs);
++ /* error? */
++ if (tmperr < sizeof(list_count)) {
++ rsbac_printk(KERN_WARNING "do_read_list(): list count read error %i from file %s!\n",
++ tmperr,
++ name);
++ err = -RSBAC_EREADFAILED;
++ goto end_read;
++ }
++
++ /* alloc mem for old and converted item */
++ old_buf =
++ rsbac_kmalloc_unlocked(list_info_p->desc_size + list_info_p->data_size);
++ if (!old_buf) {
++ rsbac_printk(KERN_WARNING "do_read_list(): cannot allocate memory!\n");
++ err = -RSBAC_ENOMEM;
++ goto end_read;
++ }
++ new_buf =
++ rsbac_kmalloc_unlocked(list->info.desc_size + list->info.data_size);
++ if (!new_buf) {
++ rsbac_printk(KERN_WARNING "do_read_list(): cannot allocate memory!\n");
++ err = -RSBAC_ENOMEM;
++ goto end_read;
++ }
++ /* calculate data pointers */
++ if (list_info_p->data_size)
++ old_data = old_buf + list_info_p->desc_size;
++ else
++ old_data = NULL;
++ if (list->info.data_size)
++ new_data = new_buf + list->info.desc_size;
++ else
++ new_data = NULL;
++
++ /* actual reading */
++ do {
++ set_fs(KERNEL_DS);
++ tmperr = file_p->f_op->read(file_p,
++ (char *) &max_age,
++ sizeof(max_age),
++ &file_p->f_pos);
++ set_fs(oldfs);
++ if (conv) {
++ set_fs(KERNEL_DS);
++ tmperr = file_p->f_op->read(file_p,
++ old_buf,
++ list_info_p->desc_size +
++ list_info_p->data_size,
++ &file_p->f_pos);
++ set_fs(oldfs);
++ if (tmperr > 0) { /* convert */
++ converr = conv(old_buf, old_data,
++ new_buf, new_data);
++ if (converr)
++ tmperr = converr;
++ }
++ } else {
++ set_fs(KERNEL_DS);
++ tmperr = file_p->f_op->read(file_p,
++ new_buf,
++ list->info.desc_size +
++ list->info.data_size,
++ &file_p->f_pos);
++ set_fs(oldfs);
++ }
++ /* if successful, add item */
++ if (tmperr > 0) {
++ /* no need to lock, list is not yet published */
++ if (!backup || !lookup_item_locked(list, new_buf))
++ add_item(list, max_age, new_buf, new_data);
++ /* allow access */
++ read_count++;
++/*
++ rsbac_pr_debug(lists, "read item %i\n", user_aci.id);
++*/
++ }
++ }
++ while (tmperr > 0); /* end of do */
++
++ if (tmperr < 0) {
++ rsbac_printk(KERN_WARNING "do_read_list(): read error %i from file %s!\n",
++ tmperr,
++ name);
++ err = -RSBAC_EREADFAILED;
++ }
++
++ if (read_count != list_count) {
++ rsbac_printk(KERN_WARNING "do_read_list(): read %lu, expected %u items from file %s!\n",
++ read_count, list_count, name);
++ err = -RSBAC_EREADFAILED;
++ }
++
++end_read:
++ if (old_buf)
++ rsbac_kfree(old_buf);
++ if (new_buf)
++ rsbac_kfree(new_buf);
++
++ rsbac_pr_debug(lists, "%lu entries read.\n", read_count);
++ /* We do not need this file any more */
++ rsbac_read_close(file_p);
++
++double_free:
++ rsbac_kfree(list_info_p);
++
++ if ( err
++ && (err != -RSBAC_ENOTFOUND)
++ && !backup
++ && rsbac_list_recover
++ ) {
++ char * bname;
++
++ rsbac_list_read_errors++;
++ bname = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if (!bname)
++ return -RSBAC_ENOMEM;
++ rsbac_printk(KERN_WARNING "restoring list %s from device %02u:%02u failed with error %s, rsbac_list_recover is set, so retrying with backup list.\n",
++ name,
++ RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device),
++ get_error_name(bname, err));
++ sprintf(bname, "%sb", name);
++ err = do_read_list(list, bname, TRUE);
++ if ( err
++ && (err != -RSBAC_ENOTFOUND)
++ && rsbac_list_recover
++ ) {
++ rsbac_printk(KERN_WARNING "restoring list %s backup from device %02u:%02u failed with error %s, rsbac_list_recover is set, so returning that list is fine.\n",
++ name,
++ RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device),
++ get_error_name(bname, err));
++ err = 0;
++ }
++ list->dirty = TRUE;
++ rsbac_kfree(bname);
++ }
++
++ return err;
++}
++
++/* call unlocked */
++static int read_list(struct rsbac_list_reg_item_t *list)
++{
++ int res;
++ u_int flags;
++
++ flags = list->flags;
++ list->flags |= RSBAC_LIST_NO_MAX;
++ res = do_read_list(list, list->name, FALSE);
++ if((res == -RSBAC_ENOTFOUND) && list->old_name_base[0]) {
++ char name[RSBAC_MAXNAMELEN];
++ int i;
++
++ rsbac_printk(KERN_INFO "read_list(): list %s on device %02u:%02u not found, trying numbered lists 0 to %u with old name base '%s'\n",
++ list->name, MAJOR(list->device), MINOR(list->device), RSBAC_LIST_MAX_OLD_HASH-1, list->old_name_base);
++ for (i=0; i<RSBAC_LIST_MAX_OLD_HASH; i++) {
++ sprintf(name, "%s%u", list->old_name_base, i);
++ res = do_read_list(list, name, FALSE);
++ if(res && (res != -RSBAC_ENOTFOUND))
++ return res;
++ }
++ list->dirty = TRUE;
++ }
++ list->flags = flags;
++ return res;
++}
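++
++/*
++ * For reference, the on-disk record layout consumed by do_read_list()
++ * above (a sketch reconstructed from the read calls, not an
++ * authoritative format description):
++ *
++ *   rsbac_version_t            list_version;  on-disk format version
++ *   rsbac_time_t               timestamp;     write time, checked against max_age
++ *   struct rsbac_list_info_t   info;          key, version, desc/data sizes
++ *   struct rsbac_nanotime_t    lastchange;    RSBAC_LIST_DISK_VERSION only
++ *   rsbac_list_count_t         list_count;    number of items that follow
++ *   then list_count times:
++ *     rsbac_time_t max_age;
++ *     char         desc[info.desc_size];
++ *     char         data[info.data_size];
++ */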
++
++/* call unlocked */
++static int do_read_lol_list(struct rsbac_list_lol_reg_item_t *list,
++ char * name,
++ rsbac_boolean_t backup)
++{
++ struct file *file_p;
++ int err = 0;
++ int tmperr;
++ int converr;
++ rsbac_version_t list_version;
++ u_long read_count = 0;
++ u_long sublen;
++ u_long i;
++ char *old_buf = NULL;
++ char *new_buf = NULL;
++ char *old_data;
++ char *new_data;
++ char *old_subbuf = NULL;
++ char *new_subbuf = NULL;
++ char *old_subdata;
++ char *new_subdata;
++ struct rsbac_list_lol_info_t *list_info_p;
++ rsbac_list_count_t list_count;
++ rsbac_time_t timestamp;
++ struct rsbac_nanotime_t lastchange;
++ rsbac_time_t max_age = 0;
++ rsbac_list_conv_function_t *conv = NULL;
++ rsbac_list_conv_function_t *subconv = NULL;
++ rsbac_boolean_t timeout = FALSE;
++ struct rsbac_list_lol_item_t *item_p;
++ mm_segment_t oldfs;
++
++ list_info_p = rsbac_kmalloc_unlocked(sizeof(*list_info_p));
++ if (!list_info_p)
++ return -RSBAC_ENOMEM;
++ /* open file */
++ if ((err = rsbac_read_open(name, &file_p, list->device))) {
++ goto double_free;
++ }
++
++ /* OK, now we can start reading */
++ /* Check the list info, then read as many items as possible. A positive
++ * return value from read() means success, 0 means end of file, and a
++ * negative value means an error. */
++
++ /* Set current user space to kernel space, because read() writes */
++ /* to user space */
++ oldfs = get_fs();
++ set_fs(KERNEL_DS);
++
++ /* check gen-list on-disk version */
++ tmperr = file_p->f_op->read(file_p,
++ (__u8 *) & list_version,
++ sizeof(list_version), &file_p->f_pos);
++ set_fs(oldfs);
++ /* error? */
++ if (tmperr < sizeof(list_version)) {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): version read error %i from file %s!\n",
++ tmperr,
++ name);
++ err = -RSBAC_EREADFAILED;
++ goto end_read;
++ }
++ /* if wrong list on-disk version, fail */
++ switch (list_version) {
++ case RSBAC_LIST_DISK_VERSION:
++ case RSBAC_LIST_DISK_OLD_VERSION:
++ break;
++ default:
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): wrong on-disk list version %u in file %s, expected %u - error!\n",
++ list_version,
++ name, RSBAC_LIST_DISK_VERSION);
++ err = -RSBAC_EREADFAILED;
++ goto end_read;
++ }
++
++ /* get timestamp */
++ set_fs(KERNEL_DS);
++ tmperr = file_p->f_op->read(file_p,
++ (__u8 *) & timestamp,
++ sizeof(timestamp), &file_p->f_pos);
++ set_fs(oldfs);
++ /* error? */
++ if (tmperr < sizeof(timestamp)) {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): timestamp read error %i from file %s!\n",
++ tmperr,
++ name);
++ err = -RSBAC_EREADFAILED;
++ goto end_read;
++ }
++
++ /* get list info */
++ set_fs(KERNEL_DS);
++ tmperr = file_p->f_op->read(file_p,
++ (__u8 *) list_info_p,
++ sizeof(*list_info_p), &file_p->f_pos);
++ set_fs(oldfs);
++ /* error? */
++ if (tmperr < sizeof(*list_info_p)) {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): list info read error %i from file %s!\n",
++ tmperr,
++ name);
++ err = -RSBAC_EREADFAILED;
++ goto end_read;
++ }
++
++ /* list timed out? System time is measured in seconds. */
++ if (list_info_p->max_age
++ && (timestamp + list_info_p->max_age) <= RSBAC_CURRENT_TIME)
++ timeout = TRUE;
++
++ /* Valid key? */
++ if (list_info_p->key != list->info.key) {
++ if (timeout) {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): accessing timed out list %s with wrong key, ignoring old contents!\n",
++ name);
++ goto end_read;
++ } else {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): try to access list %s with wrong key!\n",
++ name);
++ err = -EPERM;
++ goto end_read;
++ }
++ }
++
++ /* skip the rest, if ignore is requested */
++ if (list->flags & RSBAC_LIST_IGNORE_OLD)
++ goto end_read;
++
++ switch (list_version) {
++ case RSBAC_LIST_DISK_VERSION:
++ set_fs(KERNEL_DS);
++ tmperr = file_p->f_op->read(file_p,
++ (char *) &lastchange,
++ sizeof(lastchange),
++ &file_p->f_pos);
++ set_fs(oldfs);
++ /* error? */
++ if (tmperr < sizeof(lastchange)) {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): lastchange read error %i from file %s!\n",
++ tmperr,
++ name);
++ err = -RSBAC_EREADFAILED;
++ goto end_read;
++ }
++ break;
++ case RSBAC_LIST_DISK_OLD_VERSION:
++ break;
++ default:
++ break;
++ }
++ /* if wrong list version, try to get_conv */
++ if (list_info_p->version != list->info.version) {
++ if (list->get_conv)
++ conv = list->get_conv(list_info_p->version);
++ if (list->get_subconv)
++ subconv = list->get_subconv(list_info_p->version);
++ if (!conv || !subconv) {
++ if (timeout) {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): accessing timed out list %s without both conversion functions, ignoring old contents!\n",
++ name);
++ goto end_read;
++ } else {
++ /* complain and set error, if ignore is not requested */
++ if (!(list->flags & RSBAC_LIST_IGNORE_UNSUPP_VERSION)) {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): cannot convert list version %u of file %s to version %u!\n",
++ list_info_p->version,
++ name,
++ list->info.version);
++ err = -RSBAC_EINVALIDVERSION;
++ }
++ goto end_read;
++ }
++ } else {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): converting list version %u of file %s on device %02u:%02u to version %u!\n",
++ list_info_p->version,
++ name,
++ RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device),
++ list->info.version);
++ }
++ } else { /* same version needs same sizes */
++
++ if ((list_info_p->desc_size != list->info.desc_size)
++ || (list_info_p->data_size != list->info.data_size)
++ || (list_info_p->subdesc_size !=
++ list->info.subdesc_size)
++ || (list_info_p->subdata_size !=
++ list->info.subdata_size)
++ ) {
++ if (timeout) {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): accessing timed out list %s with wrong desc or data size(s), ignoring old contents!\n",
++ name);
++ goto end_read;
++ } else {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): desc or data size mismatch on list %s!\n",
++ name);
++ err = -RSBAC_EINVALIDLIST;
++ goto end_read;
++ }
++ }
++ }
++
++ /* get list count */
++ set_fs(KERNEL_DS);
++ tmperr = file_p->f_op->read(file_p,
++ (__u8 *) & list_count,
++ sizeof(list_count), &file_p->f_pos);
++ set_fs(oldfs);
++ /* error? */
++ if (tmperr < sizeof(list_count)) {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): list count read error %i from file %s!\n",
++ tmperr,
++ name);
++ err = -RSBAC_EREADFAILED;
++ goto end_read;
++ }
++
++ /* alloc mem for old and converted items */
++ old_buf =
++ rsbac_kmalloc_unlocked(list_info_p->desc_size + list_info_p->data_size);
++ if (!old_buf) {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): cannot allocate memory!\n");
++ err = -RSBAC_ENOMEM;
++ goto end_read;
++ }
++ new_buf =
++ rsbac_kmalloc_unlocked(list->info.desc_size + list->info.data_size);
++ if (!new_buf) {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): cannot allocate memory!\n");
++ err = -RSBAC_ENOMEM;
++ goto end_read;
++ }
++ old_subbuf =
++ rsbac_kmalloc_unlocked(list_info_p->subdesc_size +
++ list_info_p->subdata_size);
++ if (!old_subbuf) {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): cannot allocate memory!\n");
++ err = -RSBAC_ENOMEM;
++ goto end_read;
++ }
++ new_subbuf =
++ rsbac_kmalloc_unlocked(list->info.subdesc_size +
++ list->info.subdata_size);
++ if (!new_subbuf) {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): cannot allocate memory!\n");
++ err = -RSBAC_ENOMEM;
++ goto end_read;
++ }
++ /* calculate data pointers */
++ if (list_info_p->data_size)
++ old_data = old_buf + list_info_p->desc_size;
++ else
++ old_data = NULL;
++ if (list->info.data_size)
++ new_data = new_buf + list->info.desc_size;
++ else
++ new_data = NULL;
++ if (list_info_p->subdata_size)
++ old_subdata = old_subbuf + list_info_p->subdesc_size;
++ else
++ old_subdata = NULL;
++ if (list->info.subdata_size)
++ new_subdata = new_subbuf + list->info.subdesc_size;
++ else
++ new_subdata = NULL;
++
++ /* actual reading */
++ do {
++ set_fs(KERNEL_DS);
++ tmperr = file_p->f_op->read(file_p,
++ (char *) &max_age,
++ sizeof(max_age),
++ &file_p->f_pos);
++ set_fs(oldfs);
++ if (conv) {
++ set_fs(KERNEL_DS);
++ tmperr = file_p->f_op->read(file_p,
++ old_buf,
++ list_info_p->desc_size +
++ list_info_p->data_size,
++ &file_p->f_pos);
++ set_fs(oldfs);
++ if (tmperr > 0) { /* convert */
++ converr = conv(old_buf, old_data,
++ new_buf, new_data);
++ if (converr)
++ tmperr = converr;
++ }
++ } else {
++ set_fs(KERNEL_DS);
++ tmperr = file_p->f_op->read(file_p,
++ new_buf,
++ list->info.desc_size +
++ list->info.data_size,
++ &file_p->f_pos);
++ set_fs(oldfs);
++ }
++ /* if successful, add item */
++ if (tmperr > 0) {
++ /* no need to lock, list is not yet published */
++ if (!backup || !(item_p = lookup_lol_item_locked(list, new_buf)))
++ item_p = add_lol_item(list, max_age, new_buf, new_data);
++ /* allow access */
++ if (!item_p) {
++ err = -RSBAC_ENOMEM;
++ goto end_read;
++ }
++ read_count++;
++/*
++ rsbac_pr_debug(lists, "read item %i\n", user_aci.id);
++*/
++ set_fs(KERNEL_DS);
++ tmperr = file_p->f_op->read(file_p,
++ (__u8 *) & sublen,
++ sizeof(sublen),
++ &file_p->f_pos);
++ set_fs(oldfs);
++ /* if successful, read and add sublen subitems */
++ if (tmperr > 0) {
++ for (i = 0; i < sublen; i++) {
++ set_fs(KERNEL_DS);
++ tmperr = file_p->f_op->read(file_p,
++ (char *) &max_age,
++ sizeof(max_age),
++ &file_p->f_pos);
++ set_fs(oldfs);
++ if (subconv) {
++ set_fs(KERNEL_DS);
++ tmperr = file_p->f_op->read(file_p,
++ old_subbuf,
++ list_info_p->subdesc_size +
++ list_info_p->subdata_size,
++ &file_p->f_pos);
++ set_fs(oldfs);
++ if (tmperr > 0) { /* convert */
++ converr = subconv(old_subbuf, old_subdata,
++ new_subbuf, new_subdata);
++ if (converr)
++ tmperr = converr;
++ }
++ } else {
++ set_fs(KERNEL_DS);
++ tmperr = file_p->f_op->read(file_p,
++ new_subbuf,
++ list->info.subdesc_size +
++ list->info.subdata_size,
++ &file_p->f_pos);
++ set_fs(oldfs);
++ }
++ if (tmperr > 0) {
++ /* no need to lock, list is not yet published */
++ if (!backup || !lookup_lol_subitem_locked(list, item_p, new_subbuf))
++ if (!add_lol_subitem(list, item_p, max_age,
++ new_subbuf, new_subdata)) {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): could not add subitem!\n");
++ i = sublen;
++ tmperr = -1;
++ }
++ } else {
++ i = sublen;
++ tmperr = -1;
++ }
++ }
++ }
++ }
++ }
++ while (tmperr > 0); /* end of do */
++
++ if (tmperr < 0) {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): read error %i from file %s!\n",
++ tmperr,
++ name);
++ err = -RSBAC_EREADFAILED;
++ }
++
++ if (read_count != list_count) {
++ rsbac_printk(KERN_WARNING "do_read_lol_list(): read %lu, expected %u items from file %s!\n",
++ read_count, list_count, name);
++ err = -RSBAC_EREADFAILED;
++ }
++
++end_read:
++ if (old_buf)
++ rsbac_kfree(old_buf);
++ if (new_buf)
++ rsbac_kfree(new_buf);
++ if (old_subbuf)
++ rsbac_kfree(old_subbuf);
++ if (new_subbuf)
++ rsbac_kfree(new_subbuf);
++
++ rsbac_pr_debug(lists, "%lu entries read.\n", read_count);
++ /* We do not need this file any more */
++ rsbac_read_close(file_p);
++
++double_free:
++ rsbac_kfree(list_info_p);
++
++ if ( err
++ && (err != -RSBAC_ENOTFOUND)
++ && !backup
++ && rsbac_list_recover
++ ) {
++ char * bname;
++
++ rsbac_list_read_errors++;
++ bname = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if (!bname)
++ return -RSBAC_ENOMEM;
++ rsbac_printk(KERN_WARNING "restoring list of lists %s from device %02u:%02u failed with error %s, rsbac_list_recover is set, so retrying with backup list.\n",
++ name,
++ RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device),
++ get_error_name(bname, err));
++ sprintf(bname, "%sb", name);
++ err = do_read_lol_list(list, bname, TRUE);
++ if ( err
++ && (err != -RSBAC_ENOTFOUND)
++ && rsbac_list_recover
++ ) {
++ rsbac_printk(KERN_WARNING "restoring list of lists %s backup from device %02u:%02u failed with error %s, rsbac_list_recover is set, so returning that list is fine.\n",
++ name,
++ RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device),
++ get_error_name(bname, err));
++ err = 0;
++ }
++ list->dirty = TRUE;
++ rsbac_kfree(bname);
++ }
++
++ return err;
++} /* end of do_read_lol_list() */
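++
++/*
++ * do_read_lol_list() expects the same header as do_read_list() (version,
++ * timestamp, info, lastchange, count); each item record is then followed
++ * by a sublen count and sublen subitem records (max_age + subdesc +
++ * subdata), mirroring what fill_lol_buffer() writes below. This is a
++ * reading of the code above, not an authoritative format description.
++ */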
++
++/* call unlocked */
++static int read_lol_list(struct rsbac_list_lol_reg_item_t *list)
++{
++ int res;
++ u_int flags;
++
++ flags = list->flags;
++ list->flags |= RSBAC_LIST_NO_MAX;
++ res = do_read_lol_list(list, list->name, FALSE);
++ if((res == -RSBAC_ENOTFOUND) && list->old_name_base[0]) {
++ char name[RSBAC_MAXNAMELEN];
++ int i;
++
++ rsbac_printk(KERN_INFO "read_lol_list(): list %s on device %02u:%02u not found, trying numbered lists 0 to %u with old name base '%s'\n",
++ list->name, MAJOR(list->device), MINOR(list->device), RSBAC_LIST_LOL_MAX_OLD_HASH-1, list->old_name_base);
++ for (i=0; i<RSBAC_LIST_LOL_MAX_OLD_HASH; i++) {
++ sprintf(name, "%s%u", list->old_name_base, i);
++ res = do_read_lol_list(list, name, FALSE);
++ if(res && (res != -RSBAC_ENOTFOUND))
++ return res;
++ }
++ list->dirty = TRUE;
++ }
++ list->flags = flags;
++ return res;
++}
++
++
++#ifndef CONFIG_RSBAC_NO_WRITE
++int check_buffer(struct rsbac_list_buffer_t ** buffer_pp, u_int size)
++{
++ if((*buffer_pp)->len + size <= RSBAC_LIST_BUFFER_DATA_SIZE)
++ return 0;
++ else {
++ struct rsbac_list_buffer_t * new_buffer;
++
++ new_buffer = rsbac_kmalloc(RSBAC_LIST_BUFFER_SIZE);
++ if(!new_buffer)
++ return -RSBAC_ENOMEM;
++ rsbac_pr_debug(write, "Added a buffer\n");
++ new_buffer->next = NULL;
++ new_buffer->len = 0;
++ (*buffer_pp)->next = new_buffer;
++ *buffer_pp = new_buffer;
++ return 0;
++ }
++}
++
++void free_buffers(struct rsbac_list_buffer_t * buffer)
++{
++ struct rsbac_list_buffer_t * next;
++
++ while(buffer) {
++ rsbac_pr_debug(write, "Freeing buffer of size %u\n",
++ buffer->len);
++ next = buffer->next;
++ rsbac_kfree(buffer);
++ buffer = next;
++ }
++}
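++
++/*
++ * check_buffer()/free_buffers() manage a simple singly linked chain of
++ * fixed-size serialization buffers. A minimal usage sketch (the fail
++ * label and the src/size names are illustrative only; the real callers
++ * are fill_buffer() and fill_lol_buffer() below):
++ *
++ *   struct rsbac_list_buffer_t *head, *buffer;
++ *
++ *   head = buffer = rsbac_kmalloc(RSBAC_LIST_BUFFER_SIZE);
++ *   buffer->len = 0;
++ *   buffer->next = NULL;
++ *   ...
++ *   if (check_buffer(&buffer, size))   // may advance buffer to a new chain link
++ *           goto fail;
++ *   memcpy(buffer->data + buffer->len, src, size);
++ *   buffer->len += size;
++ *   ...
++ *   free_buffers(head);                // releases the whole chain
++ */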
++
++/* call unlocked */
++static int fill_buffer(struct rsbac_list_reg_item_t *list,
++ struct rsbac_list_write_item_t **write_item_pp)
++{
++ struct rsbac_list_write_item_t *write_item_p;
++ struct rsbac_list_item_t *current_p;
++ struct rsbac_list_buffer_t *buffer = NULL;
++ rsbac_list_count_t allcount = 0;
++ rsbac_version_t list_version = RSBAC_LIST_DISK_VERSION;
++ rsbac_time_t timestamp = RSBAC_CURRENT_TIME;
++ int i;
++
++ write_item_p = rsbac_kmalloc(sizeof(*write_item_p));
++ if (!write_item_p) {
++ *write_item_pp = NULL;
++ return -RSBAC_ENOMEM;
++ }
++
++ /* fill write_item */
++ write_item_p->prev = NULL;
++ write_item_p->next = NULL;
++ write_item_p->list = list;
++ write_item_p->buffer = NULL;
++ strncpy(write_item_p->name, list->name, RSBAC_LIST_MAX_FILENAME);
++ write_item_p->name[RSBAC_LIST_MAX_FILENAME] = 0;
++ write_item_p->device = list->device;
++
++ buffer = rsbac_kmalloc(RSBAC_LIST_BUFFER_SIZE);
++ if (!buffer) {
++ rsbac_kfree(write_item_p);
++ *write_item_pp = NULL;
++ return -RSBAC_ENOMEM;
++ }
++ write_item_p->buffer = buffer;
++ buffer->len = 0;
++ buffer->next = NULL;
++ /* copy version */
++ memcpy(buffer->data, &list_version, sizeof(list_version));
++ buffer->len = sizeof(list_version);
++ /* copy timestamp */
++ memcpy(buffer->data + buffer->len,
++ &timestamp, sizeof(timestamp));
++ buffer->len += sizeof(timestamp);
++ /* copy info */
++ memcpy(buffer->data + buffer->len,
++ &list->info, sizeof(list->info));
++ buffer->len += sizeof(list->info);
++
++ /* Protect list */
++ spin_lock(&list->lock);
++ for (i=0; i<list->nr_hashes; i++)
++ allcount += list->hashed[i].count;
++ /* copy lastchange */
++ memcpy(buffer->data + buffer->len,
++ &list->lastchange, sizeof(list->lastchange));
++ buffer->len += sizeof(list->lastchange);
++ /* copy count */
++ memcpy(buffer->data + buffer->len,
++ &allcount, sizeof(allcount));
++ buffer->len += sizeof(allcount);
++ /* copy list */
++ for (i=0; i<list->nr_hashes; i++) {
++ current_p = list->hashed[i].head;
++ while (current_p) {
++ if (check_buffer(&buffer, sizeof(current_p->max_age) + list->info.desc_size + list->info.data_size)) {
++ /* unprotect this list */
++ spin_unlock(&list->lock);
++ free_buffers(write_item_p->buffer);
++ rsbac_kfree(write_item_p);
++ *write_item_pp = NULL;
++ return -RSBAC_ENOMEM;
++ }
++ memcpy(buffer->data + buffer->len,
++ &current_p->max_age, sizeof(current_p->max_age));
++ buffer->len += sizeof(current_p->max_age);
++ memcpy(buffer->data + buffer->len,
++ ((char *) current_p) + sizeof(*current_p),
++ list->info.desc_size + list->info.data_size);
++ buffer->len += list->info.desc_size + list->info.data_size;
++ current_p = current_p->next;
++ }
++ }
++ spin_unlock(&list->lock);
++
++ *write_item_pp = write_item_p;
++
++ return 0;
++}
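++
++/*
++ * Note that the buffer layout built by fill_buffer() (version, timestamp,
++ * info, lastchange, count, then per-item max_age + desc + data) mirrors
++ * exactly what do_read_list() parses above, so reader and writer stay in
++ * sync as long as RSBAC_LIST_DISK_VERSION matches.
++ */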
++
++/* call unlocked */
++static int rsbac_list_write_buffers(struct rsbac_list_write_head_t write_head)
++{
++ struct file *file_p;
++ int count = 0;
++ mm_segment_t oldfs;
++ u_int written;
++ u_long all_written;
++ u_long bytes;
++ u_int bufcount;
++ int tmperr = 0;
++ struct rsbac_list_buffer_t * buffer;
++ struct rsbac_list_write_item_t *write_item_p;
++ struct rsbac_list_write_item_t *next_item_p;
++
++ write_item_p = write_head.head;
++ while (write_item_p) {
++ rsbac_pr_debug(write, "write list %s on device %02u:%02u.\n",
++ write_item_p->name,
++ RSBAC_MAJOR(write_item_p->device),
++ RSBAC_MINOR(write_item_p->device));
++ /* open file */
++ if ((tmperr = rsbac_write_open(write_item_p->name,
++ &file_p,
++ write_item_p->device))) {
++ if (tmperr != -RSBAC_ENOTWRITABLE) {
++ rsbac_printk(KERN_WARNING "rsbac_list_write_buffers(): opening file %s on device %02u:%02u failed with error %i!\n",
++ write_item_p->name,
++ RSBAC_MAJOR(write_item_p->device),
++ RSBAC_MINOR(write_item_p->device),
++ tmperr);
++ }
++ count = tmperr;
++ goto out_free_all;
++ }
++
++ /* OK, now we can start writing the buffer. */
++ /* Set current user space to kernel space, because write() reads */
++ /* from user space */
++ oldfs = get_fs();
++ set_fs(KERNEL_DS);
++
++ buffer = write_item_p->buffer;
++ all_written = 0;
++ bufcount = 0;
++ while (buffer && (tmperr >= 0)) {
++ rsbac_pr_debug(write, "Writing list %s, buffer %u with size %u\n",
++ write_item_p->name, bufcount, buffer->len);
++ bufcount++;
++ written = 0;
++ while ((written < buffer->len) && (tmperr >= 0)) {
++ bytes = buffer->len - written;
++ tmperr = file_p->f_op->write(file_p,
++ buffer->data + written,
++ bytes,
++ &file_p->f_pos);
++ if (tmperr > 0) {
++ written += tmperr;
++ }
++ }
++ all_written += written;
++ buffer = buffer->next;
++ }
++ /* Restore the original address space limit */
++ set_fs(oldfs);
++ /* End of write access */
++ rsbac_write_close(file_p);
++ if (tmperr < 0) {
++ rsbac_printk(KERN_WARNING "rsbac_list_write_buffers(): write error %i on device %02u:%02u file %s!\n",
++ tmperr,
++ RSBAC_MAJOR(write_item_p->device),
++ RSBAC_MINOR(write_item_p->device),
++ write_item_p->name);
++ count = tmperr;
++ goto out_free_all;
++ } else
++ count++;
++
++ rsbac_pr_debug(write, "%lu bytes from %u buffers written.\n",
++ all_written, bufcount);
++
++ free_buffers(write_item_p->buffer);
++ next_item_p = write_item_p->next;
++ rsbac_kfree(write_item_p);
++ write_item_p = next_item_p;
++ }
++ return count;
++
++out_free_all:
++ /* Mark unwritten lists dirty and free everything */
++ while(write_item_p)
++ {
++ if(write_item_p->list->self == write_item_p->list)
++ write_item_p->list->dirty = TRUE;
++ free_buffers(write_item_p->buffer);
++ next_item_p = write_item_p->next;
++ rsbac_kfree(write_item_p);
++ write_item_p = next_item_p;
++ }
++ return count;
++}
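++
++/*
++ * The inner write loop above retries until buffer->len bytes have been
++ * accepted, so short writes from f_op->write() are handled. The
++ * set_fs(KERNEL_DS) dance is the 3.x-era idiom for calling a file's
++ * write method on kernel memory; later kernels would presumably use
++ * kernel_write() instead, but that is outside the scope of this patch.
++ */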
++
++/* call unlocked */
++static int fill_lol_buffer(struct rsbac_list_lol_reg_item_t *list,
++ struct rsbac_list_lol_write_item_t
++ **write_item_pp)
++{
++ struct rsbac_list_lol_write_item_t *write_item_p;
++ struct rsbac_list_lol_item_t *current_p;
++ struct rsbac_list_item_t *sub_p;
++ struct rsbac_list_buffer_t *buffer = NULL;
++ rsbac_list_count_t allcount = 0;
++ rsbac_version_t list_version = RSBAC_LIST_DISK_VERSION;
++ rsbac_time_t timestamp = RSBAC_CURRENT_TIME;
++ int i;
++
++ write_item_p = rsbac_kmalloc_unlocked(sizeof(*write_item_p));
++ if (!write_item_p) {
++ *write_item_pp = NULL;
++ return (-RSBAC_ENOMEM);
++ }
++
++ rsbac_pr_debug(write, "Filling buffers for list of lists %s\n",
++ list->name);
++ /* fill write_item */
++ write_item_p->prev = NULL;
++ write_item_p->next = NULL;
++ write_item_p->list = list;
++ write_item_p->buffer = NULL;
++ strncpy(write_item_p->name, list->name, RSBAC_LIST_MAX_FILENAME);
++ write_item_p->name[RSBAC_LIST_MAX_FILENAME] = 0;
++ write_item_p->device = list->device;
++
++ buffer = rsbac_kmalloc(RSBAC_LIST_BUFFER_SIZE);
++ if (!buffer) {
++ rsbac_kfree(write_item_p);
++ *write_item_pp = NULL;
++ return -RSBAC_ENOMEM;
++ }
++ write_item_p->buffer = buffer;
++ buffer->len = 0;
++ buffer->next = NULL;
++ /* copy version */
++ memcpy(buffer->data, (char *) &list_version, sizeof(list_version));
++ buffer->len = sizeof(list_version);
++ /* copy timestamp */
++ memcpy(buffer->data + buffer->len,
++ (char *) &timestamp, sizeof(timestamp));
++ buffer->len += sizeof(timestamp);
++ /* copy info */
++ memcpy(buffer->data + buffer->len,
++ (char *) &list->info, sizeof(list->info));
++ buffer->len += sizeof(list->info);
++ /* protect list */
++ spin_lock(&list->lock);
++ for (i=0; i<list->nr_hashes; i++)
++ allcount += list->hashed[i].count;
++ /* copy lastchange */
++ memcpy(buffer->data + buffer->len,
++ (char *) &list->lastchange, sizeof(list->lastchange));
++ buffer->len += sizeof(list->lastchange);
++ /* copy count */
++ memcpy(buffer->data + buffer->len,
++ (char *) &allcount, sizeof(allcount));
++ buffer->len += sizeof(allcount);
++ /* copy list */
++ for (i=0; i<list->nr_hashes; i++) {
++ current_p = list->hashed[i].head;
++ while (current_p) {
++ if (check_buffer(&buffer, sizeof(current_p->max_age)
++ + list->info.desc_size
++ + list->info.data_size
++ + sizeof(current_p->count))) {
++ /* unprotect this list */
++ spin_unlock(&list->lock);
++ free_buffers(write_item_p->buffer);
++ rsbac_kfree(write_item_p);
++ *write_item_pp = NULL;
++ return -RSBAC_ENOMEM;
++ }
++ memcpy(buffer->data + buffer->len,
++ &current_p->max_age, sizeof(current_p->max_age));
++ buffer->len += sizeof(current_p->max_age);
++ memcpy(buffer->data + buffer->len,
++ ((char *) current_p) + sizeof(*current_p),
++ list->info.desc_size + list->info.data_size);
++ buffer->len += list->info.desc_size + list->info.data_size;
++ memcpy(buffer->data + buffer->len,
++ &current_p->count, sizeof(current_p->count));
++ buffer->len += sizeof(current_p->count);
++ /* copy subitems */
++ sub_p = current_p->head;
++ while (sub_p) {
++ if (check_buffer(&buffer, sizeof(sub_p->max_age)
++ + list->info.subdesc_size
++ + list->info.subdata_size)) {
++ /* unprotect this list */
++ spin_unlock(&list->lock);
++ free_buffers(write_item_p->buffer);
++ rsbac_kfree(write_item_p);
++ *write_item_pp = NULL;
++ return -RSBAC_ENOMEM;
++ }
++ memcpy(buffer->data + buffer->len,
++ &sub_p->max_age, sizeof(sub_p->max_age));
++ buffer->len += sizeof(sub_p->max_age);
++ memcpy(buffer->data + buffer->len,
++ ((char *) sub_p) + sizeof(*sub_p),
++ list->info.subdesc_size +
++ list->info.subdata_size);
++ buffer->len +=
++ list->info.subdesc_size +
++ list->info.subdata_size;
++ sub_p = sub_p->next;
++ }
++ current_p = current_p->next;
++ }
++ }
++ /* unprotect this list */
++ spin_unlock(&list->lock);
++ *write_item_pp = write_item_p;
++
++ return 0;
++}
++
++/* call unlocked */
++static int rsbac_list_write_lol_buffers(struct rsbac_list_lol_write_head_t
++ write_head)
++{
++ struct file *file_p;
++ int count = 0;
++ mm_segment_t oldfs;
++ u_long written;
++ u_long all_written;
++ u_long bytes;
++ u_int bufcount;
++ int tmperr = 0;
++ struct rsbac_list_buffer_t * buffer;
++ struct rsbac_list_lol_write_item_t *write_item_p;
++ struct rsbac_list_lol_write_item_t *next_item_p;
++
++ write_item_p = write_head.head;
++ while (write_item_p) {
++ rsbac_pr_debug(write, "write list of lists %s on device %02u:%02u.\n",
++ write_item_p->name,
++ RSBAC_MAJOR(write_item_p->device),
++ RSBAC_MINOR(write_item_p->device));
++ /* open file */
++ if ((tmperr = rsbac_write_open(write_item_p->name,
++ &file_p,
++ write_item_p->device))) {
++ if (tmperr != -RSBAC_ENOTWRITABLE) {
++ rsbac_printk(KERN_WARNING "rsbac_list_write_lol_buffers(): opening file %s on device %02u:%02u failed with error %i!\n",
++ write_item_p->name,
++ RSBAC_MAJOR(write_item_p->device),
++ RSBAC_MINOR(write_item_p->device),
++ tmperr);
++ }
++ count = tmperr;
++ goto out_free_all;
++ }
++
++ /* OK, now we can start writing the buffer. */
++ /* Set current user space to kernel space, because write() reads */
++ /* from user space */
++ oldfs = get_fs();
++ set_fs(KERNEL_DS);
++
++ buffer = write_item_p->buffer;
++ all_written = 0;
++ bufcount = 0;
++ while (buffer && (tmperr >= 0)) {
++ rsbac_pr_debug(write, "Writing list of lists %s, buffer %u with size %u\n",
++ write_item_p->name, bufcount, buffer->len);
++ bufcount++;
++ written = 0;
++ while ((written < buffer->len) && (tmperr >= 0)) {
++ bytes = buffer->len - written;
++ tmperr = file_p->f_op->write(file_p,
++ buffer->data + written,
++ bytes,
++ &file_p->f_pos);
++ if (tmperr > 0) {
++ written += tmperr;
++ }
++ }
++ all_written += written;
++ buffer = buffer->next;
++ }
++ /* Restore the original address space limit */
++ set_fs(oldfs);
++ /* End of write access */
++ rsbac_write_close(file_p);
++
++ if (tmperr < 0) {
++ rsbac_printk(KERN_WARNING "rsbac_list_write_lol_buffers(): write error %i on device %02u:%02u file %s!\n",
++ tmperr,
++ RSBAC_MAJOR(write_item_p->device),
++ RSBAC_MINOR(write_item_p->device),
++ write_item_p->name);
++ count = tmperr;
++ goto out_free_all;
++ } else
++ count++;
++
++ rsbac_pr_debug(write, "%lu bytes from %u buffers written.\n",
++ all_written, bufcount);
++ free_buffers(write_item_p->buffer);
++ next_item_p = write_item_p->next;
++ rsbac_kfree(write_item_p);
++ write_item_p = next_item_p;
++ }
++ return count;
++
++out_free_all:
++ /* Mark unwritten lists dirty and free everything */
++ while(write_item_p)
++ {
++ if(write_item_p->list->self == write_item_p->list)
++ write_item_p->list->dirty = TRUE;
++ free_buffers(write_item_p->buffer);
++ next_item_p = write_item_p->next;
++ rsbac_kfree(write_item_p);
++ write_item_p = next_item_p;
++ }
++ return count;
++}
++#endif /* ifndef CONFIG_RSBAC_NO_WRITE */
++
++/************************************************* */
++/* PROC support */
++/************************************************* */
++
++#if defined(CONFIG_RSBAC_PROC)
++static int
++lists_proc_show(struct seq_file *m, void *v)
++{
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ struct rsbac_list_reg_item_t *item_p;
++ struct rsbac_list_lol_reg_item_t *lol_item_p;
++ int i;
++ u_long tmp_count;
++ int srcu_idx;
++ struct rsbac_list_hashed_t * hashed;
++ struct rsbac_list_lol_hashed_t * lol_hashed;
++ u_int nr_hashes;
++
++ if (!rsbac_is_initialized())
++ return -ENOSYS;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ seq_printf(m,
++ "Generic Lists Status\n--------------------\nMaximum number of hashes per list/list of lists is %u/%u\n%u list read failures\n",
++ rsbac_list_max_hashes, rsbac_list_lol_max_hashes, rsbac_list_read_errors);
++#ifdef CONFIG_RSBAC_RC_LEARN_TA
++ seq_printf(m, "RC Learning Mode transaction Number: %u\n",
++ CONFIG_RSBAC_RC_LEARN_TA);
++#endif
++#ifdef CONFIG_RSBAC_AUTH_LEARN_TA
++ seq_printf(m, "AUTH Learning Mode transaction Number: %u\n",
++ CONFIG_RSBAC_AUTH_LEARN_TA);
++#endif
++#ifdef CONFIG_RSBAC_ACL_LEARN_TA
++ seq_printf(m, "ACL Learning Mode transaction Number: %u\n",
++ CONFIG_RSBAC_ACL_LEARN_TA);
++#endif
++#ifdef CONFIG_RSBAC_CAP_LEARN_TA
++ seq_printf(m, "CAP Learning Mode transaction Number: %u\n",
++ CONFIG_RSBAC_CAP_LEARN_TA);
++#endif
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (rsbac_list_count(ta_handle) > 0) {
++ int list_count;
++ rsbac_list_ta_number_t *desc_array;
++ struct rsbac_list_ta_data_t data;
++
++ seq_printf(m, "\nTransactions active:\n\n");
++ list_count =
++ rsbac_list_get_all_desc(ta_handle,
++ (void **) &desc_array);
++ if (list_count > 0) {
++ int i;
++ rsbac_time_t now = RSBAC_CURRENT_TIME;
++
++ for (i = 0; i < list_count; i++) {
++ if (!rsbac_list_get_data
++ (ta_handle, &desc_array[i], &data)) {
++ seq_printf(m,
++ "%u %s (ttl %is)\n",
++ desc_array[i],
++ data.name,
++ data.timeout - now);
++ }
++ }
++ rsbac_kfree(desc_array);
++ }
++
++ seq_printf(m,
++ "\nLists in Transaction\n--------------------\nName\t\tdevice\thash\tta\t count\n");
++
++ list_count = 0;
++ srcu_idx = srcu_read_lock(&reg_list_srcu);
++ item_p = reg_head.head;
++ while (item_p) {
++ rcu_read_lock();
++ nr_hashes = item_p->nr_hashes;
++ hashed = rcu_dereference(item_p->hashed);
++ for (i=0; i<nr_hashes; i++) {
++ if (hashed[i].ta_copied) {
++ seq_printf(m,
++ "%-16s%02u:%02u\t%u\t%10u\t%u\n",
++ item_p->name,
++ RSBAC_MAJOR(item_p->device),
++ RSBAC_MINOR(item_p->device),
++ i,
++ hashed[i].ta_copied,
++ hashed[i].ta_count);
++ list_count++;
++ }
++ }
++ rcu_read_unlock();
++ item_p = item_p->next;
++ }
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++
++ seq_printf(m,
++ "\n %u lists in transaction.\n\n", list_count);
++ seq_printf(m,
++ "Lists of Lists in Transaction\n-----------------------------\nName\t\tdevice\thash\tta\t count\n");
++ list_count = 0;
++ srcu_idx = srcu_read_lock(&lol_reg_list_srcu);
++ lol_item_p = lol_reg_head.head;
++ while (lol_item_p) {
++ rcu_read_lock();
++ nr_hashes = lol_item_p->nr_hashes;
++ lol_hashed = rcu_dereference(lol_item_p->hashed);
++ for (i=0; i<nr_hashes; i++) {
++ if (lol_hashed[i].ta_copied) {
++ seq_printf(m,
++ "%-16s%02u:%02u\t%u\t%10u\t%u\n",
++ lol_item_p->name,
++ RSBAC_MAJOR(lol_item_p->device),
++ RSBAC_MINOR(lol_item_p->device),
++ i,
++ lol_hashed[i].ta_copied,
++ lol_hashed[i].ta_count);
++ list_count++;
++ }
++ }
++ rcu_read_unlock();
++ lol_item_p = lol_item_p->next;
++ }
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++
++ seq_printf(m,
++ "\n %u lists of lists in transaction.\n\n",
++ list_count);
++ } else
++ seq_printf(m, "No active transaction\n");
++#endif
++ seq_printf(m,
++ "\nRegistered Generic Lists (item size %u + per hash %u)\n------------------------\n",
++ (int) sizeof(struct rsbac_list_reg_item_t), (int) sizeof(struct rsbac_list_hashed_t));
++ seq_printf(m,
++ "Name\t\tdevice\tcount\tdesc\tdata\tpersist\tnow/dir\tflags\thashes\n");
++
++ srcu_idx = srcu_read_lock(&reg_list_srcu);
++ item_p = reg_head.head;
++ while (item_p) {
++ rcu_read_lock();
++ nr_hashes = item_p->nr_hashes;
++ hashed = rcu_dereference(item_p->hashed);
++ tmp_count = 0;
++ for (i=0; i<nr_hashes; i++)
++ tmp_count += hashed[i].count;
++ rcu_read_unlock();
++ seq_printf(m,
++ "%-16s%02u:%02u\t%lu\t%u\t%u\t%u\t%u/%u\t%u\t%u\n",
++ item_p->name, RSBAC_MAJOR(item_p->device),
++ RSBAC_MINOR(item_p->device), tmp_count,
++ item_p->info.desc_size, item_p->info.data_size,
++ item_p->flags & RSBAC_LIST_PERSIST,
++ item_p->no_write,
++ item_p->dirty & (item_p->flags & RSBAC_LIST_PERSIST),
++ item_p->flags,
++ nr_hashes);
++ item_p = item_p->next;
++ }
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++
++ seq_printf(m, "\n %u lists registered.\n\n",
++ reg_head.count);
++ seq_printf(m,
++ "Registered Generic Lists of Lists (item size %u + per hash %u)\n---------------------------------\n",
++ (int) sizeof(struct rsbac_list_lol_reg_item_t), (int) sizeof(struct rsbac_list_lol_hashed_t));
++ seq_printf(m,
++ "Name\t\tdevice\tcount\tdesc\tdata\tpersist\tnow/dir\tflags\thashes\n");
++
++ srcu_idx = srcu_read_lock(&lol_reg_list_srcu);
++ lol_item_p = lol_reg_head.head;
++ while (lol_item_p) {
++ rcu_read_lock();
++ nr_hashes = lol_item_p->nr_hashes;
++ lol_hashed = rcu_dereference(lol_item_p->hashed);
++ tmp_count = 0;
++ for (i=0; i<nr_hashes; i++)
++ tmp_count += lol_hashed[i].count;
++ rcu_read_unlock();
++ seq_printf(m,
++ "%-16s%02u:%02u\t%lu\t%u+%u\t%u+%u\t%u\t%u/%u\t%u\t%u\n",
++ lol_item_p->name,
++ RSBAC_MAJOR(lol_item_p->device),
++ RSBAC_MINOR(lol_item_p->device),
++ tmp_count, lol_item_p->info.desc_size,
++ lol_item_p->info.subdesc_size,
++ lol_item_p->info.data_size,
++ lol_item_p->info.subdata_size,
++ lol_item_p->flags & RSBAC_LIST_PERSIST,
++ lol_item_p->no_write,
++ lol_item_p->dirty & (lol_item_p->flags & RSBAC_LIST_PERSIST),
++ lol_item_p->flags,
++ nr_hashes);
++ lol_item_p = lol_item_p->next;
++ }
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++
++ seq_printf(m, "\n %u lists of lists registered.\n",
++ lol_reg_head.count);
++ return 0;
++}
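++
++/*
++ * All proc readers here follow the same lockless traversal idiom: the
++ * registration chain is protected by SRCU, the per-list hash array and
++ * item chains by plain RCU. A minimal reader-side sketch:
++ *
++ *   int idx = srcu_read_lock(&reg_list_srcu);
++ *   for (item_p = reg_head.head; item_p; item_p = item_p->next) {
++ *           rcu_read_lock();
++ *           hashed = rcu_dereference(item_p->hashed);
++ *           // ... use hashed[0 .. item_p->nr_hashes - 1] ...
++ *           rcu_read_unlock();
++ *   }
++ *   srcu_read_unlock(&reg_list_srcu, idx);
++ */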
++
++static int lists_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, lists_proc_show, NULL);
++}
++
++static const struct file_operations lists_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = lists_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *lists_proc;
++
++static int
++lists_counts_proc_show(struct seq_file *m, void *v)
++{
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ struct rsbac_list_reg_item_t *item_p;
++ struct rsbac_list_lol_reg_item_t *lol_item_p;
++ int i;
++#ifdef CONFIG_RSBAC_LIST_STATS
++ __u64 all_read = 0;
++ __u64 all_write = 0;
++#endif
++ int srcu_idx;
++ struct rsbac_list_hashed_t * hashed;
++ struct rsbac_list_lol_hashed_t * lol_hashed;
++ u_int nr_hashes;
++
++ if (!rsbac_is_initialized())
++ return -ENOSYS;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ seq_printf(m,
++ "Generic Lists Status\n--------------------\nMaximum number of hashes per list/list of lists is %u/%u\n\n",
++ rsbac_list_max_hashes, rsbac_list_lol_max_hashes);
++ seq_printf(m,
++ "Registered Generic Lists (item size %u + per hash %u)\n------------------------\n",
++ (int) sizeof(struct rsbac_list_reg_item_t), (int) sizeof(struct rsbac_list_hashed_t));
++#ifdef CONFIG_RSBAC_LIST_STATS
++ seq_printf(m,
++ "Name\t\tdevice\tmaxitem\treads\twrites\thashes\tcounts\n");
++#else
++ seq_printf(m,
++ "Name\t\tdevice\tmaxitem\thashes\tcounts\n");
++#endif
++ srcu_idx = srcu_read_lock(&reg_list_srcu);
++ item_p = reg_head.head;
++ while (item_p) {
++#ifdef CONFIG_RSBAC_LIST_STATS
++ seq_printf(m,
++ "%-16s%02u:%02u\t%u\t%llu\t%llu\t%u\t",
++ item_p->name, RSBAC_MAJOR(item_p->device),
++ RSBAC_MINOR(item_p->device),
++ item_p->max_items_per_hash,
++ item_p->read_count,
++ item_p->write_count,
++ item_p->nr_hashes);
++ all_read += item_p->read_count;
++ all_write += item_p->write_count;
++#else
++ seq_printf(m,
++ "%-16s%02u:%02u\t%u\t%u\t",
++ item_p->name, RSBAC_MAJOR(item_p->device),
++ RSBAC_MINOR(item_p->device),
++ item_p->max_items_per_hash,
++ item_p->nr_hashes);
++#endif
++ rcu_read_lock();
++ nr_hashes = item_p->nr_hashes;
++ hashed = rcu_dereference(item_p->hashed);
++ for (i=0; i<nr_hashes; i++) {
++ seq_printf(m,
++ " %u",
++ hashed[i].count);
++ }
++ rcu_read_unlock();
++ seq_printf(m,
++ "\n");
++ item_p = item_p->next;
++ }
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++
++#ifdef CONFIG_RSBAC_LIST_STATS
++ seq_printf(m, "\n %u lists registered, %llu reads, %llu writes\n\n",
++ reg_head.count, all_read, all_write);
++ all_read = 0;
++ all_write = 0;
++#else
++ seq_printf(m, "\n %u lists registered.\n\n",
++ reg_head.count);
++#endif
++ seq_printf(m,
++ "Registered Generic Lists of Lists (item size %u + per hash %u)\n---------------------------------\n",
++ (int) sizeof(struct rsbac_list_lol_reg_item_t), (int) sizeof(struct rsbac_list_lol_hashed_t));
++#ifdef CONFIG_RSBAC_LIST_STATS
++ seq_printf(m,
++ "Name\t\tdevice\tmaxitem\tmaxsubi\treads\twrites\thashes\tcounts\n");
++#else
++ seq_printf(m,
++ "Name\t\tdevice\tmaxitem\tmaxsubi\thashes\tcounts\n");
++#endif
++ srcu_idx = srcu_read_lock(&lol_reg_list_srcu);
++ lol_item_p = lol_reg_head.head;
++ while (lol_item_p) {
++#ifdef CONFIG_RSBAC_LIST_STATS
++ seq_printf(m,
++ "%-16s%02u:%02u\t%u\t%u\t%llu\t%llu\t%u\t",
++ lol_item_p->name, RSBAC_MAJOR(lol_item_p->device),
++ RSBAC_MINOR(lol_item_p->device),
++ lol_item_p->max_items_per_hash,
++ lol_item_p->max_subitems,
++ lol_item_p->read_count,
++ lol_item_p->write_count,
++ lol_item_p->nr_hashes);
++ all_read += lol_item_p->read_count;
++ all_write += lol_item_p->write_count;
++#else
++ seq_printf(m,
++ "%-16s%02u:%02u\t%u\t%u\t%u\t",
++ lol_item_p->name, RSBAC_MAJOR(lol_item_p->device),
++ RSBAC_MINOR(lol_item_p->device),
++ lol_item_p->max_items_per_hash,
++ lol_item_p->max_subitems,
++ lol_item_p->nr_hashes);
++#endif
++ rcu_read_lock();
++ nr_hashes = lol_item_p->nr_hashes;
++ lol_hashed = rcu_dereference(lol_item_p->hashed);
++ for (i=0; i<nr_hashes; i++) {
++ seq_printf(m,
++ " %u",
++ lol_hashed[i].count);
++ }
++ rcu_read_unlock();
++ seq_printf(m,
++ "\n");
++ lol_item_p = lol_item_p->next;
++ }
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++
++#ifdef CONFIG_RSBAC_LIST_STATS
++ seq_printf(m, "\n %u lists of lists registered, %llu reads, %llu writes\n\nRCU Garbage Collector call statistics\n-------------------------------------\n",
++ lol_reg_head.count, all_read, all_write);
++ seq_printf(m, "rcu_free: %llu\n", rcu_free_calls);
++ seq_printf(m, "rcu_free_item_chain: %llu\n", rcu_free_item_chain_calls);
++ seq_printf(m, "rcu_free_lol: %llu\n", rcu_free_lol_calls);
++ seq_printf(m, "rcu_free_lol_sub: %llu\n", rcu_free_lol_sub_calls);
++ seq_printf(m, "rcu_free_lol_item_chain: %llu\n", rcu_free_lol_item_chain_calls);
++ seq_printf(m, "rcu_free_lol_subitem_chain: %llu\n", rcu_free_lol_subitem_chain_calls);
++ seq_printf(m, "rcu_free_do_cleanup: %llu\n", rcu_free_do_cleanup_calls);
++ seq_printf(m, "rcu_free_do_cleanup_lol: %llu\n", rcu_free_do_cleanup_lol_calls);
++ seq_printf(m, "rcu_free_callback: %llu\n", rcu_free_callback_calls);
++ seq_printf(m, "rcu_free_callback_lol: %llu\n", rcu_free_callback_lol_calls);
++ seq_printf(m, "rcu_callback_count: %u\n", rcu_callback_count);
++ seq_printf(m, "rcu_rate: %u/s\n", rsbac_list_rcu_rate);
++ seq_printf(m, "system RCU total completed: %lu\n", rcu_batches_completed());
++#else
++ seq_printf(m, "\n %u lists of lists registered.\n",
++ lol_reg_head.count);
++#endif
++ return 0;
++}
++
++static int lists_counts_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, lists_counts_proc_show, NULL);
++}
++
++static const struct file_operations lists_counts_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = lists_counts_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *lists_counts_proc;
++
++/* Generic backup generation function */
++static int backup_proc_read(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ int len = 0;
++ off_t pos = 0;
++ off_t begin = 0;
++ int i;
++
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_item_t *current_p;
++ rsbac_version_t list_version = RSBAC_LIST_DISK_VERSION;
++ rsbac_time_t timestamp = RSBAC_CURRENT_TIME;
++ int srcu_idx;
++ struct rsbac_list_hashed_t * hashed;
++ u_int nr_hashes;
++
++ if (!rsbac_is_initialized())
++ return -ENOSYS;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ srcu_idx = srcu_read_lock(&reg_list_srcu);
++ list = lookup_reg(data);
++ if (!list) {
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++ return -ENOSYS;
++ }
++ /* copy version */
++ memcpy(page, (char *) &list_version, sizeof(list_version));
++ len = sizeof(list_version);
++ /* copy timestamp */
++ memcpy(page + len, (char *) &timestamp, sizeof(timestamp));
++ len += sizeof(timestamp);
++ /* copy info */
++ memcpy(page + len, (char *) &list->info, sizeof(list->info));
++ len += sizeof(list->info);
++ pos = begin + len;
++ if (pos < off) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > off + count) {
++ goto out;
++ }
++
++ rcu_read_lock();
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++ /* copy list */
++ for (i=0; i<nr_hashes; i++) {
++ current_p = rcu_dereference(hashed[i].head);
++ while (current_p) {
++ memcpy(page + len,
++ ((char *) current_p) + sizeof(*current_p),
++ list->info.desc_size + list->info.data_size);
++ len += list->info.desc_size + list->info.data_size;
++ pos = begin + len;
++ if (pos < off) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > off + count) {
++ goto out_rcu;
++ }
++ current_p = rcu_dereference(current_p->next);
++ }
++ }
++
++ out_rcu:
++ rcu_read_unlock();
++
++ out:
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++ if (len <= off + count)
++ *eof = 1;
++ *start = page + (off - begin);
++ len -= (off - begin);
++
++ if (len > count)
++ len = count;
++ return len;
++}
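++
++/*
++ * Unlike the seq_file based handlers above, backup_proc_read() and
++ * lol_backup_proc_read() still use the legacy read_proc callback
++ * convention (page/start/off/count/eof, with data pointing at the
++ * registered list). Registration is not shown in this hunk; on a 3.x
++ * kernel it would typically look roughly like the following sketch
++ * (the "backup" name and parent_dir_p are placeholders):
++ *
++ *   entry_p = create_proc_read_entry("backup", S_IFREG | S_IRUGO,
++ *                                    parent_dir_p, backup_proc_read,
++ *                                    list);
++ */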
++
++/* Generic lists of lists backup generation function */
++static int lol_backup_proc_read(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ int len = 0;
++ off_t pos = 0;
++ off_t begin = 0;
++ int i;
++
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *current_p;
++ struct rsbac_list_item_t *sub_p;
++ rsbac_version_t list_version = RSBAC_LIST_DISK_VERSION;
++ rsbac_time_t timestamp = RSBAC_CURRENT_TIME;
++ int srcu_idx;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int nr_hashes;
++
++ if (!rsbac_is_initialized())
++ return -ENOSYS;
++
++ rsbac_pr_debug(aef, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ srcu_idx = srcu_read_lock(&lol_reg_list_srcu);
++ list = lookup_lol_reg(data);
++ if (!list) {
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++ return -ENOSYS;
++ }
++ /* copy version */
++ memcpy(page, (char *) &list_version, sizeof(list_version));
++ len = sizeof(list_version);
++ /* copy timestamp */
++ memcpy(page + len, (char *) &timestamp, sizeof(timestamp));
++ len += sizeof(timestamp);
++ /* copy info */
++ memcpy(page + len, (char *) &list->info, sizeof(list->info));
++ len += sizeof(list->info);
++ pos = begin + len;
++ if (pos < off) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > off + count) {
++ goto out;
++ }
++
++ rcu_read_lock();
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++ /* copy list */
++ for (i=0; i<nr_hashes; i++) {
++ current_p = rcu_dereference(hashed[i].head);
++ while (current_p) {
++ memcpy(page + len,
++ ((char *) current_p) + sizeof(*current_p),
++ list->info.desc_size + list->info.data_size);
++ len += list->info.desc_size + list->info.data_size;
++ memcpy(page + len,
++ &current_p->count, sizeof(current_p->count));
++ len += sizeof(current_p->count);
++ pos = begin + len;
++ if (pos < off) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > off + count) {
++ goto out_rcu;
++ }
++ /* copy sublist */
++ sub_p = rcu_dereference(current_p->head);
++ while (sub_p) {
++ memcpy(page + len,
++ ((char *) sub_p) + sizeof(*sub_p),
++ list->info.subdesc_size +
++ list->info.subdata_size);
++ len +=
++ list->info.subdesc_size +
++ list->info.subdata_size;
++ pos = begin + len;
++ if (pos < off) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > off + count) {
++ goto out_rcu;
++ }
++ sub_p = rcu_dereference(sub_p->next);
++ }
++ current_p = rcu_dereference(current_p->next);
++ }
++ }
++
++ out_rcu:
++ rcu_read_unlock();
++
++ out:
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++ if (len <= off + count)
++ *eof = 1;
++ *start = page + (off - begin);
++ len -= (off - begin);
++
++ if (len > count)
++ len = count;
++ return len;
++}
++#endif /* PROC */
++
++
++/********************/
++/* Init and general */
++/********************/
++int rsbac_list_compare_u32(void *desc1, void *desc2)
++{
++ if (*((__u32 *) desc1) < *((__u32 *) desc2))
++ return -1;
++ return (*((__u32 *) desc1) != *((__u32 *) desc2));
++}
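++
++/*
++ * Comparison helpers follow memcmp()-like semantics: negative if the
++ * first descriptor is smaller, 0 if equal, positive otherwise. For
++ * example (illustrative only):
++ *
++ *   __u32 a = 5, b = 9;
++ *   rsbac_list_compare_u32(&a, &b);   // -1, since 5 < 9
++ *   rsbac_list_compare_u32(&b, &a);   //  1, different and not smaller
++ *   rsbac_list_compare_u32(&a, &a);   //  0, equal
++ */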
++
++static void rcu_rate_reset(u_long dummy)
++{
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rcu_callback_count > rsbac_list_rcu_rate) {
++ rsbac_pr_debug(lists,
++ "rcu_callback_count %u over rcu_rate %u, list write accesses have been throttled\n",
++ rcu_callback_count, rsbac_list_rcu_rate);
++ }
++#endif
++ rcu_callback_count = 0;
++
++ mod_timer(&rcu_rate_timer, jiffies + HZ);
++}
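++
++/*
++ * rcu_rate_reset() re-arms itself once per second (see the mod_timer()
++ * call above and the init_timer() setup in rsbac_list_init() below), so
++ * rcu_callback_count effectively counts RCU free callbacks per second
++ * and is compared against rsbac_list_rcu_rate to detect bursts.
++ */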
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++int rsbac_list_init(void)
++#else
++int __init rsbac_list_init(void)
++#endif
++{
++#if defined(CONFIG_RSBAC_LIST_TRANS) || defined(CONFIG_RSBAC_LIST_REPL)
++ int err;
++ struct rsbac_list_info_t *list_info_p;
++#endif
++ u_int i;
++
++ reg_item_slab = rsbac_slab_create("rsbac_reg_item",
++ sizeof(struct rsbac_list_reg_item_t));
++ lol_reg_item_slab = rsbac_slab_create("rsbac_lol_reg_item",
++ sizeof(struct rsbac_list_lol_reg_item_t));
++ rcu_free_item_slab = rsbac_slab_create("rsbac_rcu_free_item",
++ sizeof(struct rsbac_list_rcu_free_item_t));
++ rcu_free_head_slab = rsbac_slab_create("rsbac_rcu_free_head",
++ sizeof(struct rsbac_list_rcu_free_head_t));
++ rcu_free_head_lol_slab = rsbac_slab_create("rsbac_rcu_free_head_lol",
++ sizeof(struct rsbac_list_rcu_free_head_lol_t));
++
++ reg_head.head = NULL;
++ reg_head.tail = NULL;
++ reg_head.curr = NULL;
++ spin_lock_init(&reg_head.lock);
++ init_srcu_struct(&reg_list_srcu);
++ init_srcu_struct(&lol_reg_list_srcu);
++ reg_head.count = 0;
++
++ lol_reg_head.head = NULL;
++ lol_reg_head.tail = NULL;
++ lol_reg_head.curr = NULL;
++ spin_lock_init(&lol_reg_head.lock);
++ lol_reg_head.count = 0;
++
++ /* Check that rsbac_list_max_hashes is within bounds
++ * and a power of 2; correct it if necessary.
++ */
++ if(CONFIG_RSBAC_LIST_MAX_HASHES < RSBAC_LIST_MIN_MAX_HASHES)
++ rsbac_list_max_hashes = RSBAC_LIST_MIN_MAX_HASHES;
++ else if (CONFIG_RSBAC_LIST_MAX_HASHES > RSBAC_MAX_KMALLOC / sizeof(struct rsbac_list_hashed_t))
++ rsbac_list_max_hashes = RSBAC_MAX_KMALLOC / sizeof(struct rsbac_list_hashed_t);
++ else
++ rsbac_list_max_hashes = CONFIG_RSBAC_LIST_MAX_HASHES;
++
++ i = 1;
++ while ((i << 1) <= rsbac_list_max_hashes)
++ i = i << 1;
++ rsbac_list_max_hashes = i;
++
++ /* Also for rsbac_list_lol_max_hashes */
++ if(CONFIG_RSBAC_LIST_MAX_HASHES < RSBAC_LIST_MIN_MAX_HASHES)
++ rsbac_list_lol_max_hashes = RSBAC_LIST_MIN_MAX_HASHES;
++ else if (CONFIG_RSBAC_LIST_MAX_HASHES > RSBAC_MAX_KMALLOC / sizeof(struct rsbac_list_lol_hashed_t))
++ rsbac_list_lol_max_hashes = RSBAC_MAX_KMALLOC / sizeof(struct rsbac_list_lol_hashed_t);
++ else
++ rsbac_list_lol_max_hashes = CONFIG_RSBAC_LIST_MAX_HASHES;
++
++ i = 1;
++ while ((i << 1) <= rsbac_list_lol_max_hashes)
++ i = i << 1;
++ rsbac_list_lol_max_hashes = i;
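++ /*
++ * Example: a configured value of 24 is rounded down by the loops above
++ * to 16, the largest power of 2 not exceeding it.
++ */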
++
++ list_initialized = TRUE;
++
++#if defined(CONFIG_RSBAC_LIST_TRANS) || defined(CONFIG_RSBAC_LIST_REPL)
++ list_info_p = rsbac_kmalloc_unlocked(sizeof(*list_info_p));
++ if (!list_info_p) {
++ return -ENOMEM;
++ }
++#endif
++
++ /* init proc entry */
++#if defined(CONFIG_RSBAC_PROC)
++ {
++ lists_proc = proc_create(RSBAC_LIST_PROC_NAME, S_IFREG | S_IRUGO,
++ proc_rsbac_root_p, &lists_proc_fops);
++ lists_counts_proc = proc_create(RSBAC_LIST_COUNTS_PROC_NAME,
++ S_IFREG | S_IRUGO,
++ proc_rsbac_root_p,
++ &lists_counts_proc_fops);
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ rsbac_printk(KERN_INFO "rsbac_list_init(): Registering transaction list.\n");
++ list_info_p->version = 1;
++ list_info_p->key = RSBAC_LIST_TA_KEY;
++ list_info_p->desc_size = sizeof(rsbac_list_ta_number_t);
++ list_info_p->data_size = sizeof(struct rsbac_list_ta_data_t);
++ list_info_p->max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ (void **) &ta_handle,
++ list_info_p,
++ 0,
++ NULL,
++ NULL,
++ NULL, "transactions", RSBAC_AUTO_DEV);
++ if (err) {
++ char *tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_list_init(): Registering transaction list failed with error %s\n",
++ get_error_name(tmp, err));
++ rsbac_kfree(tmp);
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_LIST_TRANS) || defined(CONFIG_RSBAC_LIST_REPL)
++ rsbac_kfree(list_info_p);
++#endif
++
++ init_timer(&rcu_rate_timer);
++ rcu_rate_timer.function = rcu_rate_reset;
++ rcu_rate_timer.data = 0;
++ rcu_rate_timer.expires = jiffies + HZ;
++ add_timer(&rcu_rate_timer);
++ return 0;
++}
++
++#ifdef CONFIG_RSBAC_AUTO_WRITE
++int rsbac_list_auto_rehash(void);
++
++int rsbac_write_lists(void)
++{
++ int count = 0;
++ int subcount = 0;
++ int error = 0;
++ struct rsbac_list_reg_item_t *item_p;
++ struct rsbac_list_lol_reg_item_t *lol_item_p;
++ struct rsbac_list_write_head_t write_head;
++ struct rsbac_list_write_item_t *write_item_p;
++ struct rsbac_list_lol_write_head_t write_lol_head;
++ struct rsbac_list_lol_write_item_t *write_lol_item_p;
++ int srcu_idx;
++
++/*
++ rsbac_pr_debug(lists, "called.\n");
++*/
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (rsbac_list_count(ta_handle) > 0) {
++ int list_count;
++ rsbac_list_ta_number_t *desc_array;
++ struct rsbac_list_ta_data_t data;
++
++ list_count =
++ rsbac_list_get_all_desc(ta_handle,
++ (void **) &desc_array);
++ if (list_count > 0) {
++ int i;
++ rsbac_time_t now = RSBAC_CURRENT_TIME;
++
++ for (i = 0; i < list_count; i++) {
++ if (!rsbac_list_get_data
++ (ta_handle, &desc_array[i], &data)) {
++ if (data.timeout < now) {
++ rsbac_printk(KERN_WARNING "rsbac_write_lists(): transaction %u timed out, forcing forget\n",
++ desc_array[i]);
++ do_forget(desc_array[i]);
++ }
++ }
++ }
++ rsbac_kfree(desc_array);
++ }
++ }
++#endif
++
++ /* Init buffer list */
++ write_head.head = NULL;
++ write_head.tail = NULL;
++ write_head.count = 0;
++
++ srcu_idx = srcu_read_lock(&reg_list_srcu);
++ item_p = reg_head.head;
++ while (item_p) {
++ if ((item_p->flags & RSBAC_LIST_PERSIST)
++ && item_p->dirty && !item_p->no_write
++ && !rsbac_debug_no_write) {
++ struct vfsmount *mnt_p;
++
++ mnt_p = rsbac_get_vfsmount(item_p->device);
++ if (mnt_p && rsbac_writable(mnt_p->mnt_sb)) {
++ item_p->dirty = FALSE;
++ error = fill_buffer(item_p, &write_item_p);
++ if (!error) {
++ if (!write_head.head) {
++ write_head.head = write_item_p;
++ write_head.tail = write_item_p;
++ write_head.count = 1;
++ } else {
++ write_head.tail->next =
++ write_item_p;
++ write_item_p->prev =
++ write_head.tail;
++ write_head.tail = write_item_p;
++ write_head.count++;
++ }
++ } else {
++ if ((error != -RSBAC_ENOTWRITABLE)
++ && (error != -RSBAC_ENOMEM)
++ ) {
++ rsbac_printk(KERN_WARNING "rsbac_write_lists(): fill_buffer() for list %s returned error %i\n",
++ item_p->name, error);
++ item_p->dirty = TRUE;
++ }
++ }
++ }
++ }
++ item_p = item_p->next;
++ }
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++
++ if (write_head.count > 0)
++ rsbac_pr_debug(write, "%u lists copied to buffers\n",
++ write_head.count);
++
++ /* write all buffers */
++ if (write_head.count) {
++ count = rsbac_list_write_buffers(write_head);
++ rsbac_pr_debug(write, "%u lists written to disk\n", count);
++ }
++
++ /* LOL */
++ /* Init buffer list */
++ write_lol_head.head = NULL;
++ write_lol_head.tail = NULL;
++ write_lol_head.count = 0;
++
++ srcu_idx = srcu_read_lock(&lol_reg_list_srcu);
++ lol_item_p = lol_reg_head.head;
++ while (lol_item_p) {
++ if ((lol_item_p->flags & RSBAC_LIST_PERSIST)
++ && lol_item_p->dirty && !lol_item_p->no_write
++ && !rsbac_debug_no_write) {
++ struct vfsmount *mnt_p;
++
++ mnt_p = rsbac_get_vfsmount(lol_item_p->device);
++ if (mnt_p && rsbac_writable(mnt_p->mnt_sb)) {
++ lol_item_p->dirty = FALSE;
++ error = fill_lol_buffer(lol_item_p, &write_lol_item_p);
++ if (!error) {
++ if (!write_lol_head.head) {
++ write_lol_head.head =
++ write_lol_item_p;
++ write_lol_head.tail =
++ write_lol_item_p;
++ write_lol_head.count = 1;
++ } else {
++ write_lol_head.tail->next =
++ write_lol_item_p;
++ write_lol_item_p->prev =
++ write_lol_head.tail;
++ write_lol_head.tail =
++ write_lol_item_p;
++ write_lol_head.count++;
++ }
++ } else {
++ if ((error != -RSBAC_ENOTWRITABLE)
++ && (error != -RSBAC_ENOMEM))
++ {
++ rsbac_printk(KERN_WARNING "rsbac_write_lists(): fill_lol_buffer() for list %s returned error %i\n",
++ lol_item_p->name,
++ error);
++ }
++ lol_item_p->dirty = TRUE;
++ }
++ }
++ }
++ lol_item_p = lol_item_p->next;
++ }
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++
++ if (write_lol_head.count > 0)
++ rsbac_pr_debug(write, "%u lists of lists copied to buffers\n",
++ write_lol_head.count);
++ /* write all buffers */
++ if (write_lol_head.count) {
++ subcount =
++ rsbac_list_write_lol_buffers(write_lol_head);
++ count += subcount;
++ rsbac_pr_debug(write, "%u lists of lists written to disk\n",
++ subcount);
++ }
++ rsbac_pr_debug(write, "%u lists written.\n",
++ count);
++
++ if(jiffies > next_rehash) {
++ rsbac_list_auto_rehash();
++ next_rehash = jiffies + (RSBAC_LIST_REHASH_INTERVAL * HZ);
++ }
++ return count;
++}
++#endif /* CONFIG_RSBAC_AUTO_WRITE */
++
++/* Status checking */
++int rsbac_check_lists(int correct)
++{
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_lol_reg_item_t *lol_list;
++ struct rsbac_list_item_t *item_p;
++ struct rsbac_list_item_t *next_item_p;
++ struct rsbac_list_lol_item_t *lol_item_p;
++ struct rsbac_list_lol_item_t *next_lol_item_p;
++ struct rsbac_list_item_t *lol_subitem_p;
++ struct rsbac_list_item_t *next_lol_subitem_p;
++ u_long tmp_count;
++ u_long tmp_subcount;
++ u_long subitem_count;
++ u_long dirty = 0;
++ u_int remove_count;
++ int i;
++ u_long all_count;
++ struct rsbac_list_rcu_free_head_t * rcu_head_p;
++ struct rsbac_list_rcu_free_head_lol_t * rcu_head_lol_p;
++ int srcu_idx;
++
++ rsbac_pr_debug(lists, "called.\n");
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++ srcu_idx = srcu_read_lock(&reg_list_srcu);
++ list = reg_head.head;
++ while (list) {
++restart:
++ remove_count = 0;
++ /* check list */
++ spin_lock(&list->lock);
++ all_count = 0;
++ for (i=0; i<list->nr_hashes; i++) {
++ tmp_count = 0;
++ item_p = list->hashed[i].head;
++ while (item_p) {
++ if ((item_p->max_age
++ && (item_p->max_age <= RSBAC_CURRENT_TIME)
++ )
++ || (list->def_data
++ && !memcmp(((char *) item_p) +
++ sizeof(*item_p) +
++ list->info.desc_size,
++ list->def_data,
++ list->info.data_size)
++ )
++ ) {
++ next_item_p = item_p->next;
++ do_remove_item(list, item_p, i);
++ remove_count++;
++ if (remove_count > rsbac_list_rcu_rate) {
++ rcu_head_p = get_rcu_free(list);
++ spin_unlock(&list->lock);
++ synchronize_rcu();
++ rcu_free_do_cleanup(rcu_head_p);
++ goto restart;
++ }
++ item_p = next_item_p;
++ } else {
++ tmp_count++;
++ item_p = item_p->next;
++ }
++ }
++ if (tmp_count != list->hashed[i].count) {
++ if (correct) {
++ rsbac_printk(KERN_WARNING "rsbac_check_lists(): correcting count mismatch for list %s hash %u on device %02u:%02u - was %u, counted %lu!\n",
++ list->name, i,
++ RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device),
++ list->hashed[i].count, tmp_count);
++ list->hashed[i].count = tmp_count;
++ } else {
++ rsbac_printk(KERN_WARNING "rsbac_check_lists(): count mismatch for list %s hash %u on device %02u:%02u - is %u, counted %lu!\n",
++ list->name, i,
++ RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device),
++ list->hashed[i].count, tmp_count);
++ }
++ }
++ all_count += list->hashed[i].count;
++ }
++ rcu_head_p = get_rcu_free(list);
++ spin_unlock(&list->lock);
++ do_sync_rcu(rcu_head_p);
++ if (list->dirty && (list->flags & RSBAC_LIST_PERSIST)) {
++ dirty++;
++ rsbac_pr_debug(lists, "%s on %02u:%02u has %u items (list is dirty)\n",
++ list->name, RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device), all_count);
++ } else
++ rsbac_pr_debug(lists, "%s on %02u:%02u has %u items\n",
++ list->name, RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device), all_count);
++ list = rcu_dereference(list->next);
++ }
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++
++ srcu_idx = srcu_read_lock(&lol_reg_list_srcu);
++ lol_list = lol_reg_head.head;
++ while (lol_list) {
++lol_restart:
++ remove_count = 0;
++ /* check list */
++ spin_lock(&lol_list->lock);
++ all_count = 0;
++ subitem_count = 0;
++ for (i=0; i<lol_list->nr_hashes; i++) {
++ tmp_count = 0;
++ lol_item_p = lol_list->hashed[i].head;
++ while (lol_item_p) {
++ tmp_subcount = 0;
++ lol_subitem_p = lol_item_p->head;
++ while (lol_subitem_p) {
++ if ((lol_subitem_p->max_age
++ && (lol_subitem_p->max_age <=
++ RSBAC_CURRENT_TIME)
++ )
++ || (lol_list->def_subdata
++ &&
++ !memcmp(((char *)
++ lol_subitem_p) +
++ sizeof
++ (*lol_subitem_p) +
++ lol_list->info.
++ subdesc_size,
++ lol_list->
++ def_subdata,
++ lol_list->info.
++ subdata_size)
++ )
++ ) {
++ next_lol_subitem_p =
++ lol_subitem_p->next;
++ do_remove_lol_subitem
++ (lol_item_p,
++ lol_subitem_p);
++ rcu_free_lol_sub(lol_list, lol_subitem_p);
++ lol_subitem_p =
++ next_lol_subitem_p;
++ } else {
++ tmp_subcount++;
++ lol_subitem_p =
++ lol_subitem_p->next;
++ }
++ }
++ if (tmp_subcount != lol_item_p->count) {
++ if (correct) {
++ rsbac_printk(KERN_WARNING "rsbac_check_lists(): correcting count mismatch for list of lists %s hash %u sublist on %02u:%02u - was %lu, counted %lu!\n",
++ lol_list->name, i,
++ RSBAC_MAJOR
++ (lol_list->
++ device),
++ RSBAC_MINOR
++ (lol_list->
++ device),
++ lol_item_p->
++ count,
++ tmp_subcount);
++ lol_item_p->count =
++ tmp_subcount;
++ } else {
++ rsbac_printk(KERN_WARNING "rsbac_check_lists(): count mismatch for list of lists %s hash %u sublist on %02u:%02u - is %lu, counted %lu!\n",
++ lol_list->name, i,
++ RSBAC_MAJOR
++ (lol_list->
++ device),
++ RSBAC_MINOR
++ (lol_list->
++ device),
++ lol_item_p->
++ count,
++ tmp_subcount);
++ }
++ }
++ if ((lol_item_p->max_age
++ && (lol_item_p->max_age <= RSBAC_CURRENT_TIME)
++ )
++ || (lol_list->def_data
++ && !lol_item_p->count
++ && !memcmp(((char *) lol_item_p) +
++ sizeof(*lol_item_p) +
++ lol_list->info.desc_size,
++ lol_list->def_data,
++ lol_list->info.data_size)
++ )
++ || (!lol_list->info.data_size
++ && (lol_list->flags & RSBAC_LIST_DEF_DATA)
++ && !lol_item_p->count)
++ ) {
++ next_lol_item_p = lol_item_p->next;
++ do_remove_lol_item(lol_list, lol_item_p, i);
++ remove_count++;
++ if (remove_count > rsbac_list_rcu_rate) {
++ rcu_head_lol_p = get_rcu_free_lol(lol_list);
++ spin_unlock(&lol_list->lock);
++ synchronize_rcu();
++ rcu_free_do_cleanup_lol(rcu_head_lol_p);
++ goto lol_restart;
++ }
++ lol_item_p = next_lol_item_p;
++ } else {
++ tmp_count++;
++ subitem_count += lol_item_p->count;
++ lol_item_p = lol_item_p->next;
++ }
++ }
++ if (tmp_count != lol_list->hashed[i].count) {
++ if (correct) {
++ rsbac_printk(KERN_WARNING "rsbac_check_lists(): correcting count mismatch for list of lists %s hash %u on %02u:%02u - was %u, counted %lu!\n",
++ lol_list->name, i,
++ RSBAC_MAJOR(lol_list->device),
++ RSBAC_MINOR(lol_list->device),
++ lol_list->hashed[i].count, tmp_count);
++ lol_list->hashed[i].count = tmp_count;
++ } else {
++ rsbac_printk(KERN_WARNING "rsbac_check_lists(): count mismatch for list of lists %s hash %u on %02u:%02u - is %u, counted %lu!\n",
++ lol_list->name, i,
++ RSBAC_MAJOR(lol_list->device),
++ RSBAC_MINOR(lol_list->device),
++ lol_list->hashed[i].count, tmp_count);
++ }
++ }
++ all_count += lol_list->hashed[i].count;
++ }
++ rcu_head_lol_p = get_rcu_free_lol(lol_list);
++ spin_unlock(&lol_list->lock);
++ do_sync_rcu_lol(rcu_head_lol_p);
++ if (lol_list->dirty
++ && (lol_list->flags & RSBAC_LIST_PERSIST)) {
++ dirty++;
++ rsbac_pr_debug(lists, "%s on %02u:%02u has %u items and %lu subitems (list is dirty)\n",
++ lol_list->name,
++ RSBAC_MAJOR(lol_list->device),
++ RSBAC_MINOR(lol_list->device),
++ all_count, subitem_count);
++ } else
++ rsbac_pr_debug(lists, "%s on %02u:%02u has %u items and %lu subitems\n",
++ lol_list->name,
++ RSBAC_MAJOR(lol_list->device),
++ RSBAC_MINOR(lol_list->device),
++ all_count, subitem_count);
++ lol_list = lol_list->next;
++ }
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++ return 0;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_check);
++#endif
++int rsbac_list_check(rsbac_list_handle_t handle, int correct)
++{
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_item_t *item_p;
++ struct rsbac_list_item_t *next_item_p;
++ u_long tmp_count;
++ int i;
++ struct rsbac_list_rcu_free_head_t * rcu_head_p;
++ u_int remove_count;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_reg_item_t *) handle;
++ if (!list || (list->self != list))
++ return -RSBAC_EINVALIDLIST;
++
++ rsbac_pr_debug(lists, "checking list %s.\n", list->name);
++
++restart:
++ remove_count = 0;
++
++ spin_lock(&list->lock);
++ for (i=0; i<list->nr_hashes; i++) {
++ tmp_count = 0;
++ item_p = list->hashed[i].head;
++ while (item_p) {
++ if ((item_p->max_age
++ && (item_p->max_age <= RSBAC_CURRENT_TIME)
++ )
++ || (list->def_data
++ && !memcmp(((char *) item_p) + sizeof(*item_p) +
++ list->info.desc_size, list->def_data,
++ list->info.data_size)
++ )
++ ) {
++ next_item_p = item_p->next;
++ do_remove_item(list, item_p, i);
++ remove_count++;
++ if (remove_count > rsbac_list_rcu_rate) {
++ rcu_head_p = get_rcu_free(list);
++ spin_unlock(&list->lock);
++ synchronize_rcu();
++ rcu_free_do_cleanup(rcu_head_p);
++ goto restart;
++ }
++ item_p = next_item_p;
++ list->dirty = TRUE;
++ } else {
++ tmp_count++;
++ item_p = item_p->next;
++ }
++ }
++ if (tmp_count != list->hashed[i].count) {
++ if (correct) {
++ rsbac_printk(KERN_WARNING "rsbac_list_check(): correcting count mismatch for list %s hash %u on device %02u:%02u - was %u, counted %lu!\n",
++ list->name, i, RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device),
++ list->hashed[i].count, tmp_count);
++ list->hashed[i].count = tmp_count;
++ list->dirty = TRUE;
++ } else {
++ rsbac_printk(KERN_WARNING "rsbac_list_check(): count mismatch for list %s hash %u on device %02u:%02u - is %u, counted %lu!\n",
++ list->name, i, RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device),
++ list->hashed[i].count, tmp_count);
++ }
++ }
++ }
++ rcu_head_p = get_rcu_free(list);
++ spin_unlock(&list->lock);
++ synchronize_rcu();
++ rcu_free_do_cleanup(rcu_head_p);
++ return 0;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_lol_check);
++#endif
++int rsbac_list_lol_check(rsbac_list_handle_t handle, int correct)
++{
++ struct rsbac_list_lol_reg_item_t *lol_list;
++ struct rsbac_list_lol_item_t *lol_item_p;
++ struct rsbac_list_lol_item_t *next_lol_item_p;
++ struct rsbac_list_item_t *lol_subitem_p;
++ struct rsbac_list_item_t *next_lol_subitem_p;
++ u_long tmp_count;
++ u_long tmp_subcount;
++ int i;
++ struct rsbac_list_rcu_free_head_lol_t * rcu_head_lol_p;
++ u_int remove_count;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ lol_list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (!lol_list || (lol_list->self != lol_list))
++ return -RSBAC_EINVALIDLIST;
++
++ rsbac_pr_debug(lists, "checking list %s.\n", lol_list->name);
++
++restart:
++ remove_count = 0;
++ spin_lock(&lol_list->lock);
++ for (i=0; i<lol_list->nr_hashes; i++) {
++ tmp_count = 0;
++ lol_item_p = lol_list->hashed[i].head;
++ while (lol_item_p) {
++ if ((lol_item_p->max_age
++ && (lol_item_p->max_age <= RSBAC_CURRENT_TIME)
++ )
++ || (lol_list->def_data
++ && !lol_item_p->count
++ && !memcmp(((char *) lol_item_p) +
++ sizeof(*lol_item_p) +
++ lol_list->info.desc_size,
++ lol_list->def_data,
++ lol_list->info.data_size)
++ )
++ || (!lol_list->info.data_size
++ && (lol_list->flags & RSBAC_LIST_DEF_DATA)
++ && !lol_item_p->count)
++ ) {
++ next_lol_item_p = lol_item_p->next;
++ do_remove_lol_item(lol_list, lol_item_p, i);
++ remove_count++;
++ if (remove_count > rsbac_list_rcu_rate) {
++ rcu_head_lol_p = get_rcu_free_lol(lol_list);
++ spin_unlock(&lol_list->lock);
++ synchronize_rcu();
++ rcu_free_do_cleanup_lol(rcu_head_lol_p);
++ goto restart;
++ }
++ lol_item_p = next_lol_item_p;
++ } else {
++ tmp_count++;
++ tmp_subcount = 0;
++ lol_subitem_p = lol_item_p->head;
++ while (lol_subitem_p) {
++ if ((lol_subitem_p->max_age
++ && (lol_subitem_p->max_age <=
++ RSBAC_CURRENT_TIME)
++ )
++ || (lol_list->def_subdata
++ && !memcmp(((char *) lol_subitem_p)
++ +
++ sizeof(*lol_subitem_p) +
++ lol_list->info.
++ subdesc_size,
++ lol_list->def_subdata,
++ lol_list->info.
++ subdata_size)
++ )
++ ) {
++ next_lol_subitem_p =
++ lol_subitem_p->next;
++ do_remove_lol_subitem(lol_item_p,
++ lol_subitem_p);
++ rcu_free_lol_sub(lol_list, lol_subitem_p);
++ lol_subitem_p = next_lol_subitem_p;
++ } else {
++ tmp_subcount++;
++ lol_subitem_p =
++ lol_subitem_p->next;
++ }
++ }
++ if (tmp_subcount != lol_item_p->count) {
++ if (correct) {
++ rsbac_printk(KERN_WARNING "rsbac_list_lol_check(): correcting count mismatch for list of lists %s hash %u sublist on %02u:%02u - was %lu, counted %lu!\n",
++ lol_list->name, i,
++ RSBAC_MAJOR(lol_list->
++ device),
++ RSBAC_MINOR(lol_list->
++ device),
++ lol_item_p->count,
++ tmp_subcount);
++ lol_item_p->count = tmp_subcount;
++ } else {
++ rsbac_printk(KERN_WARNING "rsbac_list_lol_check(): count mismatch for list of lists %s hash %u sublist on %02u:%02u - is %lu, counted %lu!\n",
++ lol_list->name, i,
++ RSBAC_MAJOR(lol_list->
++ device),
++ RSBAC_MINOR(lol_list->
++ device),
++ lol_item_p->count,
++ tmp_subcount);
++ }
++ }
++ lol_item_p = lol_item_p->next;
++ }
++ }
++ if (tmp_count != lol_list->hashed[i].count) {
++ if (correct) {
++ rsbac_printk(KERN_WARNING "rsbac_list_lol_check(): correcting count mismatch for list of lists %s hash %u on %02u:%02u - was %u, counted %lu!\n",
++ lol_list->name, i,
++ RSBAC_MAJOR(lol_list->device),
++ RSBAC_MINOR(lol_list->device),
++ lol_list->hashed[i].count, tmp_count);
++ lol_list->hashed[i].count = tmp_count;
++ } else {
++ rsbac_printk(KERN_WARNING "rsbac_list_lol_check(): count mismatch for list of lists %s hash %u on %02u:%02u - is %u, counted %lu!\n",
++ lol_list->name, i,
++ RSBAC_MAJOR(lol_list->device),
++ RSBAC_MINOR(lol_list->device),
++ lol_list->hashed[i].count, tmp_count);
++ }
++ }
++ }
++ rcu_head_lol_p = get_rcu_free_lol(lol_list);
++ spin_unlock(&lol_list->lock);
++ synchronize_rcu();
++ rcu_free_do_cleanup_lol(rcu_head_lol_p);
++ return 0;
++}
++
++
++/********************/
++/* Registration */
++/********************/
++
++/* get generic list registration version */
++inline rsbac_version_t rsbac_list_version(void)
++{
++ return RSBAC_LIST_VERSION;
++}
++
++/* register a new list */
++/*
++ * If list with same name exists in memory, error -RSBAC_EEXISTS is returned.
++ * If list with same name and key exists on device, it is restored depending on the flags.
++ * If list with same name, but different key exists, access is denied (error -EPERM).
++ *
++ * ds_version: for binary modules, must be RSBAC_LIST_VERSION. If version differs, return error.
++ * handle_p: for all list accesses, an opaque handle is put into *handle_p.
++ * key: positive, secret __u32 key, which must be the same as in on-disk version, if persistent
++ * list_version: positive __u32 version number for the list. If old on-disk version is
++ different, upconversion is tried (depending on flags and get_conv function)
++ * flags: see flag values
++ * desc_size: size of the descriptor (error is returned, if value is 0 or list exists and value differs)
++ * data_size: size of data (error is returned, if list exists and value differs). Can be 0 for sets.
++ * compare: for lookup and list optimization, can be NULL, then
++ memcmp(desc1, desc2, desc_size) is used
++ * def_data: default data value for flag RSBAC_LIST_DEF_DATA
++ (if NULL, flag is cleared)
++ * name: the on-disk name, must be distinct and max. 7 or 8.2 chars
++ (only used for statistics, if non-persistent)
++ * device: the device to read list from or to save list to - use 0 for root dev
++ (ignored, if non-persistent)
++ * nr_hashes: Number of hashes for this list, maximum is rsbac_list_max_hashes,
++ which is derived from CONFIG_RSBAC_LIST_MAX_HASHES.
++ If > maximum, it will be reduced to maximum automatically.
++ RSBAC_LIST_MIN_MAX_HASHES <= rsbac_list_max_hashes
++ <= RSBAC_MAX_KMALLOC / sizeof(struct rsbac_list_hashed_t) in all cases,
++ see above.
++ Thus, it is safe to use nr_hashes <= RSBAC_LIST_MIN_MAX_HASHES without
++ checks. Value may vary between registrations. Please note that with
++ registration flag RSBAC_LIST_AUTO_HASH_RESIZE the hash size increases
++ automatically, if the list grows bigger than 200 * nr_hashes.
++ * hash_function: Hash function(desc,nr_hashes), must always return a value
++ from 0 to nr_hashes-1.
++ * old_base_name: If not NULL and persistent list with name cannot be read,
++ try to read all old_base_name<n> with n from 0 to 31.
++ */
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_register_hashed);
++#endif
++int rsbac_list_register_hashed(rsbac_version_t ds_version,
++ rsbac_list_handle_t * handle_p,
++ struct rsbac_list_info_t *info_p,
++ u_int flags,
++ rsbac_list_compare_function_t * compare,
++ rsbac_list_get_conv_t * get_conv,
++ void *def_data, char *name, kdev_t device,
++ u_int nr_hashes,
++ rsbac_list_hash_function_t hash_function,
++ char * old_base_name)
++{
++ struct rsbac_list_reg_item_t *reg_item_p;
++ struct rsbac_list_reg_item_t *new_reg_item_p;
++ int err = 0;
++ int srcu_idx;
++
++ if (ds_version != RSBAC_LIST_VERSION) {
++ if (name) {
++ rsbac_printk(KERN_WARNING "rsbac_list_register: wrong ds_version %u for list %s, expected %u!\n",
++ ds_version, name, RSBAC_LIST_VERSION);
++ }
++ return -RSBAC_EINVALIDVERSION;
++ }
++ if (!handle_p || !info_p)
++ return -RSBAC_EINVALIDPOINTER;
++ *handle_p = NULL;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++ if (!info_p->key || !info_p->version || !info_p->desc_size)
++ return -RSBAC_EINVALIDVALUE;
++ if (info_p->max_age > RSBAC_LIST_MAX_AGE_LIMIT)
++ return -RSBAC_EINVALIDVALUE;
++ if (info_p->desc_size + info_p->data_size >
++ RSBAC_LIST_MAX_ITEM_SIZE)
++ return -RSBAC_EINVALIDVALUE;
++ if (nr_hashes > rsbac_list_max_hashes)
++ nr_hashes = rsbac_list_max_hashes;
++ else if (nr_hashes > 2) {
++ u_int i = 1;
++
++ while ((i << 1) <= nr_hashes)
++ i = i << 1;
++ nr_hashes = i;
++ }
++ if (!hash_function) {
++ nr_hashes = 1;
++ flags &= ~RSBAC_LIST_AUTO_HASH_RESIZE;
++ } else
++ if (!nr_hashes)
++ nr_hashes = 1;
++ if (name) {
++ struct rsbac_list_lol_reg_item_t *lol_reg_item_p;
++
++ srcu_idx = srcu_read_lock(&reg_list_srcu);
++ reg_item_p = lookup_reg_name(name, device);
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++ if (reg_item_p) {
++ rsbac_pr_debug(lists, "list name %s already exists on device %02u:%02u!\n",
++ name, RSBAC_MAJOR(device),
++ RSBAC_MINOR(device));
++ return -RSBAC_EEXISTS;
++ }
++ srcu_idx = srcu_read_lock(&lol_reg_list_srcu);
++ lol_reg_item_p = lookup_lol_reg_name(name, device);
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++ if (lol_reg_item_p) {
++ rsbac_pr_debug(lists, "list name %s already exists on device %02u:%02u!\n",
++ name, RSBAC_MAJOR(device),
++ RSBAC_MINOR(device));
++ return -RSBAC_EEXISTS;
++ }
++ } else if (flags & RSBAC_LIST_PERSIST) {
++ rsbac_printk(KERN_WARNING "rsbac_list_register: attempt to register a persistent list without a name.\n");
++ return -RSBAC_EINVALIDVALUE;
++ }
++
++ if (flags & RSBAC_LIST_PERSIST) {
++ if (RSBAC_IS_AUTO_DEV(device))
++ device = rsbac_root_dev;
++ if (!RSBAC_MAJOR(device))
++ flags &= ~RSBAC_LIST_PERSIST;
++ }
++ rsbac_pr_debug(lists, "registering list %s for device %02u:%02u.\n",
++ name, RSBAC_MAJOR(device), RSBAC_MINOR(device));
++ new_reg_item_p =
++ create_reg(info_p, flags, compare, get_conv, def_data, name,
++ device, nr_hashes, hash_function, old_base_name);
++ if (!new_reg_item_p) {
++ return -RSBAC_ECOULDNOTADDITEM;
++ }
++ /* Restore from disk, but only for real device mounts */
++ if ((flags & RSBAC_LIST_PERSIST)
++ && RSBAC_MAJOR(device)
++ ) {
++ rsbac_pr_debug(lists, "restoring list %s from device %02u:%02u.\n",
++ name, RSBAC_MAJOR(device), RSBAC_MINOR(device));
++ err = read_list(new_reg_item_p);
++ /* not found is no error */
++ if (err == -RSBAC_ENOTFOUND)
++ err = 0;
++ else if (err) {
++ char tmp[RSBAC_MAXNAMELEN];
++
++ if (rsbac_list_recover) {
++ rsbac_printk(KERN_WARNING "restoring list %s from device %02u:%02u failed with error %s, rsbac_list_recover is set so registering anyway.\n",
++ name,
++ RSBAC_MAJOR(device),
++ RSBAC_MINOR(device),
++ get_error_name(tmp, err));
++ err = 0;
++ } else {
++ rsbac_printk(KERN_WARNING "restoring list %s from device %02u:%02u failed with error %s, unregistering list.\n",
++ name,
++ RSBAC_MAJOR(device),
++ RSBAC_MINOR(device),
++ get_error_name(tmp, err));
++ clear_reg(new_reg_item_p);
++ return err;
++ }
++ } else
++ rsbac_pr_debug(lists, "restoring list %s from device %02u:%02u was successful.\n",
++ name, RSBAC_MAJOR(device),
++ RSBAC_MINOR(device));
++ }
++
++ spin_lock(&reg_head.lock);
++ reg_item_p = add_reg(new_reg_item_p);
++ spin_unlock(&reg_head.lock);
++ if (!reg_item_p) {
++ rsbac_printk(KERN_WARNING "rsbac_list_register: inserting list %s failed!\n",
++ name);
++ /* cleanup */
++ clear_reg(new_reg_item_p);
++ return -RSBAC_ECOULDNOTADDITEM;
++ }
++
++ /* finish */
++#if defined(CONFIG_RSBAC_PROC)
++ /* create proc entry, if requested */
++ if (flags & RSBAC_LIST_BACKUP) {
++ reg_item_p->proc_entry_p =
++ create_proc_entry(reg_item_p->name, S_IFREG | S_IRUGO,
++ proc_rsbac_backup_p);
++ if (reg_item_p->proc_entry_p) {
++ reg_item_p->proc_entry_p->read_proc =
++ backup_proc_read;
++ reg_item_p->proc_entry_p->data = reg_item_p;
++ }
++ } else {
++ reg_item_p->proc_entry_p = NULL;
++ }
++#endif
++ *handle_p = reg_item_p;
++ return err;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_register);
++#endif
++int rsbac_list_register(rsbac_version_t ds_version,
++ rsbac_list_handle_t * handle_p,
++ struct rsbac_list_info_t *info_p,
++ u_int flags,
++ rsbac_list_compare_function_t * compare,
++ rsbac_list_get_conv_t * get_conv,
++ void *def_data, char *name, kdev_t device)
++{
++ return rsbac_list_register_hashed(ds_version, handle_p, info_p, flags,
++ compare, get_conv, def_data, name, device,
++ 1, NULL, NULL);
++}
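
The sketch below is an editorial illustration and not part of the RSBAC patch: it shows how a module might register a simple persistent list through the API documented above and add one item to it. The handle variable, the key value, the list name "example" and the __u32 descriptor/data layout are invented for the example; the calls and the rsbac_list_info_t fields are the ones used in this file.

static rsbac_list_handle_t example_handle = NULL;

static int example_register(kdev_t device)
{
	struct rsbac_list_info_t info;
	__u32 desc = 1000;	/* e.g. a user id */
	__u32 data = 42;	/* e.g. a per-user counter */
	int err;

	memset(&info, 0, sizeof(info));
	info.key = 12345678;		/* secret key, must match the on-disk list */
	info.version = 1;		/* list version, used for conversion */
	info.desc_size = sizeof(desc);
	info.data_size = sizeof(data);
	info.max_age = 0;		/* no automatic expiry */

	err = rsbac_list_register(RSBAC_LIST_VERSION, &example_handle, &info,
				  RSBAC_LIST_PERSIST,
				  NULL,		/* compare: fall back to memcmp() */
				  NULL,		/* get_conv: no on-disk conversion */
				  NULL,		/* def_data: no default value */
				  "example", device);	/* device 0 means root dev */
	if (err)
		return err;

	/* Add or update one item; the data is copied behind the descriptor. */
	return rsbac_list_add(example_handle, &desc, &data);
}
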
++
++/* register a new list of lists */
++/*
++ * If list with same name exists in memory, error -RSBAC_EEXISTS is returned.
++ * If list with same name and key exists on device, it is restored depending on the flags.
++ * If list with same name, but different key exists, access is denied (error -EPERM).
++ *
++ * ds_version: for binary modules, must be RSBAC_LIST_VERSION. If version differs, return error.
++ * handle_p: for all list accesses, an opaque handle is put into *handle_p.
++ * key: positive, secret __u32 key, which must be the same as in on-disk version, if persistent
++ * list_version: positive __u32 version number for the list. If old on-disk version is
++ different, upconversion is tried (depending on flags and get_conv function)
++ * flags: see flag values
++ * desc_size: size of the descriptor (error is returned, if value is 0 or list exists and value differs)
++ * subdesc_size: size of the sublist descriptor (error is returned, if value is 0 or list exists
++ and value differs)
++ * data_size: size of data (error is returned, if list exists and value differs). Can be 0 for sets.
++ * subdata_size: size of sublist data (error is returned, if list exists and value differs).
++ Can be 0 for sets.
++ * compare: for lookup and list optimization, can be NULL, then
++ memcmp(desc1, desc2, desc_size) is used
++ * subcompare: for subitem lookup and optimization of the sublist, can be NULL, then
++ memcmp(subdesc1, subdesc2, subdesc_size) is used
++ * def_data: default data value for flag RSBAC_LIST_DEF_DATA
++ (if NULL, flag is cleared)
++ * def_subdata: default subdata value for flag RSBAC_LIST_DEF_SUBDATA
++ (if NULL, flag is cleared)
++ * name: the on-disk name, must be distinct and max. 7 or 8.2 chars
++ (only used for info, if non-persistent)
++ * device: the device to read list from or to save list to - use 0 for root dev
++ (ignored, if non-persistent)
++ * nr_hashes: Number of hashes for this list, maximum is rsbac_list_lol_max_hashes,
++ which is derived from CONFIG_RSBAC_LIST_MAX_HASHES.
++ If > maximum, it will be reduced to maximum automatically.
++ RSBAC_LIST_MIN_MAX_HASHES <= rsbac_list_lol_max_hashes
++ <= RSBAC_MAX_KMALLOC / sizeof(struct rsbac_list_lol_hashed_t) in all
++ cases, see above.
++ Thus, it is safe to use nr_hashes <= RSBAC_LIST_MIN_MAX_HASHES without
++ checks. Value may vary between registrations. Please note that with
++ registration flag RSBAC_LIST_AUTO_HASH_RESIZE the hash size increases
++ automatically, if the list grows bigger than 200 * nr_hashes.
++ * hash_function: Hash function for desc, must always return a value
++ from 0 to nr_hashes-1.
++ * old_base_name: If not NULL and persistent list with name cannot be read,
++ try to read all old_base_name<n> with n from 0 to 31.
++ */
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_lol_register_hashed);
++#endif
++int rsbac_list_lol_register_hashed(rsbac_version_t ds_version,
++ rsbac_list_handle_t * handle_p,
++ struct rsbac_list_lol_info_t *info_p,
++ u_int flags,
++ rsbac_list_compare_function_t * compare,
++ rsbac_list_compare_function_t * subcompare,
++ rsbac_list_get_conv_t * get_conv,
++ rsbac_list_get_conv_t * get_subconv,
++ void *def_data,
++ void *def_subdata, char *name, kdev_t device,
++ u_int nr_hashes,
++ rsbac_list_hash_function_t hash_function,
++ char * old_base_name)
++{
++ struct rsbac_list_lol_reg_item_t *reg_item_p;
++ struct rsbac_list_lol_reg_item_t *new_reg_item_p;
++ int err = 0;
++ int srcu_idx;
++
++ if (ds_version != RSBAC_LIST_VERSION)
++ return -RSBAC_EINVALIDVERSION;
++ if (!handle_p)
++ return -RSBAC_EINVALIDPOINTER;
++ *handle_p = NULL;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++ if (!info_p->key || !info_p->version || !info_p->desc_size)
++ return -RSBAC_EINVALIDVALUE;
++ if (info_p->max_age > RSBAC_LIST_MAX_AGE_LIMIT)
++ return -RSBAC_EINVALIDVALUE;
++ if (info_p->desc_size + info_p->data_size >
++ RSBAC_LIST_MAX_ITEM_SIZE)
++ return -RSBAC_EINVALIDVALUE;
++ if (info_p->subdesc_size + info_p->subdata_size >
++ RSBAC_LIST_MAX_ITEM_SIZE)
++ return -RSBAC_EINVALIDVALUE;
++ if (nr_hashes > rsbac_list_lol_max_hashes)
++ nr_hashes = rsbac_list_lol_max_hashes;
++ else if (nr_hashes > 2) {
++ u_int i = 1;
++
++ while ((i << 1) <= nr_hashes)
++ i = i << 1;
++ nr_hashes = i;
++ }
++ if (!hash_function) {
++ nr_hashes = 1;
++ flags &= ~RSBAC_LIST_AUTO_HASH_RESIZE;
++ } else
++ if (!nr_hashes)
++ nr_hashes = 1;
++ if (name) {
++ struct rsbac_list_reg_item_t *std_reg_item_p;
++
++ srcu_idx = srcu_read_lock(&lol_reg_list_srcu);
++ reg_item_p = lookup_lol_reg_name(name, device);
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++ if (reg_item_p) {
++ rsbac_pr_debug(lists, "list name %s already exists on device %02u:%02u!\n",
++ name, RSBAC_MAJOR(device),
++ RSBAC_MINOR(device));
++ return -RSBAC_EEXISTS;
++ }
++ srcu_idx = srcu_read_lock(&reg_list_srcu);
++ std_reg_item_p = lookup_reg_name(name, device);
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++ if (std_reg_item_p) {
++ rsbac_pr_debug(lists, "list name %s already exists on device %02u:%02u!\n",
++ name, RSBAC_MAJOR(device),
++ RSBAC_MINOR(device));
++ return -RSBAC_EEXISTS;
++ }
++ } else if (flags & RSBAC_LIST_PERSIST) {
++ rsbac_printk(KERN_WARNING "rsbac_list_lol_register: attempt to register a persistent list of lists without a name.\n");
++ return -RSBAC_EINVALIDVALUE;
++ }
++
++ if (flags & RSBAC_LIST_PERSIST) {
++ if (RSBAC_IS_AUTO_DEV(device))
++ device = rsbac_root_dev;
++ if (!RSBAC_MAJOR(device))
++ flags &= ~RSBAC_LIST_PERSIST;
++ }
++ rsbac_pr_debug(lists, "registering list of lists %s.\n",
++ name);
++ new_reg_item_p = create_lol_reg(info_p, flags, compare, subcompare,
++ get_conv, get_subconv,
++ def_data, def_subdata,
++ name, device,
++ nr_hashes, hash_function,
++ old_base_name);
++ if (!new_reg_item_p) {
++ return -RSBAC_ECOULDNOTADDITEM;
++ }
++ /* Restore from disk */
++ if (flags & RSBAC_LIST_PERSIST) {
++ rsbac_pr_debug(lists, "restoring list %s from device %02u:%02u.\n",
++ name, RSBAC_MAJOR(device),
++ RSBAC_MINOR(device));
++ err = read_lol_list(new_reg_item_p);
++ /* not found is no error */
++ if (err == -RSBAC_ENOTFOUND)
++ err = 0;
++ else if (err) {
++#ifdef CONFIG_RSBAC_DEBUG
++ char tmp[RSBAC_MAXNAMELEN];
++#endif
++
++ rsbac_pr_debug(lists, "restoring list %s from device %02u:%02u failed with error %s, unregistering list.\n",
++ name, RSBAC_MAJOR(device),
++ RSBAC_MINOR(device),
++ get_error_name(tmp, err));
++ clear_lol_reg(new_reg_item_p);
++ return err;
++ } else
++ rsbac_pr_debug(lists, "restoring list %s from device %02u:%02u was successful.\n",
++ name, RSBAC_MAJOR(device),
++ RSBAC_MINOR(device));
++ }
++
++ spin_lock(&lol_reg_head.lock);
++ reg_item_p = add_lol_reg(new_reg_item_p);
++ spin_unlock(&lol_reg_head.lock);
++ if (!reg_item_p) {
++ rsbac_printk(KERN_WARNING "rsbac_list_lol_register: inserting list %s failed!\n",
++ name);
++ /* cleanup */
++ clear_lol_reg(new_reg_item_p);
++ return -RSBAC_ECOULDNOTADDITEM;
++ }
++
++ /* finish */
++#if defined(CONFIG_RSBAC_PROC)
++ /* create proc entry, if requested */
++ if (flags & RSBAC_LIST_BACKUP) {
++ reg_item_p->proc_entry_p =
++ create_proc_entry(reg_item_p->name, S_IFREG | S_IRUGO,
++ proc_rsbac_backup_p);
++ if (reg_item_p->proc_entry_p) {
++ reg_item_p->proc_entry_p->read_proc =
++ lol_backup_proc_read;
++ reg_item_p->proc_entry_p->data = reg_item_p;
++ }
++ } else {
++ reg_item_p->proc_entry_p = NULL;
++ }
++#endif
++ *handle_p = reg_item_p;
++ return err;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_lol_register);
++#endif
++int rsbac_list_lol_register(rsbac_version_t ds_version,
++ rsbac_list_handle_t * handle_p,
++ struct rsbac_list_lol_info_t *info_p,
++ u_int flags,
++ rsbac_list_compare_function_t * compare,
++ rsbac_list_compare_function_t * subcompare,
++ rsbac_list_get_conv_t * get_conv,
++ rsbac_list_get_conv_t * get_subconv,
++ void *def_data,
++ void *def_subdata, char *name, kdev_t device) {
++ return rsbac_list_lol_register_hashed (ds_version, handle_p, info_p,
++ flags, compare, subcompare, get_conv,
++ get_subconv, def_data, def_subdata,
++ name, device,
++ 1, NULL, NULL);
++}
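
As a further editorial sketch (again not part of the patch), the same pattern for a list of lists: the top-level descriptor anchors a sublist of subdesc/subdata pairs. With the RSBAC_LIST_DEF_DATA flag, adding a subitem via rsbac_ta_list_lol_subadd_ttl() creates the missing top-level item with the default data automatically, which is what this sketch relies on. The handle name, key, the name "exlol" and the __u32 layout are invented.

static rsbac_list_handle_t example_lol_handle = NULL;

static int example_lol_register(kdev_t device)
{
	struct rsbac_list_lol_info_t info;
	__u32 def_data = 0;		/* default top-level data */
	__u32 desc = 1000;		/* e.g. a user id */
	__u32 subdesc = 100;		/* e.g. a group id in that user's set */
	int err;

	memset(&info, 0, sizeof(info));
	info.key = 87654321;
	info.version = 1;
	info.desc_size = sizeof(desc);
	info.data_size = sizeof(def_data);
	info.subdesc_size = sizeof(subdesc);
	info.subdata_size = 0;		/* the sublist is a plain set */
	info.max_age = 0;

	err = rsbac_list_lol_register(RSBAC_LIST_VERSION, &example_lol_handle,
				      &info,
				      RSBAC_LIST_PERSIST | RSBAC_LIST_DEF_DATA,
				      NULL, NULL,	/* compare, subcompare */
				      NULL, NULL,	/* get_conv, get_subconv */
				      &def_data, NULL,	/* def_data, def_subdata */
				      "exlol", device);
	if (err)
		return err;

	/* ta_number 0 and ttl 0: no transaction, no expiry. The top-level
	 * item for desc is created on demand because of RSBAC_LIST_DEF_DATA. */
	return rsbac_ta_list_lol_subadd_ttl(0, example_lol_handle, 0,
					    &desc, &subdesc, NULL);
}
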
++
++/* destroy list */
++/* list is destroyed, disk file is deleted */
++/* list must have been opened with register */
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_destroy);
++#endif
++int rsbac_list_destroy(rsbac_list_handle_t * handle_p,
++ rsbac_list_key_t key)
++{
++ struct rsbac_list_reg_item_t *reg_item_p;
++ int err = 0;
++ int srcu_idx;
++
++ if (!handle_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!*handle_p)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ srcu_idx = srcu_read_lock(&reg_list_srcu);
++ reg_item_p =
++ lookup_reg((struct rsbac_list_reg_item_t *) *handle_p);
++ if (!reg_item_p) {
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_list_destroy: destroying list failed due to invalid handle!\n");
++ return -RSBAC_EINVALIDLIST;
++ }
++ if (reg_item_p->info.key != key) {
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_list_destroy: destroying list %s denied due to invalid key!\n",
++ reg_item_p->name);
++ return -EPERM;
++ }
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++ rsbac_pr_debug(lists, "destroying list %s.\n",
++ reg_item_p->name);
++#if defined(CONFIG_RSBAC_PROC)
++ /* delete proc entry, if it exists */
++ if ((reg_item_p->flags & RSBAC_LIST_BACKUP)
++ && reg_item_p->proc_entry_p) {
++ remove_proc_entry(reg_item_p->name, proc_rsbac_backup_p);
++ reg_item_p->proc_entry_p = NULL;
++ }
++#endif
++
++#if 0
++ if (reg_item_p->flags & RSBAC_LIST_PERSIST)
++ err = unlink_list(reg_item_p);
++#endif
++
++ spin_lock(&reg_head.lock);
++ remove_reg(reg_item_p);
++ *handle_p = NULL;
++ spin_unlock(&reg_head.lock);
++ synchronize_srcu(&reg_list_srcu);
++ /* now we can remove the item from memory */
++ clear_reg(reg_item_p);
++ return err;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_lol_destroy);
++#endif
++int rsbac_list_lol_destroy(rsbac_list_handle_t * handle_p,
++ rsbac_list_key_t key)
++{
++ struct rsbac_list_lol_reg_item_t *reg_item_p;
++ int err = 0;
++ int srcu_idx;
++
++ if (!handle_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!*handle_p)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ srcu_idx = srcu_read_lock(&lol_reg_list_srcu);
++ reg_item_p =
++ lookup_lol_reg((struct rsbac_list_lol_reg_item_t *) *handle_p);
++ if (!reg_item_p) {
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_list_lol_destroy: destroying list failed due to invalid handle!\n");
++ return -RSBAC_EINVALIDLIST;
++ }
++ if (reg_item_p->info.key != key) {
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_list_lol_destroy: destroying list %s denied due to invalid key %u!\n",
++ reg_item_p->name, key);
++ return -EPERM;
++ }
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++ rsbac_pr_debug(lists, "destroying list %s.\n",
++ reg_item_p->name);
++#if defined(CONFIG_RSBAC_PROC)
++ /* delete proc entry, if it exists */
++ if ((reg_item_p->flags & RSBAC_LIST_BACKUP)
++ && reg_item_p->proc_entry_p) {
++ remove_proc_entry(reg_item_p->name, proc_rsbac_backup_p);
++ reg_item_p->proc_entry_p = NULL;
++ }
++#endif
++#if 0
++ if (reg_item_p->flags & RSBAC_LIST_PERSIST)
++ err = unlink_lol_list(reg_item_p);
++#endif
++
++ spin_lock(&lol_reg_head.lock);
++ remove_lol_reg(reg_item_p);
++ spin_unlock(&lol_reg_head.lock);
++ synchronize_srcu(&lol_reg_list_srcu);
++ /* now we can remove the item from memory */
++ clear_lol_reg(reg_item_p);
++ return err;
++}
++
++/* detach from list */
++/* list is saved and removed from memory. Call register for new access. */
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_detach);
++#endif
++int rsbac_list_detach(rsbac_list_handle_t * handle_p, rsbac_list_key_t key)
++{
++ struct rsbac_list_reg_item_t *reg_item_p;
++ int err = 0;
++ int srcu_idx;
++
++ if (!handle_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!*handle_p)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ srcu_idx = srcu_read_lock(&reg_list_srcu);
++ reg_item_p =
++ lookup_reg((struct rsbac_list_reg_item_t *) *handle_p);
++ if (!reg_item_p) {
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_list_detach: detaching list failed due to invalid handle!\n");
++ return -RSBAC_EINVALIDLIST;
++ }
++ if (reg_item_p->info.key != key) {
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_list_detach: detaching list %s denied due to invalid key %u!\n",
++ reg_item_p->name, key);
++ return -EPERM;
++ }
++#if defined(CONFIG_RSBAC_PROC)
++ /* delete proc entry, if it exists */
++ if ((reg_item_p->flags & RSBAC_LIST_BACKUP)
++ && reg_item_p->proc_entry_p) {
++ remove_proc_entry(reg_item_p->name, proc_rsbac_backup_p);
++ reg_item_p->proc_entry_p = NULL;
++ }
++#endif
++#ifndef CONFIG_RSBAC_NO_WRITE
++ /* final write, if dirty etc. */
++ if ((reg_item_p->flags & RSBAC_LIST_PERSIST)
++ && reg_item_p->dirty && !reg_item_p->no_write
++ && !rsbac_debug_no_write) {
++ struct vfsmount *mnt_p;
++ struct rsbac_list_write_head_t write_head;
++ struct rsbac_list_write_item_t *write_item_p;
++
++ mnt_p = rsbac_get_vfsmount(reg_item_p->device);
++ if (mnt_p && rsbac_writable(mnt_p->mnt_sb)) {
++ reg_item_p->dirty = FALSE;
++ err = fill_buffer(reg_item_p, &write_item_p);
++ if (!err) {
++ write_head.head = write_item_p;
++ write_head.tail = write_item_p;
++ write_head.count = 1;
++ rsbac_list_write_buffers(write_head);
++ } else {
++ if (err != -RSBAC_ENOTWRITABLE) {
++ rsbac_printk(KERN_WARNING "rsbac_list_detach(): fill_buffer() for list %s returned error %i\n",
++ reg_item_p->name, err);
++ }
++ }
++ }
++ }
++#endif
++ /* disable handle */
++ *handle_p = NULL;
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++ /* too bad that the list might have been changed again - we do not care anymore */
++ spin_lock(&reg_head.lock);
++ remove_reg(reg_item_p);
++ spin_unlock(&reg_head.lock);
++ synchronize_srcu(&reg_list_srcu);
++ /* now we can remove the item from memory */
++ clear_reg(reg_item_p);
++ return err;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_lol_detach);
++#endif
++int rsbac_list_lol_detach(rsbac_list_handle_t * handle_p,
++ rsbac_list_key_t key)
++{
++ struct rsbac_list_lol_reg_item_t *reg_item_p;
++ int err = 0;
++ int srcu_idx;
++
++ if (!handle_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!*handle_p)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ srcu_idx = srcu_read_lock(&lol_reg_list_srcu);
++ reg_item_p =
++ lookup_lol_reg((struct rsbac_list_lol_reg_item_t *) *handle_p);
++ if (!reg_item_p) {
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_list_lol_detach: detaching list failed due to invalid handle!\n");
++ return -RSBAC_EINVALIDLIST;
++ }
++ if (reg_item_p->info.key != key) {
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_list_lol_detach: detaching list %s denied due to invalid key %u!\n",
++ reg_item_p->name, key);
++ return -EPERM;
++ }
++#if defined(CONFIG_RSBAC_PROC)
++ /* delete proc entry, if it exists */
++ if ((reg_item_p->flags & RSBAC_LIST_BACKUP)
++ && reg_item_p->proc_entry_p) {
++ remove_proc_entry(reg_item_p->name, proc_rsbac_backup_p);
++ reg_item_p->proc_entry_p = NULL;
++ }
++#endif
++#ifndef CONFIG_RSBAC_NO_WRITE
++ /* final write, if dirty etc. */
++ if ((reg_item_p->flags & RSBAC_LIST_PERSIST)
++ && reg_item_p->dirty && !reg_item_p->no_write
++ && !rsbac_debug_no_write) {
++ struct rsbac_list_lol_write_head_t write_head;
++ struct rsbac_list_lol_write_item_t *write_item_p;
++ struct vfsmount *mnt_p;
++
++ mnt_p = rsbac_get_vfsmount(reg_item_p->device);
++ if (mnt_p && rsbac_writable(mnt_p->mnt_sb)) {
++ reg_item_p->dirty = FALSE;
++ err = fill_lol_buffer(reg_item_p, &write_item_p);
++ if (!err) {
++ write_head.head = write_item_p;
++ write_head.tail = write_item_p;
++ write_head.count = 1;
++ rsbac_list_write_lol_buffers(write_head);
++ } else {
++ if (err != -RSBAC_ENOTWRITABLE) {
++ rsbac_printk(KERN_WARNING "rsbac_list_lol_detach(): fill_lol_buffer() for list %s returned error %i\n",
++ reg_item_p->name, err);
++ }
++ }
++ }
++ }
++#endif
++ /* disable handle */
++ *handle_p = NULL;
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++ /* too bad that the list might have been changed again - we do not care anymore */
++ spin_lock(&lol_reg_head.lock);
++ remove_lol_reg(reg_item_p);
++ spin_unlock(&lol_reg_head.lock);
++ synchronize_srcu(&lol_reg_list_srcu);
++ /* now we can remove the item from memory */
++ clear_lol_reg(reg_item_p);
++ return err;
++}
++
++/* set list's no_write flag */
++/* TRUE: do not write to disk, FALSE: writing allowed */
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_no_write);
++#endif
++int rsbac_list_no_write(rsbac_list_handle_t handle, rsbac_list_key_t key,
++ rsbac_boolean_t no_write)
++{
++ struct rsbac_list_reg_item_t *reg_item_p;
++ int srcu_idx;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if ((no_write != FALSE) && (no_write != TRUE))
++ return -RSBAC_EINVALIDVALUE;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ srcu_idx = srcu_read_lock(&reg_list_srcu);
++ reg_item_p = lookup_reg((struct rsbac_list_reg_item_t *) handle);
++ if (!reg_item_p) {
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_list_no_write: setting no_write for list denied due to invalid handle!\n");
++ return -RSBAC_EINVALIDLIST;
++ }
++ if (reg_item_p->info.key != key) {
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_list_no_write: setting no_write for list %s denied due to invalid key %u!\n",
++ reg_item_p->name, key);
++ return -EPERM;
++ }
++ reg_item_p->no_write = no_write;
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++ return 0;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_lol_no_write);
++#endif
++int rsbac_list_lol_no_write(rsbac_list_handle_t handle,
++ rsbac_list_key_t key, rsbac_boolean_t no_write)
++{
++ struct rsbac_list_lol_reg_item_t *reg_item_p;
++ int srcu_idx;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if ((no_write != FALSE) && (no_write != TRUE))
++ return -RSBAC_EINVALIDVALUE;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ srcu_idx = srcu_read_lock(&lol_reg_list_srcu);
++ reg_item_p =
++ lookup_lol_reg((struct rsbac_list_lol_reg_item_t *) handle);
++ if (!reg_item_p) {
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_list_lol_no_write: setting no_write for list denied due to invalid handle!\n");
++ return -RSBAC_EINVALIDLIST;
++ }
++ if (reg_item_p->info.key != key) {
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_list_lol_no_write: setting no_write for list %s denied due to invalid key %u!\n",
++ reg_item_p->name, key);
++ return -EPERM;
++ }
++ reg_item_p->no_write = no_write;
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++ return 0;
++}
++
++/* set list's max_items_per_hash */
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_max_items);
++#endif
++int rsbac_list_max_items(rsbac_list_handle_t handle, rsbac_list_key_t key,
++ u_int max_items)
++{
++ struct rsbac_list_reg_item_t *reg_item_p;
++ int srcu_idx;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ srcu_idx = srcu_read_lock(&reg_list_srcu);
++ reg_item_p = lookup_reg((struct rsbac_list_reg_item_t *) handle);
++ if (!reg_item_p) {
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_list_max_items: setting max_items_per_hash for list denied due to invalid handle!\n");
++ return -RSBAC_EINVALIDLIST;
++ }
++ if (reg_item_p->info.key != key) {
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_list_max_items: setting max_items_per_hash for list %s denied due to invalid key %u!\n",
++ reg_item_p->name, key);
++ return -EPERM;
++ }
++ if (!max_items)
++ max_items = RSBAC_LIST_MAX_NR_ITEMS;
++ reg_item_p->max_items_per_hash = rsbac_min(max_items, RSBAC_LIST_MAX_NR_ITEMS_LIMIT);
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++ return 0;
++}
++
++/* set list's max_items_per_hash and max_subitems*/
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_lol_max_items);
++#endif
++int rsbac_list_lol_max_items(rsbac_list_handle_t handle, rsbac_list_key_t key,
++ u_int max_items, u_int max_subitems)
++{
++ struct rsbac_list_lol_reg_item_t *reg_item_p;
++ int srcu_idx;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ srcu_idx = srcu_read_lock(&lol_reg_list_srcu);
++ reg_item_p = lookup_lol_reg((struct rsbac_list_lol_reg_item_t *) handle);
++ if (!reg_item_p) {
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_list_lol_max_items: setting max_items_per_hash for list denied due to invalid handle!\n");
++ return -RSBAC_EINVALIDLIST;
++ }
++ if (reg_item_p->info.key != key) {
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_list_lol_max_items: setting max_items_per_hash for list %s denied due to invalid key %u!\n",
++ reg_item_p->name, key);
++ return -EPERM;
++ }
++ if (!max_items)
++ max_items = RSBAC_LIST_MAX_NR_ITEMS;
++ if (!max_subitems)
++ max_subitems = RSBAC_LIST_MAX_NR_SUBITEMS;
++ reg_item_p->max_items_per_hash = rsbac_min(max_items, RSBAC_LIST_MAX_NR_ITEMS_LIMIT);
++ reg_item_p->max_subitems = rsbac_min(max_subitems, RSBAC_LIST_MAX_NR_ITEMS_LIMIT);
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++ return 0;
++}
++
++
++/********************/
++/* Transactions */
++/********************/
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++static int do_commit(rsbac_list_ta_number_t ta_number)
++{
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_lol_reg_item_t *lol_list;
++ int i;
++ struct rsbac_list_rcu_free_head_t * rcu_head_p;
++ struct rsbac_list_rcu_free_head_lol_t * rcu_head_lol_p;
++ int srcu_idx;
++ int srcu_idx2;
++
++ srcu_idx = srcu_read_lock(&reg_list_srcu);
++ srcu_idx2 = srcu_read_lock(&lol_reg_list_srcu);
++ list = reg_head.head;
++ while (list) {
++ spin_lock(&list->lock);
++ for (i=0; i<list->nr_hashes; i++) {
++ if (list->hashed[i].ta_copied == ta_number) {
++ remove_all_items(list, i);
++ rcu_assign_pointer(list->hashed[i].head, list->hashed[i].ta_head);
++ rcu_assign_pointer(list->hashed[i].tail, list->hashed[i].ta_tail);
++ rcu_assign_pointer(list->hashed[i].curr, list->hashed[i].ta_curr);
++ list->hashed[i].count = list->hashed[i].ta_count;
++ list->hashed[i].ta_copied = 0;
++ rcu_assign_pointer(list->hashed[i].ta_head, NULL);
++ rcu_assign_pointer(list->hashed[i].ta_tail, NULL);
++ rcu_assign_pointer(list->hashed[i].ta_curr, NULL);
++ list->hashed[i].ta_count = 0;
++ list->dirty = TRUE;
++ }
++ }
++ rcu_head_p = get_rcu_free(list);
++ spin_unlock(&list->lock);
++ do_call_rcu(rcu_head_p);
++ list = list->next;
++ }
++ lol_list = lol_reg_head.head;
++ while (lol_list) {
++ spin_lock(&lol_list->lock);
++ for (i=0; i<lol_list->nr_hashes; i++) {
++ if (lol_list->hashed[i].ta_copied == ta_number) {
++ remove_all_lol_items(lol_list, i);
++ rcu_assign_pointer(lol_list->hashed[i].head, lol_list->hashed[i].ta_head);
++ rcu_assign_pointer(lol_list->hashed[i].tail, lol_list->hashed[i].ta_tail);
++ rcu_assign_pointer(lol_list->hashed[i].curr, lol_list->hashed[i].ta_curr);
++ lol_list->hashed[i].count = lol_list->hashed[i].ta_count;
++ lol_list->hashed[i].ta_copied = 0;
++ rcu_assign_pointer(lol_list->hashed[i].ta_head, NULL);
++ rcu_assign_pointer(lol_list->hashed[i].ta_tail, NULL);
++ rcu_assign_pointer(lol_list->hashed[i].ta_curr, NULL);
++ lol_list->hashed[i].ta_count = 0;
++ lol_list->dirty = TRUE;
++ }
++ }
++ rcu_head_lol_p = get_rcu_free_lol(lol_list);
++ spin_unlock(&lol_list->lock);
++ do_call_rcu_lol(rcu_head_lol_p);
++ lol_list = lol_list->next;
++ }
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx2);
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++ return 0;
++}
++
++int rsbac_list_ta_commit(rsbac_list_ta_number_t ta_number, char *password)
++{
++ int err;
++ struct rsbac_list_ta_data_t ta_data;
++
++ rsbac_printk(KERN_INFO "rsbac_list_ta_commit(): starting commit of transaction %u\n",
++ ta_number);
++ err = rsbac_list_get_data(ta_handle, &ta_number, &ta_data);
++ if (err)
++ return err;
++ if ((RSBAC_UID_NUM(ta_data.commit_uid) != RSBAC_ALL_USERS)
++ && (ta_data.commit_uid != (rsbac_get_vset(),current_uid()))
++ )
++ return -EPERM;
++
++ if (ta_data.password[0]) {
++ if (!password)
++ return -EPERM;
++ if (strncmp
++ (ta_data.password, password,
++ RSBAC_LIST_TA_MAX_PASSLEN))
++ return -EPERM;
++ }
++ rsbac_printk(KERN_INFO "rsbac_list_ta_commit(): transaction %u data verified\n",
++ ta_number);
++ spin_lock(&ta_lock);
++ while (ta_committing) {
++ spin_unlock(&ta_lock);
++ interruptible_sleep_on(&ta_wait);
++ spin_lock(&ta_lock);
++ }
++ rsbac_list_remove(ta_handle, &ta_number);
++ ta_committing = TRUE;
++ spin_unlock(&ta_lock);
++
++ rsbac_printk(KERN_INFO "rsbac_list_ta_commit(): committing transaction %u now\n",
++ ta_number);
++
++ err = do_commit(ta_number);
++ ta_committing = FALSE;
++#ifdef CONFIG_RSBAC_FD_CACHE
++ if (!err)
++ rsbac_fd_cache_invalidate_all();
++#endif
++ wake_up(&ta_wait);
++ rsbac_printk(KERN_INFO "rsbac_list_ta_commit(): committed transaction %u\n",
++ ta_number);
++ return err;
++}
++
++static int do_forget(rsbac_list_ta_number_t ta_number)
++{
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_lol_reg_item_t *lol_list;
++ int i;
++ struct rsbac_list_rcu_free_head_t * rcu_head_p;
++ struct rsbac_list_rcu_free_head_lol_t * rcu_head_lol_p;
++ int srcu_idx;
++ int srcu_idx2;
++
++ spin_lock(&ta_lock);
++ while (ta_committing) {
++ spin_unlock(&ta_lock);
++ interruptible_sleep_on(&ta_wait);
++ spin_lock(&ta_lock);
++ }
++ rsbac_list_remove(ta_handle, &ta_number);
++ ta_committing = TRUE;
++ spin_unlock(&ta_lock);
++
++ rsbac_printk(KERN_INFO "rsbac_list_ta_forget(): removing transaction %u\n",
++ ta_number);
++
++ srcu_idx = srcu_read_lock(&reg_list_srcu);
++ srcu_idx2 = srcu_read_lock(&lol_reg_list_srcu);
++ list = reg_head.head;
++ while (list) {
++ spin_lock(&list->lock);
++ for (i=0; i<list->nr_hashes; i++) {
++ if (list->hashed[i].ta_copied == ta_number) {
++ ta_remove_all_items(list, i);
++ list->hashed[i].ta_copied = 0;
++ }
++ }
++ rcu_head_p = get_rcu_free(list);
++ spin_unlock(&list->lock);
++ do_call_rcu(rcu_head_p);
++ list = list->next;
++ }
++ lol_list = lol_reg_head.head;
++ while (lol_list) {
++ spin_lock(&lol_list->lock);
++ for (i=0; i<lol_list->nr_hashes; i++) {
++ if (lol_list->hashed[i].ta_copied == ta_number) {
++ ta_remove_all_lol_items(lol_list, i);
++ lol_list->hashed[i].ta_copied = 0;
++ }
++ }
++ rcu_head_lol_p = get_rcu_free_lol(lol_list);
++ spin_unlock(&lol_list->lock);
++ do_call_rcu_lol(rcu_head_lol_p);
++ lol_list = lol_list->next;
++ }
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx2);
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++
++ ta_committing = FALSE;
++ wake_up(&ta_wait);
++
++ return 0;
++}
++
++int rsbac_list_ta_forget(rsbac_list_ta_number_t ta_number, char *password)
++{
++ int err;
++ struct rsbac_list_ta_data_t ta_data;
++
++ err = rsbac_list_get_data(ta_handle, &ta_number, &ta_data);
++ if (err)
++ return err;
++ if ((RSBAC_UID_NUM(ta_data.commit_uid) != RSBAC_ALL_USERS)
++ && (ta_data.commit_uid != (rsbac_get_vset(),current_uid()))
++ )
++ return -EPERM;
++ if (ta_data.password[0]) {
++ if (!password)
++ return -EPERM;
++ if (strncmp
++ (ta_data.password, password,
++ RSBAC_LIST_TA_MAX_PASSLEN))
++ return -EPERM;
++ }
++ return do_forget(ta_number);
++}
++
++int rsbac_list_ta_begin(rsbac_time_t ttl,
++ rsbac_list_ta_number_t * ta_number_p,
++ rsbac_uid_t commit_uid,
++ char * name, char *password)
++{
++ int err;
++ rsbac_list_ta_number_t ta;
++ struct rsbac_list_ta_data_t ta_data;
++
++ if (!ta_number_p)
++ return -RSBAC_EINVALIDPOINTER;
++
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (*ta_number_p) {
++ if (rsbac_list_exist(ta_handle, ta_number_p))
++ return -RSBAC_EEXISTS;
++ ta = *ta_number_p;
++ } else {
++#ifdef CONFIG_RSBAC_LIST_TRANS_RANDOM_TA
++ get_random_bytes(&ta, sizeof(ta));
++#else
++ ta = ta_next++;
++#endif
++ while (!ta || rsbac_list_exist(ta_handle, &ta)
++#ifdef CONFIG_RSBAC_RC_LEARN_TA
++ || (ta == CONFIG_RSBAC_RC_LEARN_TA)
++#endif
++#ifdef CONFIG_RSBAC_AUTH_LEARN_TA
++ || (ta == CONFIG_RSBAC_AUTH_LEARN_TA)
++#endif
++#ifdef CONFIG_RSBAC_ACL_LEARN_TA
++ || (ta == CONFIG_RSBAC_ACL_LEARN_TA)
++#endif
++#ifdef CONFIG_RSBAC_CAP_LEARN_TA
++ || (ta == CONFIG_RSBAC_CAP_LEARN_TA)
++#endif
++ ) {
++#ifdef CONFIG_RSBAC_LIST_TRANS_RANDOM_TA
++ get_random_bytes(&ta, sizeof(ta));
++#else
++ ta = ta_next++;
++#endif
++ }
++ }
++ if (!ttl || (ttl > CONFIG_RSBAC_LIST_TRANS_MAX_TTL))
++ ttl = CONFIG_RSBAC_LIST_TRANS_MAX_TTL;
++
++ rsbac_printk(KERN_INFO "rsbac_list_ta_begin(): starting transaction %u with ttl of %us\n",
++ ta, ttl);
++
++ ta_data.start = RSBAC_CURRENT_TIME;
++ ta_data.timeout = ta_data.start + ttl;
++ ta_data.commit_uid = commit_uid;
++ if (name) {
++ strncpy(ta_data.name, name,
++ RSBAC_LIST_TA_MAX_NAMELEN - 1);
++ ta_data.name[RSBAC_LIST_TA_MAX_NAMELEN - 1] = 0;
++ } else
++ ta_data.name[0] = 0;
++ if (password) {
++ strncpy(ta_data.password, password,
++ RSBAC_LIST_TA_MAX_PASSLEN - 1);
++ ta_data.password[RSBAC_LIST_TA_MAX_PASSLEN - 1] = 0;
++ } else
++ ta_data.password[0] = 0;
++ err = rsbac_list_add(ta_handle, &ta, &ta_data);
++ if (!err)
++ *ta_number_p = ta;
++ return err;
++}
++
++int rsbac_list_ta_refresh(rsbac_time_t ttl,
++ rsbac_list_ta_number_t ta_number, char *password)
++{
++ struct rsbac_list_ta_data_t ta_data;
++ int err;
++
++ if (!rsbac_list_exist(ta_handle, &ta_number)) {
++ return -RSBAC_ENOTFOUND;
++ }
++ if (!ttl || (ttl > CONFIG_RSBAC_LIST_TRANS_MAX_TTL))
++ ttl = CONFIG_RSBAC_LIST_TRANS_MAX_TTL;
++
++ rsbac_printk(KERN_INFO "rsbac_list_ta_refresh(): refreshing transaction %u for %us\n",
++ ta_number, ttl);
++
++ err = rsbac_list_get_data(ta_handle, &ta_number, &ta_data);
++ if (err)
++ return err;
++ if ((RSBAC_UID_NUM(ta_data.commit_uid) != RSBAC_ALL_USERS)
++ && (ta_data.commit_uid != (rsbac_get_vset(),current_uid()))
++ )
++ return -EPERM;
++ if (ta_data.password[0]) {
++ if (!password)
++ return -EPERM;
++ if (strncmp
++ (ta_data.password, password,
++ RSBAC_LIST_TA_MAX_PASSLEN))
++ return -EPERM;
++ }
++ ta_data.timeout = RSBAC_CURRENT_TIME + ttl;
++ return rsbac_list_add(ta_handle, &ta_number, &ta_data);
++}
++
++int rsbac_list_ta_exist(rsbac_list_ta_number_t ta_number)
++{
++ if (!ta_number)
++ return TRUE;
++ else
++ return rsbac_list_exist(ta_handle, &ta_number);
++}
++#endif
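
A short editorial sketch of the transaction flow implemented above (only available with CONFIG_RSBAC_LIST_TRANS; not part of the patch): changes made under a transaction number go to the ta_* shadow copy of the affected hashes, rsbac_list_ta_commit() swaps the shadow in, and rsbac_list_ta_forget() or the timeout check in rsbac_write_lists() discards it. The handle and values are invented; passing RSBAC_ALL_USERS as commit_uid is assumed to mean that any user may commit, matching the check in rsbac_list_ta_commit().

static int example_transaction(rsbac_list_handle_t handle)
{
	rsbac_list_ta_number_t ta = 0;	/* 0: let the system pick a number */
	__u32 desc = 1000;
	__u32 data = 42;
	int err;

	err = rsbac_list_ta_begin(60,		/* ttl in seconds, clamped to the maximum */
				  &ta,
				  RSBAC_ALL_USERS,
				  "example",	/* transaction name */
				  NULL);	/* no password */
	if (err)
		return err;

	/* This change only becomes visible in the regular list on commit. */
	err = rsbac_ta_list_add_ttl(ta, handle, 0, &desc, &data);
	if (err) {
		rsbac_list_ta_forget(ta, NULL);
		return err;
	}
	return rsbac_list_ta_commit(ta, NULL);
}
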
++
++
++/********************/
++/* List Access */
++/********************/
++
++/* add item */
++/* if item for desc exists, the data is updated */
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_add_ttl);
++#endif
++int rsbac_ta_list_add_ttl(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ rsbac_time_t ttl, void *desc, void *data)
++{
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_item_t *item_p;
++ u_int hash = 0;
++ struct rsbac_list_rcu_free_head_t * rcu_head_p;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!desc)
++ return -RSBAC_EINVALIDVALUE;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_reg_item_t *) handle;
++ if (!list || (list->self != list))
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ if (list->info.data_size && !data) {
++ return -RSBAC_EINVALIDVALUE;
++ }
++
++/*
++ rsbac_pr_debug(lists, "adding to list %s.\n", list->name);
++*/
++ if (ttl && (ttl != RSBAC_LIST_TTL_KEEP)) {
++ if (ttl > RSBAC_LIST_MAX_AGE_LIMIT)
++ ttl = RSBAC_LIST_MAX_AGE_LIMIT;
++ ttl += RSBAC_CURRENT_TIME;
++ }
++ spin_lock(&list->lock);
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (!ta_number)
++#endif
++ {
++ item_p = lookup_item_locked(list, desc);
++ if (item_p) { /* exists -> update data, if any */
++ if (ttl != RSBAC_LIST_TTL_KEEP)
++ item_p->max_age = ttl;
++ if (data && list->info.data_size) {
++ if (list->def_data
++ && !item_p->max_age
++ && !memcmp(list->def_data, data,
++ list->info.data_size)
++ )
++ do_remove_item(list, item_p, hash);
++ else
++ memcpy(((char *) item_p) +
++ sizeof(*item_p) +
++ list->info.desc_size, data,
++ list->info.data_size);
++ }
++ } else {
++ if (ttl == RSBAC_LIST_TTL_KEEP)
++ ttl = 0;
++ if (!list->def_data
++ || memcmp(list->def_data, data,
++ list->info.data_size)
++ )
++ add_item(list, ttl, desc, data);
++ }
++ touch(list);
++ list->dirty = TRUE;
++ }
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (list->hashed[hash].ta_copied || ta_number) {
++ if (!list->hashed[hash].ta_copied)
++ ta_copy(ta_number, list, hash);
++ else if (ta_number) {
++ if (list->hashed[hash].ta_copied != ta_number) {
++ spin_unlock(&list->lock);
++ return -RSBAC_EBUSY;
++ }
++ } else
++ ta_number = list->hashed[hash].ta_copied;
++ item_p = ta_lookup_item_locked(ta_number, list, desc);
++ if (item_p) { /* exists -> update data, if any */
++ if (ttl != RSBAC_LIST_TTL_KEEP)
++ item_p->max_age = ttl;
++ if (data && list->info.data_size) {
++ if (list->def_data
++ && !item_p->max_age
++ && !memcmp(list->def_data, data,
++ list->info.data_size)
++ )
++ ta_do_remove_item(list, item_p, hash);
++ else
++ memcpy(((char *) item_p) +
++ sizeof(*item_p) +
++ list->info.desc_size, data,
++ list->info.data_size);
++ }
++ } else {
++ if (ttl == RSBAC_LIST_TTL_KEEP)
++ ttl = 0;
++ if (!list->def_data
++ || memcmp(list->def_data, data,
++ list->info.data_size)
++ )
++ ta_add_item(ta_number, list, ttl, desc,
++ data);
++ }
++ }
++#endif
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->write_count++;
++#endif
++ rcu_head_p = get_rcu_free(list);
++ spin_unlock(&list->lock);
++ do_sync_rcu(rcu_head_p);
++ return 0;
++}
++
++/* add list of lists sublist item */
++/* if item for desc exists, the data is updated */
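++/* The top-level item for desc is created with the default data if it does
++ * not exist yet and the list was registered with RSBAC_LIST_DEF_DATA;
++ * otherwise a missing or already expired top-level item makes the call fail
++ * with -RSBAC_EINVALIDTARGET. Subdata equal to the default subdata is not
++ * stored. */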
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_subadd_ttl);
++#endif
++int rsbac_ta_list_lol_subadd_ttl(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ rsbac_time_t ttl,
++ void *desc, void *subdesc, void *subdata)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *sublist;
++ struct rsbac_list_item_t *item_p;
++ int err = 0;
++ u_int hash = 0;
++ struct rsbac_list_rcu_free_head_lol_t * rcu_head_lol_p;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!desc || !subdesc)
++ return -RSBAC_EINVALIDVALUE;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ if (list->info.subdata_size && !subdata) {
++ return -RSBAC_EINVALIDVALUE;
++ }
++
++/*
++ rsbac_pr_debug(lists, "adding to list %s.\n", list->name);
++*/
++ spin_lock(&list->lock);
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (!ta_number)
++#endif
++ {
++ sublist = lookup_lol_item_locked(list, desc);
++ if (!sublist && (list->flags & RSBAC_LIST_DEF_DATA))
++ sublist = add_lol_item(list, 0, desc, list->def_data);
++ if (sublist) {
++ if (sublist->max_age
++ && (sublist->max_age <= RSBAC_CURRENT_TIME)
++ ) {
++ remove_lol_item(list, desc);
++ err = -RSBAC_EINVALIDTARGET;
++ } else {
++ /* exists -> lookup subitem */
++ if (ttl && (ttl != RSBAC_LIST_TTL_KEEP)) {
++ if (ttl > RSBAC_LIST_MAX_AGE_LIMIT)
++ ttl = RSBAC_LIST_MAX_AGE_LIMIT;
++ ttl += RSBAC_CURRENT_TIME;
++ }
++ item_p =
++ lookup_lol_subitem_locked(list, sublist,
++ subdesc);
++ if (item_p) { /* exists -> update data, if any */
++ if (ttl != RSBAC_LIST_TTL_KEEP)
++ item_p->max_age = ttl;
++ if (subdata
++ && list->info.subdata_size) {
++ if (list->def_subdata
++ && !item_p->max_age
++ && !memcmp(list->
++ def_subdata,
++ subdata,
++ list->info.
++ subdata_size)
++ ) {
++ do_remove_lol_subitem
++ (sublist,
++ item_p);
++ rcu_free_lol_sub(list, item_p);
++ } else
++ memcpy(((char *)
++ item_p) +
++ sizeof
++ (*item_p) +
++ list->info.
++ subdesc_size,
++ subdata,
++ list->info.
++ subdata_size);
++ }
++ } else {
++ if (ttl == RSBAC_LIST_TTL_KEEP)
++ ttl = 0;
++ if (!list->def_subdata
++ || memcmp(list->def_subdata,
++ subdata,
++ list->info.
++ subdata_size)
++ ) {
++ if (!add_lol_subitem(list,
++ sublist,
++ ttl,
++ subdesc,
++ subdata))
++ err = -RSBAC_ECOULDNOTADDITEM;
++ }
++ }
++ lol_touch(list);
++ list->dirty = TRUE;
++ }
++ } else {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_unlock;
++ }
++ }
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (list->hashed[hash].ta_copied || ta_number) {
++ if (!list->hashed[hash].ta_copied) {
++ if ((err = ta_lol_copy(ta_number, list, hash)))
++ goto out_unlock;
++ } else if (ta_number) {
++ if (list->hashed[hash].ta_copied != ta_number) {
++ err = -RSBAC_EBUSY;
++ goto out_unlock;
++ }
++ } else
++ ta_number = list->hashed[hash].ta_copied;
++ sublist = ta_lookup_lol_item_locked(ta_number, list, desc);
++ if (!sublist && (list->flags & RSBAC_LIST_DEF_DATA)
++ )
++ sublist =
++ ta_add_lol_item(ta_number, list, 0, desc,
++ list->def_data);
++ if (sublist) {
++ if (sublist->max_age
++ && (sublist->max_age <= RSBAC_CURRENT_TIME)
++ ) {
++ ta_remove_lol_item(ta_number, list, desc);
++ err = -RSBAC_EINVALIDTARGET;
++ } else {
++ /* exists -> lookup subitem */
++ if (ttl && (ttl != RSBAC_LIST_TTL_KEEP)) {
++ if (ttl > RSBAC_LIST_MAX_AGE_LIMIT)
++ ttl =
++ RSBAC_LIST_MAX_AGE_LIMIT;
++ ttl += RSBAC_CURRENT_TIME;
++ }
++ item_p =
++ lookup_lol_subitem_locked(list, sublist,
++ subdesc);
++ if (item_p) { /* exists -> update data, if any */
++ if (ttl != RSBAC_LIST_TTL_KEEP)
++ item_p->max_age = ttl;
++ if (subdata
++ && list->info.subdata_size) {
++ if (list->def_subdata
++ && !item_p->max_age
++ && !memcmp(list->
++ def_subdata,
++ subdata,
++ list->info.
++ subdata_size)
++ ) {
++ do_remove_lol_subitem
++ (sublist,
++ item_p);
++ rcu_free_lol_sub(list, item_p);
++ } else
++ memcpy(((char *)
++ item_p) +
++ sizeof
++ (*item_p) +
++ list->info.
++ subdesc_size,
++ subdata,
++ list->info.
++ subdata_size);
++ }
++ } else {
++ if (ttl == RSBAC_LIST_TTL_KEEP)
++ ttl = 0;
++ if (!list->def_subdata
++ || memcmp(list->def_subdata,
++ subdata,
++ list->info.
++ subdata_size)
++ )
++ add_lol_subitem(list,
++ sublist,
++ ttl,
++ subdesc,
++ subdata);
++ }
++ }
++ } else {
++ err = -RSBAC_EINVALIDTARGET;
++ }
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->write_count++;
++#endif
++
++out_unlock:
++ rcu_head_lol_p = get_rcu_free_lol(list);
++ spin_unlock(&list->lock);
++ do_sync_rcu_lol(rcu_head_lol_p);
++ return err;
++}
++
++/* add list of lists item */
++/* if item for desc exists, the data is updated */
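++/* Top-level variant of rsbac_ta_list_add_ttl() for lists of lists. An item
++ * whose data matches the default data is only removed if it has no subitems
++ * (count == 0) left. */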
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_add_ttl);
++#endif
++int rsbac_ta_list_lol_add_ttl(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ rsbac_time_t ttl, void *desc, void *data)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *item_p;
++ u_int hash = 0;
++ struct rsbac_list_rcu_free_head_lol_t * rcu_head_lol_p;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!desc)
++ return -RSBAC_EINVALIDVALUE;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ if (ttl && (ttl != RSBAC_LIST_TTL_KEEP)) {
++ if (ttl > RSBAC_LIST_MAX_AGE_LIMIT)
++ ttl = RSBAC_LIST_MAX_AGE_LIMIT;
++ ttl += RSBAC_CURRENT_TIME;
++ }
++
++ if (list->info.data_size && !data) {
++ return -RSBAC_EINVALIDVALUE;
++ }
++
++/*
++ rsbac_pr_debug(lists, "adding to list %s.\n", list->name);
++*/
++ spin_lock(&list->lock);
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (!ta_number)
++#endif
++ {
++ item_p = lookup_lol_item_locked(list, desc);
++ if (item_p) { /* exists -> update data, if any */
++ if (ttl != RSBAC_LIST_TTL_KEEP)
++ item_p->max_age = ttl;
++ if (data && list->info.data_size) {
++ if (list->def_data
++ && !item_p->max_age
++ && !memcmp(list->def_data, data,
++ list->info.data_size)
++ && !item_p->count)
++ do_remove_lol_item(list, item_p, hash);
++ else
++ memcpy(((char *) item_p) +
++ sizeof(*item_p) +
++ list->info.desc_size, data,
++ list->info.data_size);
++ }
++ } else {
++ if (ttl == RSBAC_LIST_TTL_KEEP)
++ ttl = 0;
++ if (!list->def_data
++ || memcmp(list->def_data, data,
++ list->info.data_size)
++ )
++ add_lol_item(list, ttl, desc, data);
++ }
++ lol_touch(list);
++ list->dirty = TRUE;
++ }
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (list->hashed[hash].ta_copied || ta_number) {
++ if (!list->hashed[hash].ta_copied)
++ ta_lol_copy(ta_number, list, hash);
++ else if (ta_number) {
++ if (list->hashed[hash].ta_copied != ta_number) {
++ rcu_head_lol_p = get_rcu_free_lol(list);
++ spin_unlock(&list->lock);
++ do_sync_rcu_lol(rcu_head_lol_p);
++ return -RSBAC_EBUSY;
++ }
++ } else
++ ta_number = list->hashed[hash].ta_copied;
++ item_p = ta_lookup_lol_item_locked(ta_number, list, desc);
++ if (item_p) { /* exists -> update data, if any */
++ if (ttl != RSBAC_LIST_TTL_KEEP)
++ item_p->max_age = ttl;
++ if (data && list->info.data_size) {
++ if (list->def_data
++ && !item_p->max_age
++ && !memcmp(list->def_data, data,
++ list->info.data_size)
++ && !item_p->count)
++ ta_do_remove_lol_item(list,
++ item_p,
++ hash);
++ else
++ memcpy(((char *) item_p) +
++ sizeof(*item_p) +
++ list->info.desc_size, data,
++ list->info.data_size);
++ }
++ } else {
++ if (ttl == RSBAC_LIST_TTL_KEEP)
++ ttl = 0;
++ if (!list->def_data
++ || memcmp(list->def_data, data,
++ list->info.data_size)
++ )
++ ta_add_lol_item(ta_number, list, ttl, desc,
++ data);
++ }
++ }
++#endif
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->write_count++;
++#endif
++ rcu_head_lol_p = get_rcu_free_lol(list);
++ spin_unlock(&list->lock);
++ do_sync_rcu_lol(rcu_head_lol_p);
++ return 0;
++}
++
++/* remove item */
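++/* With a transaction number the removal only affects the transaction copy
++ * of the bucket (created on first write); -RSBAC_EBUSY is returned when the
++ * copy belongs to another transaction. Without a transaction the item is
++ * removed from the main list and from an existing transaction copy. */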
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_remove);
++#endif
++int rsbac_ta_list_remove(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle, void *desc)
++{
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_rcu_free_head_t * rcu_head_p;
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ u_int hash = 0;
++#endif
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!desc)
++ return -RSBAC_EINVALIDVALUE;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_reg_item_t *) handle;
++ if (!list || (list->self != list))
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++/*
++ rsbac_pr_debug(lists, "removing from list %s.\n", list->name);
++*/
++ spin_lock(&list->lock);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ if (list->hashed[hash].ta_copied) {
++ if (ta_number) {
++ if (ta_lookup_item_locked(list->hashed[hash].ta_copied, list, desc)) {
++ if (list->hashed[hash].ta_copied != ta_number) {
++ spin_unlock(&list->lock);
++ return -RSBAC_EBUSY;
++ } else
++ ta_remove_item(ta_number, list,
++ desc);
++ }
++ } else
++ ta_remove_item(list->hashed[hash].ta_copied, list, desc);
++ } else {
++ if (ta_number && lookup_item_locked(list, desc)) {
++ ta_copy(ta_number, list, hash);
++ ta_remove_item(ta_number, list, desc);
++ }
++ }
++ if (!ta_number)
++#endif
++ {
++ if (lookup_item_locked(list, desc)) { /* exists -> remove */
++ remove_item(list, desc);
++ touch(list);
++ list->dirty = TRUE;
++ }
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->write_count++;
++#endif
++ rcu_head_p = get_rcu_free(list);
++ spin_unlock(&list->lock);
++ do_sync_rcu(rcu_head_p);
++ return 0;
++}
++
++/* remove all items */
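++/* Clears every hash bucket. One RCU free head is collected per bucket under
++ * the lock and the freed items are released only after synchronize_rcu(), so
++ * lock-free readers never see freed memory. With a transaction number only
++ * the transaction copies are emptied; buckets that still hold items get an
++ * empty transaction copy instead. */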
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_remove_all);
++#endif
++int rsbac_ta_list_remove_all(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle)
++{
++ struct rsbac_list_reg_item_t *list;
++ int i;
++ u_int nr_hashes;
++ struct rsbac_list_rcu_free_head_t ** rcu_head_pp;
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ struct rsbac_list_rcu_free_head_t ** ta_rcu_head_pp;
++#endif
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++/*
++ rsbac_pr_debug(lists, "removing all items from list %s.\n", list->name);
++*/
++ spin_lock(&list->lock);
++ nr_hashes = list->nr_hashes;
++ rcu_head_pp = rsbac_kmalloc(nr_hashes * sizeof(*rcu_head_pp));
++ if (!rcu_head_pp) {
++ spin_unlock(&list->lock);
++ return -RSBAC_ENOMEM;
++ }
++#ifdef CONFIG_RSBAC_LIST_TRANS
++	ta_rcu_head_pp = rsbac_kmalloc(nr_hashes * sizeof(*ta_rcu_head_pp));
++ if (!ta_rcu_head_pp) {
++ spin_unlock(&list->lock);
++ rsbac_kfree(rcu_head_pp);
++ return -RSBAC_ENOMEM;
++ }
++ for (i=0; i<nr_hashes; i++) {
++ if (list->hashed[i].ta_copied) {
++ if (ta_number) {
++ if (list->hashed[i].ta_copied == ta_number) {
++ ta_remove_all_items(list, i);
++ if (!list->hashed[i].head) {
++ list->hashed[i].ta_copied = 0;
++ }
++				} else {
++					spin_unlock(&list->lock);
++					rsbac_kfree(ta_rcu_head_pp);
++					rsbac_kfree(rcu_head_pp);
++					return -RSBAC_EBUSY;
++				}
++ } else
++ ta_remove_all_items(list, i);
++ } else {
++ if (ta_number) {
++ if (list->hashed[i].head) {
++ list->hashed[i].ta_head = NULL;
++ list->hashed[i].ta_tail = NULL;
++ list->hashed[i].ta_curr = NULL;
++ list->hashed[i].ta_count = 0;
++ list->hashed[i].ta_copied = ta_number;
++ }
++ }
++ }
++ ta_rcu_head_pp[i] = get_rcu_free(list);
++ }
++
++ if (!ta_number)
++#endif
++ for (i=0; i<nr_hashes; i++) {
++ if (list->hashed[i].head) {
++ remove_all_items(list, i);
++ touch(list);
++ list->dirty = TRUE;
++ rcu_head_pp[i] = get_rcu_free(list);
++ } else
++ rcu_head_pp[i] = NULL;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->write_count++;
++#endif
++ spin_unlock(&list->lock);
++ synchronize_rcu();
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ for (i=0; i<nr_hashes; i++)
++ rcu_free_do_cleanup(ta_rcu_head_pp[i]);
++ rsbac_kfree(ta_rcu_head_pp);
++ if (!ta_number)
++#endif
++ for (i=0; i<nr_hashes; i++)
++ rcu_free_do_cleanup(rcu_head_pp[i]);
++ rsbac_kfree(rcu_head_pp);
++ return 0;
++}
++
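++/* remove up to count subitems */
++/* Subitems are removed from the head of the sublist of desc. The top-level
++ * item is dropped as well once its sublist is empty and it only carries the
++ * default data; an already expired top-level item is dropped right away. */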
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_subremove_count);
++#endif
++int rsbac_ta_list_lol_subremove_count(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc, u_long count)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *sublist;
++ u_int hash = 0;
++ struct rsbac_list_rcu_free_head_lol_t * rcu_head_lol_p;
++
++ if (!count)
++ return 0;
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!desc)
++ return -RSBAC_EINVALIDVALUE;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++
++/*
++ rsbac_pr_debug(lists, "removing from list of lists %s, device %02u:%02u.\n",
++ list->name, RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device));
++*/
++ spin_lock(&list->lock);
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (list->hashed[hash].ta_copied) {
++ sublist = ta_lookup_lol_item_locked(list->hashed[hash].ta_copied, list, desc);
++ if (sublist) {
++ if (sublist->max_age
++ && (sublist->max_age <= RSBAC_CURRENT_TIME)
++ ) {
++ ta_do_remove_lol_item(list, sublist, hash);
++ } else {
++ struct rsbac_list_item_t * subitem_p;
++
++ if (ta_number
++ && (list->hashed[hash].ta_copied != ta_number)) {
++ spin_unlock(&list->lock);
++ return -RSBAC_EBUSY;
++ }
++ while (sublist->head && (count > 0)) {
++ subitem_p = sublist->head;
++ do_remove_lol_subitem(sublist,
++ subitem_p);
++ rcu_free_lol_sub(list, subitem_p);
++ count--;
++ }
++ if (!sublist->count
++ && ((list->def_data
++ && !memcmp(((char *) sublist) +
++ sizeof(*sublist) +
++ list->info.desc_size,
++ list->def_data,
++ list->info.data_size)
++ )
++ || (!list->info.data_size
++ && (list->
++ flags &
++ RSBAC_LIST_DEF_DATA)
++ )
++ )
++ ) {
++ ta_do_remove_lol_item(list,
++ sublist,
++ hash);
++ }
++ }
++ }
++ } else {
++ if (ta_number && lookup_lol_item_locked(list, desc)) {
++ ta_lol_copy(ta_number, list, hash);
++ ta_remove_lol_item(ta_number, list, desc);
++ }
++ }
++ if (!ta_number)
++#endif
++ {
++ sublist = lookup_lol_item_locked(list, desc);
++ if (sublist) {
++ if (sublist->max_age
++ && (sublist->max_age <= RSBAC_CURRENT_TIME)
++ ) {
++ do_remove_lol_item(list, sublist, hash);
++ lol_touch(list);
++ list->dirty = TRUE;
++ } else {
++ struct rsbac_list_item_t * subitem_p;
++
++ while (sublist->head && (count > 0)) {
++ subitem_p = sublist->head;
++ /* Changes sublist->head */
++ do_remove_lol_subitem(sublist,
++ subitem_p);
++ rcu_free_lol_sub(list, subitem_p);
++ count--;
++ }
++ lol_touch(list);
++ list->dirty = TRUE;
++ if (!sublist->count
++ && ((list->def_data
++ && !memcmp(((char *) sublist) +
++ sizeof(*sublist) +
++ list->info.desc_size,
++ list->def_data,
++ list->info.data_size)
++ )
++ || (!list->info.data_size
++ && (list->
++ flags &
++ RSBAC_LIST_DEF_DATA)
++ )
++ )
++ ) {
++ do_remove_lol_item(list, sublist, hash);
++ }
++ }
++ }
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->write_count++;
++#endif
++ rcu_head_lol_p = get_rcu_free_lol(list);
++ spin_unlock(&list->lock);
++ do_sync_rcu_lol(rcu_head_lol_p);
++ return 0;
++}
++
++
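++/* remove one subitem */
++/* Removes the subitem for subdesc from the sublist of desc and drops the
++ * top-level item once its sublist is empty and it only carries the default
++ * data. */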
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_subremove);
++#endif
++int rsbac_ta_list_lol_subremove(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc, void *subdesc)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *sublist;
++ u_int hash = 0;
++ struct rsbac_list_rcu_free_head_lol_t * rcu_head_lol_p;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!desc || !subdesc)
++ return -RSBAC_EINVALIDVALUE;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++/*
++ rsbac_pr_debug(lists, "removing from list of lists %s, device %02u:%02u.\n",
++ list->name, RSBAC_MAJOR(list->device),
++ RSBAC_MINOR(list->device));
++*/
++ spin_lock(&list->lock);
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (list->hashed[hash].ta_copied) {
++ sublist = ta_lookup_lol_item_locked(list->hashed[hash].ta_copied, list, desc);
++ if (sublist) {
++ if (sublist->max_age
++ && (sublist->max_age <= RSBAC_CURRENT_TIME)
++ ) {
++ ta_do_remove_lol_item(list, sublist, hash);
++ } else {
++ if (ta_number
++ && (list->hashed[hash].ta_copied != ta_number)) {
++ spin_unlock(&list->lock);
++ return -RSBAC_EBUSY;
++ }
++ if (lookup_lol_subitem_locked
++ (list, sublist, subdesc))
++ remove_lol_subitem(list, sublist,
++ subdesc);
++ if (!sublist->head
++ &&
++ ((list->def_data
++ && !memcmp(((char *) sublist) +
++ sizeof(*sublist) +
++ list->info.desc_size,
++ list->def_data,
++ list->info.data_size)
++ )
++ || (!list->info.data_size
++ && (list->
++ flags & RSBAC_LIST_DEF_DATA)
++ )
++ )
++ ) {
++ ta_do_remove_lol_item(list,
++ sublist,
++ hash);
++ }
++ }
++ }
++ } else {
++ if (ta_number && lookup_lol_item_locked(list, desc)) {
++ ta_lol_copy(ta_number, list, hash);
++ ta_remove_lol_item(ta_number, list, desc);
++ }
++ }
++ if (!ta_number)
++#endif
++ {
++ sublist = lookup_lol_item_locked(list, desc);
++ if (sublist) {
++ if (sublist->max_age
++ && (sublist->max_age <= RSBAC_CURRENT_TIME)
++ ) {
++ do_remove_lol_item(list, sublist, hash);
++ lol_touch(list);
++ list->dirty = TRUE;
++ } else {
++ if (lookup_lol_subitem_locked(list, sublist, subdesc)) { /* exists -> remove and set dirty */
++ remove_lol_subitem(list, sublist,
++ subdesc);
++ lol_touch(list);
++ list->dirty = TRUE;
++ }
++ if (!sublist->head
++ && ((list->def_data
++ && !memcmp(((char *) sublist) +
++ sizeof(*sublist) +
++ list->info.desc_size,
++ list->def_data,
++ list->info.data_size)
++ )
++ || (!list->info.data_size
++ && (list->
++ flags &
++ RSBAC_LIST_DEF_DATA)
++ )
++ )
++ ) {
++ do_remove_lol_item(list, sublist, hash);
++ lol_touch(list);
++ list->dirty = TRUE;
++ }
++ }
++ }
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->write_count++;
++#endif
++ rcu_head_lol_p = get_rcu_free_lol(list);
++ spin_unlock(&list->lock);
++ do_sync_rcu_lol(rcu_head_lol_p);
++ return 0;
++}
++
++/* remove same subitem from all items */
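++/* Walks all hash buckets and removes the subitem for subdesc from every
++ * top-level item that has it. Unlike rsbac_ta_list_lol_subremove(), emptied
++ * top-level items are kept. */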
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_subremove_from_all);
++#endif
++int rsbac_ta_list_lol_subremove_from_all(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *subdesc)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *sublist;
++ int i;
++ struct rsbac_list_rcu_free_head_lol_t * rcu_head_lol_p;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!subdesc)
++ return -RSBAC_EINVALIDVALUE;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++/*
++ rsbac_pr_debug(lists, "removing from list of lists %s.\n", list->name);
++*/
++ spin_lock(&list->lock);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ for (i=0; i<list->nr_hashes; i++) {
++ if (list->hashed[i].ta_copied) {
++ if (ta_number && (list->hashed[i].ta_copied != ta_number)) {
++ spin_unlock(&list->lock);
++ return -RSBAC_EBUSY;
++ }
++ sublist = list->hashed[i].head;
++ while (sublist) {
++ remove_lol_subitem(list, sublist, subdesc);
++ sublist = sublist->next;
++ }
++ } else {
++ if (ta_number) {
++ ta_lol_copy(ta_number, list, i);
++ sublist = list->hashed[i].head;
++ while (sublist) {
++ remove_lol_subitem(list, sublist, subdesc);
++ sublist = sublist->next;
++ }
++ }
++ }
++ }
++ if (!ta_number)
++#endif
++ {
++ for (i=0; i<list->nr_hashes; i++) {
++ sublist = list->hashed[i].head;
++ while (sublist) {
++ if (lookup_lol_subitem_locked(list, sublist, subdesc)) { /* exists -> remove and set dirty */
++ remove_lol_subitem(list, sublist, subdesc);
++ lol_touch(list);
++ list->dirty = TRUE;
++ }
++ sublist = sublist->next;
++ }
++ }
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->write_count++;
++#endif
++ rcu_head_lol_p = get_rcu_free_lol(list);
++ spin_unlock(&list->lock);
++ do_sync_rcu_lol(rcu_head_lol_p);
++ return 0;
++}
++
++/* remove all subitems */
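++/* Removes every subitem of the top-level item for desc. Under a transaction
++ * the emptied item is also dropped when it only carries the default data. */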
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_subremove_all);
++#endif
++int rsbac_ta_list_lol_subremove_all(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle, void *desc)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *sublist;
++ u_int hash = 0;
++ struct rsbac_list_rcu_free_head_lol_t * rcu_head_lol_p;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++/*
++ rsbac_pr_debug(lists, "removing all subitems from list of lists %s.\n",
++ list->name);
++*/
++ spin_lock(&list->lock);
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (list->hashed[hash].ta_copied) {
++ sublist = ta_lookup_lol_item_locked(list->hashed[hash].ta_copied, list, desc);
++ if (sublist) {
++ if (sublist->max_age
++ && (sublist->max_age <= RSBAC_CURRENT_TIME)
++ ) {
++ ta_do_remove_lol_item(list, sublist, hash);
++ } else {
++ if (ta_number
++ && (list->hashed[hash].ta_copied != ta_number)) {
++ spin_unlock(&list->lock);
++ return -RSBAC_EBUSY;
++ }
++ remove_all_lol_subitems(list, sublist);
++ if ((list->def_data
++ && !memcmp(((char *) sublist) +
++ sizeof(*sublist) +
++ list->info.desc_size,
++ list->def_data,
++ list->info.data_size)
++ )
++ || (!list->info.data_size
++ && (list->
++ flags & RSBAC_LIST_DEF_DATA)
++ )
++
++ ) {
++ ta_do_remove_lol_item(list,
++ sublist,
++ hash);
++ }
++ }
++ }
++ } else {
++ if (ta_number && lookup_lol_item_locked(list, desc)) {
++ ta_lol_copy(ta_number, list, hash);
++ sublist =
++ ta_lookup_lol_item_locked(ta_number, list, desc);
++ if (sublist)
++ remove_all_lol_subitems(list, sublist);
++ }
++ }
++ if (!ta_number)
++#endif
++ {
++ sublist = lookup_lol_item_locked(list, desc);
++ if (sublist && sublist->head) {
++ remove_all_lol_subitems(list, sublist);
++ lol_touch(list);
++ list->dirty = TRUE;
++ }
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->write_count++;
++#endif
++ rcu_head_lol_p = get_rcu_free_lol(list);
++ spin_unlock(&list->lock);
++ do_sync_rcu_lol(rcu_head_lol_p);
++ return 0;
++}
++
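++/* remove top-level item */
++/* Removes the item for desc together with its whole sublist, following the
++ * same transaction copy-on-write rules as rsbac_ta_list_remove(). */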
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_remove);
++#endif
++int rsbac_ta_list_lol_remove(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle, void *desc)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ u_int hash = 0;
++ struct rsbac_list_rcu_free_head_lol_t * rcu_head_lol_p;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!desc)
++ return -RSBAC_EINVALIDVALUE;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++/*
++ rsbac_pr_debug(lists, "removing from list of lists %s.\n",
++ list->name);
++*/
++ spin_lock(&list->lock);
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (list->hashed[hash].ta_copied) {
++ if (ta_number) {
++ if (ta_lookup_lol_item_locked
++ (list->hashed[hash].ta_copied, list, desc)) {
++ if (list->hashed[hash].ta_copied != ta_number) {
++ spin_unlock(&list->lock);
++ return -RSBAC_EBUSY;
++ } else
++ ta_remove_lol_item(ta_number, list,
++ desc);
++ }
++ } else
++ ta_remove_lol_item(list->hashed[hash].ta_copied, list, desc);
++ } else {
++ if (ta_number && lookup_lol_item_locked(list, desc)) {
++ ta_lol_copy(ta_number, list, hash);
++ ta_remove_lol_item(ta_number, list, desc);
++ }
++ }
++ if (!ta_number)
++#endif
++ {
++ if (lookup_lol_item_locked(list, desc)) { /* exists -> remove */
++ remove_lol_item(list, desc);
++ lol_touch(list);
++ list->dirty = TRUE;
++ }
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->write_count++;
++#endif
++ rcu_head_lol_p = get_rcu_free_lol(list);
++ spin_unlock(&list->lock);
++ do_sync_rcu_lol(rcu_head_lol_p);
++ return 0;
++}
++
++/* remove all items */
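++/* Clears every hash bucket of the list of lists, including all sublists,
++ * with the same per-bucket RCU free heads and transaction handling as
++ * rsbac_ta_list_remove_all(). */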
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_remove_all);
++#endif
++int rsbac_ta_list_lol_remove_all(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ int i;
++ u_int nr_hashes;
++ struct rsbac_list_rcu_free_head_lol_t ** rcu_head_lol_pp;
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ struct rsbac_list_rcu_free_head_lol_t ** ta_rcu_head_lol_pp;
++#endif
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++/*
++ rsbac_pr_debug(lists, "removing all items from list of lists %s.\n",
++ list->name);
++*/
++ spin_lock(&list->lock);
++ nr_hashes = list->nr_hashes;
++ rcu_head_lol_pp = rsbac_kmalloc(nr_hashes * sizeof(*rcu_head_lol_pp));
++ if (!rcu_head_lol_pp) {
++ spin_unlock(&list->lock);
++ return -RSBAC_ENOMEM;
++ }
++#ifdef CONFIG_RSBAC_LIST_TRANS
++	ta_rcu_head_lol_pp = rsbac_kmalloc(nr_hashes * sizeof(*ta_rcu_head_lol_pp));
++ if (!ta_rcu_head_lol_pp) {
++ spin_unlock(&list->lock);
++ rsbac_kfree(rcu_head_lol_pp);
++ return -RSBAC_ENOMEM;
++ }
++ for (i=0; i<nr_hashes; i++) {
++ if (list->hashed[i].ta_copied) {
++ if (ta_number) {
++ if (list->hashed[i].ta_copied == ta_number) {
++ ta_remove_all_lol_items(list, i);
++ if (!list->hashed[i].head) {
++ list->hashed[i].ta_copied = 0;
++ }
++				} else {
++					spin_unlock(&list->lock);
++					rsbac_kfree(ta_rcu_head_lol_pp);
++					rsbac_kfree(rcu_head_lol_pp);
++					return -RSBAC_EBUSY;
++				}
++ } else
++ ta_remove_all_lol_items(list, i);
++ } else {
++ if (ta_number) {
++ if (list->hashed[i].head) {
++ list->hashed[i].ta_head = NULL;
++ list->hashed[i].ta_tail = NULL;
++ list->hashed[i].ta_curr = NULL;
++ list->hashed[i].ta_count = 0;
++ list->hashed[i].ta_copied = ta_number;
++ }
++ }
++ }
++ ta_rcu_head_lol_pp[i] = get_rcu_free_lol(list);
++ }
++
++ if (!ta_number)
++#endif
++ for (i=0; i<nr_hashes; i++) {
++ if (list->hashed[i].head) {
++ remove_all_lol_items(list, i);
++ lol_touch(list);
++ list->dirty = TRUE;
++ rcu_head_lol_pp[i] = get_rcu_free_lol(list);
++ } else
++ rcu_head_lol_pp[i] = NULL;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->write_count++;
++#endif
++ spin_unlock(&list->lock);
++ synchronize_rcu();
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ for (i=0; i<nr_hashes; i++)
++ rcu_free_do_cleanup_lol(ta_rcu_head_lol_pp[i]);
++ rsbac_kfree(ta_rcu_head_lol_pp);
++ if (!ta_number)
++#endif
++ for (i=0; i<nr_hashes; i++)
++ rcu_free_do_cleanup_lol(rcu_head_lol_pp[i]);
++ rsbac_kfree(rcu_head_lol_pp);
++ return 0;
++}
++
++/* get item data */
++/* The item data is copied - we cannot hand out a pointer, because the item
++ * could be removed at any time */
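++/* If the item is missing or expired, the list's default data (ttl 0) is
++ * returned when defined, otherwise -RSBAC_ENOTFOUND. A non-NULL ttl_p
++ * receives the remaining time to live, 0 meaning unlimited. */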
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_get_data_ttl);
++#endif
++int rsbac_ta_list_get_data_ttl(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ rsbac_time_t * ttl_p,
++ void *desc, void *data)
++{
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_item_t *item_p;
++ int err = 0;
++ struct rsbac_list_hashed_t * hashed;
++ u_int hash = 0;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!desc)
++ return -RSBAC_EINVALIDVALUE;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "getting data from list %s.\n",
++ list->name);
++*/
++ if (data && !list->info.data_size) {
++ rcu_read_unlock();
++ return -RSBAC_EINVALIDREQUEST;
++ }
++ hashed = rcu_dereference(list->hashed);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ item_p = ta_lookup_item(ta_number, list, hashed, hash, desc);
++ else
++#endif
++ item_p = lookup_item(list, hashed, hash, desc);
++ if (item_p
++ && (!item_p->max_age || (item_p->max_age > RSBAC_CURRENT_TIME)
++ )
++ ) { /* exists -> copy data, if any */
++ if (ttl_p) {
++ if (item_p->max_age)
++ *ttl_p =
++ item_p->max_age - RSBAC_CURRENT_TIME;
++ else
++ *ttl_p = 0;
++ }
++ if (data) {
++ memcpy(data,
++ ((char *) item_p) + sizeof(*item_p) +
++ list->info.desc_size, list->info.data_size);
++ }
++ } else {
++ if (!list->def_data)
++ err = -RSBAC_ENOTFOUND;
++ else {
++ if (ttl_p)
++ *ttl_p = 0;
++ if (data)
++ memcpy(data,
++ list->def_data,
++ list->info.data_size);
++ }
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ return err;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_get_max_subdesc);
++#endif
++int rsbac_ta_list_lol_get_max_subdesc(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc, void *subdesc)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *sublist;
++ struct rsbac_list_item_t *item_p;
++ int err = 0;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int hash = 0;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!desc || !subdesc)
++ return -RSBAC_EINVALIDVALUE;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ hashed = rcu_dereference(list->hashed);
++/*
++ rsbac_pr_debug(lists, "getting data from list %s.\n",
++ list->name);
++*/
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ sublist = ta_lookup_lol_item(ta_number, list, hashed, hash, desc);
++ else
++#endif
++ sublist = lookup_lol_item(list, hashed, hash, desc);
++ if (sublist) { /* exists -> lookup subitem */
++ item_p = rcu_dereference(sublist->tail);
++ while (item_p
++ && item_p->max_age
++ && (item_p->max_age > RSBAC_CURRENT_TIME)
++ )
++ item_p = rcu_dereference(item_p->prev);
++ if (item_p)
++ memcpy(subdesc, (char *) item_p + sizeof(*item_p),
++ list->info.subdesc_size);
++ else {
++ memset(subdesc, 0, list->info.subdesc_size);
++ err = -RSBAC_ENOTFOUND;
++ }
++ } else {
++ if (!(list->flags & RSBAC_LIST_DEF_DATA))
++ err = -RSBAC_ENOTFOUND;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ return err;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_get_subdata_ttl);
++#endif
++int rsbac_ta_list_lol_get_subdata_ttl(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ rsbac_time_t * ttl_p,
++ void *desc,
++ void *subdesc, void *subdata)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *sublist;
++ struct rsbac_list_item_t *item_p;
++ int err = 0;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int hash = 0;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!desc || !subdesc)
++ return -RSBAC_EINVALIDVALUE;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ if (subdata && !list->info.subdata_size) {
++ return -RSBAC_EINVALIDREQUEST;
++ }
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "getting data from list %s.\n", list->name);
++*/
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ hashed = rcu_dereference(list->hashed);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ sublist = ta_lookup_lol_item(ta_number, list, hashed, hash, desc);
++ else
++#endif
++ sublist = lookup_lol_item(list, hashed, hash, desc);
++ if (sublist) { /* exists -> lookup subitem */
++ item_p = lookup_lol_subitem(list, sublist, subdesc);
++ if (item_p
++ && (!item_p->max_age
++ || (item_p->max_age > RSBAC_CURRENT_TIME)
++ )
++ ) { /* exists -> copy data, if any */
++ if (ttl_p) {
++ if (item_p->max_age)
++ *ttl_p =
++ item_p->max_age -
++ RSBAC_CURRENT_TIME;
++ else
++ *ttl_p = 0;
++ }
++ if (subdata) {
++ memcpy(subdata,
++ ((char *) item_p) +
++ sizeof(*item_p) +
++ list->info.subdesc_size,
++ list->info.subdata_size);
++ }
++ } else {
++ if (!list->def_subdata)
++ err = -RSBAC_ENOTFOUND;
++ else {
++ if (ttl_p)
++ *ttl_p = 0;
++ if (subdata)
++ memcpy(subdata,
++ list->def_subdata,
++ list->info.subdata_size);
++ }
++ }
++ } else {
++ if (!list->def_subdata)
++ err = -RSBAC_ENOTFOUND;
++ else {
++ if (ttl_p)
++ *ttl_p = 0;
++ if (subdata)
++ memcpy(subdata,
++ list->def_subdata,
++ list->info.subdata_size);
++ }
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ return err;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_get_data_ttl);
++#endif
++int rsbac_ta_list_lol_get_data_ttl(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ rsbac_time_t * ttl_p,
++ void *desc, void *data)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *item_p;
++ int err = 0;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int hash = 0;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!desc)
++ return -RSBAC_EINVALIDVALUE;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ if (data && !list->info.data_size) {
++ return -RSBAC_EINVALIDREQUEST;
++ }
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "getting data from list %s.\n", list->name);
++*/
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ hashed = rcu_dereference(list->hashed);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ item_p = ta_lookup_lol_item(ta_number, list, hashed, hash, desc);
++ else
++#endif
++ item_p = lookup_lol_item(list, hashed, hash, desc);
++ if (item_p
++ && (!item_p->max_age || (item_p->max_age > RSBAC_CURRENT_TIME)
++ )
++ ) { /* exists -> copy data, if any */
++ if (ttl_p) {
++ if (item_p->max_age)
++ *ttl_p =
++ item_p->max_age - RSBAC_CURRENT_TIME;
++ else
++ *ttl_p = 0;
++ }
++ if (data) {
++ memcpy(data,
++ ((char *) item_p) + sizeof(*item_p) +
++ list->info.desc_size, list->info.data_size);
++ }
++ } else {
++ if (!list->def_data)
++ err = -RSBAC_ENOTFOUND;
++ else {
++ if (ttl_p)
++ *ttl_p = 0;
++ if (data)
++ memcpy(data,
++ list->def_data,
++ list->info.data_size);
++ }
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ return err;
++}
++
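++/* Copies the highest descriptor in the list into desc, using the list's
++ * compare function if one was registered and memcmp() otherwise. Only the
++ * tail of each hash bucket needs to be inspected, since buckets are kept
++ * sorted by descriptor. */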
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_get_max_desc);
++#endif
++int rsbac_ta_list_get_max_desc(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle, void *desc)
++{
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_item_t *item_p = NULL;
++ struct rsbac_list_item_t *tmp_item_p;
++ int err = 0;
++ int i;
++ struct rsbac_list_hashed_t * hashed;
++ u_int nr_hashes;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ tmp_item_p = rcu_dereference(hashed[i].ta_tail);
++ else
++#endif
++ tmp_item_p = rcu_dereference(hashed[i].tail);
++ while (tmp_item_p
++ && tmp_item_p->max_age && (tmp_item_p->max_age > RSBAC_CURRENT_TIME)
++ )
++ tmp_item_p = rcu_dereference(tmp_item_p->prev);
++ if(tmp_item_p) {
++ if(list->compare) {
++ if(!item_p || list->compare(&tmp_item_p[1], &item_p[1]) > 0)
++ item_p = tmp_item_p;
++ } else {
++ if(!item_p || memcmp(&tmp_item_p[1], &item_p[1], list->info.desc_size) > 0)
++ item_p = tmp_item_p;
++ }
++ }
++ }
++ if (item_p)
++ memcpy(desc, (char *) item_p + sizeof(*item_p),
++ list->info.desc_size);
++ else {
++ memset(desc, 0, list->info.desc_size);
++ err = -RSBAC_ENOTFOUND;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ return err;
++}
++
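++/* Iterator: copies the descriptor following old_desc into next_desc, moving
++ * on to the next hash bucket when the current one is exhausted. Pass NULL as
++ * old_desc to get the first descriptor; -RSBAC_ENOTFOUND signals the end. */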
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_get_next_desc);
++#endif
++int rsbac_ta_list_get_next_desc(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *old_desc, void *next_desc)
++{
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_item_t *item_p;
++ struct rsbac_list_hashed_t * hashed;
++ u_int nr_hashes;
++ u_int hash = 0;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++ if (!next_desc)
++ return -RSBAC_EINVALIDPOINTER;
++
++ list = (struct rsbac_list_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++ if (old_desc) {
++ if(list->hash_function)
++ hash = list->hash_function(old_desc, nr_hashes);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ item_p = ta_lookup_item(ta_number, list, hashed, hash, old_desc);
++ else
++#endif
++ item_p = lookup_item(list, hashed, hash, old_desc);
++ if(item_p) {
++ item_p = rcu_dereference(item_p->next);
++ while (item_p
++ && item_p->max_age && (item_p->max_age > RSBAC_CURRENT_TIME)
++ ) {
++ item_p = rcu_dereference(item_p->next);
++ }
++ hash++;
++ } else
++ hash = 0;
++ } else
++ item_p = NULL;
++ while (!item_p && (hash < nr_hashes)) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ item_p = rcu_dereference(hashed[hash].ta_head);
++ else
++#endif
++ item_p = rcu_dereference(hashed[hash].head);
++ while (item_p
++ && item_p->max_age && (item_p->max_age > RSBAC_CURRENT_TIME)
++ ) {
++ item_p = rcu_dereference(item_p->next);
++ }
++ hash++;
++ }
++ if (item_p) {
++ memcpy(next_desc, (char *) item_p + sizeof(*item_p),
++ list->info.desc_size);
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ if (item_p)
++ return 0;
++ else
++ return -RSBAC_ENOTFOUND;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_get_next_desc_selector);
++#endif
++int rsbac_ta_list_get_next_desc_selector(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *old_desc,
++ void *next_desc,
++ rsbac_list_desc_selector_function_t selector,
++ void * param)
++{
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_item_t *item_p;
++ struct rsbac_list_hashed_t * hashed;
++ u_int nr_hashes;
++ u_int hash = 0;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++ if (!next_desc)
++ return -RSBAC_EINVALIDPOINTER;
++
++ list = (struct rsbac_list_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++ if (old_desc) {
++ if(list->hash_function)
++ hash = list->hash_function(old_desc, nr_hashes);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ item_p = ta_lookup_item(ta_number, list, hashed, hash, old_desc);
++ else
++#endif
++ item_p = lookup_item(list, hashed, hash, old_desc);
++ if(item_p) {
++ item_p = rcu_dereference(item_p->next);
++ while (item_p
++ && item_p->max_age && (item_p->max_age > RSBAC_CURRENT_TIME)
++ && !selector((char *) item_p + sizeof(*item_p), param)
++ ) {
++ item_p = rcu_dereference(item_p->next);
++ }
++ hash++;
++ } else
++ hash = 0;
++ } else
++ item_p = NULL;
++ while (!item_p && (hash < nr_hashes)) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ item_p = rcu_dereference(hashed[hash].ta_head);
++ else
++#endif
++ item_p = rcu_dereference(hashed[hash].head);
++ while (item_p
++ && item_p->max_age && (item_p->max_age > RSBAC_CURRENT_TIME)
++ && !selector((char *) item_p + sizeof(*item_p), param)
++ ) {
++ item_p = rcu_dereference(item_p->next);
++ }
++ hash++;
++ }
++ if (item_p) {
++ memcpy(next_desc, (char *) item_p + sizeof(*item_p),
++ list->info.desc_size);
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ if (item_p)
++ return 0;
++ else
++ return -RSBAC_ENOTFOUND;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_get_next_desc);
++#endif
++int rsbac_ta_list_lol_get_next_desc(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *old_desc, void *next_desc)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *item_p;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int nr_hashes;
++ u_int hash = 0;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++ if (!next_desc)
++ return -RSBAC_EINVALIDPOINTER;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++ if (old_desc) {
++ if(list->hash_function)
++ hash = list->hash_function(old_desc, nr_hashes);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ item_p = ta_lookup_lol_item(ta_number, list, hashed, hash, old_desc);
++ else
++#endif
++ item_p = lookup_lol_item(list, hashed, hash, old_desc);
++ if(item_p) {
++ item_p = rcu_dereference(item_p->next);
++ while (item_p
++ && item_p->max_age && (item_p->max_age > RSBAC_CURRENT_TIME)
++ ) {
++ item_p = rcu_dereference(item_p->next);
++ }
++ hash++;
++ } else
++ hash = 0;
++ } else
++ item_p = NULL;
++ while (!item_p && (hash < nr_hashes)) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ item_p = rcu_dereference(hashed[hash].ta_head);
++ else
++#endif
++ item_p = rcu_dereference(hashed[hash].head);
++ while (item_p
++ && item_p->max_age && (item_p->max_age > RSBAC_CURRENT_TIME)
++ ) {
++ item_p = rcu_dereference(item_p->next);
++ }
++ hash++;
++ }
++ if (item_p) {
++ memcpy(next_desc, (char *) item_p + sizeof(*item_p),
++ list->info.desc_size);
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ if (item_p)
++ return 0;
++ else
++ return -RSBAC_ENOTFOUND;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_get_next_desc_selector);
++#endif
++int rsbac_ta_list_lol_get_next_desc_selector(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *old_desc, void *next_desc,
++ rsbac_list_desc_selector_function_t selector,
++ void * param)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *item_p;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int nr_hashes;
++ u_int hash = 0;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++ if (!next_desc)
++ return -RSBAC_EINVALIDPOINTER;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++ if (old_desc) {
++ if(list->hash_function)
++ hash = list->hash_function(old_desc, nr_hashes);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ item_p = ta_lookup_lol_item(ta_number, list, hashed, hash, old_desc);
++ else
++#endif
++ item_p = lookup_lol_item(list, hashed, hash, old_desc);
++ if(item_p) {
++ item_p = rcu_dereference(item_p->next);
++ while (item_p
++ && item_p->max_age && (item_p->max_age > RSBAC_CURRENT_TIME)
++ && !selector((char *) item_p + sizeof(*item_p), param)
++ ) {
++ item_p = rcu_dereference(item_p->next);
++ }
++ hash++;
++ } else
++ hash = 0;
++ } else
++ item_p = NULL;
++ while (!item_p && (hash < nr_hashes)) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ item_p = rcu_dereference(hashed[hash].ta_head);
++ else
++#endif
++ item_p = rcu_dereference(hashed[hash].head);
++ while (item_p
++ && item_p->max_age && (item_p->max_age > RSBAC_CURRENT_TIME)
++ && !selector((char *) item_p + sizeof(*item_p), param)
++ ) {
++ item_p = rcu_dereference(item_p->next);
++ }
++ hash++;
++ }
++ if (item_p) {
++ memcpy(next_desc, (char *) item_p + sizeof(*item_p),
++ list->info.desc_size);
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ if (item_p)
++ return 0;
++ else
++ return -RSBAC_ENOTFOUND;
++}
++
++/* get item desc by data */
++/* The item desc is copied - we cannot hand out a pointer, because the item
++ * could be removed.
++ * If no compare function is provided (NULL value), memcmp is used.
++ * Note: The data value given here is always passed as the second parameter
++ *       to the compare function, so you can use different types for storage
++ *       and lookup.
++ */
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_get_desc);
++#endif
++int rsbac_ta_list_get_desc(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc,
++ void *data,
++ rsbac_list_data_compare_function_t compare)
++{
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_item_t *item_p;
++ int err = 0;
++ struct rsbac_list_hashed_t * hashed;
++ u_int nr_hashes;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!desc || !data)
++ return -RSBAC_EINVALIDVALUE;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "getting desc from list %s.\n", list->name);
++*/
++ if (!list->info.data_size) {
++ rcu_read_unlock();
++ return -RSBAC_EINVALIDREQUEST;
++ }
++
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ item_p = ta_lookup_item_data(ta_number, list, hashed, nr_hashes, data, compare);
++#else
++ item_p = lookup_item_data(list, hashed, nr_hashes, data, compare);
++#endif
++ if (item_p) { /* exists -> copy desc */
++ memcpy(desc,
++ ((char *) item_p) + sizeof(*item_p),
++ list->info.desc_size);
++ } else {
++ err = -RSBAC_ENOTFOUND;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ return err;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_get_desc_selector);
++#endif
++int rsbac_ta_list_get_desc_selector(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc,
++ void *data,
++ rsbac_list_data_compare_function_t compare,
++ rsbac_list_desc_selector_function_t selector,
++ void * param)
++{
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_item_t *item_p;
++ int err = 0;
++ struct rsbac_list_hashed_t * hashed;
++ u_int nr_hashes;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!desc || !data)
++ return -RSBAC_EINVALIDVALUE;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "getting desc from list %s.\n", list->name);
++*/
++ if (!list->info.data_size) {
++ rcu_read_unlock();
++ return -RSBAC_EINVALIDREQUEST;
++ }
++
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ item_p = ta_lookup_item_data_selector(ta_number, list,
++ hashed, nr_hashes,
++ data, compare,
++ selector,
++ param);
++#else
++ item_p = lookup_item_data_selector(list,
++ hashed, nr_hashes,
++ data, compare,
++ selector,
++ param);
++#endif
++ if (item_p) { /* exists -> copy desc */
++ memcpy(desc,
++ ((char *) item_p) + sizeof(*item_p),
++ list->info.desc_size);
++ } else {
++ err = -RSBAC_ENOTFOUND;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ return err;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_get_desc);
++#endif
++int rsbac_ta_list_lol_get_desc(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc,
++ void *data,
++ rsbac_list_data_compare_function_t compare)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *item_p;
++ int err = 0;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int nr_hashes;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!desc || !data)
++ return -RSBAC_EINVALIDVALUE;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ if (!list->info.data_size) {
++ return -RSBAC_EINVALIDREQUEST;
++ }
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "getting desc from list %s.\n", list->name);
++*/
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ item_p = ta_lookup_lol_item_data(ta_number, list, hashed, nr_hashes, data, compare);
++#else
++ item_p = lookup_lol_item_data(list, hashed, nr_hashes, data, compare);
++#endif
++ if (item_p) { /* exists -> copy desc */
++ memcpy(desc,
++ ((char *) item_p) + sizeof(*item_p),
++ list->info.desc_size);
++ } else {
++ err = -RSBAC_ENOTFOUND;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ return err;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_get_desc_selector);
++#endif
++int rsbac_ta_list_lol_get_desc_selector(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc,
++ void *data,
++ rsbac_list_data_compare_function_t compare,
++ rsbac_list_desc_selector_function_t selector,
++ void * param)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *item_p;
++ int err = 0;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int nr_hashes;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!desc || !data)
++ return -RSBAC_EINVALIDVALUE;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ if (!list->info.data_size) {
++ return -RSBAC_EINVALIDREQUEST;
++ }
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "getting desc from list %s.\n", list->name);
++*/
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ item_p = ta_lookup_lol_item_data_selector(ta_number,
++ list, hashed, nr_hashes,
++ data, compare,
++ selector,
++ param);
++#else
++ item_p = lookup_lol_item_data_selector(list, hashed, nr_hashes,
++ data, compare,
++ selector,
++ param);
++#endif
++ if (item_p) { /* exists -> copy desc */
++ memcpy(desc,
++ ((char *) item_p) + sizeof(*item_p),
++ list->info.desc_size);
++ } else {
++ err = -RSBAC_ENOTFOUND;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ return err;
++}
++
++/* Returns TRUE if the item exists or def_data is defined, FALSE otherwise. */
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_exist);
++#endif
++int rsbac_ta_list_exist(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle, void *desc)
++{
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_item_t *item_p;
++ int result;
++ struct rsbac_list_hashed_t * hashed;
++ u_int hash = 0;
++
++ if (!handle || !desc)
++ return FALSE;
++ if (!list_initialized)
++ return FALSE;
++
++ list = (struct rsbac_list_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "testing on list %s.\n", list->name);
++*/
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ hashed = rcu_dereference(list->hashed);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ item_p = ta_lookup_item(ta_number, list, hashed, hash, desc);
++ else
++#endif
++ item_p = lookup_item(list, hashed, hash, desc);
++ if (item_p
++ && (!item_p->max_age || (item_p->max_age > RSBAC_CURRENT_TIME)
++ )
++ ) { /* exists -> TRUE */
++ result = TRUE;
++ } else {
++ result = FALSE;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ return result;
++}
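++/*
++ * Usage sketch (illustrative only; user_list_handle and the rsbac_uid_t
++ * descriptor are placeholders for whatever the list was registered with):
++ *
++ *	rsbac_uid_t uid = 1000;
++ *
++ *	if (rsbac_ta_list_exist(0, user_list_handle, &uid))
++ *		... item is present and not expired ...
++ */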
++
++/* does item exist? */
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_subexist);
++#endif
++int rsbac_ta_list_lol_subexist(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc, void *subdesc)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *sublist;
++ struct rsbac_list_item_t *item_p;
++ int result;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int hash = 0;
++
++ if (!handle || !desc || !subdesc)
++ return FALSE;
++ if (!list_initialized)
++ return FALSE;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "testing on list %s.\n", list->name);
++*/
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ hashed = rcu_dereference(list->hashed);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ sublist = ta_lookup_lol_item(ta_number, list, hashed, hash, desc);
++ else
++#endif
++ sublist = lookup_lol_item(list, hashed, hash, desc);
++ if (sublist) { /* exists -> lookup subitem */
++ item_p = lookup_lol_subitem(list, sublist, subdesc);
++ if (item_p
++ && (!item_p->max_age
++ || (item_p->max_age > RSBAC_CURRENT_TIME)
++ )
++ ) { /* exists -> TRUE */
++ result = TRUE;
++ } else {
++ result = FALSE;
++ }
++ } else {
++ result = FALSE;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ return result;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_subexist_compare);
++#endif
++int rsbac_ta_list_lol_subexist_compare(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc,
++ void *subdesc,
++ rsbac_list_compare_function_t
++ compare)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *sublist;
++ struct rsbac_list_item_t *item_p;
++ int result;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int hash = 0;
++
++ if (!handle || !desc || !subdesc)
++ return FALSE;
++ if (!list_initialized)
++ return FALSE;
++ /* Use standard function, if compare is not provided. */
++ if (!compare)
++ return rsbac_list_lol_subexist(handle, desc, subdesc);
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "testing on list %s.\n", list->name);
++*/
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ hashed = rcu_dereference(list->hashed);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ sublist = ta_lookup_lol_item(ta_number, list, hashed, hash, desc);
++ else
++#endif
++ sublist = lookup_lol_item(list, hashed, hash, desc);
++ if (sublist) { /* exists -> lookup subitem */
++ item_p =
++ lookup_lol_subitem_user_compare(list, sublist, subdesc,
++ compare);
++ if (item_p
++ && (!item_p->max_age
++ || (item_p->max_age > RSBAC_CURRENT_TIME)
++ )
++ ) { /* exists -> TRUE */
++ result = TRUE;
++ } else {
++ result = FALSE;
++ }
++ } else {
++ result = FALSE;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ return result;
++}
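++/*
++ * Sketch of a caller supplied compare callback. Assumption: the
++ * rsbac_list_compare_function_t contract is memcmp-like, i.e. it gets two
++ * descriptor pointers and returns 0 on a match; see the typedef in the
++ * rsbac list headers for the authoritative signature. my_subdesc_t is a
++ * placeholder type.
++ *
++ *	int my_subdesc_compare(void *desc1, void *desc2)
++ *	{
++ *		return memcmp(desc1, desc2, sizeof(struct my_subdesc_t));
++ *	}
++ */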
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_exist);
++#endif
++int rsbac_ta_list_lol_exist(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle, void *desc)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *item_p;
++ int result;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int hash = 0;
++
++ if (!handle || !desc)
++ return FALSE;
++ if (!list_initialized)
++ return FALSE;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "testing on list %s.\n", list->name);
++*/
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ hashed = rcu_dereference(list->hashed);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ item_p = ta_lookup_lol_item(ta_number, list, hashed, hash, desc);
++ else
++#endif
++ item_p = lookup_lol_item(list, hashed, hash, desc);
++ if (item_p
++ && (!item_p->max_age || (item_p->max_age > RSBAC_CURRENT_TIME)
++ )
++ ) { /* exists -> TRUE */
++ result = TRUE;
++ } else {
++ result = FALSE;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ return result;
++}
++
++/* count number of elements */
++/* returns number of elements or negative error code */
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_subcount);
++#endif
++long rsbac_ta_list_lol_subcount(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle, void *desc)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *sublist;
++ long result;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int hash = 0;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ hashed = rcu_dereference(list->hashed);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ sublist = ta_lookup_lol_item(ta_number, list, hashed, hash, desc);
++ else
++#endif
++ sublist = lookup_lol_item(list, hashed, hash, desc);
++ if (sublist) {
++ result = sublist->count;
++ } else {
++ result = -RSBAC_ENOTFOUND;
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ return result;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_all_subcount);
++#endif
++long rsbac_ta_list_lol_all_subcount(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *sublist;
++ long result = 0;
++ int i;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int nr_hashes;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ sublist = rcu_dereference(hashed[i].ta_head);
++ else
++#endif
++ sublist = rcu_dereference(hashed[i].head);
++ while (sublist) {
++ result += sublist->count;
++ sublist = rcu_dereference(sublist->next);
++ }
++ }
++ rcu_read_unlock();
++ return result;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_count);
++#endif
++long rsbac_ta_list_lol_count(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ long result = 0;
++ int i;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int nr_hashes;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++ rcu_read_lock();
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ result += hashed[i].ta_count;
++ else
++#endif
++ result += hashed[i].count;
++ }
++ rcu_read_unlock();
++ return result;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_count);
++#endif
++long rsbac_ta_list_count(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle)
++{
++ struct rsbac_list_reg_item_t *list;
++ long result = 0;
++ int i;
++ struct rsbac_list_hashed_t * hashed;
++ u_int nr_hashes;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ result += hashed[i].ta_count;
++ else
++#endif
++ result += hashed[i].count;
++ }
++ rcu_read_unlock();
++ return result;
++}
++
++/* Get array of all descriptors */
++/* Returns number of elements or negative error code */
++/* If return value > 0, *array_p contains a pointer to a rsbac_kmalloc'd array
++ of descs, otherwise *array_p is set to NULL. If *array_p has been set,
++ caller must call rsbac_kfree(*array_p) after use! */
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_get_all_desc);
++#endif
++long rsbac_ta_list_get_all_desc(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle, void **array_p)
++{
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_item_t *item_p;
++ char *buffer;
++ u_long offset = 0;
++ u_long count = 0;
++ long result = 0;
++ u_int item_size;
++ int i;
++ struct rsbac_list_hashed_t * hashed;
++ u_int nr_hashes;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!array_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++ *array_p = NULL;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ count += hashed[i].ta_count;
++ else
++#endif
++ count += hashed[i].count;
++ }
++ if(!count) {
++ result = 0;
++ goto out_unlock;
++ }
++ item_size = list->info.desc_size;
++ if(count > RSBAC_MAX_KMALLOC / item_size)
++ count = RSBAC_MAX_KMALLOC / item_size;
++ buffer = rsbac_kmalloc(item_size * count);
++ if (!buffer) {
++ result = -ENOMEM;
++ goto out_unlock;
++ }
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ item_p = rcu_dereference(hashed[i].ta_head);
++ else
++#endif
++ item_p = rcu_dereference(hashed[i].head);
++ while (item_p && (result < count)) {
++ if (!item_p->max_age
++ || (item_p->max_age >
++ RSBAC_CURRENT_TIME)
++ ) {
++ memcpy(buffer + offset,
++ ((char *) item_p) +
++ sizeof(*item_p), item_size);
++ offset += item_size;
++ result++;
++ }
++ item_p = rcu_dereference(item_p->next);
++ }
++ }
++ *array_p = buffer;
++
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++out_unlock:
++ rcu_read_unlock();
++ return result;
++}
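++/*
++ * Usage sketch for the descriptor array functions (illustrative; the list
++ * handle and the rsbac_uid_t descriptor type are placeholders). The array
++ * is rsbac_kmalloc'd, so the caller must free it with rsbac_kfree():
++ *
++ *	rsbac_uid_t *uid_array;
++ *	long n = rsbac_ta_list_get_all_desc(0, user_list_handle,
++ *					    (void **) &uid_array);
++ *
++ *	if (n > 0) {
++ *		... use uid_array[0] .. uid_array[n - 1] ...
++ *		rsbac_kfree(uid_array);
++ *	}
++ */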
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_get_all_desc_selector);
++#endif
++long rsbac_ta_list_get_all_desc_selector (
++ rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle, void **array_p,
++ rsbac_list_desc_selector_function_t selector,
++ void * param)
++{
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_item_t *item_p;
++ char *buffer;
++ u_long offset = 0;
++ u_long count = 0;
++ long result = 0;
++ u_int item_size;
++ int i;
++ struct rsbac_list_hashed_t * hashed;
++ u_int nr_hashes;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!array_p || !selector)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++ *array_p = NULL;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ count += hashed[i].ta_count;
++ else
++#endif
++ count += hashed[i].count;
++ }
++ if(!count) {
++ result = 0;
++ goto out_unlock;
++ }
++ item_size = list->info.desc_size;
++ if(count > RSBAC_MAX_KMALLOC / item_size)
++ count = RSBAC_MAX_KMALLOC / item_size;
++ buffer = rsbac_kmalloc(item_size * count);
++ if (!buffer) {
++ result = -ENOMEM;
++ goto out_unlock;
++ }
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ item_p = rcu_dereference(hashed[i].ta_head);
++ else
++#endif
++ item_p = rcu_dereference(hashed[i].head);
++ while (item_p && (result < count)) {
++ if ( (!item_p->max_age
++ || (item_p->max_age >
++ RSBAC_CURRENT_TIME)
++ )
++ && selector(((char *) item_p) + sizeof(*item_p), param)
++ ) {
++ memcpy(buffer + offset,
++ ((char *) item_p) +
++ sizeof(*item_p), item_size);
++ offset += item_size;
++ result++;
++ }
++ item_p = rcu_dereference(item_p->next);
++ }
++ }
++ *array_p = buffer;
++
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++out_unlock:
++ rcu_read_unlock();
++ return result;
++}
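++/*
++ * Sketch of a descriptor selector callback, derived from how selector() is
++ * invoked above: it is called with a pointer to the item descriptor and the
++ * caller supplied param, and a nonzero return value includes the item.
++ * uid_at_least and the rsbac_uid_t descriptor are placeholder names:
++ *
++ *	int uid_at_least(void *desc, void *param)
++ *	{
++ *		return (*(rsbac_uid_t *) desc >= *(rsbac_uid_t *) param);
++ *	}
++ */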
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_get_all_subdesc_ttl);
++#endif
++long rsbac_ta_list_lol_get_all_subdesc_ttl(rsbac_list_ta_number_t
++ ta_number,
++ rsbac_list_handle_t handle,
++ void *desc, void **array_p,
++ rsbac_time_t ** ttl_array_p)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *sublist;
++ struct rsbac_list_item_t *item_p;
++ char *buffer;
++ rsbac_time_t *ttl_p = NULL;
++ u_long offset = 0;
++ long result = 0;
++ u_long count;
++ u_int item_size;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int hash = 0;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!array_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++ *array_p = NULL;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ hashed = rcu_dereference(list->hashed);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ sublist = ta_lookup_lol_item(ta_number, list, hashed, hash, desc);
++ else
++#endif
++ sublist = lookup_lol_item(list, hashed, hash, desc);
++ if (sublist && sublist->count) {
++ item_size = list->info.subdesc_size;
++ count = sublist->count;
++ if(count > RSBAC_MAX_KMALLOC / item_size)
++ count = RSBAC_MAX_KMALLOC / item_size;
++ buffer = rsbac_kmalloc(item_size * count);
++ if (buffer) {
++ if (ttl_array_p)
++ ttl_p =
++ rsbac_kmalloc(sizeof(**ttl_array_p) *
++ sublist->count);
++ item_p = rcu_dereference(sublist->head);
++ while (item_p && (result < count)) {
++ if (!item_p->max_age
++ || (item_p->max_age >
++ RSBAC_CURRENT_TIME)
++ ) {
++ memcpy(buffer + offset,
++ ((char *) item_p) +
++ sizeof(*item_p), item_size);
++ if (ttl_p) {
++ if (item_p->max_age)
++ ttl_p[result] =
++ item_p->
++ max_age -
++ RSBAC_CURRENT_TIME;
++ else
++ ttl_p[result] = 0;
++ }
++ offset += item_size;
++ result++;
++ }
++ item_p = rcu_dereference(item_p->next);
++ }
++ *array_p = buffer;
++ if (ttl_array_p)
++ *ttl_array_p = ttl_p;
++ } else {
++ result = -RSBAC_ENOMEM;
++ }
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ return result;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_get_all_desc);
++#endif
++long rsbac_ta_list_lol_get_all_desc(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void **array_p)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *item_p;
++ char *buffer;
++ u_long offset = 0;
++ long result = 0;
++ u_int item_size;
++ int i;
++ u_long count = 0;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int nr_hashes;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!array_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++ *array_p = NULL;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ count += hashed[i].ta_count;
++ else
++#endif
++ count += hashed[i].count;
++ }
++ if(!count) {
++ result = 0;
++ goto out_unlock;
++ }
++ item_size = list->info.desc_size;
++ if(count > RSBAC_MAX_KMALLOC / item_size)
++ count = RSBAC_MAX_KMALLOC / item_size;
++ buffer = rsbac_kmalloc(item_size * count);
++ if (!buffer) {
++ result = -ENOMEM;
++		rsbac_pr_debug(lists, "list %s: could not allocate buffer for %lu items of size %u!\n",
++ list->name, count, item_size);
++ goto out_unlock;
++ }
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ item_p = rcu_dereference(hashed[i].ta_head);
++ else
++#endif
++ item_p = rcu_dereference(hashed[i].head);
++ while (item_p && (result < count)) {
++ if (!item_p->max_age
++ || (item_p->max_age >
++ RSBAC_CURRENT_TIME)
++ ) {
++ memcpy(buffer + offset,
++ ((char *) item_p) +
++ sizeof(*item_p), item_size);
++ offset += item_size;
++ result++;
++ }
++ item_p = rcu_dereference(item_p->next);
++ }
++ }
++ *array_p = buffer;
++
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++out_unlock:
++ rcu_read_unlock();
++ return result;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_get_all_desc_selector);
++#endif
++long rsbac_ta_list_lol_get_all_desc_selector (
++ rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void **array_p,
++ rsbac_list_desc_selector_function_t selector,
++ void * param)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *item_p;
++ char *buffer;
++ u_long offset = 0;
++ long result = 0;
++ u_int item_size;
++ int i;
++ u_long count = 0;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int nr_hashes;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!array_p || !selector)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++ *array_p = NULL;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ count += hashed[i].ta_count;
++ else
++#endif
++ count += hashed[i].count;
++ }
++ if(!count) {
++ result = 0;
++ goto out_unlock;
++ }
++ item_size = list->info.desc_size;
++ if(count > RSBAC_MAX_KMALLOC / item_size)
++ count = RSBAC_MAX_KMALLOC / item_size;
++ buffer = rsbac_kmalloc(item_size * count);
++ if (!buffer) {
++ result = -ENOMEM;
++		rsbac_pr_debug(lists, "list %s: could not allocate buffer for %lu items of size %u!\n",
++ list->name, count, item_size);
++ goto out_unlock;
++ }
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ item_p = rcu_dereference(hashed[i].ta_head);
++ else
++#endif
++ item_p = rcu_dereference(hashed[i].head);
++ while (item_p && (result < count)) {
++ if ( (!item_p->max_age
++ || (item_p->max_age >
++ RSBAC_CURRENT_TIME)
++ )
++ && selector(((char *) item_p) + sizeof(*item_p), param)
++ ) {
++ memcpy(buffer + offset,
++ ((char *) item_p) +
++ sizeof(*item_p), item_size);
++ offset += item_size;
++ result++;
++ }
++ item_p = rcu_dereference(item_p->next);
++ }
++ }
++ *array_p = buffer;
++
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++out_unlock:
++ rcu_read_unlock();
++ return result;
++}
++
++/* Get array of all data */
++/* Returns number of elements or negative error code */
++/* If return value > 0, *array_p contains a pointer to a rsbac_kmalloc'd array
++   of data items, otherwise *array_p is set to NULL. If *array_p has been set,
++ caller must call rsbac_kfree(*array_p) after use! */
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_get_all_data);
++#endif
++long rsbac_ta_list_get_all_data(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle, void **array_p)
++{
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_item_t *item_p;
++ char *buffer;
++ u_long offset = 0;
++ long result = 0;
++ u_int item_size;
++ u_int item_offset;
++ int i;
++ u_long count = 0;
++ struct rsbac_list_hashed_t * hashed;
++ u_int nr_hashes;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!array_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++ *array_p = NULL;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++ if (!list->info.data_size) {
++ rcu_read_unlock();
++ return -RSBAC_EINVALIDREQUEST;
++ }
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ count += hashed[i].ta_count;
++ else
++#endif
++ count += hashed[i].count;
++ }
++ if(!count) {
++ result = 0;
++ goto out_unlock;
++ }
++ item_size = list->info.data_size;
++ item_offset = list->info.desc_size;
++ if(count > RSBAC_MAX_KMALLOC / item_size)
++ count = RSBAC_MAX_KMALLOC / item_size;
++ buffer = rsbac_kmalloc(item_size * count);
++ if (!buffer) {
++ result = -ENOMEM;
++ goto out_unlock;
++ }
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ item_p = rcu_dereference(hashed[i].ta_head);
++ else
++#endif
++ item_p = rcu_dereference(hashed[i].head);
++ while (item_p && (result < count)) {
++ if (!item_p->max_age
++ || (item_p->max_age >
++ RSBAC_CURRENT_TIME)
++ ) {
++ memcpy(buffer + offset,
++ ((char *) item_p) +
++ sizeof(*item_p) +
++ item_offset, item_size);
++ offset += item_size;
++ result++;
++ }
++ item_p = rcu_dereference(item_p->next);
++ }
++ }
++ *array_p = buffer;
++
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++
++out_unlock:
++ rcu_read_unlock();
++ return result;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_get_all_subdata);
++#endif
++long rsbac_ta_list_lol_get_all_subdata(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void *desc, void **array_p)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *sublist;
++ struct rsbac_list_item_t *item_p;
++ char *buffer;
++ u_long offset = 0;
++ long result = 0;
++ u_long count;
++ u_int item_size;
++ u_int item_offset;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int hash = 0;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!array_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++ *array_p = NULL;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ if (!list->info.subdata_size) {
++ return -RSBAC_EINVALIDREQUEST;
++ }
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ hashed = rcu_dereference(list->hashed);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ sublist = ta_lookup_lol_item(ta_number, list, hashed, hash, desc);
++ else
++#endif
++ sublist = lookup_lol_item(list, hashed, hash, desc);
++ if (sublist && sublist->count) {
++ item_size = list->info.subdata_size;
++ item_offset = list->info.subdesc_size;
++ count = sublist->count;
++ if(count > RSBAC_MAX_KMALLOC / item_size)
++ count = RSBAC_MAX_KMALLOC / item_size;
++ buffer = rsbac_kmalloc(item_size * count);
++ if (buffer) {
++ item_p = rcu_dereference(sublist->head);
++ while (item_p && (result < count)) {
++ if (!item_p->max_age
++ || (item_p->max_age >
++ RSBAC_CURRENT_TIME)
++ ) {
++ memcpy(buffer + offset,
++ ((char *) item_p) +
++ sizeof(*item_p) +
++ item_offset, item_size);
++ offset += item_size;
++ result++;
++ }
++ item_p = rcu_dereference(item_p->next);
++ }
++ *array_p = buffer;
++ } else {
++ result = -RSBAC_ENOMEM;
++ }
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ return result;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_get_all_data);
++#endif
++long rsbac_ta_list_lol_get_all_data(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void **array_p)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *item_p;
++ char *buffer;
++ u_long offset = 0;
++ long result = 0;
++ u_int item_size;
++ u_int item_offset;
++ int i;
++ u_long count = 0;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int nr_hashes;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!array_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++ *array_p = NULL;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++ if (!list->info.data_size) {
++ rcu_read_unlock();
++ return -RSBAC_EINVALIDREQUEST;
++ }
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ count += hashed[i].ta_count;
++ else
++#endif
++ count += hashed[i].count;
++ }
++ if(!count) {
++ result = 0;
++ goto out_unlock;
++ }
++ item_size = list->info.data_size;
++ item_offset = list->info.desc_size;
++ if(count > RSBAC_MAX_KMALLOC / item_size)
++ count = RSBAC_MAX_KMALLOC / item_size;
++ buffer = rsbac_kmalloc(item_size * count);
++ if (!buffer) {
++ result = -ENOMEM;
++ goto out_unlock;
++ }
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ item_p = rcu_dereference(hashed[i].ta_head);
++ else
++#endif
++ item_p = rcu_dereference(hashed[i].head);
++ while (item_p && (result < count)) {
++ if (!item_p->max_age
++ || (item_p->max_age >
++ RSBAC_CURRENT_TIME)
++ ) {
++ memcpy(buffer + offset,
++ ((char *) item_p) +
++ sizeof(*item_p) +
++ item_offset, item_size);
++ offset += item_size;
++ result++;
++ }
++ item_p = rcu_dereference(item_p->next);
++ }
++ }
++ *array_p = buffer;
++
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++out_unlock:
++ rcu_read_unlock();
++ return result;
++}
++
++/* Get item size */
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_get_item_size);
++#endif
++int rsbac_list_get_item_size(rsbac_list_handle_t handle)
++{
++ struct rsbac_list_reg_item_t *list;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++ return list->info.desc_size + list->info.data_size;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_lol_get_subitem_size);
++#endif
++int rsbac_list_lol_get_subitem_size(rsbac_list_handle_t handle)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++ return list->info.subdesc_size + list->info.subdata_size;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_lol_get_item_size);
++#endif
++int rsbac_list_lol_get_item_size(rsbac_list_handle_t handle)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++ return list->info.desc_size + list->info.data_size;
++}
++
++/* Get array of all items */
++/* Returns number of items or negative error code */
++/* If return value > 0, *array_p contains a pointer to a rsbac_kmalloc'd array of items,
++ where desc and data are placed directly behind each other.
++ If *array_p has been set (return value > 0), caller must call rsbac_kfree(*array_p) after use! */
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_get_all_items_ttl);
++#endif
++long rsbac_ta_list_get_all_items_ttl(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void **array_p,
++ rsbac_time_t ** ttl_array_p)
++{
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_item_t *item_p;
++ char *buffer;
++ rsbac_time_t *ttl_p = NULL;
++ u_long offset = 0;
++ long result = 0;
++ u_int item_size;
++ int i;
++ u_long count = 0;
++ struct rsbac_list_hashed_t * hashed;
++ u_int nr_hashes;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!array_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++ *array_p = NULL;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ count += hashed[i].ta_count;
++ else
++#endif
++ count += hashed[i].count;
++ }
++ if(!count) {
++ result = 0;
++ goto out_unlock;
++ }
++ item_size = list->info.desc_size + list->info.data_size;
++ if(count > RSBAC_MAX_KMALLOC / item_size)
++ count = RSBAC_MAX_KMALLOC / item_size;
++ buffer = rsbac_kmalloc(item_size * count);
++ if (!buffer) {
++ result = -ENOMEM;
++ goto out_unlock;
++ }
++ if (ttl_array_p) {
++ ttl_p = rsbac_kmalloc(sizeof(**ttl_array_p) * count);
++ if (!ttl_p) {
++ result = -ENOMEM;
++ rsbac_kfree(buffer);
++ goto out_unlock;
++ }
++ }
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ item_p = rcu_dereference(hashed[i].ta_head);
++ else
++#endif
++ item_p = rcu_dereference(hashed[i].head);
++ while (item_p && (result < count)) {
++ if (!item_p->max_age
++ || (item_p->max_age >
++ RSBAC_CURRENT_TIME)
++ ) {
++ memcpy(buffer + offset,
++ ((char *) item_p) +
++ sizeof(*item_p), item_size);
++ if (ttl_p) {
++ if (item_p->max_age)
++ ttl_p[result] =
++ item_p->max_age - RSBAC_CURRENT_TIME;
++ else
++ ttl_p[result] = 0;
++ }
++ offset += item_size;
++ result++;
++ }
++ item_p = rcu_dereference(item_p->next);
++ }
++ }
++ *array_p = buffer;
++ if (ttl_array_p)
++ *ttl_array_p = ttl_p;
++
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++out_unlock:
++ rcu_read_unlock();
++ return result;
++}
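++/*
++ * Usage sketch for the packed item arrays (illustrative): each element is
++ * the descriptor immediately followed by the data, so callers step through
++ * the buffer in strides of rsbac_list_get_item_size(). desc_size stands for
++ * the descriptor size the list was registered with:
++ *
++ *	char *p = array;
++ *	int size = rsbac_list_get_item_size(handle);
++ *	long i;
++ *
++ *	for (i = 0; i < n; i++, p += size) {
++ *		... descriptor at p, data at p + desc_size ...
++ *	}
++ *	rsbac_kfree(array);
++ */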
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_get_all_subitems_ttl);
++#endif
++long rsbac_ta_list_lol_get_all_subitems_ttl(rsbac_list_ta_number_t
++ ta_number,
++ rsbac_list_handle_t handle,
++ void *desc, void **array_p,
++ rsbac_time_t ** ttl_array_p)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *sublist;
++ struct rsbac_list_item_t *item_p;
++ char *buffer;
++ rsbac_time_t *ttl_p = NULL;
++ u_long offset = 0;
++ long result = 0;
++ u_long count;
++ u_int item_size;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int hash = 0;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!array_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++ *array_p = NULL;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++ if(list->hash_function)
++ hash = list->hash_function(desc, list->nr_hashes);
++ hashed = rcu_dereference(list->hashed);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[hash].ta_copied == ta_number))
++ sublist = ta_lookup_lol_item(ta_number, list, hashed, hash, desc);
++ else
++#endif
++ sublist = lookup_lol_item(list, hashed, hash, desc);
++ if (sublist && sublist->count) {
++ count = sublist->count;
++ item_size =
++ list->info.subdesc_size + list->info.subdata_size;
++ if(count > RSBAC_MAX_KMALLOC / item_size)
++ count = RSBAC_MAX_KMALLOC / item_size;
++ buffer = rsbac_kmalloc(item_size * count);
++ if (buffer) {
++ if (ttl_array_p)
++ ttl_p =
++ rsbac_kmalloc(sizeof(**ttl_array_p) *
++ sublist->count);
++ item_p = rcu_dereference(sublist->head);
++ while (item_p && (result < count)) {
++ if (!item_p->max_age
++ || (item_p->max_age >
++ RSBAC_CURRENT_TIME)
++ ) {
++ memcpy(buffer + offset,
++ ((char *) item_p) +
++ sizeof(*item_p), item_size);
++ if (ttl_p) {
++ if (item_p->max_age)
++ ttl_p[result] =
++ item_p->
++ max_age -
++ RSBAC_CURRENT_TIME;
++ else
++ ttl_p[result] = 0;
++ }
++ offset += item_size;
++ result++;
++ }
++ item_p = rcu_dereference(item_p->next);
++ }
++ *array_p = buffer;
++ if (ttl_array_p)
++ *ttl_array_p = ttl_p;
++ } else {
++ result = -RSBAC_ENOMEM;
++ }
++ }
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++ rcu_read_unlock();
++ return result;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_ta_list_lol_get_all_items);
++#endif
++long rsbac_ta_list_lol_get_all_items(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t handle,
++ void **array_p)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++ struct rsbac_list_lol_item_t *item_p;
++ char *buffer;
++ u_long offset = 0;
++ long result = 0;
++ u_int item_size;
++ int i;
++ u_long count = 0;
++ struct rsbac_list_lol_hashed_t * hashed;
++ u_int nr_hashes;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!array_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++ *array_p = NULL;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s.\n", list->name);
++*/
++ nr_hashes = list->nr_hashes;
++ hashed = rcu_dereference(list->hashed);
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ count += hashed[i].ta_count;
++ else
++#endif
++ count += hashed[i].count;
++ }
++ if(!count) {
++ result = 0;
++ goto out_unlock;
++ }
++ item_size = list->info.desc_size + list->info.data_size;
++ if(count > RSBAC_MAX_KMALLOC / item_size)
++ count = RSBAC_MAX_KMALLOC / item_size;
++ buffer = rsbac_kmalloc(item_size * count);
++ if (!buffer) {
++ result = -ENOMEM;
++ goto out_unlock;
++ }
++ for (i=0; i<nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number && (hashed[i].ta_copied == ta_number))
++ item_p = rcu_dereference(hashed[i].ta_head);
++ else
++#endif
++ item_p = rcu_dereference(hashed[i].head);
++ while (item_p && (result < count)) {
++ if (!item_p->max_age
++ || (item_p->max_age >
++ RSBAC_CURRENT_TIME)
++ ) {
++ memcpy(buffer + offset,
++ ((char *) item_p) +
++ sizeof(*item_p), item_size);
++ offset += item_size;
++ result++;
++ }
++ item_p = rcu_dereference(item_p->next);
++ }
++ }
++ *array_p = buffer;
++
++#ifdef CONFIG_RSBAC_LIST_STATS
++ list->read_count++;
++#endif
++out_unlock:
++ rcu_read_unlock();
++ return result;
++}
++
++/* List hash functions
++ *
++ * nr_hashes is always 2^n, so we can safely use bit operations
++ */
++
++
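++/*
++ * Since nr_hashes is a power of two, the bit mask below is equivalent to a
++ * modulo, e.g. with nr_hashes == 16, (desc & 15) == (desc % 16), but without
++ * a division.
++ */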
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_hash_u32);
++#endif
++u_int rsbac_list_hash_u32(void * desc, __u32 nr_hashes)
++{
++ return (*((__u32 *) desc) & (nr_hashes - 1));
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_hash_fd);
++#endif
++u_int rsbac_list_hash_fd(void * desc, __u32 nr_hashes)
++{
++ return (*((rsbac_inode_nr_t *) desc) & (nr_hashes - 1));
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_hash_pid);
++#endif
++u_int rsbac_list_hash_pid(void * desc, __u32 nr_hashes)
++{
++// return (pid_nr(*((rsbac_pid_t *) desc)) & (nr_hashes - 1));
++ return ((*((__u32 *) desc) >> 6) & (nr_hashes - 1));
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_hash_uid);
++#endif
++u_int rsbac_list_hash_uid(void * desc, __u32 nr_hashes)
++{
++ return (*((rsbac_uid_t *) desc) & (nr_hashes - 1));
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_hash_gid);
++#endif
++u_int rsbac_list_hash_gid(void * desc, __u32 nr_hashes)
++{
++ return (*((rsbac_gid_t *) desc) & (nr_hashes - 1));
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_hash_ipc);
++#endif
++u_int rsbac_list_hash_ipc(void * desc, __u32 nr_hashes)
++{
++ return (((struct rsbac_ipc_t *) desc)->id.id_nr & (nr_hashes - 1));
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_hash_dev);
++#endif
++u_int rsbac_list_hash_dev(void * desc, __u32 nr_hashes)
++{
++ return ( ( ((struct rsbac_dev_desc_t *) desc)->major
++ + ((struct rsbac_dev_desc_t *) desc)->minor )
++ & (nr_hashes - 1));
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_hash_nettemp);
++#endif
++u_int rsbac_list_hash_nettemp(void * desc, __u32 nr_hashes)
++{
++ return (*((rsbac_net_temp_id_t *) desc) & (nr_hashes - 1));
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_hash_netobj);
++#endif
++u_int rsbac_list_hash_netobj(void * desc, __u32 nr_hashes)
++{
++ return (*((__u32 *) desc) & (nr_hashes - 1));
++}
++
++/* Copy a complete list to another */
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_copy);
++#endif
++long rsbac_list_copy(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t from_handle,
++ rsbac_list_handle_t to_handle)
++{
++ struct rsbac_list_reg_item_t *from_list;
++ struct rsbac_list_reg_item_t *to_list;
++ struct rsbac_list_item_t *item_p;
++ int i;
++ int err = 0;
++ struct rsbac_list_rcu_free_head_t * rcu_head_p;
++ struct rsbac_list_hashed_t * from_hashed;
++ u_int nr_from_hashes;
++
++ if (!from_handle || !to_handle)
++ return -RSBAC_EINVALIDLIST;
++
++ from_list = (struct rsbac_list_reg_item_t *) from_handle;
++ if (from_list->self != from_list)
++ return -RSBAC_EINVALIDLIST;
++ to_list = (struct rsbac_list_reg_item_t *) to_handle;
++ if (to_list->self != to_list)
++ return -RSBAC_EINVALIDLIST;
++ if((from_list->info.desc_size != to_list->info.desc_size)
++ || (from_list->info.data_size != to_list->info.data_size))
++ return -RSBAC_EINVALIDVALUE;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s to list %s.\n",
++ from_list->name, to_list->name);
++*/
++ spin_lock(&to_list->lock);
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ /* Check for other transactions at the target list */
++ if(ta_number)
++ for (i=0; i<to_list->nr_hashes; i++)
++ if(to_list->hashed[i].ta_copied != ta_number) {
++ err = -RSBAC_EBUSY;
++ goto out_unlock;
++ }
++#endif
++ for (i=0; i<to_list->nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if(ta_number && (to_list->hashed[i].ta_copied == ta_number))
++ ta_remove_all_items(to_list, i);
++ else
++#endif
++ remove_all_items(to_list, i);
++ }
++ nr_from_hashes = from_list->nr_hashes;
++ from_hashed = rcu_dereference(from_list->hashed);
++ for (i=0; i<nr_from_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number) {
++ if(from_hashed[i].ta_copied == ta_number)
++ item_p = rcu_dereference(from_hashed[i].ta_head);
++ else
++ item_p = rcu_dereference(from_hashed[i].head);
++ while(item_p) {
++ if (!ta_add_item(ta_number,
++ to_list,
++ item_p->max_age,
++ &item_p[1],
++ &item_p[1] + from_list->info.desc_size)) {
++ err = -RSBAC_EWRITEFAILED;
++ goto out_unlock;
++ }
++ item_p = rcu_dereference(item_p->next);
++ }
++ }
++ else
++#endif
++ {
++ item_p = rcu_dereference(from_hashed[i].head);
++ while(item_p) {
++ if (!add_item(to_list,
++ item_p->max_age,
++ &item_p[1],
++ &item_p[1] + from_list->info.desc_size)) {
++ err = -RSBAC_EWRITEFAILED;
++ goto out_unlock;
++ }
++ item_p = rcu_dereference(item_p->next);
++ }
++ }
++ }
++
++#ifdef CONFIG_RSBAC_LIST_STATS
++ from_list->read_count++;
++ to_list->write_count++;
++#endif
++out_unlock:
++ rcu_head_p = get_rcu_free(to_list);
++ spin_unlock(&to_list->lock);
++ rcu_read_unlock();
++ do_sync_rcu(rcu_head_p);
++ return err;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_lol_copy);
++#endif
++long rsbac_list_lol_copy(rsbac_list_ta_number_t ta_number,
++ rsbac_list_handle_t from_handle,
++ rsbac_list_handle_t to_handle)
++{
++ struct rsbac_list_lol_reg_item_t *from_list;
++ struct rsbac_list_lol_reg_item_t *to_list;
++ struct rsbac_list_lol_item_t *item_p;
++ struct rsbac_list_lol_item_t *new_item_p;
++ struct rsbac_list_item_t *subitem_p;
++ struct rsbac_list_item_t *new_subitem_p;
++ u_int subitem_size;
++ int i;
++ int err = 0;
++ struct rsbac_list_rcu_free_head_lol_t * rcu_head_lol_p;
++ struct rsbac_list_lol_hashed_t * from_hashed;
++ u_int nr_from_hashes;
++
++ if (!from_handle || !to_handle)
++ return -RSBAC_EINVALIDLIST;
++
++ from_list = (struct rsbac_list_lol_reg_item_t *) from_handle;
++ if (from_list->self != from_list)
++ return -RSBAC_EINVALIDLIST;
++ to_list = (struct rsbac_list_lol_reg_item_t *) to_handle;
++ if (to_list->self != to_list)
++ return -RSBAC_EINVALIDLIST;
++ if((from_list->info.desc_size != to_list->info.desc_size)
++ || (from_list->info.data_size != to_list->info.data_size)
++ || (from_list->info.subdesc_size != to_list->info.subdesc_size)
++ || (from_list->info.subdata_size != to_list->info.subdata_size))
++ return -RSBAC_EINVALIDVALUE;
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ while (ta_committing)
++ interruptible_sleep_on(&ta_wait);
++ if (ta_number && !rsbac_ta_list_exist(0, ta_handle, &ta_number))
++ return -RSBAC_EINVALIDTRANSACTION;
++#endif
++
++ subitem_size = sizeof(*new_subitem_p)
++ + from_list->info.subdesc_size + from_list->info.subdata_size;
++
++ rcu_read_lock();
++/*
++ rsbac_pr_debug(lists, "list %s to list %s.\n",
++ from_list->name, to_list->name);
++*/
++ spin_lock(&to_list->lock);
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ /* Check for other transactions at the target list */
++ if(ta_number)
++ for (i=0; i<to_list->nr_hashes; i++)
++ if(to_list->hashed[i].ta_copied != ta_number) {
++ err = -RSBAC_EBUSY;
++ goto out_unlock;
++ }
++#endif
++ for (i=0; i<to_list->nr_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if(ta_number && (to_list->hashed[i].ta_copied == ta_number))
++ ta_remove_all_lol_items(to_list, i);
++ else
++#endif
++ remove_all_lol_items(to_list, i);
++ }
++ nr_from_hashes = from_list->nr_hashes;
++ from_hashed = rcu_dereference(from_list->hashed);
++ for (i=0; i<nr_from_hashes; i++) {
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ if (ta_number) {
++ if(from_hashed[i].ta_copied == ta_number)
++ item_p = rcu_dereference(from_hashed[i].ta_head);
++ else
++ item_p = rcu_dereference(from_hashed[i].head);
++ while(item_p) {
++ new_item_p = ta_add_lol_item(ta_number,
++ to_list,
++ item_p->max_age,
++ &item_p[1],
++ &item_p[1] + from_list->info.desc_size);
++ if(!new_item_p) {
++ err = -RSBAC_EWRITEFAILED;
++ goto out_unlock;
++ }
++ subitem_p = rcu_dereference(item_p->head);
++ while (subitem_p) {
++ if (!(new_subitem_p = rsbac_kmalloc(subitem_size))) {
++ err = -RSBAC_ENOMEM;
++ goto out_unlock;
++ }
++ memcpy(new_subitem_p, subitem_p, subitem_size);
++ new_subitem_p->prev = NULL;
++ new_subitem_p->next = NULL;
++ if (new_item_p->tail) {
++ new_subitem_p->prev = new_item_p->tail;
++ new_item_p->tail->next = new_subitem_p;
++ new_item_p->tail = new_subitem_p;
++ new_item_p->count++;
++ } else {
++ new_item_p->head = new_subitem_p;
++ new_item_p->tail = new_subitem_p;
++ new_item_p->count = 1;
++ }
++ subitem_p = rcu_dereference(subitem_p->next);
++ }
++ item_p = rcu_dereference(item_p->next);
++ }
++ }
++ else
++#endif
++ {
++ item_p = rcu_dereference(from_hashed[i].head);
++ while(item_p) {
++ new_item_p = add_lol_item(to_list,
++ item_p->max_age,
++ &item_p[1],
++ &item_p[1] + from_list->info.desc_size);
++ if(!new_item_p) {
++ err = -RSBAC_EWRITEFAILED;
++ goto out_unlock;
++ }
++ subitem_p = rcu_dereference(item_p->head);
++ while (subitem_p) {
++ if (!(new_subitem_p = rsbac_kmalloc(subitem_size))) {
++ err = -RSBAC_ENOMEM;
++ goto out_unlock;
++ }
++ memcpy(new_subitem_p, subitem_p, subitem_size);
++ new_subitem_p->prev = NULL;
++ new_subitem_p->next = NULL;
++ if (new_item_p->tail) {
++ new_subitem_p->prev = new_item_p->tail;
++ new_item_p->tail->next = new_subitem_p;
++ new_item_p->tail = new_subitem_p;
++ new_item_p->count++;
++ } else {
++ new_item_p->head = new_subitem_p;
++ new_item_p->tail = new_subitem_p;
++ new_item_p->count = 1;
++ }
++ subitem_p = rcu_dereference(subitem_p->next);
++ }
++ item_p = rcu_dereference(item_p->next);
++ }
++ }
++ }
++
++#ifdef CONFIG_RSBAC_LIST_STATS
++ from_list->read_count++;
++ to_list->write_count++;
++#endif
++out_unlock:
++ rcu_head_lol_p = get_rcu_free_lol(to_list);
++ spin_unlock(&to_list->lock);
++ rcu_read_unlock();
++ do_sync_rcu_lol(rcu_head_lol_p);
++ return err;
++}
++
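++/*
++ * Resize the hash table of a list: allocate a new bucket array, copy all
++ * items into it under the list spinlock while readers still see the old
++ * array, publish the new array with rcu_assign_pointer(), wait for readers
++ * with synchronize_rcu(), then free the old items and the old bucket array.
++ * Returns -RSBAC_EBUSY while a transaction copy exists and -RSBAC_ENOMEM if
++ * an allocation fails.
++ */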
++static int do_rehash(struct rsbac_list_reg_item_t *list, u_int new_nr)
++{
++ struct rsbac_list_hashed_t * old_hashed;
++ struct rsbac_list_hashed_t * new_hashed;
++ int i;
++ struct rsbac_list_item_t *item_p;
++ struct rsbac_list_item_t *new_item_p;
++ u_int new_hash;
++ u_int old_nr;
++ u_int item_size;
++
++ new_hashed = rsbac_kmalloc_clear_unlocked(new_nr*sizeof(struct rsbac_list_hashed_t));
++ if(!new_hashed) {
++ return -RSBAC_ENOMEM;
++ }
++ spin_lock(&list->lock);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ for(i=0; i<list->nr_hashes; i++)
++ if(list->hashed[i].ta_copied) {
++ spin_unlock(&list->lock);
++ rsbac_kfree(new_hashed);
++ return -RSBAC_EBUSY;
++ }
++#endif
++ old_nr = list->nr_hashes;
++ old_hashed = list->hashed;
++ item_size = sizeof(*item_p) + list->info.desc_size + list->info.data_size;
++ for(i=0; i<old_nr; i++) {
++ item_p = old_hashed[i].head;
++ while(item_p) {
++ if (list->slab)
++ new_item_p = rsbac_smalloc(list->slab);
++ else
++ new_item_p = rsbac_kmalloc(item_size);
++ if (!new_item_p)
++ goto out_nomem;
++ memcpy(new_item_p, item_p, item_size);
++ new_hash = list->hash_function(&new_item_p[1], new_nr);
++ new_item_p->next = NULL;
++ if (!new_hashed[new_hash].head) {
++ new_hashed[new_hash].head = new_item_p;
++ new_hashed[new_hash].tail = new_item_p;
++ new_hashed[new_hash].count = 1;
++ new_item_p->prev = NULL;
++ } else {
++ new_item_p->prev = new_hashed[new_hash].tail;
++ new_hashed[new_hash].tail->next = new_item_p;
++ new_hashed[new_hash].tail = new_item_p;
++ new_hashed[new_hash].count++;
++ }
++ item_p = item_p->next;
++ }
++ }
++ rcu_assign_pointer(list->hashed, new_hashed);
++ list->nr_hashes = new_nr;
++ spin_unlock(&list->lock);
++ synchronize_rcu();
++ for(i=0; i<old_nr; i++) {
++ item_p = old_hashed[i].head;
++ while(item_p) {
++ new_item_p = item_p->next;
++ rsbac_sfree(list->slab, item_p);
++ item_p = new_item_p;
++ }
++ }
++ rsbac_kfree(old_hashed);
++ return 0;
++
++out_nomem:
++ spin_unlock(&list->lock);
++ for(i=0; i<new_nr; i++) {
++ item_p = new_hashed[i].head;
++ while(item_p) {
++ new_item_p = item_p->next;
++ rsbac_sfree(list->slab, item_p);
++ item_p = new_item_p;
++ }
++ }
++ rsbac_kfree(new_hashed);
++ return -RSBAC_ENOMEM;
++}
++
++static int do_lol_rehash(struct rsbac_list_lol_reg_item_t *list, u_int new_nr)
++{
++ struct rsbac_list_lol_hashed_t * old_hashed;
++ struct rsbac_list_lol_hashed_t * new_hashed;
++ int i;
++ struct rsbac_list_lol_item_t *item_p;
++ struct rsbac_list_lol_item_t *new_item_p;
++ u_int new_hash;
++ u_int old_nr;
++ u_int item_size;
++
++ new_hashed = rsbac_kmalloc_clear_unlocked(new_nr*sizeof(struct rsbac_list_lol_hashed_t));
++ if(!new_hashed) {
++ return -RSBAC_ENOMEM;
++ }
++ spin_lock(&list->lock);
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ for(i=0; i<list->nr_hashes; i++)
++ if(list->hashed[i].ta_copied) {
++ spin_unlock(&list->lock);
++ rsbac_kfree(new_hashed);
++ return -RSBAC_EBUSY;
++ }
++#endif
++ old_hashed = list->hashed;
++ old_nr = list->nr_hashes;
++ item_size = sizeof(*item_p) + list->info.desc_size + list->info.data_size;
++ for(i=0; i<old_nr; i++) {
++ item_p = old_hashed[i].head;
++ while(item_p) {
++ if (list->slab)
++ new_item_p = rsbac_smalloc(list->slab);
++ else
++ new_item_p = rsbac_kmalloc(item_size);
++ if (!new_item_p)
++ goto out_nomem;
++ memcpy(new_item_p, item_p, item_size);
++ new_hash = list->hash_function(&new_item_p[1], new_nr);
++ new_item_p->next = NULL;
++ if (!new_hashed[new_hash].head) {
++ new_hashed[new_hash].head = new_item_p;
++ new_hashed[new_hash].tail = new_item_p;
++ new_hashed[new_hash].count = 1;
++ new_item_p->prev = NULL;
++ } else {
++ new_item_p->prev = new_hashed[new_hash].tail;
++ new_hashed[new_hash].tail->next = new_item_p;
++ new_hashed[new_hash].tail = new_item_p;
++ new_hashed[new_hash].count++;
++ }
++ item_p = item_p->next;
++ }
++ }
++ rcu_assign_pointer(list->hashed, new_hashed);
++ list->nr_hashes = new_nr;
++ spin_unlock(&list->lock);
++ synchronize_rcu();
++ for(i=0; i<old_nr; i++) {
++ item_p = old_hashed[i].head;
++ while(item_p) {
++ new_item_p = item_p->next;
++ rsbac_sfree(list->slab, item_p);
++ item_p = new_item_p;
++ }
++ }
++ rsbac_kfree(old_hashed);
++ return 0;
++
++out_nomem:
++ spin_unlock(&list->lock);
++ for(i=0; i<new_nr; i++) {
++ item_p = new_hashed[i].head;
++ while(item_p) {
++ new_item_p = item_p->next;
++ rsbac_sfree(list->slab, item_p);
++ item_p = new_item_p;
++ }
++ }
++ rsbac_kfree(new_hashed);
++ return -RSBAC_ENOMEM;
++}
++
++/* Work through all lists and resize, if allowed and necessary */
++int rsbac_list_auto_rehash(void)
++{
++ int i;
++ int err;
++ struct rsbac_list_reg_item_t *list;
++ struct rsbac_list_lol_reg_item_t *lol_list;
++ long count;
++ u_int nr_rehashed = 0;
++ int srcu_idx;
++
++ srcu_idx = srcu_read_lock(&reg_list_srcu);
++ list = reg_head.head;
++ while(list) {
++ if((list->flags & RSBAC_LIST_AUTO_HASH_RESIZE)
++ && (list->nr_hashes < rsbac_list_max_hashes)) {
++ count = 0;
++ for (i=0; i<list->nr_hashes; i++) {
++ count += list->hashed[i].count;
++ }
++ if(count / list->nr_hashes > RSBAC_LIST_AUTO_REHASH_TRIGGER) {
++ u_int new_nr;
++
++ new_nr = list->nr_hashes;
++ while((new_nr < rsbac_list_max_hashes)
++ && (count / new_nr > RSBAC_LIST_AUTO_REHASH_TRIGGER))
++ new_nr = new_nr << 1;
++ if(new_nr > rsbac_list_max_hashes)
++ new_nr = rsbac_list_max_hashes;
++ rsbac_printk(KERN_INFO "rsbac_list_auto_rehash(): changing list %s hash size on device %02u:%02u from %u to %u\n",
++ list->name, MAJOR(list->device), MINOR(list->device), list->nr_hashes, new_nr);
++ err = do_rehash(list, new_nr);
++ if(!err)
++ nr_rehashed++;
++ else {
++ rsbac_printk(KERN_WARNING "rsbac_list_auto_rehash(): changing list %s hash size on device %02u:%02u from %u to %u failed with error %i\n",
++ list->name, MAJOR(list->device), MINOR(list->device), list->nr_hashes, new_nr, err);
++ }
++ }
++ }
++ list = list->next;
++ }
++ srcu_read_unlock(&reg_list_srcu, srcu_idx);
++ srcu_idx = srcu_read_lock(&lol_reg_list_srcu);
++ lol_list = lol_reg_head.head;
++ while(lol_list) {
++ if((lol_list->flags & RSBAC_LIST_AUTO_HASH_RESIZE)
++ && (lol_list->nr_hashes < rsbac_list_lol_max_hashes)) {
++ count = 0;
++ for (i=0; i<lol_list->nr_hashes; i++) {
++ count += lol_list->hashed[i].count;
++ }
++ if(count / lol_list->nr_hashes > RSBAC_LIST_AUTO_REHASH_TRIGGER) {
++ u_int new_nr;
++
++ new_nr = lol_list->nr_hashes;
++ while((new_nr < rsbac_list_lol_max_hashes)
++ && (count / new_nr > RSBAC_LIST_AUTO_REHASH_TRIGGER))
++ new_nr = new_nr << 1;
++ if(new_nr > rsbac_list_lol_max_hashes)
++ new_nr = rsbac_list_lol_max_hashes;
++ rsbac_printk(KERN_INFO "rsbac_list_auto_rehash(): changing list of lists %s hash size on device %02u:%02u from %u to %u\n",
++ lol_list->name, MAJOR(lol_list->device), MINOR(lol_list->device), lol_list->nr_hashes, new_nr);
++ err = do_lol_rehash(lol_list, new_nr);
++ if(!err)
++ nr_rehashed++;
++ else {
++ rsbac_printk(KERN_WARNING "rsbac_list_auto_rehash(): changing list of lists %s hash size on device %02u:%02u from %u to %u failed with error %i\n",
++ lol_list->name, MAJOR(lol_list->device), MINOR(lol_list->device), lol_list->nr_hashes, new_nr, err);
++ }
++ }
++ }
++ lol_list = lol_list->next;
++ }
++ srcu_read_unlock(&lol_reg_list_srcu, srcu_idx);
++
++ if(nr_rehashed > 0)
++ rsbac_printk(KERN_INFO "rsbac_list_auto_rehash(): %u lists rehashed\n",
++ nr_rehashed);
++ return nr_rehashed;
++}
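As an illustration of the rehash trigger logic above: the bucket count is doubled until the average chain length drops to the trigger value, capped at the maximum. A minimal standalone userspace sketch of just that calculation (the names and numbers here are hypothetical, not taken from the RSBAC headers):

#include <stdio.h>

/* Same doubling rule rsbac_list_auto_rehash() applies before calling
 * do_rehash(): grow until count / buckets <= trigger, never past the cap. */
static unsigned int pick_new_nr_hashes(unsigned long count,
				       unsigned int nr_hashes,
				       unsigned int max_hashes,
				       unsigned long trigger)
{
	unsigned int new_nr = nr_hashes;

	while (new_nr < max_hashes && count / new_nr > trigger)
		new_nr <<= 1;
	if (new_nr > max_hashes)
		new_nr = max_hashes;
	return new_nr;
}

int main(void)
{
	/* 5000 entries in 16 buckets with a trigger of 80 -> prints 64 */
	printf("%u\n", pick_new_nr_hashes(5000, 16, 512, 80));
	return 0;
}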
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_get_nr_hashes);
++#endif
++long rsbac_list_get_nr_hashes(rsbac_list_handle_t handle)
++{
++ struct rsbac_list_reg_item_t *list;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++ return list->nr_hashes;
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_list_lol_get_nr_hashes);
++#endif
++long rsbac_list_lol_get_nr_hashes(rsbac_list_handle_t handle)
++{
++ struct rsbac_list_lol_reg_item_t *list;
++
++ if (!handle)
++ return -RSBAC_EINVALIDLIST;
++ if (!list_initialized)
++ return -RSBAC_ENOTINITIALIZED;
++
++ list = (struct rsbac_list_lol_reg_item_t *) handle;
++ if (list->self != list)
++ return -RSBAC_EINVALIDLIST;
++
++ return list->nr_hashes;
++}
+diff --git a/rsbac/data_structures/mac_data_structures.c b/rsbac/data_structures/mac_data_structures.c
+new file mode 100644
+index 0000000..e524ab1
+--- /dev/null
++++ b/rsbac/data_structures/mac_data_structures.c
+@@ -0,0 +1,1209 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of MAC data structures */
++/* Author and (c) 1999-2011: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 12/Jul/2011 */
++/*************************************************** */
++
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/fs.h>
++#include <linux/ext2_fs.h>
++#include <linux/srcu.h>
++#include <asm/uaccess.h>
++#include <rsbac/types.h>
++#include <rsbac/aci_data_structures.h>
++#include <rsbac/mac_data_structures.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/adf.h>
++#include <rsbac/aci.h>
++#include <rsbac/lists.h>
++#include <rsbac/proc_fs.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/getname.h>
++#include <linux/string.h>
++#include <linux/seq_file.h>
++#include <linux/module.h>
++
++/************************************************************************** */
++/* Global Variables */
++/************************************************************************** */
++
++static struct rsbac_mac_device_list_head_t device_list_head;
++static struct srcu_struct device_list_srcu;
++static struct lock_class_key device_list_lock_class;
++
++static rsbac_list_handle_t process_handle = NULL;
++
++/**************************************************/
++/* Declarations of external functions */
++/**************************************************/
++
++rsbac_boolean_t writable(struct super_block *sb_p);
++
++/**************************************************/
++/* Declarations of internal functions */
++/**************************************************/
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++static u_int nr_fd_hashes = RSBAC_MAC_NR_TRU_FD_LISTS;
++
++static int fd_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ memcpy(new_desc, old_desc, sizeof(rsbac_inode_nr_t));
++ return 0;
++}
++
++static rsbac_list_conv_function_t *fd_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_MAC_FD_OLD_LIST_VERSION:
++ return fd_conv;
++ default:
++ return NULL;
++ }
++}
++
++static int fd_subconv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ *((rsbac_uid_t *) new_desc) = *((rsbac_old_uid_t *) old_desc);
++ return 0;
++}
++
++static rsbac_list_conv_function_t *fd_get_subconv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_MAC_FD_OLD_LIST_VERSION:
++ return fd_subconv;
++ default:
++ return NULL;
++ }
++}
++
++
++
++/* mac_register_fd_lists() */
++/* register fd MAC trusted user lists for device */
++
++static int mac_register_fd_lists(struct rsbac_mac_device_list_item_t
++ *device_p, kdev_t kdev)
++{
++ int err = 0;
++ int tmperr;
++ struct rsbac_list_lol_info_t lol_info;
++
++ if (!device_p)
++ return -RSBAC_EINVALIDPOINTER;
++
++ lol_info.version = RSBAC_MAC_FD_LIST_VERSION;
++ lol_info.key = RSBAC_MAC_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_inode_nr_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_uid_t);
++ lol_info.subdata_size = 0; /* rights */
++ lol_info.max_age = 0;
++ tmperr = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &device_p->handle,
++ &lol_info,
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA,
++ NULL,
++ NULL,
++ fd_get_conv, fd_get_subconv,
++ NULL, NULL,
++ RSBAC_MAC_FD_FILENAME, kdev,
++ nr_fd_hashes,
++ rsbac_list_hash_fd,
++ RSBAC_MAC_FD_OLD_FILENAME);
++ if (tmperr) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "mac_register_fd_lists(): registering list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_MAC_FD_FILENAME,
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev),
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ err = tmperr;
++ }
++ return err;
++}
++
++/* mac_detach_fd_lists() */
++/* detach from fd MAC lists for device */
++
++static int mac_detach_fd_lists(struct rsbac_mac_device_list_item_t
++ *device_p)
++{
++ int err = 0;
++
++ if (!device_p)
++ return -RSBAC_EINVALIDPOINTER;
++
++ err = rsbac_list_lol_detach(&device_p->handle,
++ RSBAC_MAC_LIST_KEY);
++ if (err) {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "mac_detach_fd_lists(): detaching from list %s for device %02u:%02u failed with error %s!\n",
++ RSBAC_MAC_FD_FILENAME,
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ get_error_name(tmp, err));
++ rsbac_kfree(tmp);
++ }
++ }
++ return err;
++}
++
++/************************************************************************** */
++/* The lookup functions return NULL, if the item is not found, and a */
++/* pointer to the item otherwise. */
++
++/* first the device item lookup */
++static struct rsbac_mac_device_list_item_t *lookup_device(kdev_t kdev)
++{
++ struct rsbac_mac_device_list_item_t *curr = rcu_dereference(device_list_head.curr);
++
++ /* if there is no current item or it is not the right one, search... */
++ if (!(curr && (RSBAC_MAJOR(curr->id) == RSBAC_MAJOR(kdev))
++ && (RSBAC_MINOR(curr->id) == RSBAC_MINOR(kdev))
++ )
++ ) {
++ curr = rcu_dereference(device_list_head.head);
++ while (curr
++ && ((RSBAC_MAJOR(curr->id) != RSBAC_MAJOR(kdev))
++ || (RSBAC_MINOR(curr->id) != RSBAC_MINOR(kdev))
++ )
++ ) {
++ curr = rcu_dereference(curr->next);
++ }
++ if (curr)
++ rcu_assign_pointer(device_list_head.curr, curr);
++ }
++ /* it is the current item -> return it */
++ return curr;
++}
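The lookup above keeps a cached "current" pointer so repeated lookups of the same device skip the full walk. A standalone sketch of that pattern with hypothetical types (the RCU publication of the cache is omitted here):

#include <stddef.h>
#include <stdio.h>

struct item {
	int id;
	struct item *next;
};

struct cached_list {
	struct item *head;
	struct item *curr;	/* last successful lookup */
};

/* Return NULL if id is absent, the matching item otherwise, and remember
 * the hit so an immediate repeat lookup is O(1). */
static struct item *lookup(struct cached_list *l, int id)
{
	struct item *curr = l->curr;

	if (!(curr && curr->id == id)) {
		curr = l->head;
		while (curr && curr->id != id)
			curr = curr->next;
		if (curr)
			l->curr = curr;
	}
	return curr;
}

int main(void)
{
	struct item b = { 2, NULL };
	struct item a = { 1, &b };
	struct cached_list l = { &a, NULL };

	printf("%d\n", lookup(&l, 2) ? lookup(&l, 2)->id : -1);
	return 0;
}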
++
++/************************************************************************** */
++/* The add_item() functions add an item to the list, set head.curr to it, */
++/* and return a pointer to the item. */
++/* These functions will NOT check, if there is already an item under the */
++/* same ID! If this happens, the lookup functions will return the old item! */
++/* All list manipulation is protected by the head spinlock for writers and */
++/* by SRCU/RCU for readers to prevent inconsistency and undefined behaviour */
++/* in other concurrent functions. */
++
++/* Create a device item without adding to list. No locking needed. */
++static struct rsbac_mac_device_list_item_t
++*create_device_item(kdev_t kdev)
++{
++ struct rsbac_mac_device_list_item_t *new_item_p;
++
++ /* allocate memory for new device, return NULL, if failed */
++ if (!(new_item_p = (struct rsbac_mac_device_list_item_t *)
++ rsbac_kmalloc(sizeof(*new_item_p))))
++ return NULL;
++
++ new_item_p->id = kdev;
++ new_item_p->mount_count = 1;
++
++ /* init file/dir sublists */
++ new_item_p->handle = NULL;
++ return new_item_p;
++}
++
++/* Add an existing device item to list. Locking needed. */
++static struct rsbac_mac_device_list_item_t
++*add_device_item(struct rsbac_mac_device_list_item_t *device_p)
++{
++ if (!device_p)
++ return NULL;
++
++ /* add new device to device list */
++ if (!device_list_head.head) { /* first device */
++ device_p->prev = NULL;
++ device_p->next = NULL;
++ rcu_assign_pointer(device_list_head.head, device_p);
++ rcu_assign_pointer(device_list_head.tail, device_p);
++ rcu_assign_pointer(device_list_head.curr, device_p);
++ device_list_head.count = 1;
++ } else { /* there is another device -> hang to tail */
++ device_p->prev = device_list_head.tail;
++ device_p->next = NULL;
++ rcu_assign_pointer(device_list_head.tail->next, device_p);
++ rcu_assign_pointer(device_list_head.tail, device_p);
++ rcu_assign_pointer(device_list_head.curr, device_p);
++ device_list_head.count++;
++ }
++ return device_p;
++}
++
++/************************************************************************** */
++/* The remove_item() functions remove an item from the list. If this item */
++/* is head, tail or curr, these pointers are set accordingly. */
++/* To speed up removing several subsequent items, curr is set to the next */
++/* item, if possible. */
++/* If the item is not found, nothing is done. */
++
++static void clear_device_item(struct rsbac_mac_device_list_item_t *item_p)
++{
++ if (!item_p)
++ return;
++
++ /* First deregister lists... */
++ mac_detach_fd_lists(item_p);
++ rsbac_kfree(item_p);
++}
++
++static void remove_device_item(struct rsbac_mac_device_list_item_t *item_p)
++{
++ /* first we must locate the item. */
++ if (item_p) { /* ok, item was found */
++ if (device_list_head.head == item_p) { /* item is head */
++ if (device_list_head.tail == item_p) { /* item is head and tail = only item -> list will be empty */
++ rcu_assign_pointer(device_list_head.head, NULL);
++ rcu_assign_pointer(device_list_head.tail, NULL);
++ } else { /* item is head, but not tail -> next item becomes head */
++ rcu_assign_pointer(item_p->next->prev, NULL);
++ rcu_assign_pointer(device_list_head.head, item_p->next);
++ }
++ } else { /* item is not head */
++ if (device_list_head.tail == item_p) { /*item is not head, but tail -> previous item becomes tail */
++ rcu_assign_pointer(item_p->prev->next, NULL);
++ rcu_assign_pointer(device_list_head.tail, item_p->prev);
++ } else { /* item is neither head nor tail -> item is cut out */
++ rcu_assign_pointer(item_p->prev->next, item_p->next);
++ rcu_assign_pointer(item_p->next->prev, item_p->prev);
++ }
++ }
++
++ /* curr is no longer valid -> reset. */
++ device_list_head.curr = NULL;
++ /* adjust counter */
++ device_list_head.count--;
++ }
++}
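The unlink logic above is a plain doubly linked list removal plus RCU publication. A standalone sketch of the same head/tail bookkeeping, with hypothetical names and without the RCU and locking:

#include <stddef.h>
#include <stdio.h>

struct node {
	int id;
	struct node *prev;
	struct node *next;
};

struct dlist {
	struct node *head;
	struct node *tail;
	unsigned int count;
};

/* Unlink n, adjusting head/tail the same way remove_device_item() does. */
static void dlist_remove(struct dlist *l, struct node *n)
{
	if (l->head == n)
		l->head = n->next;
	else
		n->prev->next = n->next;
	if (l->tail == n)
		l->tail = n->prev;
	else
		n->next->prev = n->prev;
	l->count--;
}

int main(void)
{
	struct node a = { 1, NULL, NULL };
	struct node b = { 2, &a, NULL };
	struct dlist l = { &a, &b, 2 };

	a.next = &b;
	dlist_remove(&l, &a);
	printf("head=%d count=%u\n", l.head->id, l.count);
	return 0;
}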
++
++/************************************************************************** */
++/* The copy_fp_tru_set_item() function copies a file trusted user set to a */
++/* process trusted user set */
++
++static int copy_fp_tru_set_item(struct rsbac_mac_device_list_item_t
++ *device_p, rsbac_mac_file_t file,
++ rsbac_pid_t pid)
++{
++ rsbac_uid_t *tru_item_p;
++ rsbac_time_t *ttl_p;
++ int i;
++ long count;
++ enum rsbac_target_t target = T_FILE;
++ union rsbac_target_id_t tid;
++
++ rsbac_list_lol_remove(process_handle, &pid);
++ count = rsbac_list_lol_get_all_subdesc_ttl(device_p->handle,
++ &file.inode,
++ (void **) &tru_item_p,
++ &ttl_p);
++ if (!count || (count == -RSBAC_ENOTFOUND)
++ ) {
++ tid.file = file;
++ if (!rsbac_get_parent(target, tid, &target, &tid))
++ count =
++ rsbac_list_lol_get_all_subdesc_ttl(device_p->handle,
++ &tid.file.
++ inode,
++ (void **)
++ &tru_item_p,
++ &ttl_p);
++ }
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ rsbac_list_lol_subadd_ttl(process_handle,
++ ttl_p[i],
++ &pid,
++ &tru_item_p[i], NULL);
++ }
++ rsbac_kfree(tru_item_p);
++ rsbac_kfree(ttl_p);
++ } else {
++ if ((count < 0)
++ && (count != -RSBAC_ENOTFOUND)
++ )
++ return count;
++ }
++
++ return 0;
++} /* end of copy_fp_tru_set_item() */
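The copy above falls back to the parent directory's trusted user set when the file itself has no entry. A simplified userspace sketch of that fallback idea, with toy arrays standing in for the list-of-lists API (all names here are hypothetical):

#include <stdio.h>

#define NKEYS	8
#define MAXVALS	8

/* toy keyed sublists: vals[key][0 .. nvals[key]-1] */
static int vals[NKEYS][MAXVALS];
static int nvals[NKEYS];

static int parent_of(int key)
{
	return 0;	/* in this toy everything hangs off key 0 */
}

/* Copy the sublist of src, or of its parent if src has none, onto dst. */
static int copy_with_parent_fallback(int src, int dst)
{
	int from = nvals[src] ? src : parent_of(src);
	int i;

	nvals[dst] = 0;
	for (i = 0; i < nvals[from]; i++)
		vals[dst][nvals[dst]++] = vals[from][i];
	return nvals[dst];
}

int main(void)
{
	vals[0][0] = 400;	/* only the parent directory has an entry */
	nvals[0] = 1;
	printf("copied %d entries\n", copy_with_parent_fallback(3, 5));
	return 0;
}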
++
++/************************************************************************** */
++/* The copy_pp_tru_set_item() function copies a process trusted user set to another */
++
++static int copy_pp_tru_set_item_handle(rsbac_list_handle_t handle,
++ rsbac_pid_t old_pid,
++ rsbac_pid_t new_pid)
++{
++ rsbac_uid_t *tru_item_p;
++ rsbac_time_t *ttl_p;
++ int i;
++ long count;
++
++ rsbac_list_lol_remove(handle, &new_pid);
++ count = rsbac_list_lol_get_all_subdesc_ttl(handle,
++ &old_pid,
++ (void **) &tru_item_p,
++ &ttl_p);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ rsbac_list_lol_subadd_ttl(handle,
++ ttl_p[i],
++ &new_pid,
++ &tru_item_p[i], NULL);
++ }
++ rsbac_kfree(tru_item_p);
++ rsbac_kfree(ttl_p);
++ } else {
++ if (count < 0)
++ return count;
++ }
++ return 0;
++}
++
++static int copy_pp_tru_set_item(rsbac_pid_t old_pid, rsbac_pid_t new_pid)
++{
++ return copy_pp_tru_set_item_handle(process_handle, old_pid,
++ new_pid);
++} /* end of copy_pp_tru_set_item() */
++
++/************************************************* */
++/* proc functions */
++/************************************************* */
++
++#if defined(CONFIG_RSBAC_PROC) && defined(CONFIG_PROC_FS)
++static int
++mac_devices_proc_show(struct seq_file *m, void *v)
++{
++ struct rsbac_mac_device_list_item_t *device_p;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized())
++ return -ENOSYS;
++
++ seq_printf(m, "%u RSBAC MAC Devices\n-------------------\n",
++ device_list_head.count);
++
++ /* wait for read access to device_list_head */
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ /* OK, go on */
++ for (device_p = rcu_dereference(device_list_head.head); device_p;
++ device_p = rcu_dereference(device_p->next)) {
++ seq_printf(m,
++ "%02u:%02u with mount_count = %u\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ device_p->mount_count);
++ }
++
++ /* free access to device_list_head */
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++
++ return 0;
++}
++
++static int mac_devices_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, mac_devices_proc_show, NULL);
++}
++
++static const struct file_operations mac_devices_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = mac_devices_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *mac_devices;
++
++static int
++stats_mac_proc_show(struct seq_file *m, void *v)
++{
++ struct rsbac_mac_device_list_item_t *device_p;
++ int srcu_idx;
++
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_is_initialized()) {
++		rsbac_printk(KERN_WARNING "stats_mac_proc_show(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ rsbac_pr_debug(aef_mac, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ seq_printf(m, "MAC Status\n----------\n");
++
++ seq_printf(m,
++ "%lu process trusted user set items, sum of %lu members\n",
++ rsbac_list_lol_count(process_handle),
++ rsbac_list_lol_all_subcount(process_handle));
++
++ /* protect device list */
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = rcu_dereference(device_list_head.head);
++ while (device_p) {
++ /* reset counters */
++ seq_printf(m,
++ "device %02u:%02u has %lu file trusted user set items, sum of %lu members\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ rsbac_list_lol_count(device_p->handle),
++ rsbac_list_lol_all_subcount(device_p->handle));
++ device_p = rcu_dereference(device_p->next);
++ }
++ /* unprotect device list */
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++
++ return 0;
++}
++
++static int stats_mac_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, stats_mac_proc_show, NULL);
++}
++
++static const struct file_operations stats_mac_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = stats_mac_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *stats_mac;
++
++static int
++mac_trulist_proc_show(struct seq_file *m, void *v)
++{
++ u_int count = 0;
++ u_int member_count = 0;
++ u_long all_member_count;
++ int i, j;
++ struct rsbac_mac_device_list_item_t *device_p;
++ rsbac_pid_t *p_list;
++ rsbac_inode_nr_t *f_list;
++ rsbac_uid_t *tru_list;
++ int srcu_idx;
++
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_is_initialized()) {
++		rsbac_printk(KERN_WARNING "mac_trulist_proc_show(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ rsbac_pr_debug(aef_mac, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ seq_printf(m,
++ "MAC Trusted User Lists\n---------------------\n");
++
++ /* protect process cap set list */
++ seq_printf(m,
++ "Process trusted user sets:\nset-id count members");
++
++ all_member_count = 0;
++ count = rsbac_list_lol_get_all_desc(process_handle,
++ (void **) &p_list);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ member_count =
++ rsbac_list_lol_get_all_subdesc(process_handle,
++ &p_list[i],
++ (void **)
++ &tru_list);
++ seq_printf(m, "\n %u\t%u\t", pid_vnr(p_list[i]),
++ member_count);
++ if (member_count > 0) {
++ for (j = 0; j < member_count; j++) {
++ if (RSBAC_UID_SET(tru_list[j]))
++ seq_printf(m, "%u/%u ",
++ RSBAC_UID_SET(tru_list[j]),
++ RSBAC_UID_NUM(tru_list[j]));
++ else
++ seq_printf(m, "%u ",
++ RSBAC_UID_NUM(tru_list[j]));
++ }
++ rsbac_kfree(tru_list);
++ all_member_count += member_count;
++ }
++ }
++ rsbac_kfree(p_list);
++ }
++ seq_printf(m,
++ "\n%u process trusted user set items, sum of %lu members\n",
++ count, all_member_count);
++
++ seq_printf(m,
++ "\nFile trusted user sets:\nset-id count members");
++
++ /* protect device list */
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = rcu_dereference(device_list_head.head);
++ while (device_p) {
++ /* reset counters */
++ all_member_count = 0;
++ count = rsbac_list_lol_get_all_desc(device_p->handle,
++ (void **) &f_list);
++ if (count > 0) {
++ for (i = 0; i < count; i++) {
++ member_count =
++ rsbac_list_lol_get_all_subdesc
++ (device_p->handle,
++ &f_list[i],
++ (void **) &tru_list);
++ seq_printf(m,
++ "\n %u\t%u\t",
++ f_list[i],
++ member_count);
++ if (member_count > 0) {
++ for (j = 0;
++ j < member_count;
++ j++) {
++ if (RSBAC_UID_SET(tru_list[j]))
++ seq_printf(m,
++ "%u/%u ",
++ RSBAC_UID_SET(tru_list[j]),
++ RSBAC_UID_NUM(tru_list[j]));
++ else
++ seq_printf(m,
++ "%u ",
++ RSBAC_UID_NUM(tru_list[j]));
++ }
++ rsbac_kfree(tru_list);
++ all_member_count +=
++ member_count;
++ }
++ }
++ rsbac_kfree(f_list);
++ }
++ seq_printf(m,
++ "\ndevice %02u:%02u has %u file trusted user set items, sum of %lu members\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id), count,
++ all_member_count);
++ device_p = rcu_dereference(device_p->next);
++ }
++ /* unprotect device list */
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++
++ return 0;
++}
++
++static int mac_trulist_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, mac_trulist_proc_show, NULL);
++}
++
++static const struct file_operations mac_trulist_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = mac_trulist_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *mac_trulist;
++
++#endif /* CONFIG_PROC_FS && CONFIG_RSBAC_PROC */
++
++/************************************************* */
++/* Init functions */
++/************************************************* */
++
++/* All functions return 0, if no error occurred, and a negative error code */
++/* otherwise. The error codes are defined in rsbac/error.h. */
++
++/************************************************************************** */
++/* Initialization of all MAC data structures. After this call, all MAC */
++/* data is kept in memory for performance reasons, but is written to disk */
++/* on every change. */
++
++/* Because there can be no access to aci data structures before init, */
++/* rsbac_init_mac() will initialize the device list spinlock and SRCU struct. */
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++int rsbac_init_mac(void)
++#else
++int __init rsbac_init_mac(void)
++#endif
++{
++ int err = 0;
++ struct rsbac_mac_device_list_item_t *device_p = NULL;
++ struct rsbac_list_lol_info_t lol_info;
++
++ if (rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_init_mac(): RSBAC already initialized\n");
++ return -RSBAC_EREINIT;
++ }
++
++ rsbac_printk(KERN_INFO "rsbac_init_mac(): Initializing RSBAC: MAC subsystem\n");
++
++ lol_info.version = RSBAC_MAC_P_LIST_VERSION;
++ lol_info.key = RSBAC_MAC_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_pid_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_uid_t);
++ lol_info.subdata_size = 0;
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register(RSBAC_LIST_VERSION,
++ &process_handle,
++ &lol_info,
++ RSBAC_LIST_DEF_DATA,
++ NULL,
++ NULL,
++ NULL,
++ NULL,
++ NULL,
++ NULL,
++ RSBAC_MAC_P_LIST_NAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ char *tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_init_mac(): Registering MAC process trusted user list failed with error %s\n",
++ get_error_name(tmp, err));
++ rsbac_kfree(tmp);
++ }
++ }
++
++ /* Init FD lists */
++ spin_lock_init(&device_list_head.lock);
++ init_srcu_struct(&device_list_srcu);
++ lockdep_set_class(&device_list_head.lock, &device_list_lock_class);
++ device_list_head.head = NULL;
++ device_list_head.tail = NULL;
++ device_list_head.curr = NULL;
++ device_list_head.count = 0;
++
++ /* read all data */
++ rsbac_pr_debug(ds_mac, "rsbac_init_mac(): Registering FD lists\n");
++ device_p = create_device_item(rsbac_root_dev);
++ if (!device_p) {
++ rsbac_printk(KERN_CRIT
++ "rsbac_init_mac(): Could not add device!\n");
++ return -RSBAC_ECOULDNOTADDDEVICE;
++ }
++ if ((err = mac_register_fd_lists(device_p, rsbac_root_dev))) {
++ char tmp[RSBAC_MAXNAMELEN];
++
++ rsbac_printk(KERN_WARNING "rsbac_init_mac(): File/Dir trusted user set registration failed for dev %02u:%02u, err %s!\n",
++ RSBAC_MAJOR(rsbac_root_dev),
++ RSBAC_MINOR(rsbac_root_dev),
++ get_error_name(tmp, err));
++ }
++ /* wait for write access to device_list_head */
++ spin_lock(&device_list_head.lock);
++ device_p = add_device_item(device_p);
++ /* device was added, allow access */
++ spin_unlock(&device_list_head.lock);
++ if (!device_p) {
++ rsbac_printk(KERN_CRIT
++ "rsbac_init_mac(): Could not add device!\n");
++ return -RSBAC_ECOULDNOTADDDEVICE;
++ }
++#if defined(CONFIG_RSBAC_PROC) && defined(CONFIG_PROC_FS)
++ mac_devices = proc_create("mac_devices",
++ S_IFREG | S_IRUGO | S_IWUGO,
++ proc_rsbac_root_p, &mac_devices_proc_fops);
++ stats_mac = proc_create("stats_mac",
++ S_IFREG | S_IRUGO,
++ proc_rsbac_root_p, &stats_mac_proc_fops);
++ mac_trulist = proc_create("mac_trusted",
++ S_IFREG | S_IRUGO,
++ proc_rsbac_root_p, &mac_trulist_proc_fops);
++#endif
++
++ rsbac_pr_debug(aef_mac, "Ready.\n");
++ return err;
++}
++
++int rsbac_mount_mac(kdev_t kdev)
++{
++ int err = 0;
++ struct rsbac_mac_device_list_item_t *device_p;
++ struct rsbac_mac_device_list_item_t *new_device_p;
++ int srcu_idx;
++
++ rsbac_pr_debug(aef_mac, "mounting device %02u:%02u\n",
++ RSBAC_MAJOR(kdev), RSBAC_MINOR(kdev));
++ /* wait for write access to device_list_head */
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = lookup_device(kdev);
++ /* repeated mount? */
++ if (device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_mount_mac: repeated mount %u of device %02u:%02u\n",
++ device_p->mount_count, RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev));
++ device_p->mount_count++;
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return 0;
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++
++ new_device_p = create_device_item(kdev);
++ if (!new_device_p)
++ return -RSBAC_ECOULDNOTADDDEVICE;
++
++ /* register lists */
++ if ((err = mac_register_fd_lists(new_device_p, kdev))) {
++ char tmp[RSBAC_MAXNAMELEN];
++
++		rsbac_printk(KERN_WARNING "rsbac_mount_mac(): File/Dir trusted user set registration failed for dev %02u:%02u, err %s!\n",
++ RSBAC_MAJOR(kdev), RSBAC_MINOR(kdev),
++ get_error_name(tmp, err));
++ }
++
++ /* wait for read access to device_list_head */
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ /* make sure to only add, if this device item has not been added in the meantime */
++ device_p = lookup_device(kdev);
++ if (device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_mount_mac(): mount race for device %02u:%02u detected!\n",
++ RSBAC_MAJOR(kdev), RSBAC_MINOR(kdev));
++ device_p->mount_count++;
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ clear_device_item(new_device_p);
++ } else {
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ spin_lock(&device_list_head.lock);
++ device_p = add_device_item(new_device_p);
++ spin_unlock(&device_list_head.lock);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_mount_mac: adding device %02u:%02u failed!\n",
++ RSBAC_MAJOR(kdev), RSBAC_MINOR(kdev));
++ clear_device_item(new_device_p);
++ err = -RSBAC_ECOULDNOTADDDEVICE;
++ }
++ }
++ return err;
++}
++
++/* When umounting a device, its file lists must be removed. */
++
++int rsbac_umount_mac(kdev_t kdev)
++{
++ struct rsbac_mac_device_list_item_t *device_p;
++
++ if (!rsbac_is_initialized()) {
++		rsbac_printk(KERN_WARNING "rsbac_umount_mac(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ rsbac_pr_debug(aef_mac, "umounting device %02u:%02u\n",
++ RSBAC_MAJOR(kdev), RSBAC_MINOR(kdev));
++ /* sync of attribute lists was done in rsbac_umount */
++ /* wait for write access to device_list_head */
++ spin_lock(&device_list_head.lock);
++ /* OK, nobody else is working on it... */
++ device_p = lookup_device(kdev);
++ if (device_p) {
++ if (device_p->mount_count == 1) {
++ remove_device_item(device_p);
++ spin_unlock(&device_list_head.lock);
++ synchronize_srcu(&device_list_srcu);
++ clear_device_item(device_p);
++ } else {
++ if (device_p->mount_count > 1) {
++ device_p->mount_count--;
++ } else {
++				rsbac_printk(KERN_WARNING "rsbac_umount_mac(): device %02u:%02u has mount_count < 1!\n",
++ RSBAC_MAJOR(kdev),
++ RSBAC_MINOR(kdev));
++ }
++ spin_unlock(&device_list_head.lock);
++ }
++ } else
++ spin_unlock(&device_list_head.lock);
++ return 0;
++}
++
++/***************************************************/
++/* We also need some status information... */
++
++int rsbac_stats_mac(void)
++{
++ struct rsbac_mac_device_list_item_t *device_p;
++ int srcu_idx;
++
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_stats_mac(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ rsbac_pr_debug(aef_mac, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ rsbac_printk(KERN_INFO "MAC Status\n----------\n");
++
++ rsbac_printk(KERN_INFO "%lu process trusted user set items, sum of %lu members\n",
++ rsbac_list_lol_count(process_handle),
++ rsbac_list_lol_all_subcount(process_handle));
++
++ /* protect device list */
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = rcu_dereference(device_list_head.head);
++ while (device_p) {
++		rsbac_printk(KERN_INFO "device %02u:%02u has %lu file trusted user set items, sum of %lu members\n",
++ RSBAC_MAJOR(device_p->id),
++ RSBAC_MINOR(device_p->id),
++ rsbac_list_lol_count(device_p->handle),
++ rsbac_list_lol_all_subcount(device_p->handle));
++ device_p = rcu_dereference(device_p->next);
++ }
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return 0;
++}
++
++/************************************************* */
++/* Access functions */
++/************************************************* */
++
++/* All these procedures take the SRCU read lock to protect the device list */
++/* during access. */
++/* Trying to access a never created or removed set returns an error! */
++
++/* rsbac_mac_add_to_truset */
++/* Add a set member to a set sublist. Set behaviour: also returns success, */
++/* if member was already in set! */
++
++int rsbac_mac_add_to_p_truset(rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ rsbac_uid_t member, rsbac_time_t ttl)
++{
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_add_to_p_truset(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_add_to_p_truset(): called from interrupt!\n");
++ }
++ return rsbac_ta_list_lol_subadd_ttl(ta_number, process_handle, ttl,
++ &pid, &member, NULL);
++}
++
++int rsbac_mac_add_to_f_truset(rsbac_list_ta_number_t ta_number,
++ rsbac_mac_file_t file,
++ rsbac_uid_t member, rsbac_time_t ttl)
++{
++ int err = 0;
++ struct rsbac_mac_device_list_item_t *device_p;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_add_to_f_truset(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_add_to_f_truset(): called from interrupt!\n");
++ }
++
++ /* protect device list */
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = lookup_device(file.device);
++ if (!device_p) {
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ rsbac_printk(KERN_WARNING "rsbac_mac_add_to_f_truset(): invalid device %02u:%02u!\n",
++ RSBAC_MAJOR(file.device),
++ RSBAC_MINOR(file.device));
++ return -RSBAC_EINVALIDDEV;
++ }
++
++ err = rsbac_ta_list_lol_subadd_ttl(ta_number,
++ device_p->handle,
++ ttl, &file.inode, &member,
++ NULL);
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return err;
++}
++
++/* rsbac_mac_remove_from_truset */
++/* Remove a set member from a sublist. Set behaviour: Returns no error, if */
++/* member is not in list. */
++
++int rsbac_mac_remove_from_p_truset(rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid, rsbac_uid_t member)
++{
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_remove_from_p_truset(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_remove_from_p_truset(): called from interrupt!\n");
++ }
++ return rsbac_ta_list_lol_subremove(ta_number, process_handle, &pid,
++ &member);
++}
++
++int rsbac_mac_remove_from_f_truset(rsbac_list_ta_number_t ta_number,
++ rsbac_mac_file_t file,
++ rsbac_uid_t member)
++{
++ int err = 0;
++ struct rsbac_mac_device_list_item_t *device_p;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_remove_from_f_truset(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_remove_from_f_truset(): called from interrupt!\n");
++ }
++
++ /* protect device list */
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = lookup_device(file.device);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_remove_from_f_truset(): invalid device %02u:%02u!\n",
++ RSBAC_MAJOR(file.device),
++ RSBAC_MINOR(file.device));
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ err = rsbac_ta_list_lol_subremove(ta_number,
++ device_p->handle,
++ &file.inode, &member);
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return err;
++}
++
++/* rsbac_mac_clear_truset */
++/* Remove all set members from a sublist. Set behaviour: Returns no error, */
++/* if list is empty. */
++
++int rsbac_mac_clear_p_truset(rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid)
++{
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_clear_p_truset(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_clear_p_truset(): called from interrupt!\n");
++ }
++ return rsbac_ta_list_lol_remove(ta_number, process_handle, &pid);
++}
++
++int rsbac_mac_clear_f_truset(rsbac_list_ta_number_t ta_number,
++ rsbac_mac_file_t file)
++{
++ int err = 0;
++ struct rsbac_mac_device_list_item_t *device_p;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_clear_f_truset(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_clear_f_truset(): called from interrupt!\n");
++ }
++ /* protect device list */
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = lookup_device(file.device);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_clear_f_truset(): invalid device %02u:%02u!\n",
++ RSBAC_MAJOR(file.device),
++ RSBAC_MINOR(file.device));
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ err = rsbac_ta_list_lol_remove(ta_number,
++ device_p->handle,
++ &file.inode);
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return err;
++}
++
++/* rsbac_mac_truset_member */
++/* Return truth value, whether member is in set */
++
++rsbac_boolean_t rsbac_mac_p_truset_member(rsbac_pid_t pid,
++ rsbac_uid_t member)
++{
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_p_truset_member(): RSBAC not initialized\n");
++ return FALSE;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_p_truset_member(): called from interrupt!\n");
++ }
++ if (rsbac_list_lol_subexist(process_handle, &pid, &member))
++ return TRUE;
++ member = RSBAC_ALL_USERS;
++ return rsbac_list_lol_subexist(process_handle, &pid, &member);
++}
++
++/* rsbac_mac_remove_truset */
++/* Remove a full set. For cleanup, if object is deleted. */
++/* To empty an existing set use rsbac_mac_clear_truset. */
++
++int rsbac_mac_remove_p_trusets(rsbac_pid_t pid)
++{
++ return rsbac_mac_clear_p_truset(FALSE, pid);
++}
++
++int rsbac_mac_remove_f_trusets(rsbac_mac_file_t file)
++{
++ return rsbac_mac_clear_f_truset(FALSE, file);
++}
++
++int rsbac_mac_copy_fp_truset(rsbac_mac_file_t file,
++ rsbac_pid_t p_tru_set_id)
++{
++ struct rsbac_mac_device_list_item_t *device_p;
++ int err = 0;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_copy_fp_truset(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_copy_fp_truset(): called from interrupt!\n");
++ }
++/*
++ rsbac_pr_debug(ds_mac, "Copying file cap set data to process cap set\n");
++*/
++ /* protect device list */
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = lookup_device(file.device);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_copy_fp_truset(): invalid device %02u:%02u!\n",
++ RSBAC_MAJOR(file.device),
++ RSBAC_MINOR(file.device));
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ /* call the copy function */
++ err = copy_fp_tru_set_item(device_p, file, p_tru_set_id);
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return err;
++}
++
++int rsbac_mac_copy_pp_truset(rsbac_pid_t old_p_set_id,
++ rsbac_pid_t new_p_set_id)
++{
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_copy_pp_truset(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_copy_pp_truset(): called from interrupt!\n");
++ }
++/*
++ rsbac_pr_debug(ds_mac, "Copying process cap set data to process cap set\n");
++*/
++ /* call the copy function */
++ return copy_pp_tru_set_item(old_p_set_id, new_p_set_id);
++}
++
++int rsbac_mac_get_f_trulist(rsbac_list_ta_number_t ta_number,
++ rsbac_mac_file_t file,
++ rsbac_uid_t ** trulist_p,
++ rsbac_time_t ** ttllist_p)
++{
++ struct rsbac_mac_device_list_item_t *device_p;
++ long count;
++ int srcu_idx;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_get_f_trulist(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_get_f_trulist(): called from interrupt!\n");
++ }
++/*
++ rsbac_pr_debug(ds_mac, "Getting file/dir trusted user set list\n");
++*/
++ /* protect device list */
++ srcu_idx = srcu_read_lock(&device_list_srcu);
++ device_p = lookup_device(file.device);
++ if (!device_p) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_get_f_trulist(): invalid device %02u:%02u!\n",
++ RSBAC_MAJOR(file.device),
++ RSBAC_MINOR(file.device));
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return -RSBAC_EINVALIDDEV;
++ }
++ count = rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ device_p->handle,
++ &file.inode,
++ (void **) trulist_p,
++ ttllist_p);
++ srcu_read_unlock(&device_list_srcu, srcu_idx);
++ return count;
++}
++
++int rsbac_mac_get_p_trulist(rsbac_list_ta_number_t ta_number,
++ rsbac_pid_t pid,
++ rsbac_uid_t ** trulist_p,
++ rsbac_time_t ** ttllist_p)
++{
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_get_p_trulist(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_mac_get_p_trulist(): called from interrupt!\n");
++ }
++/*
++ rsbac_pr_debug(ds_mac, "Getting process trusted user set list\n");
++*/
++ return rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ process_handle,
++ &pid,
++ (void **) trulist_p,
++ ttllist_p);
++}
+diff --git a/rsbac/data_structures/pm_data_structures.c b/rsbac/data_structures/pm_data_structures.c
+new file mode 100644
+index 0000000..28660a8
+--- /dev/null
++++ b/rsbac/data_structures/pm_data_structures.c
+@@ -0,0 +1,2717 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of PM data structures */
++/* Author and (c) 1999-2009: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 15/Oct/2009 */
++/*************************************************** */
++
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <asm/uaccess.h>
++#include <linux/file.h>
++#include <linux/init.h>
++#include <rsbac/types.h>
++#include <rsbac/pm_types.h>
++#include <rsbac/pm_data_structures.h>
++#include <rsbac/getname.h>
++#include <rsbac/pm_getname.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/fs.h>
++#include <rsbac/adf.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/debug.h>
++#include <rsbac/proc_fs.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/lists.h>
++#include <linux/string.h>
++#include <linux/seq_file.h>
++#include <linux/module.h>
++
++/************************************************************************** */
++/* Global Variables */
++/************************************************************************** */
++
++/* The following global variables are needed for access to PM data. */
++
++static rsbac_list_handle_t task_set_handle = NULL;
++static rsbac_list_handle_t tp_set_handle = NULL;
++static rsbac_list_handle_t ru_set_handle = NULL;
++static rsbac_list_handle_t pp_set_handle = NULL;
++static rsbac_list_handle_t in_pp_set_handle = NULL;
++static rsbac_list_handle_t out_pp_set_handle = NULL;
++
++static rsbac_list_handle_t task_handle = NULL;
++static rsbac_list_handle_t class_handle = NULL;
++static rsbac_list_handle_t na_handle = NULL;
++static rsbac_list_handle_t cs_handle = NULL;
++static rsbac_list_handle_t tp_handle = NULL;
++static rsbac_list_handle_t pp_handle = NULL;
++static rsbac_list_handle_t tkt_handle = NULL;
++
++/**************************************************/
++/* Declarations of external functions */
++/**************************************************/
++
++int sys_write(u_int, char *, u_int);
++
++/**************************************************/
++/* Declarations of internal functions */
++/**************************************************/
++
++/* As some function use later defined functions, we declare those here. */
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++
++/************************************************* */
++/* proc functions */
++/************************************************* */
++
++#if defined(CONFIG_RSBAC_PROC)
++static int
++stats_pm_proc_show(struct seq_file *m, void *v)
++{
++ u_long tmp_count;
++ u_long tmp_member_count;
++ u_long all_set_count = 0;
++ u_long all_member_count = 0;
++ u_long all_count = 0;
++
++#if !defined(CONFIG_RSBAC_MAINT)
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++ if (!rsbac_is_initialized()) {
++		rsbac_printk(KERN_WARNING "stats_pm_proc_show(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++#if !defined(CONFIG_RSBAC_MAINT)
++ rsbac_pr_debug(aef_pm, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++#if defined(CONFIG_RSBAC_SOFTMODE)
++ if (!rsbac_softmode)
++#endif
++ return -EPERM;
++ }
++#endif
++
++ seq_printf(m, "PM Status\n---------\n");
++
++/****************/
++/* Helper lists */
++/****************/
++
++ tmp_count = rsbac_list_lol_count(task_set_handle);
++ tmp_member_count = rsbac_list_lol_all_subcount(task_set_handle);
++ seq_printf(m,
++ "%lu task-set-items, sum of %lu members\n", tmp_count,
++ tmp_member_count);
++ all_set_count += tmp_count;
++ all_member_count += tmp_member_count;
++
++ tmp_count = rsbac_list_lol_count(tp_set_handle);
++ tmp_member_count = rsbac_list_lol_all_subcount(tp_set_handle);
++ seq_printf(m, "%lu tp-set-items, sum of %lu members\n",
++ tmp_count, tmp_member_count);
++ all_set_count += tmp_count;
++ all_member_count += tmp_member_count;
++
++ tmp_count = rsbac_list_lol_count(ru_set_handle);
++ tmp_member_count = rsbac_list_lol_all_subcount(ru_set_handle);
++ seq_printf(m, "%lu ru-set-items, sum of %lu members\n",
++ tmp_count, tmp_member_count);
++ all_set_count += tmp_count;
++ all_member_count += tmp_member_count;
++
++ tmp_count = rsbac_list_lol_count(pp_set_handle);
++ tmp_member_count = rsbac_list_lol_all_subcount(pp_set_handle);
++ seq_printf(m, "%lu pp-set-items, sum of %lu members\n",
++ tmp_count, tmp_member_count);
++ all_set_count += tmp_count;
++ all_member_count += tmp_member_count;
++
++ tmp_count = rsbac_list_lol_count(in_pp_set_handle);
++ tmp_member_count = rsbac_list_lol_all_subcount(in_pp_set_handle);
++ seq_printf(m,
++ "%lu in_pp-set-items, sum of %lu members\n", tmp_count,
++ tmp_member_count);
++ all_set_count += tmp_count;
++ all_member_count += tmp_member_count;
++
++ tmp_count = rsbac_list_lol_count(out_pp_set_handle);
++ tmp_member_count = rsbac_list_lol_all_subcount(out_pp_set_handle);
++ seq_printf(m,
++ "%lu out_pp-set-items, sum of %lu members\n",
++ tmp_count, tmp_member_count);
++ all_set_count += tmp_count;
++ all_member_count += tmp_member_count;
++
++ seq_printf(m,
++ "Total of %lu registered rsbac-pm-set-items, %lu members\n",
++ all_set_count, all_member_count);
++
++/**************/
++/* Main lists */
++/**************/
++
++ tmp_count = rsbac_list_count(task_handle);
++ seq_printf(m, "%lu task-items\n", tmp_count);
++ all_count += tmp_count;
++
++ tmp_count = rsbac_list_count(class_handle);
++ seq_printf(m, "%lu class-items\n", tmp_count);
++ all_count += tmp_count;
++
++ tmp_count = rsbac_list_count(na_handle);
++ seq_printf(m, "%lu necessary access items\n",
++ tmp_count);
++ all_count += tmp_count;
++
++ tmp_count = rsbac_list_count(cs_handle);
++ seq_printf(m, "%lu consent items\n", tmp_count);
++ all_count += tmp_count;
++
++ tmp_count = rsbac_list_count(tp_handle);
++ seq_printf(m, "%lu tp items\n", tmp_count);
++ all_count += tmp_count;
++
++ tmp_count = rsbac_list_count(pp_handle);
++ seq_printf(m, "%lu purpose items\n", tmp_count);
++ all_count += tmp_count;
++
++ tmp_count = rsbac_list_count(tkt_handle);
++ seq_printf(m, "%lu tkt items\n", tmp_count);
++ all_count += tmp_count;
++
++ seq_printf(m,
++ "Total of %lu registered rsbac-pm-items\n", all_count);
++ return 0;
++}
++
++static int stats_pm_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, stats_pm_proc_show, NULL);
++}
++
++static const struct file_operations stats_pm_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = stats_pm_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *stats_pm;
++
++/* list_proc_read() */
++/* Generic readable list generation function */
++static int pm_list_proc_read(char *buffer, char **start, off_t offset,
++ int length, int *eof, void *data)
++{
++ int len = 0;
++ off_t pos = 0;
++ off_t begin = 0;
++ long count;
++ long subcount;
++ u_long i, j;
++	enum rsbac_pm_all_list_t list;
++
++ if (!rsbac_is_initialized())
++ return (-ENOSYS);
++	list = (enum rsbac_pm_all_list_t) (u_long) data;
++
++#if !defined(CONFIG_RSBAC_MAINT)
++ /* access control */
++#if defined(CONFIG_RSBAC_SWITCH_PM)
++ if (rsbac_switch_pm)
++#endif
++ {
++ int error;
++ union rsbac_target_id_t tid;
++ union rsbac_attribute_value_t attr_val;
++
++ rsbac_get_owner(&tid.user);
++ error =
++ rsbac_get_attr(SW_PM, T_USER, tid, A_pm_role, &attr_val,
++ TRUE);
++ if (error) {
++ char *tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ get_error_name(tmp, error);
++ rsbac_printk(KERN_WARNING "pm_list_proc_read(): rsbac_get_attr() for pm_role returned error %s",
++ tmp);
++ rsbac_kfree(tmp);
++ }
++ return (error); /* something weird happened */
++ }
++ if ((attr_val.pm_role != PR_security_officer)
++ && (attr_val.pm_role != PR_data_protection_officer)
++ && (attr_val.pm_role != PR_tp_manager)) {
++ char *tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ get_pm_all_list_name(tmp, list);
++ rsbac_printk(KERN_WARNING "pm_list_proc_read(): access to list %s denied\n",
++ tmp);
++ rsbac_kfree(tmp);
++ }
++#if defined(CONFIG_RSBAC_SOFTMODE)
++ if (!rsbac_softmode)
++#endif
++ return (-EPERM);
++ }
++ if ((attr_val.pm_role == PR_tp_manager)
++ && (list != PA_tp)) {
++ rsbac_printk(KERN_WARNING "pm_list_proc_read(): access to list tp denied\n");
++#if defined(CONFIG_RSBAC_SOFTMODE)
++ if (!rsbac_softmode)
++#endif
++ return (-EPERM);
++ }
++ }
++#endif /* !MAINT */
++
++ switch (list) {
++ case PA_task_set:
++ {
++ rsbac_pm_task_set_id_t *set_array;
++ rsbac_pm_task_id_t *member_array;
++
++ count =
++ rsbac_list_lol_get_all_desc(task_set_handle,
++ (void **)
++ &set_array);
++ if (count < 0) {
++ return count;
++ }
++ len +=
++ sprintf(buffer + len, "task-set\tmembers\n");
++ for (i = 0; i < count; i++) {
++ len += sprintf(buffer + len, "%u\t\t",
++ set_array[i]);
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(set_array);
++ goto out;
++ }
++
++ subcount =
++ rsbac_list_lol_get_all_subdesc
++ (task_set_handle, &set_array[i],
++ (void **) &member_array);
++ if (subcount < 0) {
++ rsbac_kfree(set_array);
++ goto out;
++ }
++ for (j = 0; j < subcount; j++) {
++ len += sprintf(buffer + len, "%u ",
++ member_array[j]);
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(set_array);
++ rsbac_kfree(member_array);
++ goto out;
++ }
++ };
++ if (subcount > 0)
++ rsbac_kfree(member_array);
++ len += sprintf(buffer + len, "\n");
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(set_array);
++ goto out;
++ }
++ };
++ if (count > 0)
++ rsbac_kfree(set_array);
++ break;
++ }
++
++ case PA_tp_set:
++ {
++ rsbac_pm_tp_set_id_t *set_array;
++ rsbac_pm_tp_id_t *member_array;
++
++ count =
++ rsbac_list_lol_get_all_desc(tp_set_handle,
++ (void **)
++ &set_array);
++ if (count < 0) {
++ return count;
++ }
++ len +=
++ sprintf(buffer + len, "tp-set\t\tmembers\n");
++ for (i = 0; i < count; i++) {
++ len += sprintf(buffer + len, "%u\t\t",
++ set_array[i]);
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(set_array);
++ goto out;
++ }
++
++ subcount =
++ rsbac_list_lol_get_all_subdesc
++ (tp_set_handle, &set_array[i],
++ (void **) &member_array);
++ if (subcount < 0) {
++ rsbac_kfree(set_array);
++ goto out;
++ }
++ for (j = 0; j < subcount; j++) {
++ len += sprintf(buffer + len, "%u ",
++ member_array[j]);
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(set_array);
++ rsbac_kfree(member_array);
++ goto out;
++ }
++ };
++ if (subcount > 0)
++ rsbac_kfree(member_array);
++ len += sprintf(buffer + len, "\n");
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(set_array);
++ goto out;
++ }
++ };
++ if (count > 0)
++ rsbac_kfree(set_array);
++ break;
++ }
++
++ case PA_ru_set:
++ {
++ rsbac_pm_ru_set_id_t *set_array;
++ rsbac_uid_t *member_array;
++
++ count =
++ rsbac_list_lol_get_all_desc(ru_set_handle,
++ (void **)
++ &set_array);
++ if (count < 0) {
++ return count;
++ }
++ len +=
++ sprintf(buffer + len, "ru-set\t\tmembers\n");
++ for (i = 0; i < count; i++) {
++ len += sprintf(buffer + len, "%u\t\t",
++ set_array[i]);
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(set_array);
++ goto out;
++ }
++
++ subcount =
++ rsbac_list_lol_get_all_subdesc
++ (ru_set_handle, &set_array[i],
++ (void **) &member_array);
++ if (subcount < 0) {
++ rsbac_kfree(set_array);
++ goto out;
++ }
++ for (j = 0; j < subcount; j++) {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(member_array[j]))
++ len += sprintf(buffer + len, "%u/%u ",
++ RSBAC_UID_SET(member_array[j]),
++ RSBAC_UID_NUM(member_array[j]));
++ else
++#endif
++ len += sprintf(buffer + len, "%u ",
++ RSBAC_UID_NUM(member_array[j]));
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(set_array);
++ rsbac_kfree(member_array);
++ goto out;
++ }
++ };
++ if (subcount > 0)
++ rsbac_kfree(member_array);
++ len += sprintf(buffer + len, "\n");
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(set_array);
++ goto out;
++ }
++ };
++ if (count > 0)
++ rsbac_kfree(set_array);
++ break;
++ }
++
++ case PA_pp_set:
++ {
++ rsbac_pm_pp_set_id_t *set_array;
++ rsbac_pm_purpose_id_t *member_array;
++
++ count =
++ rsbac_list_lol_get_all_desc(pp_set_handle,
++ (void **)
++ &set_array);
++ if (count < 0) {
++ return count;
++ }
++ len +=
++ sprintf(buffer + len, "pp-set\t\tmembers\n");
++ for (i = 0; i < count; i++) {
++ len += sprintf(buffer + len, "%u\t\t",
++ set_array[i]);
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(set_array);
++ goto out;
++ }
++
++ subcount =
++ rsbac_list_lol_get_all_subdesc
++ (pp_set_handle, &set_array[i],
++ (void **) &member_array);
++ if (subcount < 0) {
++ rsbac_kfree(set_array);
++ goto out;
++ }
++ for (j = 0; j < subcount; j++) {
++ len += sprintf(buffer + len, "%u ",
++ member_array[j]);
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(set_array);
++ rsbac_kfree(member_array);
++ goto out;
++ }
++ };
++ if (subcount > 0)
++ rsbac_kfree(member_array);
++ len += sprintf(buffer + len, "\n");
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(set_array);
++ goto out;
++ }
++ };
++ if (count > 0)
++ rsbac_kfree(set_array);
++ break;
++ }
++
++ case PA_in_pp_set:
++ {
++ rsbac_pm_in_pp_set_id_t *set_array;
++ rsbac_pm_purpose_id_t *member_array;
++
++ count =
++ rsbac_list_lol_get_all_desc(in_pp_set_handle,
++ (void **)
++ &set_array);
++ if (count < 0) {
++ return count;
++ }
++
++ len +=
++ sprintf(buffer + len, "in-pp-set\tmembers\n");
++ for (i = 0; i < count; i++) {
++ len += sprintf(buffer + len, "%u\t\t",
++ pid_vnr(set_array[i]));
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(set_array);
++ goto out;
++ }
++
++ subcount =
++ rsbac_list_lol_get_all_subdesc
++ (in_pp_set_handle, &set_array[i],
++ (void **) &member_array);
++ if (subcount < 0) {
++ rsbac_kfree(set_array);
++ goto out;
++ }
++ for (j = 0; j < subcount; j++) {
++ len += sprintf(buffer + len, "%u ",
++ member_array[j]);
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(set_array);
++ rsbac_kfree(member_array);
++ goto out;
++ }
++ };
++ if (subcount > 0)
++ rsbac_kfree(member_array);
++ len += sprintf(buffer + len, "\n");
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(set_array);
++ goto out;
++ }
++ };
++ if (count > 0)
++ rsbac_kfree(set_array);
++ break;
++ }
++
++ case PA_out_pp_set:
++ {
++ rsbac_pm_out_pp_set_id_t *set_array;
++ rsbac_pm_purpose_id_t *member_array;
++
++ count =
++ rsbac_list_lol_get_all_desc(out_pp_set_handle,
++ (void **)
++ &set_array);
++ if (count < 0) {
++ return count;
++ }
++
++ len +=
++ sprintf(buffer + len, "out-pp-set\tmembers\n");
++ for (i = 0; i < count; i++) {
++ len += sprintf(buffer + len, "%u\t\t",
++ pid_vnr(set_array[i]));
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(set_array);
++ goto out;
++ }
++
++ subcount =
++ rsbac_list_lol_get_all_subdesc
++ (out_pp_set_handle, &set_array[i],
++ (void **) &member_array);
++ if (subcount < 0) {
++ rsbac_kfree(set_array);
++ goto out;
++ }
++ for (j = 0; j < subcount; j++) {
++ len += sprintf(buffer + len, "%u ",
++ member_array[j]);
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(set_array);
++ rsbac_kfree(member_array);
++ goto out;
++ }
++ };
++ if (subcount > 0)
++ rsbac_kfree(member_array);
++ len += sprintf(buffer + len, "\n");
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(set_array);
++ goto out;
++ }
++ };
++ if (count > 0)
++ rsbac_kfree(set_array);
++ break;
++ }
++
++/***********/
++
++ case PA_task:
++ {
++ rsbac_pm_task_id_t *desc_array;
++
++ count =
++ rsbac_list_get_all_desc(task_handle,
++ (void **) &desc_array);
++ if (count < 0) {
++ return count;
++ }
++
++ len += sprintf(buffer + len, "task-id\n");
++ for (i = 0; i < count; i++) {
++ len += sprintf(buffer + len, "%u\n",
++ desc_array[i]);
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(desc_array);
++ goto out;
++ }
++ };
++ if (count > 0)
++ rsbac_kfree(desc_array);
++ break;
++ }
++
++ case PA_class:
++ {
++ rsbac_pm_object_class_id_t *desc_array;
++
++ count =
++ rsbac_list_get_all_desc(class_handle,
++ (void **) &desc_array);
++ if (count < 0) {
++ return count;
++ }
++
++ len += sprintf(buffer + len, "class-id\n");
++ for (i = 0; i < count; i++) {
++ len += sprintf(buffer + len, "%u\n",
++ desc_array[i]);
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(desc_array);
++ goto out;
++ }
++ };
++ if (count > 0)
++ rsbac_kfree(desc_array);
++ break;
++ }
++
++ case PA_na:
++ {
++ struct rsbac_pm_na_data_t *data_array;
++
++ count =
++ rsbac_list_get_all_data(na_handle,
++ (void **) &data_array);
++ if (count < 0) {
++ return count;
++ }
++ len +=
++ sprintf(buffer + len,
++ "task\tclass\ttp\taccesses\n");
++ for (i = 0; i < count; i++) {
++ len +=
++ sprintf(buffer + len,
++ "%u\t%u\t%u\t%u\n",
++ data_array[i].task,
++ data_array[i].object_class,
++ data_array[i].tp,
++ data_array[i].accesses);
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(data_array);
++ goto out;
++ }
++ };
++ if (count > 0)
++ rsbac_kfree(data_array);
++ break;
++ }
++
++ case PA_cs:
++ {
++ struct rsbac_pm_cs_id_t *desc_array;
++
++ count =
++ rsbac_list_get_all_desc(cs_handle,
++ (void **) &desc_array);
++ if (count < 0) {
++ return count;
++ }
++ len +=
++ sprintf(buffer + len,
++ "purpose\tdevice\tinode\n");
++ for (i = 0; i < count; i++) {
++ len +=
++ sprintf(buffer + len,
++					    "%u\t%02u:%02u\t%u\n",
++ desc_array[i].purpose,
++ RSBAC_MAJOR(desc_array[i].file.
++ device),
++ RSBAC_MINOR(desc_array[i].file.
++ device),
++ desc_array[i].file.inode);
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(desc_array);
++ goto out;
++ }
++ };
++ if (count > 0)
++ rsbac_kfree(desc_array);
++ break;
++ }
++
++ case PA_tp:
++ {
++ rsbac_pm_tp_id_t *desc_array;
++
++ count =
++ rsbac_list_get_all_desc(tp_handle,
++ (void **) &desc_array);
++ if (count < 0) {
++ return count;
++ }
++
++ len += sprintf(buffer + len, "tp-id\n");
++ for (i = 0; i < count; i++) {
++ len += sprintf(buffer + len, "%u\n",
++ desc_array[i]);
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(desc_array);
++ goto out;
++ }
++ };
++ if (count > 0)
++ rsbac_kfree(desc_array);
++ break;
++ }
++
++ case PA_pp:
++ {
++ struct rsbac_pm_pp_data_t *data_array;
++
++ count =
++ rsbac_list_get_all_data(pp_handle,
++ (void **) &data_array);
++ if (count < 0) {
++ return count;
++ }
++ len +=
++ sprintf(buffer + len, "purpose\tdef-class\n");
++ for (i = 0; i < count; i++) {
++ len += sprintf(buffer + len, "%u\t%u\n",
++ data_array[i].id,
++ data_array[i].def_class);
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(data_array);
++ goto out;
++ }
++ };
++ if (count > 0)
++ rsbac_kfree(data_array);
++ break;
++ }
++
++ case PA_tkt:
++ {
++ struct rsbac_pm_tkt_data_t *data_array;
++
++ count =
++ rsbac_list_get_all_data(tkt_handle,
++ (void **) &data_array);
++ if (count < 0) {
++ return count;
++ }
++ len +=
++ sprintf(buffer + len,
++ "tkt-id\tvalid-for\tfunction-type\n");
++ for (i = 0; i < count; i++) {
++ char tmp1[RSBAC_MAXNAMELEN];
++ char tmp2[RSBAC_MAXNAMELEN];
++ struct timespec now = CURRENT_TIME;
++
++ tmp2[0] = 0;
++ if (data_array[i].valid_until < now.tv_sec)
++ {
++ strcpy(tmp2,
++ "\t(removed on cleanup)");
++ }
++ len +=
++ sprintf(buffer + len,
++ "%u\t%li\t\t%s%s\n",
++ data_array[i].id,
++ data_array[i].valid_until -
++ now.tv_sec,
++ get_pm_function_type_name(tmp1,
++ data_array
++ [i].
++ function_type),
++ tmp2);
++ pos = begin + len;
++ if (pos < offset) {
++ len = 0;
++ begin = pos;
++ }
++ if (pos > offset + length) {
++ rsbac_kfree(data_array);
++ goto out;
++ }
++ };
++ if (count > 0)
++ rsbac_kfree(data_array);
++ break;
++ }
++
++ default:
++ rsbac_printk(KERN_WARNING "pm_list_proc_read(): access to unknown list %i\n",
++ list);
++ return (-RSBAC_EINVALIDTARGET);
++ }
++
++ out:
++ if (len <= offset + length)
++ *eof = 1;
++ *start = buffer + (offset - begin);
++ len -= (offset - begin);
++
++ if (len > length)
++ len = length;
++ return len;
++}; /* end of pm_list_proc_read */
++
++#endif /* CONFIG_PROC_FS && CONFIG_RSBAC_PROC */
++
++/************************************************* */
++/* Init functions */
++/************************************************* */
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static void registration_error(int err, char *listname)
++#else
++static void __init registration_error(int err, char *listname)
++#endif
++{
++ if (err) {
++ char *tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_init_pm(): Registering PM %s list failed with error %s\n",
++ listname, get_error_name(tmp, err));
++ rsbac_kfree(tmp);
++ }
++ }
++}
++
++/* All functions return 0, if no error occurred, and a negative error code */
++/* otherwise. The error codes are defined in rsbac/error.h. */
++
++/************************************************************************** */
++/* Initialization of all PM data structures. After this call, all PM data */
++/* is kept in memory for performance reasons, but is written to disk on */
++/* every change. */
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++int rsbac_init_pm(void)
++#else
++int __init rsbac_init_pm(void)
++#endif
++{
++ int err = 0;
++ struct proc_dir_entry *tmp_entry_p;
++ struct proc_dir_entry *pm_entry_p;
++ struct rsbac_list_lol_info_t lol_info;
++ struct rsbac_list_info_t list_info;
++
++ if (rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_init_pm(): RSBAC already initialized\n");
++ return (-RSBAC_EREINIT);
++ }
++
++ rsbac_printk(KERN_INFO "rsbac_init_pm(): Initializing RSBAC: PM subsystem\n");
++
++/* Helper lists */
++ lol_info.version = RSBAC_PM_TASK_SET_LIST_VERSION;
++ lol_info.key = RSBAC_PM_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_pm_task_set_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_pm_task_id_t);
++ lol_info.subdata_size = 0;
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register(RSBAC_LIST_VERSION,
++ &task_set_handle,
++ &lol_info,
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_BACKUP,
++ NULL,
++ NULL, NULL, NULL,
++ NULL, NULL,
++ RSBAC_PM_TASK_SET_LIST_NAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "task set");
++ return err;
++ }
++
++ lol_info.version = RSBAC_PM_TP_SET_LIST_VERSION;
++ lol_info.key = RSBAC_PM_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_pm_tp_set_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_pm_tp_id_t);
++ lol_info.subdata_size = 0;
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register(RSBAC_LIST_VERSION,
++ &tp_set_handle,
++ &lol_info,
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_BACKUP,
++ NULL,
++ NULL, NULL, NULL,
++ NULL, NULL,
++ RSBAC_PM_TP_SET_LIST_NAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "tp set");
++ return err;
++ }
++
++ lol_info.version = RSBAC_PM_RU_SET_LIST_VERSION;
++ lol_info.key = RSBAC_PM_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_pm_ru_set_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_uid_t);
++ lol_info.subdata_size = 0;
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register(RSBAC_LIST_VERSION,
++ &ru_set_handle,
++ &lol_info,
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_BACKUP,
++ NULL,
++ NULL, NULL, NULL,
++ NULL, NULL,
++ RSBAC_PM_RU_SET_LIST_NAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "ru set");
++ return err;
++ }
++
++ lol_info.version = RSBAC_PM_PP_SET_LIST_VERSION;
++ lol_info.key = RSBAC_PM_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_pm_pp_set_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_pm_purpose_id_t);
++ lol_info.subdata_size = 0;
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register(RSBAC_LIST_VERSION,
++ &pp_set_handle,
++ &lol_info,
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_BACKUP,
++ NULL,
++ NULL, NULL, NULL,
++ NULL, NULL,
++ RSBAC_PM_PP_SET_LIST_NAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "pp set");
++ return err;
++ }
++
++ lol_info.version = RSBAC_PM_NO_VERSION;
++ lol_info.key = RSBAC_PM_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_pm_in_pp_set_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_pm_purpose_id_t);
++ lol_info.subdata_size = 0;
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register(RSBAC_LIST_VERSION,
++ &in_pp_set_handle,
++ &lol_info,
++ 0,
++ NULL,
++ NULL,
++ NULL,
++ NULL,
++ NULL,
++ NULL,
++ RSBAC_PM_IN_PP_SET_LIST_NAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "in_pp set");
++ return err;
++ }
++
++ lol_info.version = RSBAC_PM_NO_VERSION;
++ lol_info.key = RSBAC_PM_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_pm_out_pp_set_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_pm_purpose_id_t);
++ lol_info.subdata_size = 0;
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register(RSBAC_LIST_VERSION,
++ &out_pp_set_handle,
++ &lol_info,
++ 0,
++ NULL,
++ NULL,
++ NULL,
++ NULL,
++ NULL,
++ NULL,
++ RSBAC_PM_OUT_PP_SET_LIST_NAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "out_pp set");
++ return err;
++ }
++
++/* Main lists */
++ list_info.version = RSBAC_PM_TASK_LIST_VERSION;
++ list_info.key = RSBAC_PM_LIST_KEY;
++ list_info.desc_size = sizeof(rsbac_pm_task_id_t);
++ list_info.data_size = sizeof(struct rsbac_pm_task_data_t);
++ list_info.max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &task_handle,
++ &list_info,
++ RSBAC_LIST_PERSIST | RSBAC_LIST_BACKUP,
++ NULL,
++ NULL,
++ NULL,
++ RSBAC_PM_TASK_LIST_NAME, RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "task");
++ return err;
++ }
++
++ list_info.version = RSBAC_PM_CLASS_LIST_VERSION;
++ list_info.key = RSBAC_PM_LIST_KEY;
++ list_info.desc_size = sizeof(rsbac_pm_object_class_id_t);
++ list_info.data_size = sizeof(struct rsbac_pm_class_data_t);
++ list_info.max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &class_handle,
++ &list_info,
++ RSBAC_LIST_PERSIST | RSBAC_LIST_BACKUP,
++ NULL,
++ NULL,
++ NULL,
++ RSBAC_PM_CLASS_LIST_NAME,
++ RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "class");
++ return err;
++ }
++
++ list_info.version = RSBAC_PM_NA_LIST_VERSION;
++ list_info.key = RSBAC_PM_LIST_KEY;
++ list_info.desc_size = sizeof(struct rsbac_pm_na_id_t);
++ list_info.data_size = sizeof(struct rsbac_pm_na_data_t);
++ list_info.max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &na_handle,
++ &list_info,
++ RSBAC_LIST_PERSIST | RSBAC_LIST_BACKUP,
++ NULL,
++ NULL,
++ NULL,
++ RSBAC_PM_NA_LIST_NAME, RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "na");
++ return err;
++ }
++
++ list_info.version = RSBAC_PM_CS_LIST_VERSION;
++ list_info.key = RSBAC_PM_LIST_KEY;
++ list_info.desc_size = sizeof(struct rsbac_pm_cs_id_t);
++ list_info.data_size = sizeof(struct rsbac_pm_cs_data_t);
++ list_info.max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &cs_handle,
++ &list_info,
++ RSBAC_LIST_PERSIST | RSBAC_LIST_BACKUP,
++ NULL,
++ NULL,
++ NULL,
++ RSBAC_PM_CS_LIST_NAME, RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "cs");
++ return err;
++ }
++
++ list_info.version = RSBAC_PM_TP_LIST_VERSION;
++ list_info.key = RSBAC_PM_LIST_KEY;
++ list_info.desc_size = sizeof(rsbac_pm_tp_id_t);
++ list_info.data_size = sizeof(struct rsbac_pm_tp_data_t);
++ list_info.max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &tp_handle,
++ &list_info,
++ RSBAC_LIST_PERSIST | RSBAC_LIST_BACKUP,
++ NULL,
++ NULL,
++ NULL,
++ RSBAC_PM_TP_LIST_NAME, RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "tp");
++ return err;
++ }
++
++ list_info.version = RSBAC_PM_PP_LIST_VERSION;
++ list_info.key = RSBAC_PM_LIST_KEY;
++ list_info.desc_size = sizeof(rsbac_pm_purpose_id_t);
++ list_info.data_size = sizeof(struct rsbac_pm_pp_data_t);
++ list_info.max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &pp_handle,
++ &list_info,
++ RSBAC_LIST_PERSIST | RSBAC_LIST_BACKUP,
++ NULL,
++ NULL,
++ NULL,
++ RSBAC_PM_PP_LIST_NAME, RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "pp");
++ return err;
++ }
++
++ list_info.version = RSBAC_PM_TKT_LIST_VERSION;
++ list_info.key = RSBAC_PM_LIST_KEY;
++ list_info.desc_size = sizeof(rsbac_pm_tkt_id_t);
++ list_info.data_size = sizeof(struct rsbac_pm_tkt_data_t);
++ list_info.max_age = 0;
++ err = rsbac_list_register(RSBAC_LIST_VERSION,
++ &tkt_handle,
++ &list_info,
++ RSBAC_LIST_PERSIST | RSBAC_LIST_BACKUP,
++ NULL,
++ NULL,
++ NULL,
++ RSBAC_PM_TKT_LIST_NAME, RSBAC_AUTO_DEV);
++ if (err) {
++ registration_error(err, "tkt");
++ return err;
++ }
++#if defined(CONFIG_RSBAC_PROC)
++ stats_pm = proc_create(RSBAC_PM_PROC_STATS_NAME,
++ S_IFREG | S_IRUGO,
++ proc_rsbac_root_p, &stats_pm_proc_fops);
++
++ pm_entry_p = create_proc_entry(RSBAC_PM_PROC_DIR_NAME,
++ S_IFDIR | S_IRUGO | S_IXUGO,
++ proc_rsbac_root_p);
++ if (pm_entry_p) {
++ tmp_entry_p =
++ create_proc_entry(RSBAC_PM_TASK_SET_LIST_PROC_NAME,
++ S_IFREG | S_IRUGO, pm_entry_p);
++ if (tmp_entry_p) {
++ tmp_entry_p->read_proc = pm_list_proc_read;
++ tmp_entry_p->data = (void *) PA_task_set;
++ }
++ tmp_entry_p =
++ create_proc_entry(RSBAC_PM_TP_SET_LIST_PROC_NAME,
++ S_IFREG | S_IRUGO, pm_entry_p);
++ if (tmp_entry_p) {
++ tmp_entry_p->read_proc = pm_list_proc_read;
++ tmp_entry_p->data = (void *) PA_tp_set;
++ }
++ tmp_entry_p =
++ create_proc_entry(RSBAC_PM_RU_SET_LIST_PROC_NAME,
++ S_IFREG | S_IRUGO, pm_entry_p);
++ if (tmp_entry_p) {
++ tmp_entry_p->read_proc = pm_list_proc_read;
++ tmp_entry_p->data = (void *) PA_ru_set;
++ }
++ tmp_entry_p =
++ create_proc_entry(RSBAC_PM_PP_SET_LIST_PROC_NAME,
++ S_IFREG | S_IRUGO, pm_entry_p);
++ if (tmp_entry_p) {
++ tmp_entry_p->read_proc = pm_list_proc_read;
++ tmp_entry_p->data = (void *) PA_pp_set;
++ }
++ tmp_entry_p =
++ create_proc_entry(RSBAC_PM_IN_PP_SET_LIST_PROC_NAME,
++ S_IFREG | S_IRUGO, pm_entry_p);
++ if (tmp_entry_p) {
++ tmp_entry_p->read_proc = pm_list_proc_read;
++ tmp_entry_p->data = (void *) PA_in_pp_set;
++ }
++ tmp_entry_p =
++ create_proc_entry(RSBAC_PM_OUT_PP_SET_LIST_PROC_NAME,
++ S_IFREG | S_IRUGO, pm_entry_p);
++ if (tmp_entry_p) {
++ tmp_entry_p->read_proc = pm_list_proc_read;
++ tmp_entry_p->data = (void *) PA_out_pp_set;
++ }
++
++ tmp_entry_p =
++ create_proc_entry(RSBAC_PM_TASK_LIST_PROC_NAME,
++ S_IFREG | S_IRUGO, pm_entry_p);
++ if (tmp_entry_p) {
++ tmp_entry_p->read_proc = pm_list_proc_read;
++ tmp_entry_p->data = (void *) PA_task;
++ }
++ tmp_entry_p =
++ create_proc_entry(RSBAC_PM_CLASS_LIST_PROC_NAME,
++ S_IFREG | S_IRUGO, pm_entry_p);
++ if (tmp_entry_p) {
++ tmp_entry_p->read_proc = pm_list_proc_read;
++ tmp_entry_p->data = (void *) PA_class;
++ }
++ tmp_entry_p = create_proc_entry(RSBAC_PM_NA_LIST_PROC_NAME,
++ S_IFREG | S_IRUGO,
++ pm_entry_p);
++ if (tmp_entry_p) {
++ tmp_entry_p->read_proc = pm_list_proc_read;
++ tmp_entry_p->data = (void *) PA_na;
++ }
++ tmp_entry_p = create_proc_entry(RSBAC_PM_CS_LIST_PROC_NAME,
++ S_IFREG | S_IRUGO,
++ pm_entry_p);
++ if (tmp_entry_p) {
++ tmp_entry_p->read_proc = pm_list_proc_read;
++ tmp_entry_p->data = (void *) PA_cs;
++ }
++ tmp_entry_p = create_proc_entry(RSBAC_PM_TP_LIST_PROC_NAME,
++ S_IFREG | S_IRUGO,
++ pm_entry_p);
++ if (tmp_entry_p) {
++ tmp_entry_p->read_proc = pm_list_proc_read;
++ tmp_entry_p->data = (void *) PA_tp;
++ }
++ tmp_entry_p = create_proc_entry(RSBAC_PM_PP_LIST_PROC_NAME,
++ S_IFREG | S_IRUGO,
++ pm_entry_p);
++ if (tmp_entry_p) {
++ tmp_entry_p->read_proc = pm_list_proc_read;
++ tmp_entry_p->data = (void *) PA_pp;
++ }
++ tmp_entry_p =
++ create_proc_entry(RSBAC_PM_TKT_LIST_PROC_NAME,
++ S_IFREG | S_IRUGO, pm_entry_p);
++ if (tmp_entry_p) {
++ tmp_entry_p->read_proc = pm_list_proc_read;
++ tmp_entry_p->data = (void *) PA_tkt;
++ }
++ }
++
++#endif
++ rsbac_pr_debug(ds_pm, "Ready.\n");
++ return (err);
++};
++
++/***************************************************/
++/* We also need some status information... */
++
++int rsbac_stats_pm(void)
++{
++ u_long tmp_count;
++ u_long tmp_member_count;
++ u_long all_set_count = 0;
++ u_long all_member_count = 0;
++ u_long all_count = 0;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_stats_pm(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++
++/****************/
++/* Helper lists */
++/****************/
++
++ tmp_count = rsbac_list_lol_count(task_set_handle);
++ tmp_member_count = rsbac_list_lol_all_subcount(task_set_handle);
++ rsbac_printk(KERN_INFO "rsbac_stats_pm(): %lu task-set-items, sum of %lu members\n",
++ tmp_count, tmp_member_count);
++ all_set_count += tmp_count;
++ all_member_count += tmp_member_count;
++
++ tmp_count = rsbac_list_lol_count(tp_set_handle);
++ tmp_member_count = rsbac_list_lol_all_subcount(tp_set_handle);
++ rsbac_printk(KERN_INFO "rsbac_stats_pm(): %lu tp set items, sum of %lu members\n",
++ tmp_count, tmp_member_count);
++ all_set_count += tmp_count;
++ all_member_count += tmp_member_count;
++
++ tmp_count = rsbac_list_lol_count(ru_set_handle);
++ tmp_member_count = rsbac_list_lol_all_subcount(ru_set_handle);
++ rsbac_printk(KERN_INFO "rsbac_stats_pm(): %lu ru set items, sum of %lu members\n",
++ tmp_count, tmp_member_count);
++ all_set_count += tmp_count;
++ all_member_count += tmp_member_count;
++
++ tmp_count = rsbac_list_lol_count(pp_set_handle);
++ tmp_member_count = rsbac_list_lol_all_subcount(pp_set_handle);
++ rsbac_printk(KERN_INFO "rsbac_stats_pm(): %lu pp set items, sum of %lu members\n",
++ tmp_count, tmp_member_count);
++ all_set_count += tmp_count;
++ all_member_count += tmp_member_count;
++
++ tmp_count = rsbac_list_lol_count(in_pp_set_handle);
++ tmp_member_count = rsbac_list_lol_all_subcount(in_pp_set_handle);
++ rsbac_printk(KERN_INFO "rsbac_stats_pm(): %lu input purpose set items, sum of %lu members\n",
++ tmp_count, tmp_member_count);
++ all_set_count += tmp_count;
++ all_member_count += tmp_member_count;
++
++ tmp_count = rsbac_list_lol_count(out_pp_set_handle);
++ tmp_member_count = rsbac_list_lol_all_subcount(out_pp_set_handle);
++ rsbac_printk(KERN_INFO "rsbac_stats_pm(): %lu output purpose set items, sum of %lu members\n",
++ tmp_count, tmp_member_count);
++ all_set_count += tmp_count;
++ all_member_count += tmp_member_count;
++
++ rsbac_printk(KERN_INFO "rsbac_stats_pm(): Total of %lu registered rsbac-pm-set-items, %lu members\n",
++ all_set_count, all_member_count);
++
++/**************/
++/* Main lists */
++/**************/
++ tmp_count = rsbac_list_count(task_handle);
++ rsbac_printk(KERN_INFO "rsbac_stats_pm(): %lu task items\n",
++ tmp_count);
++ all_count += tmp_count;
++
++ tmp_count = rsbac_list_count(class_handle);
++ rsbac_printk(KERN_INFO "rsbac_stats_pm(): %lu class items\n",
++ tmp_count);
++ all_count += tmp_count;
++
++ tmp_count = rsbac_list_count(na_handle);
++ rsbac_printk(KERN_INFO "rsbac_stats_pm(): %lu na items\n",
++ tmp_count);
++ all_count += tmp_count;
++
++ tmp_count = rsbac_list_count(cs_handle);
++ rsbac_printk(KERN_INFO "rsbac_stats_pm(): %lu cs items\n",
++ tmp_count);
++ all_count += tmp_count;
++
++ tmp_count = rsbac_list_count(tp_handle);
++ rsbac_printk(KERN_INFO "rsbac_stats_pm(): %lu tp items\n",
++ tmp_count);
++ all_count += tmp_count;
++
++ tmp_count = rsbac_list_count(pp_handle);
++ rsbac_printk(KERN_INFO "rsbac_stats_pm(): %lu pp items\n",
++ tmp_count);
++ all_count += tmp_count;
++
++ tmp_count = rsbac_list_count(tkt_handle);
++ rsbac_printk(KERN_INFO "rsbac_stats_pm(): %lu tkt items\n",
++ tmp_count);
++ all_count += tmp_count;
++
++ rsbac_printk(KERN_INFO "rsbac_stats_pm(): Total of %lu registered rsbac-pm-items\n",
++ all_count);
++ return 0;
++}
++
++/************************************************* */
++/* Access functions */
++/************************************************* */
++
++/***********************/
++/* Helper lists / sets */
++/***********************/
++
++/* Trying to access a set that was never created, or that has been removed, returns an error! */
++
++
++/* rsbac_pm_add_to_set */
++/* Add a set member to a set sublist. Set behaviour: also returns success, */
++/* if member was already in set! */
++
++int rsbac_pm_add_to_set(rsbac_list_ta_number_t ta_number,
++ enum rsbac_pm_set_t set,
++ union rsbac_pm_set_id_t id,
++ union rsbac_pm_set_member_t member)
++{
++ switch (set) {
++ case PS_TASK:
++ return (rsbac_ta_list_lol_subadd_ttl
++ (ta_number, task_set_handle, 0, &id.task_set,
++ &member.task, NULL));
++ case PS_TP:
++ return (rsbac_ta_list_lol_subadd_ttl
++ (ta_number, tp_set_handle, 0, &id.tp_set,
++ &member.tp, NULL));
++ case PS_RU:
++ return (rsbac_ta_list_lol_subadd_ttl
++ (ta_number, ru_set_handle, 0, &id.ru_set,
++ &member.ru, NULL));
++ case PS_PP:
++ return (rsbac_ta_list_lol_subadd_ttl
++ (ta_number, pp_set_handle, 0, &id.pp_set,
++ &member.pp, NULL));
++ case PS_IN_PP:
++ return (rsbac_ta_list_lol_subadd_ttl
++ (ta_number, in_pp_set_handle, 0, &id.in_pp_set,
++ &member.pp, NULL));
++ case PS_OUT_PP:
++ return (rsbac_ta_list_lol_subadd_ttl
++ (ta_number, out_pp_set_handle, 0, &id.out_pp_set,
++ &member.pp, NULL));
++ default:
++ return (-RSBAC_EINVALIDTARGET);
++ }
++}
++
++/* rsbac_pm_remove_from_set */
++/* Remove a set member from a sublist. Set behaviour: Returns no error, if */
++/* member is not in list. */
++/* Caution: Writing to disk is not done in the remove functions! */
++
++int rsbac_pm_remove_from_set(rsbac_list_ta_number_t ta_number,
++ enum rsbac_pm_set_t set,
++ union rsbac_pm_set_id_t id,
++ union rsbac_pm_set_member_t member)
++{
++ switch (set) {
++ case PS_TASK:
++ return (rsbac_ta_list_lol_subremove
++ (ta_number, task_set_handle, &id.task_set,
++ &member.task));
++ case PS_TP:
++ return (rsbac_ta_list_lol_subremove
++ (ta_number, tp_set_handle, &id.tp_set,
++ &member.tp));
++ case PS_RU:
++ return (rsbac_ta_list_lol_subremove
++ (ta_number, ru_set_handle, &id.ru_set,
++ &member.ru));
++ case PS_PP:
++ return (rsbac_ta_list_lol_subremove
++ (ta_number, pp_set_handle, &id.pp_set,
++ &member.pp));
++ case PS_IN_PP:
++ return (rsbac_ta_list_lol_subremove
++ (ta_number, in_pp_set_handle, &id.in_pp_set,
++ &member.pp));
++ case PS_OUT_PP:
++ return (rsbac_ta_list_lol_subremove
++ (ta_number, out_pp_set_handle, &id.out_pp_set,
++ &member.pp));
++ default:
++ return (-RSBAC_EINVALIDTARGET);
++ }
++}
++
++/* rsbac_pm_clear_set */
++/* Remove all set members from a sublist. Set behaviour: Returns no error, */
++/* if list is empty. */
++/* Caution: Writing to disk is not done in the remove functions! */
++
++int rsbac_pm_clear_set(rsbac_list_ta_number_t ta_number,
++ enum rsbac_pm_set_t set, union rsbac_pm_set_id_t id)
++{
++ switch (set) {
++ case PS_TASK:
++ return (rsbac_ta_list_lol_subremove_all
++ (ta_number, task_set_handle, &id.task_set));
++ case PS_TP:
++ return (rsbac_ta_list_lol_subremove_all
++ (ta_number, tp_set_handle, &id.tp_set));
++ case PS_RU:
++ return (rsbac_ta_list_lol_subremove_all
++ (ta_number, ru_set_handle, &id.ru_set));
++ case PS_PP:
++ return (rsbac_ta_list_lol_subremove_all
++ (ta_number, pp_set_handle, &id.pp_set));
++ case PS_IN_PP:
++ return (rsbac_ta_list_lol_subremove_all
++ (ta_number, in_pp_set_handle, &id.in_pp_set));
++ case PS_OUT_PP:
++ return (rsbac_ta_list_lol_subremove_all
++ (ta_number, out_pp_set_handle, &id.out_pp_set));
++ default:
++ return (-RSBAC_EINVALIDTARGET);
++ }
++}
++
++/* rsbac_pm_set_member */
++/* Return truth value, whether member is in set */
++
++rsbac_boolean_t rsbac_pm_set_member(rsbac_list_ta_number_t ta_number,
++ enum rsbac_pm_set_t set,
++ union rsbac_pm_set_id_t id,
++ union rsbac_pm_set_member_t member)
++{
++ switch (set) {
++ case PS_TASK:
++ return (rsbac_ta_list_lol_subexist
++ (ta_number, task_set_handle, &id.task_set,
++ &member.task));
++ case PS_TP:
++ return (rsbac_ta_list_lol_subexist
++ (ta_number, tp_set_handle, &id.tp_set,
++ &member.tp));
++ case PS_RU:
++ return (rsbac_ta_list_lol_subexist
++ (ta_number, ru_set_handle, &id.ru_set,
++ &member.ru));
++ case PS_PP:
++ return (rsbac_ta_list_lol_subexist
++ (ta_number, pp_set_handle, &id.pp_set,
++ &member.pp));
++ case PS_IN_PP:
++ return (rsbac_ta_list_lol_subexist
++ (ta_number, in_pp_set_handle, &id.in_pp_set,
++ &member.pp));
++ case PS_OUT_PP:
++ return (rsbac_ta_list_lol_subexist
++ (ta_number, out_pp_set_handle, &id.out_pp_set,
++ &member.pp));
++ default:
++ return (FALSE);
++ }
++}
++
++/* rsbac_pm_pp_subset */
++/* Return truth value, whether pp_set is subset of in_pp_set */
++
++rsbac_boolean_t rsbac_pm_pp_subset(rsbac_pm_pp_set_id_t pp_set,
++ rsbac_pm_in_pp_set_id_t in_pp_set)
++{
++ rsbac_pm_purpose_id_t *pp_array;
++ long count;
++ u_long i;
++ rsbac_boolean_t result = TRUE;
++
++ if (!pp_set || !in_pp_set)
++ return (FALSE);
++
++ /* get all pp_set members */
++ count =
++ rsbac_list_lol_get_all_subdesc(pp_set_handle, &pp_set,
++ (void **) &pp_array);
++ if (count < 0)
++ return FALSE;
++ if (!count)
++ return TRUE;
++ if (!rsbac_list_lol_exist(in_pp_set_handle, &in_pp_set)) {
++ rsbac_kfree(pp_array);
++ return TRUE;
++ }
++	/* check that all pp_set members are in in_pp_set */
++ for (i = 0; i < count; i++) {
++ if (!rsbac_list_lol_subexist
++ (in_pp_set_handle, &in_pp_set, &pp_array[i])) {
++ result = FALSE;
++ break;
++ }
++ }
++ rsbac_kfree(pp_array);
++ return result;
++}
++
++/* rsbac_pm_pp_superset */
++/* Return truth value, whether pp_set is superset of out_pp_set */
++
++rsbac_boolean_t rsbac_pm_pp_superset(rsbac_pm_pp_set_id_t pp_set,
++ rsbac_pm_out_pp_set_id_t out_pp_set)
++{
++ rsbac_pm_purpose_id_t *pp_array;
++ long count;
++ u_long i;
++ rsbac_boolean_t result = TRUE;
++
++ if (!pp_set)
++ return (FALSE);
++ if (!out_pp_set)
++ return (TRUE);
++ if (!rsbac_list_lol_exist(pp_set_handle, &pp_set))
++ return FALSE;
++
++	/* get all out_pp_set members */
++ count =
++ rsbac_list_lol_get_all_subdesc(out_pp_set_handle, &out_pp_set,
++ (void **) &pp_array);
++ if (count <= 0)
++ return TRUE;
++	/* check that all out_pp_set members are in pp_set */
++ for (i = 0; i < count; i++) {
++ if (!rsbac_list_lol_subexist
++ (pp_set_handle, &pp_set, &pp_array[i])) {
++ result = FALSE;
++ break;
++ }
++ }
++ rsbac_kfree(pp_array);
++ return result;
++}
++
++/* rsbac_pm_pp_only */
++/* Return truth value whether out_pp_set contains no item other than purpose */
++
++rsbac_boolean_t rsbac_pm_pp_only(rsbac_pm_purpose_id_t purpose,
++ rsbac_pm_out_pp_set_id_t out_pp_set)
++{
++ long count;
++
++ if (!out_pp_set)
++ return (TRUE);
++
++ /* get number of pp_set members */
++ count = rsbac_list_lol_subcount(out_pp_set_handle, &out_pp_set);
++ if (count <= 0)
++ return TRUE;
++ if (count == 1)
++ return rsbac_list_lol_subexist(out_pp_set_handle,
++ &out_pp_set, &purpose);
++ else
++ return FALSE;
++}
++
++/* rsbac_pm_pp_intersec */
++/* Create intersection of pp_set and in_pp_set in in_pp_set */
++/* If in_pp_set does not exist, it is created with all members of pp_set */
++/* If pp_set does not exist or one of them is invalid, an error is returned */
++
++int rsbac_pm_pp_intersec(rsbac_pm_pp_set_id_t pp_set,
++ rsbac_pm_in_pp_set_id_t in_pp_set)
++{
++ rsbac_pm_purpose_id_t *pp_array;
++ long count;
++ u_long i;
++
++ if (!rsbac_list_lol_exist(pp_set_handle, &pp_set))
++ return -RSBAC_EINVALIDVALUE;
++
++ if (!rsbac_list_lol_exist(in_pp_set_handle, &in_pp_set)) { /* in_pp_set not found -> try to create and fill with pp_set */
++ if ((count =
++ rsbac_list_lol_add(in_pp_set_handle, &in_pp_set,
++ NULL)))
++ return count;
++ /* creation successful -> copy list */
++ /* get all pp_set members */
++ count =
++ rsbac_list_lol_get_all_subdesc(pp_set_handle, &pp_set,
++ (void **) &pp_array);
++ if (count <= 0)
++ return count;
++ for (i = 0; i < count; i++) {
++ rsbac_list_lol_subadd(in_pp_set_handle, &in_pp_set,
++ &pp_array[i], NULL);
++ }
++ rsbac_kfree(pp_array);
++ } else { /* in_pp_set exists -> remove all members not in pp_set */
++ /* get all in_pp_set members */
++ count =
++ rsbac_list_lol_get_all_subdesc(in_pp_set_handle,
++ &in_pp_set,
++ (void **) &pp_array);
++ if (count <= 0)
++ return count;
++ for (i = 0; i < count; i++) {
++ if (!rsbac_list_lol_subexist
++ (pp_set_handle, &pp_set, &pp_array[i]))
++ rsbac_list_lol_subremove(in_pp_set_handle,
++ &in_pp_set,
++ &pp_array[i]);
++ }
++ rsbac_kfree(pp_array);
++ }
++ return 0;
++}
++
++/* rsbac_pm_pp_union */
++/* Create union of pp_set and out_pp_set in out_pp_set
++ * If out_pp_set does not exist, it is created with all members of pp_set
++ * If pp_set does not exist or one of them is invalid, an error is returned */
++
++int rsbac_pm_pp_union(rsbac_pm_pp_set_id_t pp_set,
++ rsbac_pm_out_pp_set_id_t out_pp_set)
++{
++ rsbac_pm_purpose_id_t *pp_array;
++ long count;
++ u_long i;
++
++ /* check, whether set-id pp_set exists */
++ if (!rsbac_list_lol_exist(pp_set_handle, &pp_set))
++ return -RSBAC_EINVALIDVALUE;
++
++ if (!rsbac_list_lol_exist(out_pp_set_handle, &out_pp_set)) { /* out_pp_set not found -> try to create */
++ count =
++ rsbac_list_lol_add(out_pp_set_handle, &out_pp_set,
++ NULL);
++ if (count)
++ return count;
++ }
++ /* out_pp_set exists -> add all members in pp_set */
++ /* get all pp_set members */
++ count =
++ rsbac_list_lol_get_all_subdesc(pp_set_handle, &pp_set,
++ (void **) &pp_array);
++ if (count <= 0)
++ return count;
++ for (i = 0; i < count; i++) {
++ rsbac_list_lol_subadd(out_pp_set_handle, &out_pp_set,
++ &pp_array[i], NULL);
++ }
++ rsbac_kfree(pp_array);
++ return 0;
++}
++
++/* rsbac_pm_create_set */
++/* Create a new set of given type set, using id id. Using any other set */
++/* function for a set id without creating this set returns an error. */
++/* To empty an existing set use rsbac_pm_clear_set. */
++
++int rsbac_pm_create_set(rsbac_list_ta_number_t ta_number,
++ enum rsbac_pm_set_t set,
++ union rsbac_pm_set_id_t id)
++{
++ switch (set) {
++ case PS_TASK:
++/*
++ rsbac_pr_debug(ds_pm, "Creating task set\n");
++*/
++ if (rsbac_ta_list_lol_exist
++ (ta_number, task_set_handle, &id.task_set))
++ return -RSBAC_EEXISTS;
++ return rsbac_ta_list_lol_add_ttl(ta_number,
++ task_set_handle, 0,
++ &id.task_set, NULL);
++ case PS_TP:
++/*
++ rsbac_pr_debug(ds_pm, "Creating tp set\n");
++*/
++ if (rsbac_ta_list_lol_exist
++ (ta_number, tp_set_handle, &id.tp_set))
++ return -RSBAC_EEXISTS;
++ return rsbac_ta_list_lol_add_ttl(ta_number, tp_set_handle,
++ 0, &id.tp_set, NULL);
++ case PS_RU:
++/*
++ rsbac_pr_debug(ds_pm, "Creating ru set\n");
++*/
++ if (rsbac_ta_list_lol_exist
++ (ta_number, ru_set_handle, &id.ru_set))
++ return -RSBAC_EEXISTS;
++ return rsbac_ta_list_lol_add_ttl(ta_number, ru_set_handle,
++ 0, &id.ru_set, NULL);
++ case PS_PP:
++/*
++ rsbac_pr_debug(ds_pm, "Creating pp set\n");
++*/
++ if (rsbac_ta_list_lol_exist
++ (ta_number, pp_set_handle, &id.pp_set))
++ return -RSBAC_EEXISTS;
++ return rsbac_ta_list_lol_add_ttl(ta_number, pp_set_handle,
++ 0, &id.pp_set, NULL);
++ case PS_IN_PP:
++/*
++ rsbac_pr_debug(ds_pm, "Creating in_pp set\n");
++*/
++ if (rsbac_ta_list_lol_exist
++ (ta_number, in_pp_set_handle, &id.in_pp_set))
++ return -RSBAC_EEXISTS;
++ return rsbac_ta_list_lol_add_ttl(ta_number,
++ in_pp_set_handle, 0,
++ &id.in_pp_set, NULL);
++ case PS_OUT_PP:
++/*
++ rsbac_pr_debug(ds_pm, "Creating out_pp set\n");
++*/
++ if (rsbac_ta_list_lol_exist
++ (ta_number, out_pp_set_handle, &id.out_pp_set))
++ return -RSBAC_EEXISTS;
++ return rsbac_ta_list_lol_add_ttl(ta_number,
++ out_pp_set_handle, 0,
++ &id.out_pp_set, NULL);
++
++ default:
++ return (-RSBAC_EINVALIDTARGET);
++ }
++}
++
++/* rsbac_pm_set_exist */
++/* Return rsbac_boolean_t value whether set exists */
++
++rsbac_boolean_t rsbac_pm_set_exist(rsbac_list_ta_number_t ta_number,
++ enum rsbac_pm_set_t set,
++ union rsbac_pm_set_id_t id)
++{
++ switch (set) {
++ case PS_TASK:
++ return rsbac_ta_list_lol_exist(ta_number, task_set_handle,
++ &id.task_set);
++ case PS_TP:
++ return rsbac_ta_list_lol_exist(ta_number, tp_set_handle,
++ &id.tp_set);
++ case PS_RU:
++ return rsbac_ta_list_lol_exist(ta_number, ru_set_handle,
++ &id.ru_set);
++ case PS_PP:
++ return rsbac_ta_list_lol_exist(ta_number, pp_set_handle,
++ &id.pp_set);
++ case PS_IN_PP:
++ return rsbac_ta_list_lol_exist(ta_number, in_pp_set_handle,
++ &id.in_pp_set);
++ case PS_OUT_PP:
++ return rsbac_ta_list_lol_exist(ta_number,
++ out_pp_set_handle,
++ &id.out_pp_set);
++
++ default:
++ return FALSE;
++ }
++}
++
++/* rsbac_pm_remove_set */
++/* Remove a full set. After this call the given id can only be used for */
++/* creating a new set, anything else returns an error. */
++/* To empty an existing set use rsbac_pm_clear_set. */
++/* Caution: Writing to disk is done in the remove_item functions! */
++
++int rsbac_pm_remove_set(rsbac_list_ta_number_t ta_number,
++ enum rsbac_pm_set_t set,
++ union rsbac_pm_set_id_t id)
++{
++ switch (set) {
++ case PS_TASK:
++ return rsbac_ta_list_lol_remove(ta_number, task_set_handle,
++ &id.task_set);
++ case PS_TP:
++ return rsbac_ta_list_lol_remove(ta_number, tp_set_handle,
++ &id.tp_set);
++ case PS_RU:
++ return rsbac_ta_list_lol_remove(ta_number, ru_set_handle,
++ &id.ru_set);
++ case PS_PP:
++ return rsbac_ta_list_lol_remove(ta_number, pp_set_handle,
++ &id.pp_set);
++ case PS_IN_PP:
++ return rsbac_ta_list_lol_remove(ta_number,
++ in_pp_set_handle,
++ &id.in_pp_set);
++ case PS_OUT_PP:
++ return rsbac_ta_list_lol_remove(ta_number,
++ out_pp_set_handle,
++ &id.out_pp_set);
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++}
++
++/**************/
++/* Main lists */
++/**************/
++
++/* rsbac_pm_get_data() and rsbac_pm_set_data() read and change single data values. */
++/* rsbac_pm_add_target() adds a new list item and sets all data values as */
++/* given. rsbac_pm_remove_target() removes an item. */
++
++/* A rsbac_pm_[sg]et_data() call for a non-existing target will return an */
++/* error.*/
++/* Invalid parameter combinations return an error. */
++
++int rsbac_pm_get_data(rsbac_list_ta_number_t ta_number,
++ enum rsbac_pm_target_t target,
++ union rsbac_pm_target_id_t tid,
++ enum rsbac_pm_data_t data,
++ union rsbac_pm_data_value_t *value)
++{
++ int err = 0;
++
++ if (!value)
++ return (-RSBAC_EINVALIDVALUE);
++
++ switch (target) {
++ case PMT_TASK:
++ {
++ struct rsbac_pm_task_data_t all_data;
++
++/*
++ rsbac_pr_debug(ds_pm, "Getting task data\n");
++*/
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ task_handle, NULL,
++ &tid.task,
++ &all_data);
++ if (err)
++ return err;
++
++ switch (data) {
++ case PD_purpose:
++ value->purpose = all_data.purpose;
++ break;
++ case PD_tp_set:
++ value->tp_set = all_data.tp_set;
++ break;
++ case PD_ru_set:
++ value->ru_set = all_data.ru_set;
++ break;
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++ return 0;
++ }
++
++ case PMT_CLASS:
++ {
++ struct rsbac_pm_class_data_t all_data;
++
++/*
++ rsbac_pr_debug(ds_pm, "Getting class data\n");
++*/
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ class_handle, NULL,
++ &tid.object_class,
++ &all_data);
++ if (err)
++ return err;
++
++ switch (data) {
++ case PD_pp_set:
++ value->pp_set = all_data.pp_set;
++ break;
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++ return 0;
++ }
++
++ case PMT_NA:
++ {
++ struct rsbac_pm_na_data_t all_data;
++
++/*
++ rsbac_pr_debug(ds_pm, "Getting na data\n");
++*/
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ na_handle, NULL,
++ &tid.na, &all_data);
++ if (err)
++ return err;
++
++ switch (data) {
++ case PD_task:
++ value->task = all_data.task;
++ break;
++ case PD_class:
++ value->object_class =
++ all_data.object_class;
++ break;
++ case PD_tp:
++ value->tp = all_data.tp;
++ break;
++ case PD_accesses:
++ value->accesses = all_data.accesses;
++ break;
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++ return 0;
++ }
++
++ case PMT_CS:
++ {
++ struct rsbac_pm_cs_data_t all_data;
++
++/*
++ rsbac_pr_debug(ds_pm, "Getting cs data\n");
++*/
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ cs_handle, NULL,
++ &tid.cs, &all_data);
++ if (err)
++ return err;
++
++ switch (data) {
++ case PD_purpose:
++ value->purpose = all_data.purpose;
++ break;
++ case PD_file:
++ value->file = all_data.file;
++ break;
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++ return 0;
++ }
++
++ case PMT_TP:
++ {
++ struct rsbac_pm_tp_data_t all_data;
++
++/*
++ rsbac_pr_debug(ds_pm, "Getting tp data\n");
++*/
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ tp_handle, NULL,
++ &tid.tp, &all_data);
++ if (err)
++ return err;
++
++ switch (data) {
++ case PD_tp:
++ value->tp = all_data.id;
++ break;
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++ return 0;
++ }
++
++ case PMT_PP:
++ {
++ struct rsbac_pm_pp_data_t all_data;
++
++/*
++ rsbac_pr_debug(ds_pm, "Getting pp data\n");
++*/
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ pp_handle, NULL,
++ &tid.pp, &all_data);
++ if (err)
++ return err;
++
++ switch (data) {
++ case PD_purpose:
++ value->purpose = all_data.id;
++ break;
++ case PD_def_class:
++ value->def_class = all_data.def_class;
++ break;
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++ return 0;
++ }
++
++ case PMT_TKT:
++ {
++ struct rsbac_pm_tkt_data_t all_data;
++
++/*
++ rsbac_pr_debug(ds_pm, "Getting tkt data\n");
++*/
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ tkt_handle, NULL,
++ &tid.tkt,
++ &all_data);
++ if (err)
++ return err;
++
++ switch (data) {
++ case PD_issuer:
++ value->issuer = all_data.issuer;
++ break;
++ case PD_function_type:
++ value->function_type =
++ all_data.function_type;
++ break;
++ case PD_function_param:
++ value->function_param =
++ all_data.function_param;
++ break;
++ case PD_valid_until:
++ value->valid_until = all_data.valid_until;
++ break;
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++ return 0;
++ }
++
++ /* switch target: no valid target */
++ default:
++ return (-RSBAC_EINVALIDTARGET);
++ }
++}; /* end of rsbac_pm_get_data() */
++
++/************************************************************************** */
++
++int rsbac_pm_get_all_data(rsbac_list_ta_number_t ta_number,
++ enum rsbac_pm_target_t target,
++ union rsbac_pm_target_id_t tid,
++ union rsbac_pm_all_data_value_t *value)
++{
++ if (!value)
++ return (-RSBAC_EINVALIDVALUE);
++ switch (target) {
++ case PMT_TASK:
++/*
++ rsbac_pr_debug(ds_pm, "Getting task data\n");
++*/
++ return rsbac_ta_list_get_data_ttl(ta_number, task_handle,
++ NULL, &tid.task,
++ &value->task);
++
++ case PMT_CLASS:
++/*
++ rsbac_pr_debug(ds_pm, "Getting class data\n");
++*/
++ return rsbac_ta_list_get_data_ttl(ta_number, class_handle,
++ NULL, &tid.object_class,
++ &value->object_class);
++
++ case PMT_NA:
++/*
++ rsbac_pr_debug(ds_pm, "Getting na data\n");
++*/
++ return rsbac_ta_list_get_data_ttl(ta_number, na_handle,
++ NULL, &tid.na,
++ &value->na);
++
++ case PMT_CS:
++/*
++ rsbac_pr_debug(ds_pm, "Getting cs data\n");
++*/
++ return rsbac_ta_list_get_data_ttl(ta_number, cs_handle,
++ NULL, &tid.cs,
++ &value->cs);
++
++ case PMT_TP:
++/*
++ rsbac_pr_debug(ds_pm, "Getting tp data\n");
++*/
++ return rsbac_ta_list_get_data_ttl(ta_number, tp_handle,
++ NULL, &tid.tp,
++ &value->tp);
++
++ case PMT_PP:
++/*
++ rsbac_pr_debug(ds_pm, "Getting pp data\n");
++*/
++ return rsbac_ta_list_get_data_ttl(ta_number, pp_handle,
++ NULL, &tid.pp,
++ &value->pp);
++
++ case PMT_TKT:
++/*
++ rsbac_pr_debug(ds_pm, "Getting tkt data\n");
++*/
++ return rsbac_ta_list_get_data_ttl(ta_number, tkt_handle,
++ NULL, &tid.tkt,
++ &value->tkt);
++
++ /* switch target: no valid target */
++ default:
++ return (-RSBAC_EINVALIDTARGET);
++ }
++} /* end of rsbac_pm_get_all_data() */
++
++/************************************************************************** */
++
++rsbac_boolean_t rsbac_pm_exists(rsbac_list_ta_number_t ta_number,
++ enum rsbac_pm_target_t target,
++ union rsbac_pm_target_id_t tid)
++{
++ switch (target) {
++ case PMT_TASK:
++ return rsbac_ta_list_exist(ta_number, task_handle,
++ &tid.task);
++
++ case PMT_CLASS:
++ /* IPC and DEV classes always exist */
++ if ((tid.object_class == RSBAC_PM_IPC_OBJECT_CLASS_ID)
++ || (tid.object_class == RSBAC_PM_DEV_OBJECT_CLASS_ID))
++ return (TRUE);
++ return rsbac_ta_list_exist(ta_number, class_handle,
++ &tid.object_class);
++
++ case PMT_NA:
++ return rsbac_ta_list_exist(ta_number, na_handle, &tid.na);
++
++ case PMT_CS:
++ return rsbac_ta_list_exist(ta_number, cs_handle, &tid.cs);
++
++ case PMT_TP:
++ return rsbac_ta_list_exist(ta_number, tp_handle, &tid.tp);
++
++ case PMT_PP:
++ return rsbac_ta_list_exist(ta_number, pp_handle, &tid.pp);
++
++ case PMT_TKT:
++ {
++ struct rsbac_pm_tkt_data_t all_data;
++
++ if (rsbac_ta_list_get_data_ttl
++ (ta_number, tkt_handle, NULL, &tid.tkt,
++ &all_data))
++ return FALSE;
++
++ /* ticket too old? -> remove it and return FALSE */
++ {
++ if (all_data.valid_until <
++ RSBAC_CURRENT_TIME) {
++ rsbac_pm_pp_set_id_t pp_set =
++ -tid.tkt;
++
++ if (rsbac_ta_list_lol_exist
++ (ta_number, pp_set_handle,
++ &pp_set))
++ rsbac_ta_list_lol_remove
++ (ta_number,
++ pp_set_handle,
++ &pp_set);
++ rsbac_ta_list_remove(ta_number,
++ tkt_handle,
++ &tid.tkt);
++ return (FALSE);
++ } else
++ return TRUE;
++ }
++ }
++ /* switch target: no valid target */
++ default:
++ rsbac_printk(KERN_WARNING "rsbac_pm_exists(): Invalid target!\n");
++ return FALSE;
++ }
++} /* end of rsbac_pm_exists() */
++
++/************************************************************************** */
++
++int rsbac_pm_set_data(rsbac_list_ta_number_t ta_number,
++ enum rsbac_pm_target_t target,
++ union rsbac_pm_target_id_t tid,
++ enum rsbac_pm_data_t data,
++ union rsbac_pm_data_value_t value)
++{
++ switch (target) {
++ case PMT_TASK:
++ {
++ struct rsbac_pm_task_data_t all_data;
++ int err;
++
++/*
++ rsbac_pr_debug(ds_pm, "Setting task data\n");
++*/
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ task_handle, NULL,
++ &tid.task,
++ &all_data);
++ if (err)
++ return err;
++
++ switch (data) {
++ case PD_purpose:
++ all_data.purpose = value.purpose;
++ break;
++ case PD_tp_set:
++ all_data.tp_set = value.tp_set;
++ break;
++ case PD_ru_set:
++ all_data.ru_set = value.ru_set;
++ break;
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++ err =
++ rsbac_ta_list_add_ttl(ta_number, task_handle,
++ 0, &tid.task, &all_data);
++ return err;
++ }
++
++ case PMT_CLASS:
++ {
++ struct rsbac_pm_class_data_t all_data;
++ int err;
++
++/*
++ rsbac_pr_debug(ds_pm, "Setting class data\n");
++*/
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ class_handle, NULL,
++ &tid.object_class,
++ &all_data);
++ if (err)
++ return err;
++
++ switch (data) {
++ case PD_pp_set:
++ all_data.pp_set = value.pp_set;
++ break;
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++ err =
++ rsbac_ta_list_add_ttl(ta_number, class_handle,
++ 0, &tid.object_class,
++ &all_data);
++ return err;
++ }
++
++ case PMT_NA:
++ {
++ struct rsbac_pm_na_data_t all_data;
++ int err;
++
++/*
++ rsbac_pr_debug(ds_pm, "Setting na data\n");
++*/
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ na_handle, NULL,
++ &tid.na, &all_data);
++ if (err)
++ return err;
++
++ switch (data) {
++ case PD_task:
++ all_data.task = value.task;
++ break;
++ case PD_class:
++ all_data.object_class = value.object_class;
++ break;
++ case PD_tp:
++ all_data.tp = value.tp;
++ break;
++ case PD_accesses:
++ all_data.accesses = value.accesses;
++ break;
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++ err =
++ rsbac_ta_list_add_ttl(ta_number, na_handle, 0,
++ &tid.na, &all_data);
++ return err;
++ }
++
++ case PMT_CS:
++ {
++ struct rsbac_pm_cs_data_t all_data;
++ int err;
++
++/*
++ rsbac_pr_debug(ds_pm, "Setting cs data\n");
++*/
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ cs_handle, NULL,
++ &tid.cs, &all_data);
++ if (err)
++ return err;
++
++ switch (data) {
++ case PD_purpose:
++ all_data.purpose = value.purpose;
++ break;
++ case PD_file:
++ all_data.file = value.file;
++ break;
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++ err =
++ rsbac_ta_list_add_ttl(ta_number, cs_handle, 0,
++ &tid.cs, &all_data);
++ return err;
++ }
++
++ case PMT_TP:
++ return -RSBAC_EINVALIDATTR;
++
++ case PMT_PP:
++ {
++ struct rsbac_pm_pp_data_t all_data;
++ int err;
++
++/*
++ rsbac_pr_debug(ds_pm, "Setting pp data\n");
++*/
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ pp_handle, NULL,
++ &tid.pp, &all_data);
++ if (err)
++ return err;
++
++ switch (data) {
++ case PD_def_class:
++ all_data.def_class = value.def_class;
++ break;
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++ err =
++ rsbac_ta_list_add_ttl(ta_number, pp_handle, 0,
++ &tid.pp, &all_data);
++ return err;
++ }
++
++ case PMT_TKT:
++ {
++ struct rsbac_pm_tkt_data_t all_data;
++ int err;
++
++/*
++ rsbac_pr_debug(ds_pm, "Setting tkt data\n");
++*/
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ tkt_handle, NULL,
++ &tid.tkt,
++ &all_data);
++ if (err)
++ return err;
++
++ switch (data) {
++ case PD_issuer:
++ all_data.issuer = value.issuer;
++ break;
++ case PD_function_type:
++ all_data.function_type =
++ value.function_type;
++ break;
++ case PD_function_param:
++ all_data.function_param =
++ value.function_param;
++ break;
++ case PD_valid_until:
++ all_data.valid_until = value.valid_until;
++ break;
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++ err =
++ rsbac_ta_list_add_ttl(ta_number, tkt_handle, 0,
++ &tid.tkt, &all_data);
++ return err;
++ }
++
++ /* switch target: no valid target */
++ default:
++ return (-RSBAC_EINVALIDTARGET);
++ }
++} /* end of rsbac_pm_set_data() */
++
++/************************************************************************** */
++
++int rsbac_pm_add_target(rsbac_list_ta_number_t ta_number,
++ enum rsbac_pm_target_t target,
++ union rsbac_pm_all_data_value_t data)
++{
++ switch (target) {
++ case PMT_TASK:
++/*
++ rsbac_pr_debug(ds_pm, "Adding task item\n");
++*/
++ return rsbac_ta_list_add_ttl(ta_number, task_handle, 0,
++ &data.task.id, &data.task);
++
++ case PMT_CLASS:
++/*
++ rsbac_pr_debug(ds_pm, "Adding class item\n");
++*/
++ return rsbac_ta_list_add_ttl(ta_number, class_handle, 0,
++ &data.object_class.id,
++ &data.object_class);
++
++ case PMT_NA:
++ {
++ struct rsbac_pm_na_id_t na_id;
++
++/*
++ rsbac_pr_debug(ds_pm, "Adding na item\n");
++*/
++ na_id.task = data.na.task;
++ na_id.object_class = data.na.object_class;
++ na_id.tp = data.na.tp;
++ return rsbac_ta_list_add_ttl(ta_number, na_handle,
++ 0, &na_id, &data.na);
++ }
++
++ case PMT_CS:
++ {
++ struct rsbac_pm_cs_id_t cs_id;
++
++/*
++ rsbac_pr_debug(ds_pm, "Adding cs item\n");
++*/
++ cs_id.purpose = data.cs.purpose;
++ cs_id.file = data.cs.file;
++ return rsbac_ta_list_add_ttl(ta_number, cs_handle,
++ 0, &cs_id, &data.cs);
++ }
++
++ case PMT_TP:
++/*
++ rsbac_pr_debug(ds_pm, "Adding tp item\n");
++*/
++ return rsbac_ta_list_add_ttl(ta_number, tp_handle, 0,
++ &data.tp.id, &data.tp);
++
++ case PMT_PP:
++/*
++ rsbac_pr_debug(ds_pm, "Adding pp item\n");
++*/
++ return rsbac_ta_list_add_ttl(ta_number, pp_handle, 0,
++ &data.pp.id, &data.pp);
++
++ case PMT_TKT:
++/*
++ rsbac_pr_debug(ds_pm, "Adding tkt item\n");
++*/
++ return rsbac_ta_list_add_ttl(ta_number, tkt_handle, 0,
++ &data.tkt.id, &data.tkt);
++
++ /* switch target: no valid target */
++ default:
++ return (-RSBAC_EINVALIDTARGET);
++ }
++} /* end of rsbac_pm_add_target() */
++
++/************************************************************************** */
++
++int rsbac_pm_remove_target(rsbac_list_ta_number_t ta_number,
++ enum rsbac_pm_target_t target,
++ union rsbac_pm_target_id_t tid)
++{
++ switch (target) {
++ case PMT_TASK:
++/*
++ rsbac_pr_debug(ds_pm, "Removing task data\n");
++*/
++ return rsbac_ta_list_remove(ta_number, task_handle,
++ &tid.task);
++
++ case PMT_CLASS:
++/*
++ rsbac_pr_debug(ds_pm, "Removing class data\n");
++*/
++ return rsbac_ta_list_remove(ta_number, class_handle,
++ &tid.object_class);
++
++ case PMT_NA:
++/*
++		rsbac_pr_debug(ds_pm, "Removing na data\n");
++*/
++ return rsbac_ta_list_remove(ta_number, na_handle, &tid.na);
++
++ case PMT_CS:
++/*
++ rsbac_pr_debug(ds_pm, "Removing cs data\n");
++*/
++ return rsbac_ta_list_remove(ta_number, cs_handle, &tid.cs);
++
++ case PMT_TP:
++/*
++ rsbac_pr_debug(ds_pm, "Removing tp data\n");
++*/
++ return rsbac_ta_list_remove(ta_number, tp_handle, &tid.tp);
++
++ case PMT_PP:
++/*
++ rsbac_pr_debug(ds_pm, "Removing pp data\n");
++*/
++ return rsbac_ta_list_remove(ta_number, pp_handle, &tid.pp);
++
++ case PMT_TKT:
++ {
++ rsbac_pm_pp_set_id_t pp_set = -tid.tkt;
++
++/*
++ rsbac_pr_debug(ds_pm, "Removing tkt data\n");
++*/
++ if (rsbac_ta_list_lol_exist
++ (ta_number, pp_set_handle, &pp_set))
++ rsbac_ta_list_lol_remove(ta_number,
++ pp_set_handle,
++ &pp_set);
++ return rsbac_ta_list_remove(ta_number, tkt_handle,
++ &tid.tkt);
++ }
++
++ default:
++ return (-RSBAC_EINVALIDTARGET);
++ }
++}; /* end of rsbac_pm_remove_target() */
++
+diff --git a/rsbac/data_structures/rc_data_structures.c b/rsbac/data_structures/rc_data_structures.c
+new file mode 100644
+index 0000000..643d270
+--- /dev/null
++++ b/rsbac/data_structures/rc_data_structures.c
+@@ -0,0 +1,4445 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of RC data structures */
++/* Author and (C) 1999-2011: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 12/Jul/2011 */
++/*************************************************** */
++
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <asm/uaccess.h>
++#include <rsbac/aci_data_structures.h>
++#include <rsbac/rc_types.h>
++#include <rsbac/rc_data_structures.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/fs.h>
++#include <rsbac/adf.h>
++#include <rsbac/acl.h>
++#include <rsbac/getname.h>
++#include <rsbac/rc_getname.h>
++#include <rsbac/proc_fs.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/request_groups.h>
++#include <linux/seq_file.h>
++#include <linux/module.h>
++
++/************************************************************************** */
++/* Global Variables */
++/************************************************************************** */
++
++/* The following global variables are needed for access to RC data. */
++
++static rsbac_list_handle_t role_handle = NULL;
++static rsbac_list_handle_t role_rc_handle = NULL;
++static rsbac_list_handle_t role_adr_handle = NULL;
++static rsbac_list_handle_t role_asr_handle = NULL;
++static rsbac_list_handle_t role_dfdc_handle = NULL;
++static rsbac_list_handle_t role_tcfd_handle = NULL;
++static rsbac_list_handle_t role_tcdv_handle = NULL;
++static rsbac_list_handle_t role_tcus_handle = NULL;
++static rsbac_list_handle_t role_tcpr_handle = NULL;
++static rsbac_list_handle_t role_tcip_handle = NULL;
++static rsbac_list_handle_t role_tcsc_handle = NULL;
++static rsbac_list_handle_t role_tcgr_handle = NULL;
++static rsbac_list_handle_t role_tcnd_handle = NULL;
++static rsbac_list_handle_t role_tcnt_handle = NULL;
++static rsbac_list_handle_t role_tcno_handle = NULL;
++
++static rsbac_list_handle_t type_fd_handle = NULL;
++static rsbac_list_handle_t type_dev_handle = NULL;
++static rsbac_list_handle_t type_ipc_handle = NULL;
++static rsbac_list_handle_t type_user_handle = NULL;
++static rsbac_list_handle_t type_process_handle = NULL;
++static rsbac_list_handle_t type_group_handle = NULL;
++static rsbac_list_handle_t type_netdev_handle = NULL;
++static rsbac_list_handle_t type_nettemp_handle = NULL;
++static rsbac_list_handle_t type_netobj_handle = NULL;
++
++/**************************************************/
++/* Declarations of external functions */
++/**************************************************/
++
++/**************************************************/
++/* Declarations of internal functions */
++/**************************************************/
++
++/* As some function use later defined functions, we declare those here. */
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++/* nr_hashes is always 2^n, no matter what the macros say */
++static u_int nr_role_hashes = RSBAC_RC_NR_ROLE_LISTS;
++static u_int role_hash(void * desc, __u32 nr_hashes)
++{
++ return (*((rsbac_rc_role_id_t *) desc) & (nr_hashes - 1));
++}
++
++static u_int nr_type_hashes = RSBAC_RC_NR_TYPE_LISTS;
++static u_int type_hash(void * desc, __u32 nr_hashes)
++{
++ return (*((rsbac_rc_type_id_t *) desc) & (nr_hashes - 1));
++}
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int role_conv(
++#else
++static int __init role_conv(
++#endif
++ void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ struct rsbac_rc_role_entry_t *new = new_data;
++ struct rsbac_rc_old_role_entry_t *old = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_rc_role_id_t));
++ new->admin_type = old->admin_type;
++ memcpy(new->name, old->name, RSBAC_RC_NAME_LEN);
++ new->def_fd_create_type = old->def_fd_create_type;
++ new->def_user_create_type = old->def_user_create_type;
++ new->def_process_create_type = old->def_process_create_type;
++ new->def_process_chown_type = old->def_process_chown_type;
++ new->def_process_execute_type = old->def_process_execute_type;
++ new->def_ipc_create_type = old->def_ipc_create_type;
++ new->def_group_create_type = RSBAC_RC_GENERAL_TYPE;
++ new->def_unixsock_create_type = RC_type_use_fd;
++ new->boot_role = old->boot_role;
++ new->req_reauth = old->req_reauth;
++ return 0;
++}
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int old_role_conv(
++#else
++static int __init old_role_conv(
++#endif
++ void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ struct rsbac_rc_role_entry_t *new = new_data;
++ struct rsbac_rc_old_role_entry_t *old = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_rc_role_id_t));
++ new->admin_type = old->admin_type;
++ memcpy(new->name, old->name, RSBAC_RC_NAME_LEN);
++ new->def_fd_create_type = old->def_fd_create_type;
++ new->def_user_create_type = old->def_user_create_type;
++ new->def_process_create_type = old->def_process_create_type;
++ new->def_process_chown_type = old->def_process_chown_type;
++ new->def_process_execute_type = old->def_process_execute_type;
++ new->def_ipc_create_type = old->def_ipc_create_type;
++ new->def_group_create_type = RSBAC_RC_GENERAL_TYPE;
++ new->def_unixsock_create_type = RC_type_use_fd;
++ new->boot_role = old->boot_role;
++ new->req_reauth = FALSE;
++ return 0;
++}
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int old_old_role_conv(
++#else
++static int __init old_old_role_conv(
++#endif
++ void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ struct rsbac_rc_role_entry_t *new = new_data;
++ struct rsbac_rc_old_role_entry_t *old = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_rc_role_id_t));
++ new->admin_type = old->admin_type;
++ memcpy(new->name, old->name, RSBAC_RC_NAME_LEN);
++ new->def_fd_create_type = old->def_fd_create_type;
++ new->def_user_create_type = old->def_user_create_type;
++ new->def_process_create_type = old->def_process_create_type;
++ new->def_process_chown_type = old->def_process_chown_type;
++ new->def_process_execute_type = old->def_process_execute_type;
++ new->def_ipc_create_type = old->def_ipc_create_type;
++ new->def_group_create_type = RSBAC_RC_GENERAL_TYPE;
++ new->def_unixsock_create_type = RC_type_use_fd;
++ new->boot_role = old->boot_role;
++ new->req_reauth = FALSE;
++ return 0;
++}
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int old_old_old_role_conv(
++#else
++static int __init old_old_old_role_conv(
++#endif
++ void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ struct rsbac_rc_role_entry_t *new = new_data;
++ struct rsbac_rc_old_role_entry_t *old = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_rc_role_id_t));
++ new->admin_type = old->admin_type;
++ memcpy(new->name, old->name, RSBAC_RC_NAME_LEN);
++ new->def_fd_create_type = old->def_fd_create_type;
++ new->def_user_create_type = RSBAC_RC_GENERAL_TYPE;
++ new->def_process_create_type = old->def_process_create_type;
++ new->def_process_chown_type = old->def_process_chown_type;
++ new->def_process_execute_type = old->def_process_execute_type;
++ new->def_ipc_create_type = old->def_ipc_create_type;
++ new->def_group_create_type = RSBAC_RC_GENERAL_TYPE;
++ new->def_unixsock_create_type = RC_type_use_fd;
++ new->boot_role = FALSE;
++ new->req_reauth = FALSE;
++ return 0;
++}
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static rsbac_list_conv_function_t *role_get_conv(rsbac_version_t
++ old_version)
++#else
++static rsbac_list_conv_function_t *__init role_get_conv(rsbac_version_t
++ old_version)
++#endif
++{
++ switch (old_version) {
++ case RSBAC_RC_ROLE_OLD_LIST_VERSION:
++ return role_conv;
++ case RSBAC_RC_ROLE_OLD_OLD_LIST_VERSION:
++ return old_role_conv;
++ case RSBAC_RC_ROLE_OLD_OLD_OLD_LIST_VERSION:
++ return old_old_role_conv;
++ case RSBAC_RC_ROLE_OLD_OLD_OLD_OLD_LIST_VERSION:
++ return old_old_old_role_conv;
++ default:
++ return NULL;
++ }
++}
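++
++/* The generic list code presumably calls role_get_conv() with the version */
++/* found on disk and applies the returned converter to every entry, so each */
++/* of the four known old on-disk layouts is upgraded to the current one. */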
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int tc_subconv(
++#else
++static int __init tc_subconv(
++#endif
++ void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ rsbac_rc_rights_vector_t *new = new_data;
++ rsbac_rc_rights_vector_t *old = old_data;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_rc_type_id_t));
++ *new = (*old & RSBAC_ALL_REQUEST_VECTOR)
++ | ((*old & ~(RSBAC_ALL_REQUEST_VECTOR)) <<
++ (RSBAC_RC_SPECIAL_RIGHT_BASE -
++ RSBAC_RC_OLD_SPECIAL_RIGHT_BASE));
++ return 0;
++}
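++
++/* tc_subconv() keeps the plain request bits unchanged and shifts the RC */
++/* special rights up by the difference between the new and old special */
++/* right base, so old rights vectors keep their meaning after the move. */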
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static rsbac_list_conv_function_t *tcfd_get_subconv(rsbac_version_t
++ old_version)
++#else
++static rsbac_list_conv_function_t *__init tcfd_get_subconv(rsbac_version_t
++ old_version)
++#endif
++{
++ switch (old_version) {
++ case RSBAC_RC_ROLE_TCFD_OLD_LIST_VERSION:
++ return tc_subconv;
++ default:
++ return NULL;
++ }
++}
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int tc_conv(
++#else
++static int __init tc_conv(
++#endif
++ void *old_desc,
++ void *old_data,
++ void *new_desc, void *new_data)
++{
++ memcpy(new_desc, old_desc, sizeof(rsbac_rc_role_id_t));
++ return 0;
++}
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static rsbac_list_conv_function_t *tcfd_get_conv(rsbac_version_t
++ old_version)
++#else
++static rsbac_list_conv_function_t *__init tcfd_get_conv(rsbac_version_t
++ old_version)
++#endif
++{
++ switch (old_version) {
++ case RSBAC_RC_ROLE_TCFD_OLD_LIST_VERSION:
++ return tc_conv;
++ default:
++ return NULL;
++ }
++}
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static int rsbac_rc_role_compare_data(void *data1, void *data2)
++#else
++static int __init rsbac_rc_role_compare_data(void *data1, void *data2)
++#endif
++{
++ struct rsbac_rc_role_entry_t *role = data1;
++
++ if (!data1)
++ return 1;
++ if (role->boot_role)
++ return 0;
++ else
++ return 1;
++}
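++
++/* Compare callback that reports a match (0) for entries with boot_role */
++/* set; presumably used by a list lookup to locate the configured boot */
++/* role (see "Find the boot role" further below). */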
++
++/************************************************* */
++/* proc functions */
++/************************************************* */
++
++#if defined(CONFIG_RSBAC_PROC)
++static int
++stats_rc_proc_show(struct seq_file *m, void *v)
++{
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_is_initialized()) {
++		rsbac_printk(KERN_WARNING "stats_rc_proc_show(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++ rsbac_pr_debug(aef_rc, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ seq_printf(m, "RC Status\n---------\n");
++ seq_printf(m,
++ "Role entry size is %Zd, %lu entries used\n",
++ sizeof(struct rsbac_rc_role_entry_t),
++ rsbac_list_count(role_handle));
++ seq_printf(m,
++ "Used type entries: fd: %lu, dev: %lu, ipc: %lu, user: %lu, process: %lu, group: %lu, netdev: %lu, nettemp: %lu, netobj: %lu\n",
++ rsbac_list_count(type_fd_handle),
++ rsbac_list_count(type_dev_handle),
++ rsbac_list_count(type_ipc_handle),
++ rsbac_list_count(type_user_handle),
++ rsbac_list_count(type_process_handle),
++ rsbac_list_count(type_group_handle),
++ rsbac_list_count(type_netdev_handle),
++ rsbac_list_count(type_nettemp_handle),
++ rsbac_list_count(type_netobj_handle));
++ return 0;
++}
++
++static int stats_rc_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, stats_rc_proc_show, NULL);
++}
++
++static const struct file_operations stats_rc_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = stats_rc_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
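++
++/* Standard seq_file single_open() pattern: proc_create() in rsbac_init_rc() */
++/* below registers the stats_rc entry under the RSBAC proc root with these */
++/* file operations. */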
++
++static struct proc_dir_entry *stats_rc;
++
++#endif /* CONFIG_RSBAC_PROC */
++
++/************************************************* */
++/* Init functions */
++/************************************************* */
++
++/* All functions return 0 if no error occurred, and a negative error code */
++/* otherwise. The error codes are defined in rsbac/error.h. */
++
++/************************************************************************** */
++/* Initialization of all RC data structures. After this call, all RC data */
++/* is kept in memory for performance reasons, but is written to disk on */
++/* every change. */
++
++/* There can be no access to aci data structures before init. */
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static void registration_error(int err, char *listname)
++#else
++static void __init registration_error(int err, char *listname)
++#endif
++{
++ if (err) {
++ char *tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_init_rc(): Registering RC %s list failed with error %s\n",
++ listname, get_error_name(tmp, err));
++ rsbac_kfree(tmp);
++ }
++ }
++}
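++
++/* Note that registration failures are only logged here; rsbac_init_rc() */
++/* continues with the remaining lists instead of aborting. */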
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static void create_def_roles(void)
++#else
++static void __init create_def_roles(void)
++#endif
++{
++ rsbac_rc_role_id_t role;
++ rsbac_rc_type_id_t type;
++ rsbac_rc_rights_vector_t rights;
++ struct rsbac_rc_role_entry_t gen_entry =
++ RSBAC_RC_GENERAL_ROLE_ENTRY;
++ struct rsbac_rc_role_entry_t ra_entry =
++ RSBAC_RC_ROLE_ADMIN_ROLE_ENTRY;
++ struct rsbac_rc_role_entry_t sa_entry =
++ RSBAC_RC_SYSTEM_ADMIN_ROLE_ENTRY;
++
++ rsbac_printk(KERN_WARNING "rsbac_init_rc(): no RC roles read, generating default role entries!\n");
++
++ role = RSBAC_RC_GENERAL_ROLE;
++ if (!rsbac_list_add(role_handle, &role, &gen_entry)) {
++ if (!rsbac_list_lol_add
++ (role_tcfd_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_EXECUTE_REQUEST_VECTOR)
++ & RSBAC_FD_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcfd_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcdv_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ RSBAC_READ_WRITE_REQUEST_VECTOR &
++ RSBAC_DEV_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcdv_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcus_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ RSBAC_REQUEST_VECTOR(R_SEARCH) |
++ RSBAC_REQUEST_VECTOR(R_CHANGE_OWNER) |
++ RSBAC_REQUEST_VECTOR
++ (R_GET_STATUS_DATA);
++ rsbac_list_lol_subadd(role_tcus_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcpr_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ RSBAC_READ_WRITE_REQUEST_VECTOR &
++ RSBAC_PROCESS_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcpr_handle,
++ &role, &type,
++ &rights);
++ type = CONFIG_RSBAC_RC_KERNEL_PROCESS_TYPE;
++ rights =
++ RSBAC_READ_REQUEST_VECTOR &
++ RSBAC_PROCESS_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcpr_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcip_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ RSBAC_READ_WRITE_REQUEST_VECTOR &
++ RSBAC_IPC_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcip_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcgr_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ RSBAC_REQUEST_VECTOR(R_SEARCH) |
++ RSBAC_REQUEST_VECTOR
++ (R_GET_STATUS_DATA);
++ rsbac_list_lol_subadd(role_tcgr_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcnd_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ RSBAC_READ_WRITE_REQUEST_VECTOR &
++ RSBAC_NETDEV_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcnd_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcno_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ RSBAC_READ_WRITE_REQUEST_VECTOR &
++ RSBAC_NETOBJ_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcno_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcsc_handle, &role, NULL)) {
++#ifdef CONFIG_RSBAC_USER_MOD_IOPERM
++ type = ST_ioports;
++ rights =
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_MODIFY_PERMISSIONS_DATA);
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++#endif
++ type = ST_rlimit;
++ rights =
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_GET_STATUS_DATA)
++ |
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_MODIFY_SYSTEM_DATA);
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++ type = ST_other;
++ rights =
++ RSBAC_RC_RIGHTS_VECTOR(R_MAP_EXEC);
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++ type = ST_network;
++ rights =
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_GET_STATUS_DATA);
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++ }
++ }
++ role = RSBAC_RC_ROLE_ADMIN_ROLE;
++ if (!rsbac_list_add(role_handle, &role, &ra_entry)) {
++ if (!rsbac_list_lol_add
++ (role_tcfd_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights = ((RSBAC_READ_WRITE_REQUEST_VECTOR
++ | RSBAC_EXECUTE_REQUEST_VECTOR
++ | RSBAC_SECURITY_REQUEST_VECTOR)
++ & RSBAC_FD_REQUEST_VECTOR) |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcfd_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SEC_TYPE;
++ rsbac_list_lol_subadd(role_tcfd_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SYS_TYPE;
++ rights =
++ (RSBAC_READ_REQUEST_VECTOR &
++ RSBAC_FD_REQUEST_VECTOR)
++ | RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcfd_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcdv_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ ((RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SECURITY_REQUEST_VECTOR)
++ & RSBAC_DEV_REQUEST_VECTOR) |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcdv_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SEC_TYPE;
++ rights =
++ ((RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SECURITY_REQUEST_VECTOR)
++ & RSBAC_DEV_REQUEST_VECTOR) |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcdv_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcus_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ ((RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SECURITY_REQUEST_VECTOR |
++ RSBAC_REQUEST_VECTOR(R_AUTHENTICATE))
++ & RSBAC_USER_REQUEST_VECTOR) |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcus_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SYS_TYPE;
++ rights =
++ ((RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SECURITY_REQUEST_VECTOR |
++ RSBAC_REQUEST_VECTOR(R_AUTHENTICATE))
++ & RSBAC_USER_REQUEST_VECTOR) |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcus_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SEC_TYPE;
++ rights =
++ ((RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SECURITY_REQUEST_VECTOR |
++ RSBAC_REQUEST_VECTOR(R_AUTHENTICATE))
++ & RSBAC_USER_REQUEST_VECTOR) |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcus_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcpr_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ ((RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SECURITY_REQUEST_VECTOR)
++ & RSBAC_PROCESS_REQUEST_VECTOR) |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcpr_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SEC_TYPE;
++ rights =
++ ((RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SECURITY_REQUEST_VECTOR)
++ & RSBAC_PROCESS_REQUEST_VECTOR) |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcpr_handle,
++ &role, &type,
++ &rights);
++ type = CONFIG_RSBAC_RC_KERNEL_PROCESS_TYPE;
++ rights =
++ ((RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SECURITY_REQUEST_VECTOR)
++ & RSBAC_PROCESS_REQUEST_VECTOR) |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcpr_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcip_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ ((RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SECURITY_REQUEST_VECTOR)
++ & RSBAC_IPC_REQUEST_VECTOR) |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcip_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SEC_TYPE;
++ rights =
++ ((RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SECURITY_REQUEST_VECTOR)
++ & RSBAC_IPC_REQUEST_VECTOR) |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcip_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcgr_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ ((RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SECURITY_REQUEST_VECTOR)
++ & RSBAC_GROUP_REQUEST_VECTOR) |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcgr_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcnd_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ ((RSBAC_REQUEST_VECTOR
++ (R_GET_STATUS_DATA) |
++ RSBAC_SECURITY_REQUEST_VECTOR)
++ & RSBAC_NETDEV_REQUEST_VECTOR) |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcnd_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SEC_TYPE;
++ rsbac_list_lol_subadd(role_tcnd_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SYS_TYPE;
++ rsbac_list_lol_subadd(role_tcnd_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcnt_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ ((RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SECURITY_REQUEST_VECTOR)
++ & RSBAC_NETTEMP_REQUEST_VECTOR) |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcnt_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SEC_TYPE;
++ rights =
++ ((RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SECURITY_REQUEST_VECTOR)
++ & RSBAC_NETTEMP_REQUEST_VECTOR) |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcnt_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcno_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ ((RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SECURITY_REQUEST_VECTOR)
++ & RSBAC_NETOBJ_REQUEST_VECTOR) |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcno_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcsc_handle, &role, NULL)) {
++#ifdef CONFIG_RSBAC_USER_MOD_IOPERM
++ type = ST_ioports;
++ rights =
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_MODIFY_PERMISSIONS_DATA)
++ | RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++#endif
++ type = ST_rlimit;
++ rights =
++ RSBAC_SCD_REQUEST_VECTOR |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++ type = ST_rsbac;
++ rights =
++ RSBAC_SCD_REQUEST_VECTOR |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++ type = ST_rsbac_log;
++ rights =
++ RSBAC_SCD_REQUEST_VECTOR |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++ type = ST_other;
++ rights = RSBAC_RC_RIGHTS_VECTOR(R_MAP_EXEC)
++ |
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_MODIFY_PERMISSIONS_DATA)
++ | RSBAC_RC_RIGHTS_VECTOR(R_SWITCH_LOG)
++ |
++ RSBAC_RC_RIGHTS_VECTOR(R_SWITCH_MODULE)
++ | RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++ type = ST_network;
++ rights =
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_GET_STATUS_DATA)
++ | RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++ type = ST_firewall;
++ rights =
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_GET_STATUS_DATA)
++ | RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++ type = RST_auth_administration;
++ rights =
++ RSBAC_SCD_REQUEST_VECTOR |
++ RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++ type = ST_sysfs;
++ rights =
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_GET_STATUS_DATA)
++ | RSBAC_RC_SPECIAL_RIGHTS_VECTOR;
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++ }
++ }
++ role = RSBAC_RC_SYSTEM_ADMIN_ROLE;
++ if (!rsbac_list_add(role_handle, &role, &sa_entry)) {
++ if (!rsbac_list_lol_add
++ (role_tcfd_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights = (RSBAC_READ_WRITE_REQUEST_VECTOR
++ | RSBAC_EXECUTE_REQUEST_VECTOR
++ | RSBAC_SYSTEM_REQUEST_VECTOR)
++ & RSBAC_FD_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcfd_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SYS_TYPE;
++ rsbac_list_lol_subadd(role_tcfd_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcdv_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_DEV_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcdv_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SYS_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_DEV_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcdv_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcus_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ RSBAC_REQUEST_VECTOR(R_CHANGE_OWNER) |
++ RSBAC_REQUEST_VECTOR(R_SEARCH) |
++ RSBAC_REQUEST_VECTOR(R_GET_STATUS_DATA) |
++ RSBAC_REQUEST_VECTOR(R_AUTHENTICATE);
++ rsbac_list_lol_subadd(role_tcus_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SYS_TYPE;
++ rights =
++ RSBAC_REQUEST_VECTOR(R_SEARCH) |
++ RSBAC_REQUEST_VECTOR(R_GET_STATUS_DATA) |
++ RSBAC_REQUEST_VECTOR(R_AUTHENTICATE);
++ rsbac_list_lol_subadd(role_tcus_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SEC_TYPE;
++ rights =
++ RSBAC_REQUEST_VECTOR(R_SEARCH) |
++ RSBAC_REQUEST_VECTOR(R_AUTHENTICATE);
++ rsbac_list_lol_subadd(role_tcus_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcpr_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_REQUEST_VECTOR(R_CONNECT) |
++ RSBAC_REQUEST_VECTOR(R_SEND) |
++ RSBAC_REQUEST_VECTOR(R_RECEIVE) |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_PROCESS_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcpr_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SYS_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_PROCESS_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcpr_handle,
++ &role, &type,
++ &rights);
++ type = CONFIG_RSBAC_RC_KERNEL_PROCESS_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_PROCESS_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcpr_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcip_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_IPC_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcip_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SYS_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_IPC_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcip_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcgr_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ RSBAC_REQUEST_VECTOR(R_SEARCH) |
++ RSBAC_REQUEST_VECTOR(R_GET_STATUS_DATA) |
++ RSBAC_REQUEST_VECTOR(R_READ);
++ rsbac_list_lol_subadd(role_tcgr_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcnd_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_NETDEV_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcnd_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcnt_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ (RSBAC_READ_REQUEST_VECTOR) &
++ RSBAC_NETTEMP_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcnt_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcno_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_NETOBJ_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcno_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcsc_handle, &role, NULL)) {
++ rights =
++ RSBAC_SCD_REQUEST_VECTOR &
++ (RSBAC_SYSTEM_REQUEST_VECTOR |
++ RSBAC_READ_WRITE_REQUEST_VECTOR);
++ for (type = ST_time_strucs;
++ type <= ST_rsbac; type++) {
++ rsbac_list_lol_subadd
++ (role_tcsc_handle, &role,
++ &type, &rights);
++ }
++ for (type = ST_network; type < ST_none;
++ type++) {
++ rsbac_list_lol_subadd
++ (role_tcsc_handle, &role,
++ &type, &rights);
++ }
++ type = ST_other;
++ rights =
++ RSBAC_RC_RIGHTS_VECTOR(R_ADD_TO_KERNEL)
++ | RSBAC_RC_RIGHTS_VECTOR(R_MAP_EXEC)
++ |
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_MODIFY_SYSTEM_DATA)
++ | RSBAC_RC_RIGHTS_VECTOR(R_MOUNT)
++ |
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_REMOVE_FROM_KERNEL)
++ | RSBAC_RC_RIGHTS_VECTOR(R_UMOUNT)
++ | RSBAC_RC_RIGHTS_VECTOR(R_SHUTDOWN);
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++ }
++ }
++}
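++
++/* Default role creation continues in create_def_roles2() below, which adds */
++/* the Auditor and Boot roles; the split into two functions appears to be */
++/* purely to keep each function to a manageable size. */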
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++static void create_def_roles2(void)
++#else
++static void __init create_def_roles2(void)
++#endif
++{
++ rsbac_rc_role_id_t role;
++ rsbac_rc_type_id_t type;
++ rsbac_rc_rights_vector_t rights;
++ struct rsbac_rc_role_entry_t au_entry =
++ RSBAC_RC_AUDITOR_ROLE_ENTRY;
++ struct rsbac_rc_role_entry_t bo_entry =
++ RSBAC_RC_BOOT_ROLE_ENTRY;
++
++ rsbac_printk(KERN_WARNING "rsbac_init_rc(): no RC roles read, generating default role entries!\n");
++
++ role = RSBAC_RC_AUDITOR_ROLE;
++ if (!rsbac_list_add(role_handle, &role, &au_entry)) {
++ if (!rsbac_list_lol_add
++ (role_tcfd_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_EXECUTE_REQUEST_VECTOR)
++ & RSBAC_FD_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcfd_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcdv_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ RSBAC_READ_WRITE_REQUEST_VECTOR &
++ RSBAC_DEV_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcdv_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcus_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ RSBAC_REQUEST_VECTOR(R_CHANGE_OWNER) |
++ RSBAC_REQUEST_VECTOR(R_SEARCH) |
++ RSBAC_REQUEST_VECTOR
++ (R_GET_STATUS_DATA);
++ rsbac_list_lol_subadd(role_tcus_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcgr_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ RSBAC_REQUEST_VECTOR(R_SEARCH) |
++ RSBAC_REQUEST_VECTOR
++ (R_GET_STATUS_DATA);
++ rsbac_list_lol_subadd(role_tcgr_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcpr_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ RSBAC_READ_WRITE_REQUEST_VECTOR &
++ RSBAC_PROCESS_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcpr_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcip_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ RSBAC_READ_WRITE_REQUEST_VECTOR &
++ RSBAC_IPC_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcip_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcnd_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ RSBAC_READ_WRITE_REQUEST_VECTOR &
++ RSBAC_NETDEV_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcnd_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcno_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ RSBAC_READ_WRITE_REQUEST_VECTOR &
++ RSBAC_NETOBJ_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcno_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcsc_handle, &role, NULL)) {
++#ifdef CONFIG_RSBAC_USER_MOD_IOPERM
++ type = ST_ioports;
++ rights =
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_MODIFY_PERMISSIONS_DATA);
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++#endif
++ type = ST_rlimit;
++ rights =
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_GET_STATUS_DATA)
++ |
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_MODIFY_SYSTEM_DATA);
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++ type = ST_rsbac_log;
++ rights =
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_GET_STATUS_DATA)
++ |
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_MODIFY_SYSTEM_DATA);
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++ type = ST_other;
++ rights =
++ RSBAC_RC_RIGHTS_VECTOR(R_MAP_EXEC);
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++ type = ST_network;
++ rights =
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_GET_STATUS_DATA);
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++ }
++ }
++ role = RSBAC_RC_BOOT_ROLE;
++ if (!rsbac_list_add(role_handle, &role, &bo_entry)) {
++ if (!rsbac_list_lol_add
++ (role_tcfd_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights = (RSBAC_READ_WRITE_REQUEST_VECTOR
++ | RSBAC_EXECUTE_REQUEST_VECTOR
++ | RSBAC_SYSTEM_REQUEST_VECTOR)
++ & RSBAC_FD_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcfd_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SYS_TYPE;
++ rsbac_list_lol_subadd(role_tcfd_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcdv_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_DEV_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcdv_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SYS_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_DEV_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcdv_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcus_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ (RSBAC_READ_REQUEST_VECTOR |
++ RSBAC_REQUEST_VECTOR(R_CHANGE_OWNER) |
++ RSBAC_SYSTEM_REQUEST_VECTOR |
++ RSBAC_REQUEST_VECTOR(R_AUTHENTICATE)) &
++ RSBAC_USER_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcus_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SEC_TYPE;
++ rights =
++ RSBAC_REQUEST_VECTOR(R_SEARCH) |
++ RSBAC_REQUEST_VECTOR(R_CHANGE_OWNER) |
++ RSBAC_REQUEST_VECTOR(R_AUTHENTICATE);
++ rsbac_list_lol_subadd(role_tcus_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SYS_TYPE;
++ rights =
++ (RSBAC_READ_REQUEST_VECTOR |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_USER_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcus_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcpr_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_PROCESS_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcpr_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SYS_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_PROCESS_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcpr_handle,
++ &role, &type,
++ &rights);
++ type = CONFIG_RSBAC_RC_KERNEL_PROCESS_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_PROCESS_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcpr_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcip_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_IPC_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcip_handle,
++ &role, &type,
++ &rights);
++ type = RSBAC_RC_SYS_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_IPC_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcip_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcnd_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_NETDEV_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcnd_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcnt_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ (RSBAC_READ_REQUEST_VECTOR) &
++ RSBAC_NETTEMP_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcnt_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcno_handle, &role, NULL)) {
++ type = RSBAC_RC_GENERAL_TYPE;
++ rights =
++ (RSBAC_READ_WRITE_REQUEST_VECTOR |
++ RSBAC_SYSTEM_REQUEST_VECTOR) &
++ RSBAC_NETOBJ_REQUEST_VECTOR;
++ rsbac_list_lol_subadd(role_tcno_handle,
++ &role, &type,
++ &rights);
++ }
++ if (!rsbac_list_lol_add
++ (role_tcsc_handle, &role, NULL)) {
++ rights =
++ RSBAC_SCD_REQUEST_VECTOR &
++ (RSBAC_SYSTEM_REQUEST_VECTOR |
++ RSBAC_READ_WRITE_REQUEST_VECTOR);
++ for (type = ST_time_strucs;
++ type <= ST_rsbac; type++) {
++ rsbac_list_lol_subadd
++ (role_tcsc_handle, &role,
++ &type, &rights);
++ }
++ for (type = ST_network; type < ST_none;
++ type++) {
++ rsbac_list_lol_subadd
++ (role_tcsc_handle, &role,
++ &type, &rights);
++ }
++ type = ST_other;
++ rights =
++ RSBAC_RC_RIGHTS_VECTOR(R_ADD_TO_KERNEL)
++ | RSBAC_RC_RIGHTS_VECTOR(R_MAP_EXEC)
++ |
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_MODIFY_SYSTEM_DATA)
++ | RSBAC_RC_RIGHTS_VECTOR(R_MOUNT)
++ |
++ RSBAC_RC_RIGHTS_VECTOR
++ (R_REMOVE_FROM_KERNEL)
++ | RSBAC_RC_RIGHTS_VECTOR(R_UMOUNT)
++ | RSBAC_RC_RIGHTS_VECTOR(R_SHUTDOWN);
++ rsbac_list_lol_subadd(role_tcsc_handle,
++ &role, &type,
++ &rights);
++ }
++ }
++}
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++int rsbac_init_rc(void)
++#else
++int __init rsbac_init_rc(void)
++#endif
++{
++ int err = 0;
++ struct rsbac_list_lol_info_t lol_info;
++ struct rsbac_list_info_t list_info;
++ rsbac_rc_rights_vector_t def_tc = RSBAC_RC_DEFAULT_RIGHTS_VECTOR;
++
++ if (rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_init_rc(): RSBAC already initialized\n");
++ return -RSBAC_EREINIT;
++ }
++
++ /* init data structures */
++ rsbac_printk(KERN_INFO "rsbac_init_rc(): Initializing RSBAC: RC subsystem\n");
++ rsbac_pr_debug(stack, "free stack: %lu\n", rsbac_stack_free_space());
++
++ list_info.version = RSBAC_RC_ROLE_LIST_VERSION;
++ list_info.key = RSBAC_RC_LIST_KEY;
++ list_info.desc_size = sizeof(rsbac_rc_role_id_t);
++ list_info.data_size = sizeof(struct rsbac_rc_role_entry_t);
++ list_info.max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &role_handle, &list_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE | RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL, role_get_conv,
++ NULL, RSBAC_RC_ROLE_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_role_hashes,
++ (nr_role_hashes > 1) ? role_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "role");
++ }
++
++ lol_info.version = RSBAC_RC_ROLE_RC_LIST_VERSION;
++ lol_info.key = RSBAC_RC_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_rc_role_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_rc_role_id_t);
++ lol_info.subdata_size = 0;
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &role_rc_handle, &lol_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_DEF_DATA |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL, NULL, NULL,
++ NULL, NULL,
++ RSBAC_RC_ROLE_RC_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_role_hashes,
++ (nr_role_hashes > 1) ? role_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "role compatibilities");
++ }
++ lol_info.version = RSBAC_RC_ROLE_ADR_LIST_VERSION;
++ lol_info.key = RSBAC_RC_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_rc_role_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_rc_role_id_t);
++ lol_info.subdata_size = 0;
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &role_adr_handle, &lol_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_DEF_DATA |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL, NULL, NULL,
++ NULL, NULL,
++ RSBAC_RC_ROLE_ADR_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_role_hashes,
++ (nr_role_hashes > 1) ? role_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "admin roles");
++ }
++ lol_info.version = RSBAC_RC_ROLE_ASR_LIST_VERSION;
++ lol_info.key = RSBAC_RC_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_rc_role_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_rc_role_id_t);
++ lol_info.subdata_size = 0;
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &role_asr_handle, &lol_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_DEF_DATA |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL, NULL, NULL,
++ NULL, NULL,
++ RSBAC_RC_ROLE_ASR_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_role_hashes,
++ (nr_role_hashes > 1) ? role_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "assign roles");
++ }
++ lol_info.version = RSBAC_RC_ROLE_DFDC_LIST_VERSION;
++ lol_info.key = RSBAC_RC_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_rc_role_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_rc_type_id_t);
++ lol_info.subdata_size = sizeof(rsbac_rc_type_id_t);
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &role_dfdc_handle, &lol_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_DEF_DATA |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL, NULL, NULL,
++ NULL, NULL,
++ RSBAC_RC_ROLE_DFDC_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_role_hashes,
++ (nr_role_hashes > 1) ? role_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "Role default FD create types");
++ }
++ lol_info.version = RSBAC_RC_ROLE_TCFD_LIST_VERSION;
++ lol_info.key = RSBAC_RC_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_rc_role_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_rc_type_id_t);
++ lol_info.subdata_size = sizeof(rsbac_rc_rights_vector_t);
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &role_tcfd_handle, &lol_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_DEF_DATA |
++ RSBAC_LIST_DEF_SUBDATA |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL,
++ tcfd_get_conv, tcfd_get_subconv,
++ NULL, &def_tc,
++ RSBAC_RC_ROLE_TCFD_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_role_hashes,
++ (nr_role_hashes > 1) ? role_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "Role FD type compatibilities");
++ }
++ lol_info.version = RSBAC_RC_ROLE_TCDV_LIST_VERSION;
++ lol_info.key = RSBAC_RC_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_rc_role_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_rc_type_id_t);
++ lol_info.subdata_size = sizeof(rsbac_rc_rights_vector_t);
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &role_tcdv_handle, &lol_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_DEF_DATA |
++ RSBAC_LIST_DEF_SUBDATA |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL,
++ tcfd_get_conv, tcfd_get_subconv,
++ NULL, &def_tc,
++ RSBAC_RC_ROLE_TCDV_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_role_hashes,
++ (nr_role_hashes > 1) ? role_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "Role DEV type compatibilities");
++ }
++ lol_info.version = RSBAC_RC_ROLE_TCUS_LIST_VERSION;
++ lol_info.key = RSBAC_RC_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_rc_role_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_rc_type_id_t);
++ lol_info.subdata_size = sizeof(rsbac_rc_rights_vector_t);
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &role_tcus_handle, &lol_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_DEF_DATA |
++ RSBAC_LIST_DEF_SUBDATA |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL,
++ tcfd_get_conv, tcfd_get_subconv,
++ NULL, &def_tc,
++ RSBAC_RC_ROLE_TCUS_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_role_hashes,
++ (nr_role_hashes > 1) ? role_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "Role User type compatibilities");
++ }
++ lol_info.version = RSBAC_RC_ROLE_TCPR_LIST_VERSION;
++ lol_info.key = RSBAC_RC_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_rc_role_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_rc_type_id_t);
++ lol_info.subdata_size = sizeof(rsbac_rc_rights_vector_t);
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &role_tcpr_handle, &lol_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_DEF_DATA |
++ RSBAC_LIST_DEF_SUBDATA |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL,
++ tcfd_get_conv, tcfd_get_subconv,
++ NULL, &def_tc,
++ RSBAC_RC_ROLE_TCPR_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_role_hashes,
++ (nr_role_hashes > 1) ? role_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err,
++ "Role Process type compatibilities");
++ }
++ lol_info.version = RSBAC_RC_ROLE_TCIP_LIST_VERSION;
++ lol_info.key = RSBAC_RC_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_rc_role_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_rc_type_id_t);
++ lol_info.subdata_size = sizeof(rsbac_rc_rights_vector_t);
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &role_tcip_handle, &lol_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_DEF_DATA |
++ RSBAC_LIST_DEF_SUBDATA |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL,
++ tcfd_get_conv, tcfd_get_subconv,
++ NULL, &def_tc,
++ RSBAC_RC_ROLE_TCIP_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_role_hashes,
++ (nr_role_hashes > 1) ? role_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "Role IPC type compatibilities");
++ }
++ lol_info.version = RSBAC_RC_ROLE_TCSC_LIST_VERSION;
++ lol_info.key = RSBAC_RC_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_rc_role_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_rc_type_id_t);
++ lol_info.subdata_size = sizeof(rsbac_rc_rights_vector_t);
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &role_tcsc_handle, &lol_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_DEF_DATA |
++ RSBAC_LIST_DEF_SUBDATA |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL,
++ tcfd_get_conv, tcfd_get_subconv,
++ NULL, &def_tc,
++ RSBAC_RC_ROLE_TCSC_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_role_hashes,
++ (nr_role_hashes > 1) ? role_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "Role SCD type compatibilities");
++ }
++ lol_info.version = RSBAC_RC_ROLE_TCGR_LIST_VERSION;
++ lol_info.key = RSBAC_RC_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_rc_role_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_rc_type_id_t);
++ lol_info.subdata_size = sizeof(rsbac_rc_rights_vector_t);
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &role_tcgr_handle, &lol_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_DEF_DATA |
++ RSBAC_LIST_DEF_SUBDATA |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL,
++ tcfd_get_conv, tcfd_get_subconv,
++ NULL, &def_tc,
++ RSBAC_RC_ROLE_TCGR_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_role_hashes,
++ (nr_role_hashes > 1) ? role_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "Role Group type compatibilities");
++ }
++ lol_info.version = RSBAC_RC_ROLE_TCND_LIST_VERSION;
++ lol_info.key = RSBAC_RC_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_rc_role_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_rc_type_id_t);
++ lol_info.subdata_size = sizeof(rsbac_rc_rights_vector_t);
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &role_tcnd_handle, &lol_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_DEF_DATA |
++ RSBAC_LIST_DEF_SUBDATA |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL,
++ tcfd_get_conv, tcfd_get_subconv,
++ NULL, &def_tc,
++ RSBAC_RC_ROLE_TCND_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_role_hashes,
++ (nr_role_hashes > 1) ? role_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err,
++ "Role NETDEV type compatibilities");
++ }
++ lol_info.version = RSBAC_RC_ROLE_TCNT_LIST_VERSION;
++ lol_info.key = RSBAC_RC_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_rc_role_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_rc_type_id_t);
++ lol_info.subdata_size = sizeof(rsbac_rc_rights_vector_t);
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &role_tcnt_handle, &lol_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_DEF_DATA |
++ RSBAC_LIST_DEF_SUBDATA |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL,
++ tcfd_get_conv, tcfd_get_subconv,
++ NULL, &def_tc,
++ RSBAC_RC_ROLE_TCNT_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_role_hashes,
++ (nr_role_hashes > 1) ? role_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err,
++ "Role NETTEMP type compatibilities");
++ }
++ lol_info.version = RSBAC_RC_ROLE_TCNO_LIST_VERSION;
++ lol_info.key = RSBAC_RC_LIST_KEY;
++ lol_info.desc_size = sizeof(rsbac_rc_role_id_t);
++ lol_info.data_size = 0;
++ lol_info.subdesc_size = sizeof(rsbac_rc_type_id_t);
++ lol_info.subdata_size = sizeof(rsbac_rc_rights_vector_t);
++ lol_info.max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &role_tcno_handle, &lol_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_DEF_DATA |
++ RSBAC_LIST_DEF_SUBDATA |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL,
++ NULL,
++ tcfd_get_conv, tcfd_get_subconv,
++ NULL, &def_tc,
++ RSBAC_RC_ROLE_TCNO_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_role_hashes,
++ (nr_role_hashes > 1) ? role_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err,
++ "Role NETOBJ type compatibilities");
++ }
++
++ /* Create default role settings, if none there */
++ if (!rsbac_no_defaults && !rsbac_list_count(role_handle)) {
++ create_def_roles();
++ create_def_roles2();
++ }
++
++ list_info.version = RSBAC_RC_TYPE_FD_LIST_VERSION;
++ list_info.key = RSBAC_RC_LIST_KEY;
++ list_info.desc_size = sizeof(rsbac_rc_type_id_t);
++ list_info.data_size = sizeof(struct rsbac_rc_type_fd_entry_t);
++ list_info.max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &type_fd_handle, &list_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL, NULL, NULL,
++ RSBAC_RC_TYPE_FD_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_type_hashes,
++ (nr_type_hashes > 1) ? type_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "type FD");
++ }
++ if (!rsbac_no_defaults && !rsbac_list_count(type_fd_handle)) {
++ rsbac_rc_type_id_t type;
++ struct rsbac_rc_type_fd_entry_t entry;
++
++ type = RSBAC_RC_GENERAL_TYPE;
++ strcpy(entry.name, "General FD");
++ entry.need_secdel = 0;
++ rsbac_list_add(type_fd_handle, &type, &entry);
++ type = RSBAC_RC_SEC_TYPE;
++ strcpy(entry.name, "Security FD");
++ entry.need_secdel = 0;
++ rsbac_list_add(type_fd_handle, &type, &entry);
++ type = RSBAC_RC_SYS_TYPE;
++ strcpy(entry.name, "System FD");
++ entry.need_secdel = 0;
++ rsbac_list_add(type_fd_handle, &type, &entry);
++ }
++ list_info.version = RSBAC_RC_TYPE_DEV_LIST_VERSION;
++ list_info.key = RSBAC_RC_LIST_KEY;
++ list_info.desc_size = sizeof(rsbac_rc_type_id_t);
++ list_info.data_size = RSBAC_RC_NAME_LEN;
++ list_info.max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &type_dev_handle, &list_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL, NULL, NULL,
++ RSBAC_RC_TYPE_DEV_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_type_hashes,
++ (nr_type_hashes > 1) ? type_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "type DEV");
++ }
++ if (!rsbac_no_defaults && !rsbac_list_count(type_dev_handle)) {
++ rsbac_rc_type_id_t type;
++ char name[RSBAC_RC_NAME_LEN];
++
++ type = RSBAC_RC_GENERAL_TYPE;
++ strcpy(name, "General Device");
++ rsbac_list_add(type_dev_handle, &type, name);
++ type = RSBAC_RC_SEC_TYPE;
++ strcpy(name, "Security Device");
++ rsbac_list_add(type_dev_handle, &type, name);
++ type = RSBAC_RC_SYS_TYPE;
++ strcpy(name, "System Device");
++ rsbac_list_add(type_dev_handle, &type, &name);
++ }
++ list_info.version = RSBAC_RC_TYPE_IPC_LIST_VERSION;
++ list_info.key = RSBAC_RC_LIST_KEY;
++ list_info.desc_size = sizeof(rsbac_rc_type_id_t);
++ list_info.data_size = RSBAC_RC_NAME_LEN;
++ list_info.max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &type_ipc_handle, &list_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL, NULL, NULL,
++ RSBAC_RC_TYPE_IPC_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_type_hashes,
++ (nr_type_hashes > 1) ? type_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "type IPC");
++ }
++ if (!rsbac_no_defaults && !rsbac_list_count(type_ipc_handle)) {
++ rsbac_rc_type_id_t type;
++ char name[RSBAC_RC_NAME_LEN];
++
++ type = RSBAC_RC_GENERAL_TYPE;
++ strcpy(name, "General IPC");
++ rsbac_list_add(type_ipc_handle, &type, name);
++ type = RSBAC_RC_SEC_TYPE;
++ strcpy(name, "Security IPC");
++ rsbac_list_add(type_ipc_handle, &type, name);
++ type = RSBAC_RC_SYS_TYPE;
++ strcpy(name, "System IPC");
++ rsbac_list_add(type_ipc_handle, &type, &name);
++ }
++ list_info.version = RSBAC_RC_TYPE_USER_LIST_VERSION;
++ list_info.key = RSBAC_RC_LIST_KEY;
++ list_info.desc_size = sizeof(rsbac_rc_type_id_t);
++ list_info.data_size = RSBAC_RC_NAME_LEN;
++ list_info.max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &type_user_handle, &list_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL, NULL, NULL,
++ RSBAC_RC_TYPE_USER_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_type_hashes,
++ (nr_type_hashes > 1) ? type_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "type USER");
++ }
++ if (!rsbac_no_defaults && !rsbac_list_count(type_user_handle)) {
++ rsbac_rc_type_id_t type;
++ char name[RSBAC_RC_NAME_LEN];
++
++ type = RSBAC_RC_GENERAL_TYPE;
++ strcpy(name, "General User");
++ rsbac_list_add(type_user_handle, &type, name);
++ type = RSBAC_RC_SEC_TYPE;
++ strcpy(name, "Security User");
++ rsbac_list_add(type_user_handle, &type, name);
++ type = RSBAC_RC_SYS_TYPE;
++ strcpy(name, "System User");
++ rsbac_list_add(type_user_handle, &type, &name);
++ }
++ list_info.version = RSBAC_RC_TYPE_PROCESS_LIST_VERSION;
++ list_info.key = RSBAC_RC_LIST_KEY;
++ list_info.desc_size = sizeof(rsbac_rc_type_id_t);
++ list_info.data_size = RSBAC_RC_NAME_LEN;
++ list_info.max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &type_process_handle, &list_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL, NULL, NULL,
++ RSBAC_RC_TYPE_PROCESS_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_type_hashes,
++ (nr_type_hashes > 1) ? type_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "type PROCESS");
++ }
++ if (!rsbac_no_defaults && !rsbac_list_count(type_process_handle)) {
++ rsbac_rc_type_id_t type;
++ char name[RSBAC_RC_NAME_LEN];
++
++ type = RSBAC_RC_GENERAL_TYPE;
++ strcpy(name, "General Process");
++ rsbac_list_add(type_process_handle, &type, name);
++ type = RSBAC_RC_SEC_TYPE;
++ strcpy(name, "Security Proc");
++ rsbac_list_add(type_process_handle, &type, name);
++ type = RSBAC_RC_SYS_TYPE;
++ strcpy(name, "System Process");
++ rsbac_list_add(type_process_handle, &type, &name);
++ }
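++	/* Make sure the configured kernel process type exists; if it had to */
++	/* be created here, give every existing role read/write process */
++	/* rights on it (unless a compatibility entry already exists). */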
++ if (!rsbac_no_defaults) {
++ rsbac_rc_type_id_t type =
++ CONFIG_RSBAC_RC_KERNEL_PROCESS_TYPE;
++
++ if (!rsbac_list_exist(type_process_handle, &type)) {
++ char name[RSBAC_RC_NAME_LEN];
++ rsbac_rc_role_id_t *role_array;
++ u_long count;
++ rsbac_rc_rights_vector_t rights;
++
++ strcpy(name, "Kernel Process");
++ rsbac_list_add(type_process_handle, &type, &name);
++
++ /* Set type compatibilities for the new type for all roles */
++ rights = RSBAC_READ_WRITE_REQUEST_VECTOR
++ & RSBAC_PROCESS_REQUEST_VECTOR;
++
++ count =
++ rsbac_list_lol_get_all_desc(role_tcpr_handle,
++ (void **)
++ &role_array);
++ if (count > 0) {
++ u_int i;
++
++ for (i = 0; i < count; i++) {
++ if (!rsbac_list_lol_subexist
++ (role_tcpr_handle,
++ &role_array[i], &type))
++ rsbac_list_lol_subadd
++ (role_tcpr_handle,
++ &role_array[i], &type,
++ &rights);
++ }
++ rsbac_kfree(role_array);
++ }
++ }
++ }
++ list_info.version = RSBAC_RC_TYPE_GROUP_LIST_VERSION;
++ list_info.key = RSBAC_RC_LIST_KEY;
++ list_info.desc_size = sizeof(rsbac_rc_type_id_t);
++ list_info.data_size = RSBAC_RC_NAME_LEN;
++ list_info.max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &type_group_handle, &list_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL, NULL, NULL,
++ RSBAC_RC_TYPE_GROUP_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_type_hashes,
++ (nr_type_hashes > 1) ? type_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "type GROUP");
++ }
++ if (!rsbac_no_defaults && !rsbac_list_count(type_group_handle)) {
++ rsbac_rc_type_id_t type;
++ char name[RSBAC_RC_NAME_LEN];
++
++ type = RSBAC_RC_GENERAL_TYPE;
++ strcpy(name, "General Group");
++ rsbac_list_add(type_group_handle, &type, name);
++ }
++ list_info.version = RSBAC_RC_TYPE_NETDEV_LIST_VERSION;
++ list_info.key = RSBAC_RC_LIST_KEY;
++ list_info.desc_size = sizeof(rsbac_rc_type_id_t);
++ list_info.data_size = RSBAC_RC_NAME_LEN;
++ list_info.max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &type_netdev_handle, &list_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL, NULL, NULL,
++ RSBAC_RC_TYPE_NETDEV_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_type_hashes,
++ (nr_type_hashes > 1) ? type_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "type NETDEV");
++ }
++ if (!rsbac_no_defaults && !rsbac_list_count(type_netdev_handle)) {
++ rsbac_rc_type_id_t type;
++ char name[RSBAC_RC_NAME_LEN];
++
++ type = RSBAC_RC_GENERAL_TYPE;
++ strcpy(name, "General NETDEV");
++ rsbac_list_add(type_netdev_handle, &type, name);
++ type = RSBAC_RC_SEC_TYPE;
++ strcpy(name, "Security NETDEV");
++ rsbac_list_add(type_netdev_handle, &type, name);
++ type = RSBAC_RC_SYS_TYPE;
++ strcpy(name, "System NETDEV");
++ rsbac_list_add(type_netdev_handle, &type, &name);
++ }
++ list_info.version = RSBAC_RC_TYPE_NETTEMP_LIST_VERSION;
++ list_info.key = RSBAC_RC_LIST_KEY;
++ list_info.desc_size = sizeof(rsbac_rc_type_id_t);
++ list_info.data_size = RSBAC_RC_NAME_LEN;
++ list_info.max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &type_nettemp_handle, &list_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL, NULL, NULL,
++ RSBAC_RC_TYPE_NETTEMP_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_type_hashes,
++ (nr_type_hashes > 1) ? type_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "type NETTEMP");
++ }
++ if (!rsbac_no_defaults && !rsbac_list_count(type_nettemp_handle)) {
++ rsbac_rc_type_id_t type;
++ char name[RSBAC_RC_NAME_LEN];
++
++ type = RSBAC_RC_GENERAL_TYPE;
++ strcpy(name, "General NETTEMP");
++ rsbac_list_add(type_nettemp_handle, &type, name);
++ type = RSBAC_RC_SEC_TYPE;
++		strcpy(name, "Security NETTEMP");
++ rsbac_list_add(type_nettemp_handle, &type, name);
++ type = RSBAC_RC_SYS_TYPE;
++ strcpy(name, "System NETTEMP");
++ rsbac_list_add(type_nettemp_handle, &type, &name);
++ }
++ list_info.version = RSBAC_RC_TYPE_NETOBJ_LIST_VERSION;
++ list_info.key = RSBAC_RC_LIST_KEY;
++ list_info.desc_size = sizeof(rsbac_rc_type_id_t);
++ list_info.data_size = RSBAC_RC_NAME_LEN;
++ list_info.max_age = 0;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &type_netobj_handle, &list_info,
++#if defined(CONFIG_RSBAC_RC_BACKUP)
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST | RSBAC_LIST_OWN_SLAB |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL, NULL, NULL,
++ RSBAC_RC_TYPE_NETOBJ_FILENAME,
++ RSBAC_AUTO_DEV,
++ nr_type_hashes,
++ (nr_type_hashes > 1) ? type_hash : NULL,
++ NULL);
++ if (err) {
++ registration_error(err, "type NETOBJ");
++ }
++ if (!rsbac_no_defaults && !rsbac_list_count(type_netobj_handle)) {
++ rsbac_rc_type_id_t type;
++ char name[RSBAC_RC_NAME_LEN];
++
++ type = RSBAC_RC_GENERAL_TYPE;
++ strcpy(name, "General NETOBJ");
++ rsbac_list_add(type_netobj_handle, &type, name);
++ type = RSBAC_RC_SEC_TYPE;
++ strcpy(name, "Security NETOBJ");
++ rsbac_list_add(type_netobj_handle, &type, name);
++ type = RSBAC_RC_SYS_TYPE;
++ strcpy(name, "System NETOBJ");
++ rsbac_list_add(type_netobj_handle, &type, &name);
++ }
++ rsbac_pr_debug(stack, "free stack before adding proc entry: %lu\n",
++ rsbac_stack_free_space());
++#if defined(CONFIG_RSBAC_PROC)
++ stats_rc = proc_create("stats_rc",
++ S_IFREG | S_IRUGO,
++ proc_rsbac_root_p, &stats_rc_proc_fops);
++#endif
++ rsbac_pr_debug(stack, "final free stack: %lu\n",
++ rsbac_stack_free_space());
++ rsbac_pr_debug(ds_rc, "Ready.\n");
++ return (err);
++}
++
++/***************************************************/
++/* We also need some status information... */
++
++int rsbac_stats_rc(void)
++{
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_stats_rc(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++
++ rsbac_printk(KERN_INFO "Role entry size is %u, %lu entries used\n",
++ sizeof(struct rsbac_rc_role_entry_t),
++ rsbac_list_count(role_handle));
++
++ rsbac_printk(KERN_INFO "Used type entries: fd: %lu, dev: %lu, ipc: %lu, user: %lu, process: %lu, group: %lu, netdev: %lu, nettemp: %lu, netobj: %lu\n",
++ rsbac_list_count(type_fd_handle),
++ rsbac_list_count(type_dev_handle),
++ rsbac_list_count(type_ipc_handle),
++ rsbac_list_count(type_user_handle),
++ rsbac_list_count(type_process_handle),
++ rsbac_list_count(type_group_handle),
++ rsbac_list_count(type_netdev_handle),
++ rsbac_list_count(type_nettemp_handle),
++ rsbac_list_count(type_netobj_handle));
++ return 0;
++}
++
++/************************************************* */
++/* Access functions */
++/************************************************* */
++
++/* Find the boot role */
++#ifdef CONFIG_RSBAC_INIT_DELAY
++int rsbac_rc_get_boot_role(rsbac_rc_role_id_t * role_p)
++#else
++int __init rsbac_rc_get_boot_role(rsbac_rc_role_id_t * role_p)
++#endif
++{
++ /* Try to find role marked as boot role */
++ if (rsbac_list_get_desc(role_handle,
++ role_p, role_p, rsbac_rc_role_compare_data)
++ ) { /* none found */
++ return -RSBAC_ENOTFOUND;
++ }
++ return 0;
++}
++
++/* Checking whether role exists */
++rsbac_boolean_t rsbac_rc_role_exists(rsbac_list_ta_number_t ta_number,
++ rsbac_rc_role_id_t role)
++{
++ return rsbac_ta_list_exist(ta_number, role_handle, &role);
++}
++
++rsbac_boolean_t rsbac_rc_type_exists(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ rsbac_rc_type_id_t type)
++{
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_FD:
++ return rsbac_ta_list_exist(ta_number, type_fd_handle,
++ &type);
++ case T_DEV:
++ return rsbac_ta_list_exist(ta_number, type_dev_handle,
++ &type);
++ case T_IPC:
++ return rsbac_ta_list_exist(ta_number, type_ipc_handle,
++ &type);
++ case T_USER:
++ return rsbac_ta_list_exist(ta_number, type_user_handle,
++ &type);
++ case T_PROCESS:
++ return rsbac_ta_list_exist(ta_number, type_process_handle,
++ &type);
++ case T_NETDEV:
++ return rsbac_ta_list_exist(ta_number, type_netdev_handle,
++ &type);
++ case T_NETTEMP:
++ return rsbac_ta_list_exist(ta_number, type_nettemp_handle,
++ &type);
++ case T_NETOBJ:
++ return rsbac_ta_list_exist(ta_number, type_netobj_handle,
++ &type);
++ case T_SCD:
++ if (type < ST_none)
++ return TRUE;
++ else
++ return FALSE;
++ default:
++ return FALSE;
++ }
++}
++
++/* Invalid parameter combinations return an error. */
++
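++/* rsbac_rc_copy_role(): duplicate the role entry itself, then rebuild all of
++ * its sublists (compatible roles, admin and assign roles, individual default
++ * FD create types and the per-target type compatibility vectors) under the
++ * target role id. Previous settings of the target role are removed first. */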
++int rsbac_rc_copy_role(rsbac_list_ta_number_t ta_number,
++ rsbac_rc_role_id_t from_role,
++ rsbac_rc_role_id_t to_role)
++{
++ struct rsbac_rc_role_entry_t entry;
++ rsbac_rc_role_id_t *role_array;
++ char *item_array;
++ long count;
++ u_long i;
++ int err;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_rc_copy_role(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++ if ((from_role > RC_role_max_value)
++ || (to_role > RC_role_max_value)
++ || (to_role == from_role)
++ )
++ return (-RSBAC_EINVALIDTARGET);
++
++ /* copy */
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number, role_handle, NULL,
++ &from_role, &entry);
++ if (err)
++ return err;
++ err =
++ rsbac_ta_list_add_ttl(ta_number, role_handle, 0, &to_role,
++ &entry);
++ if (err)
++ return err;
++
++ rsbac_ta_list_lol_remove(ta_number, role_rc_handle, &to_role);
++ count =
++ rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ role_rc_handle,
++ &from_role,
++ (void **) &role_array,
++ NULL);
++ if (count > 0) {
++ for (i = 0; i < count; i++)
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_rc_handle, 0,
++ &to_role,
++ &role_array[i], 0);
++ rsbac_kfree(role_array);
++ }
++ rsbac_ta_list_lol_remove(ta_number, role_adr_handle, &to_role);
++ count =
++ rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ role_adr_handle,
++ &from_role,
++ (void **) &role_array,
++ NULL);
++ if (count > 0) {
++ for (i = 0; i < count; i++)
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_adr_handle, 0,
++ &to_role,
++ &role_array[i], 0);
++ rsbac_kfree(role_array);
++ }
++ rsbac_ta_list_lol_remove(ta_number, role_asr_handle, &to_role);
++ count =
++ rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ role_asr_handle,
++ &from_role,
++ (void **) &role_array,
++ NULL);
++ if (count > 0) {
++ for (i = 0; i < count; i++)
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_asr_handle, 0,
++ &to_role,
++ &role_array[i], 0);
++ rsbac_kfree(role_array);
++ }
++ rsbac_ta_list_lol_remove(ta_number, role_dfdc_handle, &to_role);
++ count =
++ rsbac_ta_list_lol_get_all_subitems_ttl(ta_number,
++ role_dfdc_handle,
++ &from_role,
++ (void **) &item_array,
++ NULL);
++ if (count > 0) {
++ char *tmp = item_array;
++ int size =
++ rsbac_list_lol_get_subitem_size(role_dfdc_handle);
++
++ for (i = 0; i < count; i++) {
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_dfdc_handle, 0,
++ &to_role, tmp,
++ tmp +
++ sizeof
++ (rsbac_rc_role_id_t));
++ tmp += size;
++ }
++ rsbac_kfree(item_array);
++ }
++ rsbac_ta_list_lol_remove(ta_number, role_tcfd_handle, &to_role);
++ count =
++ rsbac_ta_list_lol_get_all_subitems_ttl(ta_number,
++ role_tcfd_handle,
++ &from_role,
++ (void **) &item_array,
++ NULL);
++ if (count > 0) {
++ char *tmp = item_array;
++ int size =
++ rsbac_list_lol_get_subitem_size(role_tcfd_handle);
++
++ for (i = 0; i < count; i++) {
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcfd_handle, 0,
++ &to_role, tmp,
++ tmp +
++ sizeof
++ (rsbac_rc_role_id_t));
++ tmp += size;
++ }
++ rsbac_kfree(item_array);
++ }
++ rsbac_ta_list_lol_remove(ta_number, role_tcdv_handle, &to_role);
++ count =
++ rsbac_ta_list_lol_get_all_subitems_ttl(ta_number,
++ role_tcdv_handle,
++ &from_role,
++ (void **) &item_array,
++ NULL);
++ if (count > 0) {
++ char *tmp = item_array;
++ int size =
++ rsbac_list_lol_get_subitem_size(role_tcdv_handle);
++
++ for (i = 0; i < count; i++) {
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcdv_handle, 0,
++ &to_role, tmp,
++ tmp +
++ sizeof
++ (rsbac_rc_role_id_t));
++ tmp += size;
++ }
++ rsbac_kfree(item_array);
++ }
++ rsbac_ta_list_lol_remove(ta_number, role_tcus_handle, &to_role);
++ count =
++ rsbac_ta_list_lol_get_all_subitems_ttl(ta_number,
++ role_tcus_handle,
++ &from_role,
++ (void **) &item_array,
++ NULL);
++ if (count > 0) {
++ char *tmp = item_array;
++ int size =
++ rsbac_list_lol_get_subitem_size(role_tcus_handle);
++
++ for (i = 0; i < count; i++) {
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcus_handle, 0,
++ &to_role, tmp,
++ tmp +
++ sizeof
++ (rsbac_rc_role_id_t));
++ tmp += size;
++ }
++ rsbac_kfree(item_array);
++ }
++ rsbac_ta_list_lol_remove(ta_number, role_tcpr_handle, &to_role);
++ count =
++ rsbac_ta_list_lol_get_all_subitems_ttl(ta_number,
++ role_tcpr_handle,
++ &from_role,
++ (void **) &item_array,
++ NULL);
++ if (count > 0) {
++ char *tmp = item_array;
++ int size =
++ rsbac_list_lol_get_subitem_size(role_tcpr_handle);
++
++ for (i = 0; i < count; i++) {
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcpr_handle, 0,
++ &to_role, tmp,
++ tmp +
++ sizeof
++ (rsbac_rc_role_id_t));
++ tmp += size;
++ }
++ rsbac_kfree(item_array);
++ }
++ rsbac_ta_list_lol_remove(ta_number, role_tcip_handle, &to_role);
++ count =
++ rsbac_ta_list_lol_get_all_subitems_ttl(ta_number,
++ role_tcip_handle,
++ &from_role,
++ (void **) &item_array,
++ NULL);
++ if (count > 0) {
++ char *tmp = item_array;
++ int size =
++ rsbac_list_lol_get_subitem_size(role_tcip_handle);
++
++ for (i = 0; i < count; i++) {
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcip_handle, 0,
++ &to_role, tmp,
++ tmp +
++ sizeof
++ (rsbac_rc_role_id_t));
++ tmp += size;
++ }
++ rsbac_kfree(item_array);
++ }
++ rsbac_ta_list_lol_remove(ta_number, role_tcsc_handle, &to_role);
++ count =
++ rsbac_ta_list_lol_get_all_subitems_ttl(ta_number,
++ role_tcsc_handle,
++ &from_role,
++ (void **) &item_array,
++ NULL);
++ if (count > 0) {
++ char *tmp = item_array;
++ int size =
++ rsbac_list_lol_get_subitem_size(role_tcsc_handle);
++
++ for (i = 0; i < count; i++) {
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcsc_handle, 0,
++ &to_role, tmp,
++ tmp +
++ sizeof
++ (rsbac_rc_role_id_t));
++ tmp += size;
++ }
++ rsbac_kfree(item_array);
++ }
++ rsbac_ta_list_lol_remove(ta_number, role_tcgr_handle, &to_role);
++ count =
++ rsbac_ta_list_lol_get_all_subitems_ttl(ta_number,
++ role_tcgr_handle,
++ &from_role,
++ (void **) &item_array,
++ NULL);
++ if (count > 0) {
++ char *tmp = item_array;
++ int size =
++ rsbac_list_lol_get_subitem_size(role_tcgr_handle);
++
++ for (i = 0; i < count; i++) {
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcgr_handle, 0,
++ &to_role, tmp,
++ tmp +
++ sizeof
++ (rsbac_rc_role_id_t));
++ tmp += size;
++ }
++ rsbac_kfree(item_array);
++ }
++ rsbac_ta_list_lol_remove(ta_number, role_tcnd_handle, &to_role);
++ count =
++ rsbac_ta_list_lol_get_all_subitems_ttl(ta_number,
++ role_tcnd_handle,
++ &from_role,
++ (void **) &item_array,
++ NULL);
++ if (count > 0) {
++ char *tmp = item_array;
++ int size =
++ rsbac_list_lol_get_subitem_size(role_tcnd_handle);
++
++ for (i = 0; i < count; i++) {
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcnd_handle, 0,
++ &to_role, tmp,
++ tmp +
++ sizeof
++ (rsbac_rc_role_id_t));
++ tmp += size;
++ }
++ rsbac_kfree(item_array);
++ }
++ rsbac_ta_list_lol_remove(ta_number, role_tcnt_handle, &to_role);
++ count =
++ rsbac_ta_list_lol_get_all_subitems_ttl(ta_number,
++ role_tcnt_handle,
++ &from_role,
++ (void **) &item_array,
++ NULL);
++ if (count > 0) {
++ char *tmp = item_array;
++ int size =
++ rsbac_list_lol_get_subitem_size(role_tcnt_handle);
++
++ for (i = 0; i < count; i++) {
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcnt_handle, 0,
++ &to_role, tmp,
++ tmp +
++ sizeof
++ (rsbac_rc_role_id_t));
++ tmp += size;
++ }
++ rsbac_kfree(item_array);
++ }
++ rsbac_ta_list_lol_remove(ta_number, role_tcno_handle, &to_role);
++ count =
++ rsbac_ta_list_lol_get_all_subitems_ttl(ta_number,
++ role_tcno_handle,
++ &from_role,
++ (void **) &item_array,
++ NULL);
++ if (count > 0) {
++ char *tmp = item_array;
++ int size =
++ rsbac_list_lol_get_subitem_size(role_tcno_handle);
++
++ for (i = 0; i < count; i++) {
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcno_handle, 0,
++ &to_role, tmp,
++ tmp +
++ sizeof
++ (rsbac_rc_role_id_t));
++ tmp += size;
++ }
++ rsbac_kfree(item_array);
++ }
++ return 0;
++}
++
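++/* rsbac_rc_copy_type(): duplicate the type entry of the given target class
++ * (name, plus need_secdel for FD types), then copy every role's compatibility
++ * rights vector for the source type over to the target type. */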
++int rsbac_rc_copy_type(rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ rsbac_rc_type_id_t from_type,
++ rsbac_rc_type_id_t to_type)
++{
++ rsbac_rc_role_id_t *role_array;
++ rsbac_list_handle_t i_type_handle = NULL;
++ rsbac_list_handle_t i_comp_handle = NULL;
++ struct rsbac_rc_type_fd_entry_t type_fd_entry;
++ char type_name[RSBAC_RC_NAME_LEN];
++ long count;
++ rsbac_time_t ttl;
++ u_long i;
++ int err;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_rc_copy_type(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++ if ((from_type > RC_type_max_value)
++ || (to_type > RC_type_max_value)
++ || (to_type == from_type)
++ )
++ return (-RSBAC_EINVALIDTARGET);
++
++ switch (target) {
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_FD:
++ i_type_handle = type_fd_handle;
++ i_comp_handle = role_tcfd_handle;
++ break;
++ case T_DEV:
++ i_type_handle = type_dev_handle;
++ i_comp_handle = role_tcdv_handle;
++ break;
++ case T_USER:
++ i_type_handle = type_user_handle;
++ i_comp_handle = role_tcus_handle;
++ break;
++ case T_PROCESS:
++ i_type_handle = type_process_handle;
++ i_comp_handle = role_tcpr_handle;
++ break;
++ case T_IPC:
++ i_type_handle = type_ipc_handle;
++ i_comp_handle = role_tcip_handle;
++ break;
++ case T_GROUP:
++ i_type_handle = type_group_handle;
++ i_comp_handle = role_tcgr_handle;
++ break;
++ case T_NETDEV:
++ i_type_handle = type_netdev_handle;
++ i_comp_handle = role_tcnd_handle;
++ break;
++ case T_NETTEMP:
++ i_type_handle = type_nettemp_handle;
++ i_comp_handle = role_tcnt_handle;
++ break;
++ case T_NETOBJ:
++ i_type_handle = type_netobj_handle;
++ i_comp_handle = role_tcno_handle;
++ break;
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ /* copy */
++ if (i_type_handle == type_fd_handle) {
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number, i_type_handle,
++ &ttl, &from_type,
++ &type_fd_entry);
++ if (err)
++ return err;
++ err =
++ rsbac_ta_list_add_ttl(ta_number, i_type_handle, ttl,
++ &to_type, &type_fd_entry);
++ if (err)
++ return err;
++ } else {
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number, i_type_handle,
++ NULL, &from_type,
++ &type_name);
++ if (err)
++ return err;
++ err =
++ rsbac_ta_list_add_ttl(ta_number, i_type_handle, 0,
++ &to_type, &type_name);
++ if (err)
++ return err;
++ }
++
++ err =
++ rsbac_ta_list_lol_subremove_from_all(ta_number, i_comp_handle,
++ &to_type);
++ if (err)
++ return err;
++
++ count = rsbac_ta_list_get_all_desc(ta_number, role_handle,
++ (void **) &role_array);
++ if (count > 0) {
++ rsbac_rc_rights_vector_t rights;
++
++ for (i = 0; i < count; i++) {
++ err = rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ i_comp_handle,
++ &ttl,
++ &role_array
++ [i],
++ &from_type,
++ &rights);
++ if (!err)
++ err =
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ i_comp_handle,
++ ttl,
++ &role_array
++ [i],
++ &to_type,
++ &rights);
++ }
++ rsbac_kfree(role_array);
++ }
++ return 0;
++}
++
++
++/* Getting values */
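++/* rsbac_rc_get_item() dispatches on target (RT_ROLE or RT_TYPE) and item and
++ * reads the value from the matching generic list. For the type compatibility
++ * items a missing entry is not treated as an error: the default rights
++ * vector is returned instead. */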
++int rsbac_rc_get_item(rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ union rsbac_rc_target_id_t tid,
++ union rsbac_rc_target_id_t subtid,
++ enum rsbac_rc_item_t item,
++ union rsbac_rc_item_value_t *value_p,
++ rsbac_time_t * ttl_p)
++{
++ int err = 0;
++ struct rsbac_rc_role_entry_t role_entry;
++ struct rsbac_rc_type_fd_entry_t type_fd_entry;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_rc_get_item(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_rc_get_item(): called from interrupt!\n");
++ }
++ if (ttl_p)
++ *ttl_p = 0;
++ switch (target) {
++ case RT_ROLE:
++ if (tid.role > RC_role_max_value)
++ return (-RSBAC_EINVALIDTARGET);
++/*
++ rsbac_pr_debug(ds_rc, "getting role item value\n");
++*/
++ switch (item) {
++ case RI_role_comp:
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ role_rc_handle,
++ ttl_p,
++ &tid.role,
++ &subtid.
++ role, NULL))
++ value_p->comp = TRUE;
++ else
++ value_p->comp = FALSE;
++ return 0;
++ case RI_admin_roles:
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ role_adr_handle,
++ ttl_p,
++ &tid.role,
++ &subtid.
++ role, NULL))
++ value_p->comp = TRUE;
++ else
++ value_p->comp = FALSE;
++ return 0;
++ case RI_assign_roles:
++ if (!rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ role_asr_handle,
++ ttl_p,
++ &tid.role,
++ &subtid.
++ role, NULL))
++ value_p->comp = TRUE;
++ else
++ value_p->comp = FALSE;
++ return 0;
++ case RI_type_comp_fd:
++ if (rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ role_tcfd_handle,
++ ttl_p,
++ &tid.role,
++ &subtid.type,
++ &value_p->
++ rights)) {
++ value_p->rights =
++ RSBAC_RC_DEFAULT_RIGHTS_VECTOR;
++ if (ttl_p)
++ *ttl_p = 0;
++ }
++ return 0;
++ case RI_type_comp_dev:
++ if (rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ role_tcdv_handle,
++ ttl_p,
++ &tid.role,
++ &subtid.type,
++ &value_p->
++ rights)) {
++ value_p->rights =
++ RSBAC_RC_DEFAULT_RIGHTS_VECTOR;
++ if (ttl_p)
++ *ttl_p = 0;
++ }
++ return 0;
++ case RI_type_comp_user:
++ if (rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ role_tcus_handle,
++ ttl_p,
++ &tid.role,
++ &subtid.type,
++ &value_p->
++ rights)) {
++ value_p->rights =
++ RSBAC_RC_DEFAULT_RIGHTS_VECTOR;
++ if (ttl_p)
++ *ttl_p = 0;
++ }
++ return 0;
++ case RI_type_comp_process:
++ if (rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ role_tcpr_handle,
++ ttl_p,
++ &tid.role,
++ &subtid.type,
++ &value_p->
++ rights)) {
++ value_p->rights =
++ RSBAC_RC_DEFAULT_RIGHTS_VECTOR;
++ if (ttl_p)
++ *ttl_p = 0;
++ }
++ return 0;
++ case RI_type_comp_ipc:
++ if (rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ role_tcip_handle,
++ ttl_p,
++ &tid.role,
++ &subtid.type,
++ &value_p->
++ rights)) {
++ value_p->rights =
++ RSBAC_RC_DEFAULT_RIGHTS_VECTOR;
++ if (ttl_p)
++ *ttl_p = 0;
++ }
++ return 0;
++ case RI_type_comp_scd:
++ if (rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ role_tcsc_handle,
++ ttl_p,
++ &tid.role,
++ &subtid.type,
++ &value_p->
++ rights)) {
++ value_p->rights =
++ RSBAC_RC_DEFAULT_RIGHTS_VECTOR;
++ if (ttl_p)
++ *ttl_p = 0;
++ }
++ return 0;
++ case RI_type_comp_group:
++ if (rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ role_tcgr_handle,
++ ttl_p,
++ &tid.role,
++ &subtid.type,
++ &value_p->
++ rights)) {
++ value_p->rights =
++ RSBAC_RC_DEFAULT_RIGHTS_VECTOR;
++ if (ttl_p)
++ *ttl_p = 0;
++ }
++ return 0;
++ case RI_type_comp_netdev:
++ if (rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ role_tcnd_handle,
++ ttl_p,
++ &tid.role,
++ &subtid.type,
++ &value_p->
++ rights)) {
++ value_p->rights =
++ RSBAC_RC_DEFAULT_RIGHTS_VECTOR;
++ if (ttl_p)
++ *ttl_p = 0;
++ }
++ return 0;
++ case RI_type_comp_nettemp:
++ if (rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ role_tcnt_handle,
++ ttl_p,
++ &tid.role,
++ &subtid.type,
++ &value_p->
++ rights)) {
++ value_p->rights =
++ RSBAC_RC_DEFAULT_RIGHTS_VECTOR;
++ if (ttl_p)
++ *ttl_p = 0;
++ }
++ return 0;
++ case RI_type_comp_netobj:
++ if (rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ role_tcno_handle,
++ ttl_p,
++ &tid.role,
++ &subtid.type,
++ &value_p->
++ rights)) {
++ value_p->rights =
++ RSBAC_RC_DEFAULT_RIGHTS_VECTOR;
++ if (ttl_p)
++ *ttl_p = 0;
++ }
++ return 0;
++ case RI_admin_type:
++ if (!
++ (err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle, NULL,
++ &tid.role,
++ &role_entry)))
++ value_p->admin_type =
++ role_entry.admin_type;
++ return err;
++ case RI_name:
++ if (!
++ (err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle, NULL,
++ &tid.role,
++ &role_entry))) {
++ strncpy(value_p->name, role_entry.name,
++ RSBAC_RC_NAME_LEN - 1);
++ value_p->name[RSBAC_RC_NAME_LEN - 1] =
++ (char) 0;
++ }
++ return err;
++ case RI_def_fd_create_type:
++ if (!
++ (err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle, NULL,
++ &tid.role,
++ &role_entry)))
++ value_p->type_id =
++ role_entry.def_fd_create_type;
++ return err;
++ case RI_def_fd_ind_create_type:
++ return rsbac_ta_list_lol_get_subdata_ttl(ta_number,
++ role_dfdc_handle,
++ ttl_p,
++ &tid.role,
++ &subtid.
++ type,
++ &value_p->
++ type_id);
++ case RI_def_user_create_type:
++ if (!
++ (err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle, NULL,
++ &tid.role,
++ &role_entry)))
++ value_p->type_id =
++ role_entry.def_user_create_type;
++ return err;
++ case RI_def_process_create_type:
++ if (!
++ (err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle, NULL,
++ &tid.role,
++ &role_entry)))
++ value_p->type_id =
++ role_entry.def_process_create_type;
++ return err;
++ case RI_def_process_chown_type:
++ if (!
++ (err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle, NULL,
++ &tid.role,
++ &role_entry)))
++ value_p->type_id =
++ role_entry.def_process_chown_type;
++ return err;
++ case RI_def_process_execute_type:
++ if (!
++ (err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle, NULL,
++ &tid.role,
++ &role_entry)))
++ value_p->type_id =
++ role_entry.def_process_execute_type;
++ return err;
++ case RI_def_ipc_create_type:
++ if (!
++ (err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle, NULL,
++ &tid.role,
++ &role_entry)))
++ value_p->type_id =
++ role_entry.def_ipc_create_type;
++ return err;
++ case RI_def_group_create_type:
++ if (!
++ (err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle, NULL,
++ &tid.role,
++ &role_entry)))
++ value_p->type_id =
++ role_entry.def_group_create_type;
++ return err;
++ case RI_def_unixsock_create_type:
++ if (!
++ (err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle, NULL,
++ &tid.role,
++ &role_entry)))
++ value_p->type_id =
++ role_entry.def_unixsock_create_type;
++ return err;
++ case RI_boot_role:
++ if (!
++ (err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle, NULL,
++ &tid.role,
++ &role_entry)))
++ value_p->boot_role = role_entry.boot_role;
++ return err;
++ case RI_req_reauth:
++ if (!
++ (err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle, NULL,
++ &tid.role,
++ &role_entry)))
++ value_p->req_reauth =
++ role_entry.req_reauth;
++ return err;
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++ /* return */
++ return (err);
++ break;
++
++ case RT_TYPE:
++ if (tid.type > RC_type_max_value)
++ return (-RSBAC_EINVALIDTARGET);
++/*
++ rsbac_pr_debug(ds_rc, "getting type item value\n");
++*/
++ switch (item) {
++ case RI_type_fd_name:
++ if (!
++ (err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ type_fd_handle,
++ NULL, &tid.type,
++ &type_fd_entry))) {
++ strncpy(value_p->name, type_fd_entry.name,
++ RSBAC_RC_NAME_LEN - 1);
++ value_p->name[RSBAC_RC_NAME_LEN - 1] =
++ (char) 0;
++ }
++ return err;
++ case RI_type_fd_need_secdel:
++ if (!
++ (err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ type_fd_handle,
++ NULL, &tid.type,
++ &type_fd_entry))) {
++ value_p->need_secdel =
++ type_fd_entry.need_secdel;
++ }
++ return err;
++ case RI_type_dev_name:
++ return rsbac_ta_list_get_data_ttl(ta_number,
++ type_dev_handle,
++ NULL, &tid.type,
++ value_p->name);
++ case RI_type_ipc_name:
++ return rsbac_ta_list_get_data_ttl(ta_number,
++ type_ipc_handle,
++ NULL, &tid.type,
++ value_p->name);
++ case RI_type_user_name:
++ return rsbac_ta_list_get_data_ttl(ta_number,
++ type_user_handle,
++ NULL, &tid.type,
++ value_p->name);
++ case RI_type_process_name:
++ return rsbac_ta_list_get_data_ttl(ta_number,
++ type_process_handle,
++ NULL, &tid.type,
++ value_p->name);
++ case RI_type_group_name:
++ return rsbac_ta_list_get_data_ttl(ta_number,
++ type_group_handle,
++ NULL, &tid.type,
++ value_p->name);
++ case RI_type_netdev_name:
++ return rsbac_ta_list_get_data_ttl(ta_number,
++ type_netdev_handle,
++ NULL, &tid.type,
++ value_p->name);
++ case RI_type_nettemp_name:
++ return rsbac_ta_list_get_data_ttl(ta_number,
++ type_nettemp_handle,
++ NULL, &tid.type,
++ value_p->name);
++ case RI_type_netobj_name:
++ return rsbac_ta_list_get_data_ttl(ta_number,
++ type_netobj_handle,
++ NULL, &tid.type,
++ value_p->name);
++ case RI_type_scd_name:
++ {
++ char *tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if (!tmp)
++ err = -RSBAC_ENOMEM;
++ else {
++ get_rc_scd_type_name(tmp,
++ tid.type);
++ strncpy(value_p->name, tmp,
++ RSBAC_RC_NAME_LEN - 1);
++ value_p->name[RSBAC_RC_NAME_LEN -
++ 1] = (char) 0;
++ rsbac_kfree(tmp);
++ }
++ break;
++ }
++ default:
++ err = -RSBAC_EINVALIDATTR;
++ }
++ /* and return */
++ return (err);
++ break;
++
++ /* switch target: no valid target */
++ default:
++ err = -RSBAC_EINVALIDTARGET;
++ }
++ return err;
++} /* end of rsbac_rc_get_item() */
++
++/* Checking role's compatibility */
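++/* Returns TRUE only if the requested entry exists and, for the type
++ * compatibility items, the given right is set in the stored rights vector;
++ * any lookup failure yields FALSE. */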
++rsbac_boolean_t rsbac_rc_check_comp(rsbac_rc_role_id_t role,
++ union rsbac_rc_target_id_t subtid,
++ enum rsbac_rc_item_t item,
++ enum rsbac_rc_special_rights_t right)
++{
++ rsbac_rc_rights_vector_t rights_vector;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_rc_check_comp(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_rc_check_comp(): called from interrupt!\n");
++ }
++ if (role > RC_role_max_value)
++ return FALSE;
++/*
++ rsbac_pr_debug(ds_rc, "checking role compatibility\n");
++*/
++ switch (item) {
++ case RI_role_comp:
++ return rsbac_list_lol_subexist(role_rc_handle, &role,
++ &subtid.role);
++ case RI_admin_roles:
++ return rsbac_list_lol_subexist(role_adr_handle, &role,
++ &subtid.role);
++ case RI_assign_roles:
++ return rsbac_list_lol_subexist(role_asr_handle, &role,
++ &subtid.role);
++ case RI_type_comp_fd:
++ if (!rsbac_list_lol_get_subdata
++ (role_tcfd_handle, &role, &subtid.type, &rights_vector)
++ && (rights_vector & RSBAC_RC_RIGHTS_VECTOR(right))
++ )
++ return TRUE;
++ else
++ return FALSE;
++ case RI_type_comp_dev:
++ if (!rsbac_list_lol_get_subdata
++ (role_tcdv_handle, &role, &subtid.type, &rights_vector)
++ && (rights_vector & RSBAC_RC_RIGHTS_VECTOR(right))
++ )
++ return TRUE;
++ else
++ return FALSE;
++ case RI_type_comp_user:
++ if (!rsbac_list_lol_get_subdata
++ (role_tcus_handle, &role, &subtid.type, &rights_vector)
++ && (rights_vector & RSBAC_RC_RIGHTS_VECTOR(right))
++ )
++ return TRUE;
++ else
++ return FALSE;
++ case RI_type_comp_process:
++ if (!rsbac_list_lol_get_subdata
++ (role_tcpr_handle, &role, &subtid.type, &rights_vector)
++ && (rights_vector & RSBAC_RC_RIGHTS_VECTOR(right))
++ )
++ return TRUE;
++ else
++ return FALSE;
++ case RI_type_comp_ipc:
++ if (!rsbac_list_lol_get_subdata
++ (role_tcip_handle, &role, &subtid.type, &rights_vector)
++ && (rights_vector & RSBAC_RC_RIGHTS_VECTOR(right))
++ )
++ return TRUE;
++ else
++ return FALSE;
++ case RI_type_comp_scd:
++ if (!rsbac_list_lol_get_subdata
++ (role_tcsc_handle, &role, &subtid.type, &rights_vector)
++ && (rights_vector & RSBAC_RC_RIGHTS_VECTOR(right))
++ )
++ return TRUE;
++ else
++ return FALSE;
++ case RI_type_comp_group:
++ if (!rsbac_list_lol_get_subdata
++ (role_tcgr_handle, &role, &subtid.type, &rights_vector)
++ && (rights_vector & RSBAC_RC_RIGHTS_VECTOR(right))
++ )
++ return TRUE;
++ else
++ return FALSE;
++ case RI_type_comp_netdev:
++ if (!rsbac_list_lol_get_subdata
++ (role_tcnd_handle, &role, &subtid.type, &rights_vector)
++ && (rights_vector & RSBAC_RC_RIGHTS_VECTOR(right))
++ )
++ return TRUE;
++ else
++ return FALSE;
++ case RI_type_comp_nettemp:
++ if (!rsbac_list_lol_get_subdata
++ (role_tcnt_handle, &role, &subtid.type, &rights_vector)
++ && (rights_vector & RSBAC_RC_RIGHTS_VECTOR(right))
++ )
++ return TRUE;
++ else
++ return FALSE;
++ case RI_type_comp_netobj:
++ if (!rsbac_list_lol_get_subdata
++ (role_tcno_handle, &role, &subtid.type, &rights_vector)
++ && (rights_vector & RSBAC_RC_RIGHTS_VECTOR(right))
++ )
++ return TRUE;
++ else
++ return FALSE;
++
++ default:
++ rsbac_printk(KERN_WARNING "rsbac_rc_check_comp(): called for invalid item %u\n",
++ item);
++ return FALSE;
++ }
++} /* end of rsbac_rc_check_comp() */
++
++/* Get the list of defined items. Returns the number of items or a negative
++ * error. item distinguishes the type targets, use RI_type_xx_name. */
++int rsbac_rc_get_list(rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ union rsbac_rc_target_id_t tid,
++ enum rsbac_rc_item_t item,
++ __u32 ** array_pp, rsbac_time_t ** ttl_array_pp)
++{
++ int res;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_rc_get_list(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_rc_get_list(): called from interrupt!\n");
++ }
++ if (ttl_array_pp)
++ *ttl_array_pp = NULL;
++ switch (target) {
++ case RT_ROLE:
++/*
++ rsbac_pr_debug(ds_rc, "getting role list\n");
++*/
++ switch (item) {
++ case RI_name:
++ if (array_pp)
++ return
++ rsbac_ta_list_get_all_desc(ta_number,
++ role_handle,
++ (void **)
++ array_pp);
++ else
++ return rsbac_ta_list_count(ta_number,
++ role_handle);
++ case RI_role_comp:
++ if (array_pp)
++ res =
++ rsbac_ta_list_lol_get_all_subdesc_ttl
++ (ta_number, role_rc_handle, &tid.role,
++ (void **) array_pp, ttl_array_pp);
++ else
++ res =
++ rsbac_ta_list_lol_subcount(ta_number,
++ role_rc_handle,
++ &tid.role);
++ if (res == -RSBAC_ENOTFOUND)
++ return 0;
++ else
++ return res;
++ case RI_admin_roles:
++ if (array_pp)
++ res =
++ rsbac_ta_list_lol_get_all_subdesc_ttl
++ (ta_number, role_adr_handle, &tid.role,
++ (void **) array_pp, ttl_array_pp);
++ else
++ res =
++ rsbac_ta_list_lol_subcount(ta_number,
++ role_adr_handle,
++ &tid.role);
++ if (res == -RSBAC_ENOTFOUND)
++ return 0;
++ else
++ return res;
++ case RI_assign_roles:
++ if (array_pp)
++ res =
++ rsbac_ta_list_lol_get_all_subdesc_ttl
++ (ta_number, role_asr_handle, &tid.role,
++ (void **) array_pp, ttl_array_pp);
++ else
++ res =
++ rsbac_ta_list_lol_subcount(ta_number,
++ role_asr_handle,
++ &tid.role);
++ if (res == -RSBAC_ENOTFOUND)
++ return 0;
++ else
++ return res;
++ case RI_def_fd_ind_create_type:
++ if (array_pp)
++ return
++ rsbac_ta_list_lol_get_all_subdesc_ttl
++ (ta_number, role_dfdc_handle,
++ &tid.role, (void **) array_pp,
++ ttl_array_pp);
++ else
++ return
++ rsbac_ta_list_lol_subcount(ta_number,
++ role_dfdc_handle,
++ &tid.role);
++ case RI_type_comp_fd:
++ if (array_pp)
++ return
++ rsbac_ta_list_lol_get_all_subdesc_ttl
++ (ta_number, role_tcfd_handle,
++ &tid.role, (void **) array_pp,
++ ttl_array_pp);
++ else
++ return
++ rsbac_ta_list_lol_subcount(ta_number,
++ role_tcfd_handle,
++ &tid.role);
++ case RI_type_comp_dev:
++ if (array_pp)
++ return
++ rsbac_ta_list_lol_get_all_subdesc_ttl
++ (ta_number, role_tcdv_handle,
++ &tid.role, (void **) array_pp,
++ ttl_array_pp);
++ else
++ return
++ rsbac_ta_list_lol_subcount(ta_number,
++ role_tcdv_handle,
++ &tid.role);
++ case RI_type_comp_user:
++ if (array_pp)
++ return
++ rsbac_ta_list_lol_get_all_subdesc_ttl
++ (ta_number, role_tcus_handle,
++ &tid.role, (void **) array_pp,
++ ttl_array_pp);
++ else
++ return
++ rsbac_ta_list_lol_subcount(ta_number,
++ role_tcus_handle,
++ &tid.role);
++ case RI_type_comp_process:
++ if (array_pp)
++ return
++ rsbac_ta_list_lol_get_all_subdesc_ttl
++ (ta_number, role_tcpr_handle,
++ &tid.role, (void **) array_pp,
++ ttl_array_pp);
++ else
++ return
++ rsbac_ta_list_lol_subcount(ta_number,
++ role_tcpr_handle,
++ &tid.role);
++ case RI_type_comp_ipc:
++ if (array_pp)
++ return
++ rsbac_ta_list_lol_get_all_subdesc_ttl
++ (ta_number, role_tcip_handle,
++ &tid.role, (void **) array_pp,
++ ttl_array_pp);
++ else
++ return
++ rsbac_ta_list_lol_subcount(ta_number,
++ role_tcip_handle,
++ &tid.role);
++ case RI_type_comp_scd:
++ if (array_pp)
++ return
++ rsbac_ta_list_lol_get_all_subdesc_ttl
++ (ta_number, role_tcsc_handle,
++ &tid.role, (void **) array_pp,
++ ttl_array_pp);
++ else
++ return
++ rsbac_ta_list_lol_subcount(ta_number,
++ role_tcsc_handle,
++ &tid.role);
++ case RI_type_comp_group:
++ if (array_pp)
++ return
++ rsbac_ta_list_lol_get_all_subdesc_ttl
++ (ta_number, role_tcgr_handle,
++ &tid.role, (void **) array_pp,
++ ttl_array_pp);
++ else
++ return
++ rsbac_ta_list_lol_subcount(ta_number,
++ role_tcgr_handle,
++ &tid.role);
++ case RI_type_comp_netdev:
++ if (array_pp)
++ return
++ rsbac_ta_list_lol_get_all_subdesc_ttl
++ (ta_number, role_tcnd_handle,
++ &tid.role, (void **) array_pp,
++ ttl_array_pp);
++ else
++ return
++ rsbac_ta_list_lol_subcount(ta_number,
++ role_tcnd_handle,
++ &tid.role);
++ case RI_type_comp_nettemp:
++ if (array_pp)
++ return
++ rsbac_ta_list_lol_get_all_subdesc_ttl
++ (ta_number, role_tcnt_handle,
++ &tid.role, (void **) array_pp,
++ ttl_array_pp);
++ else
++ return
++ rsbac_ta_list_lol_subcount(ta_number,
++ role_tcnt_handle,
++ &tid.role);
++ case RI_type_comp_netobj:
++ if (array_pp)
++ return
++ rsbac_ta_list_lol_get_all_subdesc_ttl
++ (ta_number, role_tcno_handle,
++ &tid.role, (void **) array_pp,
++ ttl_array_pp);
++ else
++ return
++ rsbac_ta_list_lol_subcount(ta_number,
++ role_tcno_handle,
++ &tid.role);
++
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++
++ case RT_TYPE:
++/*
++ rsbac_pr_debug(ds_rc, "getting type item value\n");
++*/
++ switch (item) {
++ case RI_type_fd_name:
++ case RI_type_fd_need_secdel:
++ if (array_pp)
++ return
++ rsbac_ta_list_get_all_desc(ta_number,
++ type_fd_handle,
++ (void **)
++ array_pp);
++ else
++ return rsbac_ta_list_count(ta_number,
++ type_fd_handle);
++ case RI_type_dev_name:
++ if (array_pp)
++ return
++ rsbac_ta_list_get_all_desc(ta_number,
++ type_dev_handle,
++ (void **)
++ array_pp);
++ else
++ return rsbac_ta_list_count(ta_number,
++ type_dev_handle);
++ case RI_type_ipc_name:
++ if (array_pp)
++ return
++ rsbac_ta_list_get_all_desc(ta_number,
++ type_ipc_handle,
++ (void **)
++ array_pp);
++ else
++ return rsbac_ta_list_count(ta_number,
++ type_ipc_handle);
++ case RI_type_user_name:
++ if (array_pp)
++ return
++ rsbac_ta_list_get_all_desc(ta_number,
++ type_user_handle,
++ (void **)
++ array_pp);
++ else
++ return rsbac_ta_list_count(ta_number,
++ type_user_handle);
++ case RI_type_process_name:
++ if (array_pp)
++ return
++ rsbac_ta_list_get_all_desc(ta_number,
++ type_process_handle,
++ (void **)
++ array_pp);
++ else
++ return rsbac_ta_list_count(ta_number,
++ type_process_handle);
++ case RI_type_group_name:
++ if (array_pp)
++ return
++ rsbac_ta_list_get_all_desc(ta_number,
++ type_group_handle,
++ (void **)
++ array_pp);
++ else
++ return rsbac_ta_list_count(ta_number,
++ type_group_handle);
++ case RI_type_netdev_name:
++ if (array_pp)
++ return
++ rsbac_ta_list_get_all_desc(ta_number,
++ type_netdev_handle,
++ (void **)
++ array_pp);
++ else
++ return rsbac_ta_list_count(ta_number,
++ type_netdev_handle);
++ case RI_type_nettemp_name:
++ if (array_pp)
++ return
++ rsbac_ta_list_get_all_desc(ta_number,
++ type_nettemp_handle,
++ (void **)
++ array_pp);
++ else
++ return rsbac_ta_list_count(ta_number,
++ type_nettemp_handle);
++ case RI_type_netobj_name:
++ if (array_pp)
++ return
++ rsbac_ta_list_get_all_desc(ta_number,
++ type_netobj_handle,
++ (void **)
++ array_pp);
++ else
++ return rsbac_ta_list_count(ta_number,
++ type_netobj_handle);
++
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++} /* end of rsbac_rc_get_list() */
++
++
++/* Setting values */
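++/* rsbac_rc_set_item() validates the target and any referenced type before
++ * writing through the generic list API. The *_remove items also purge the
++ * removed role or type from all compatibility and default type lists (and,
++ * with CONFIG_RSBAC_ACL, drop the role's ACL subject entries). */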
++int rsbac_rc_set_item(rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ union rsbac_rc_target_id_t tid,
++ union rsbac_rc_target_id_t subtid,
++ enum rsbac_rc_item_t item,
++ union rsbac_rc_item_value_t value, rsbac_time_t ttl)
++{
++ int err = 0;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_rc_set_item(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++ if (in_interrupt()) {
++ rsbac_printk(KERN_WARNING "rsbac_rc_set_item(): called from interrupt!\n");
++ }
++ switch (target) {
++ case RT_ROLE:
++ if (tid.role > RC_role_max_value)
++ return (-RSBAC_EINVALIDTARGET);
++ if ((item != RI_name)
++ && !rsbac_ta_list_exist(ta_number, role_handle,
++ &tid.role)
++ )
++ return (-RSBAC_EINVALIDTARGET);
++ rsbac_pr_debug(ds_rc, "Setting role item value\n");
++ switch (item) {
++ case RI_role_comp:
++ if (value.comp) {
++ return
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_rc_handle,
++ ttl,
++ &tid.role,
++ &subtid.
++ role,
++ NULL);
++ } else {
++ rsbac_ta_list_lol_subremove(ta_number,
++ role_rc_handle,
++ &tid.role,
++ &subtid.role);
++ return 0;
++ }
++ case RI_admin_roles:
++ if (value.comp) {
++ return
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_adr_handle,
++ ttl,
++ &tid.role,
++ &subtid.
++ role,
++ NULL);
++ } else {
++ rsbac_ta_list_lol_subremove(ta_number,
++ role_adr_handle,
++ &tid.role,
++ &subtid.role);
++ return 0;
++ }
++ case RI_assign_roles:
++ if (value.comp) {
++ return
++ rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_asr_handle,
++ ttl,
++ &tid.role,
++ &subtid.
++ role,
++ NULL);
++ } else {
++ rsbac_ta_list_lol_subremove(ta_number,
++ role_asr_handle,
++ &tid.role,
++ &subtid.role);
++ return 0;
++ }
++ case RI_type_comp_fd:
++ if (!rsbac_ta_list_exist
++ (ta_number, type_fd_handle, &subtid.type))
++ return -RSBAC_EINVALIDVALUE;
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcfd_handle,
++ ttl,
++ &tid.role,
++ &subtid.type,
++ &value.rights);
++ case RI_type_comp_dev:
++ if (!rsbac_ta_list_exist
++ (ta_number, type_dev_handle, &subtid.type))
++ return -RSBAC_EINVALIDVALUE;
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcdv_handle,
++ ttl,
++ &tid.role,
++ &subtid.type,
++ &value.rights);
++ case RI_type_comp_user:
++ if (!rsbac_ta_list_exist
++ (ta_number, type_user_handle, &subtid.type))
++ return -RSBAC_EINVALIDVALUE;
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcus_handle,
++ ttl,
++ &tid.role,
++ &subtid.type,
++ &value.rights);
++ case RI_type_comp_process:
++ if (!rsbac_ta_list_exist
++ (ta_number, type_process_handle, &subtid.type))
++ return -RSBAC_EINVALIDVALUE;
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcpr_handle,
++ ttl,
++ &tid.role,
++ &subtid.type,
++ &value.rights);
++ case RI_type_comp_ipc:
++ if (!rsbac_ta_list_exist
++ (ta_number, type_ipc_handle, &subtid.type))
++ return -RSBAC_EINVALIDVALUE;
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcip_handle,
++ ttl,
++ &tid.role,
++ &subtid.type,
++ &value.rights);
++ case RI_type_comp_scd:
++ if ((subtid.type >= ST_none)
++ && (subtid.type < RST_min)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ if (subtid.type >= RST_none)
++ return -RSBAC_EINVALIDVALUE;
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcsc_handle,
++ ttl,
++ &tid.role,
++ &subtid.type,
++ &value.rights);
++ case RI_type_comp_group:
++ if (!rsbac_ta_list_exist
++ (ta_number, type_group_handle, &subtid.type))
++ return -RSBAC_EINVALIDVALUE;
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcgr_handle,
++ ttl,
++ &tid.role,
++ &subtid.type,
++ &value.rights);
++ case RI_type_comp_netdev:
++ if (!rsbac_ta_list_exist
++ (ta_number, type_netdev_handle, &subtid.type))
++ return -RSBAC_EINVALIDVALUE;
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcnd_handle,
++ ttl,
++ &tid.role,
++ &subtid.type,
++ &value.rights);
++ case RI_type_comp_nettemp:
++ if (!rsbac_ta_list_exist
++ (ta_number, type_nettemp_handle, &subtid.type))
++ return -RSBAC_EINVALIDVALUE;
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcnt_handle,
++ ttl,
++ &tid.role,
++ &subtid.type,
++ &value.rights);
++ case RI_type_comp_netobj:
++ if (!rsbac_ta_list_exist
++ (ta_number, type_netobj_handle, &subtid.type))
++ return -RSBAC_EINVALIDVALUE;
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_tcno_handle,
++ ttl,
++ &tid.role,
++ &subtid.type,
++ &value.rights);
++ case RI_admin_type:
++ {
++ struct rsbac_rc_role_entry_t entry;
++
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle,
++ NULL,
++ &tid.role,
++ &entry);
++ if (err)
++ return err;
++ entry.admin_type = value.admin_type;
++ return rsbac_ta_list_add_ttl(ta_number,
++ role_handle,
++ 0, &tid.role,
++ &entry);
++ }
++ case RI_name:
++ {
++ struct rsbac_rc_role_entry_t entry;
++
++ /* no empty names */
++ if (!value.name[0])
++ return -RSBAC_EINVALIDVALUE;
++ /* create, if necessary, and set name */
++ memset(&entry, 0,
++ sizeof(struct
++ rsbac_rc_role_entry_t));
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle,
++ NULL, &tid.role,
++ &entry);
++ strncpy(entry.name, value.name,
++ RSBAC_RC_NAME_LEN - 1);
++ entry.name[RSBAC_RC_NAME_LEN - 1] = 0;
++ return rsbac_ta_list_add_ttl(ta_number,
++ role_handle,
++ 0, &tid.role,
++ &entry);
++ }
++ case RI_remove_role:
++ if (!tid.role)
++ return -RSBAC_EINVALIDVALUE;
++ /* remove role compat. */
++ rsbac_ta_list_lol_remove(ta_number, role_rc_handle,
++ &tid.role);
++ /* remove from other roles' role compat */
++ rsbac_ta_list_lol_subremove_from_all(ta_number,
++ role_rc_handle,
++ &tid.role);
++
++ /* remove admin roles */
++ rsbac_ta_list_lol_remove(ta_number,
++ role_adr_handle,
++ &tid.role);
++ /* remove from other roles' admin roles */
++ rsbac_ta_list_lol_subremove_from_all(ta_number,
++ role_adr_handle,
++ &tid.role);
++
++ /* remove assign roles */
++ rsbac_ta_list_lol_remove(ta_number,
++ role_asr_handle,
++ &tid.role);
++ /* remove from other roles' assign roles */
++ rsbac_ta_list_lol_subremove_from_all(ta_number,
++ role_asr_handle,
++ &tid.role);
++
++ /* remove def_fd_ind_create_type */
++ rsbac_ta_list_lol_remove(ta_number,
++ role_dfdc_handle,
++ &tid.role);
++
++ /* remove type compatibilities */
++ rsbac_ta_list_lol_remove(ta_number,
++ role_tcfd_handle,
++ &tid.role);
++ rsbac_ta_list_lol_remove(ta_number,
++ role_tcdv_handle,
++ &tid.role);
++ rsbac_ta_list_lol_remove(ta_number,
++ role_tcus_handle,
++ &tid.role);
++ rsbac_ta_list_lol_remove(ta_number,
++ role_tcpr_handle,
++ &tid.role);
++ rsbac_ta_list_lol_remove(ta_number,
++ role_tcip_handle,
++ &tid.role);
++ rsbac_ta_list_lol_remove(ta_number,
++ role_tcsc_handle,
++ &tid.role);
++ rsbac_ta_list_lol_remove(ta_number,
++ role_tcgr_handle,
++ &tid.role);
++ rsbac_ta_list_lol_remove(ta_number,
++ role_tcnd_handle,
++ &tid.role);
++ rsbac_ta_list_lol_remove(ta_number,
++ role_tcnt_handle,
++ &tid.role);
++ rsbac_ta_list_lol_remove(ta_number,
++ role_tcno_handle,
++ &tid.role);
++
++#ifdef CONFIG_RSBAC_ACL
++ /* remove ACL entries */
++ {
++ struct rsbac_acl_entry_desc_t desc;
++
++ desc.subj_type = ACLS_ROLE;
++ desc.subj_id = tid.role;
++ rsbac_acl_remove_subject(ta_number, desc);
++ }
++#endif
++
++ return rsbac_ta_list_remove(ta_number, role_handle,
++ &tid.role);
++
++ case RI_def_fd_create_type:
++ {
++ struct rsbac_rc_role_entry_t entry;
++
++ if ((value.type_id <= RC_type_max_value)
++ && !rsbac_ta_list_exist(ta_number,
++ type_fd_handle,
++ &value.type_id)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ if ((value.type_id > RC_type_max_value)
++ && (value.type_id <
++ RC_type_min_special)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle,
++ NULL,
++ &tid.role,
++ &entry);
++ if (err)
++ return err;
++ entry.def_fd_create_type = value.type_id;
++ return rsbac_ta_list_add_ttl(ta_number,
++ role_handle,
++ 0, &tid.role,
++ &entry);
++ }
++ case RI_def_fd_ind_create_type:
++ if ((value.type_id <= RC_type_max_value)
++ && !rsbac_ta_list_exist(ta_number,
++ type_fd_handle,
++ &value.type_id)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ if ((value.type_id > RC_type_max_value)
++ && (value.type_id < RC_type_min_special)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ role_dfdc_handle,
++ ttl,
++ &tid.role,
++ &subtid.type,
++ &value.
++ type_id);
++ case RI_def_fd_ind_create_type_remove:
++ return rsbac_ta_list_lol_subremove(ta_number,
++ role_dfdc_handle,
++ &tid.role,
++ &subtid.type);
++
++ case RI_def_user_create_type:
++ {
++ struct rsbac_rc_role_entry_t entry;
++
++ if ((value.type_id <= RC_type_max_value)
++ && !rsbac_ta_list_exist(ta_number,
++ type_user_handle,
++ &value.type_id)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ if ((value.type_id > RC_type_max_value)
++ && (value.type_id <
++ RC_type_min_special)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle,
++ NULL,
++ &tid.role,
++ &entry);
++ if (err)
++ return err;
++ entry.def_user_create_type = value.type_id;
++ return rsbac_ta_list_add_ttl(ta_number,
++ role_handle,
++ 0, &tid.role,
++ &entry);
++ }
++ case RI_def_process_create_type:
++ {
++ struct rsbac_rc_role_entry_t entry;
++
++ if ((value.type_id <= RC_type_max_value)
++ && !rsbac_ta_list_exist(ta_number,
++ type_process_handle,
++ &value.type_id)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ if ((value.type_id > RC_type_max_value)
++ && (value.type_id <
++ RC_type_min_special)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle,
++ NULL,
++ &tid.role,
++ &entry);
++ if (err)
++ return err;
++ entry.def_process_create_type =
++ value.type_id;
++ return rsbac_ta_list_add_ttl(ta_number,
++ role_handle,
++ 0, &tid.role,
++ &entry);
++ }
++ case RI_def_process_chown_type:
++ {
++ struct rsbac_rc_role_entry_t entry;
++
++ if ((value.type_id <= RC_type_max_value)
++ && !rsbac_ta_list_exist(ta_number,
++ type_process_handle,
++ &value.type_id)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ if ((value.type_id > RC_type_max_value)
++ && (value.type_id <
++ RC_type_min_special)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle,
++ NULL,
++ &tid.role,
++ &entry);
++ if (err)
++ return err;
++ entry.def_process_chown_type =
++ value.type_id;
++ return rsbac_ta_list_add_ttl(ta_number,
++ role_handle,
++ 0, &tid.role,
++ &entry);
++ }
++ case RI_def_process_execute_type:
++ {
++ struct rsbac_rc_role_entry_t entry;
++
++ if ((value.type_id <= RC_type_max_value)
++ && !rsbac_ta_list_exist(ta_number,
++ type_process_handle,
++ &value.type_id)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ if ((value.type_id > RC_type_max_value)
++ && (value.type_id <
++ RC_type_min_special)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle,
++ NULL,
++ &tid.role,
++ &entry);
++ if (err)
++ return err;
++ entry.def_process_execute_type =
++ value.type_id;
++ return rsbac_ta_list_add_ttl(ta_number,
++ role_handle,
++ 0, &tid.role,
++ &entry);
++ }
++ case RI_def_ipc_create_type:
++ {
++ struct rsbac_rc_role_entry_t entry;
++
++ if ((value.type_id <= RC_type_max_value)
++ && !rsbac_ta_list_exist(ta_number,
++ type_ipc_handle,
++ &value.type_id)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ if ((value.type_id > RC_type_max_value)
++ && (value.type_id <
++ RC_type_min_special)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle,
++ NULL,
++ &tid.role,
++ &entry);
++ if (err)
++ return err;
++ entry.def_ipc_create_type = value.type_id;
++ return rsbac_ta_list_add_ttl(ta_number,
++ role_handle,
++ 0, &tid.role,
++ &entry);
++ }
++ case RI_def_group_create_type:
++ {
++ struct rsbac_rc_role_entry_t entry;
++
++ if ((value.type_id <= RC_type_max_value)
++ && !rsbac_ta_list_exist(ta_number,
++ type_group_handle,
++ &value.type_id)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ if ((value.type_id > RC_type_max_value)
++ && (value.type_id <
++ RC_type_min_special)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle,
++ NULL,
++ &tid.role,
++ &entry);
++ if (err)
++ return err;
++ entry.def_group_create_type =
++ value.type_id;
++ return rsbac_ta_list_add_ttl(ta_number,
++ role_handle,
++ 0, &tid.role,
++ &entry);
++ }
++ case RI_def_unixsock_create_type:
++ {
++ struct rsbac_rc_role_entry_t entry;
++
++ if ((value.type_id <= RC_type_max_value)
++ && !rsbac_ta_list_exist(ta_number,
++ type_fd_handle,
++ &value.type_id)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ if ((value.type_id > RC_type_max_value)
++ && (value.type_id <
++ RC_type_min_special)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle,
++ NULL,
++ &tid.role,
++ &entry);
++ if (err)
++ return err;
++ entry.def_unixsock_create_type =
++ value.type_id;
++ return rsbac_ta_list_add_ttl(ta_number,
++ role_handle,
++ 0, &tid.role,
++ &entry);
++ }
++ case RI_boot_role:
++ {
++ struct rsbac_rc_role_entry_t entry;
++
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle,
++ NULL,
++ &tid.role,
++ &entry);
++ if (err)
++ return err;
++ entry.boot_role = value.boot_role;
++ return rsbac_ta_list_add_ttl(ta_number,
++ role_handle,
++ 0, &tid.role,
++ &entry);
++ }
++ case RI_req_reauth:
++ {
++ struct rsbac_rc_role_entry_t entry;
++
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ role_handle,
++ NULL,
++ &tid.role,
++ &entry);
++ if (err)
++ return err;
++ entry.req_reauth = value.req_reauth;
++// printk(KERN_WARNING "entry %u value %u\n",
++// entry.req_reauth, value.req_reauth);
++ return rsbac_ta_list_add_ttl(ta_number,
++ role_handle,
++ 0, &tid.role,
++ &entry);
++ }
++
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++
++ case RT_TYPE:
++ if (tid.type > RC_type_max_value)
++ return (-RSBAC_EINVALIDTARGET);
++ rsbac_pr_debug(ds_rc, "Setting type item value\n");
++ switch (item) {
++ case RI_type_fd_name:
++ {
++ struct rsbac_rc_type_fd_entry_t entry;
++
++ /* no empty names */
++ if (!value.name[0])
++ return -RSBAC_EINVALIDVALUE;
++ /* create, if necessary, and set name */
++ memset(&entry, 0,
++ sizeof(struct
++ rsbac_rc_type_fd_entry_t));
++ rsbac_ta_list_get_data_ttl(ta_number,
++ type_fd_handle,
++ NULL, &tid.type,
++ &entry);
++ strncpy(entry.name, value.name,
++ RSBAC_RC_NAME_LEN - 1);
++ entry.name[RSBAC_RC_NAME_LEN - 1] = 0;
++ return rsbac_ta_list_add_ttl(ta_number,
++ type_fd_handle,
++ 0, &tid.type,
++ &entry);
++ }
++ case RI_type_fd_need_secdel:
++ {
++ struct rsbac_rc_type_fd_entry_t entry;
++
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ type_fd_handle,
++ NULL,
++ &tid.type,
++ &entry);
++ if (err)
++ return err;
++ entry.need_secdel = value.need_secdel;
++ return rsbac_ta_list_add_ttl(ta_number,
++ type_fd_handle,
++ 0, &tid.type,
++ &entry);
++ }
++ case RI_type_dev_name:
++ /* no empty names */
++ if (!value.name[0])
++ return -RSBAC_EINVALIDVALUE;
++ /* create, if necessary, and set name */
++ value.name[RSBAC_RC_NAME_LEN - 1] = 0;
++ return rsbac_ta_list_add_ttl(ta_number,
++ type_dev_handle, 0,
++ &tid.type,
++ &value.name);
++ case RI_type_ipc_name:
++ /* no empty names */
++ if (!value.name[0])
++ return -RSBAC_EINVALIDVALUE;
++ /* create, if necessary, and set name */
++ value.name[RSBAC_RC_NAME_LEN - 1] = 0;
++ return rsbac_ta_list_add_ttl(ta_number,
++ type_ipc_handle, 0,
++ &tid.type,
++ &value.name);
++ case RI_type_user_name:
++ /* no empty names */
++ if (!value.name[0])
++ return -RSBAC_EINVALIDVALUE;
++ /* create, if necessary, and set name */
++ value.name[RSBAC_RC_NAME_LEN - 1] = 0;
++ return rsbac_ta_list_add_ttl(ta_number,
++ type_user_handle, 0,
++ &tid.type,
++ &value.name);
++ case RI_type_process_name:
++ /* no empty names */
++ if (!value.name[0])
++ return -RSBAC_EINVALIDVALUE;
++ /* create, if necessary, and set name */
++ value.name[RSBAC_RC_NAME_LEN - 1] = 0;
++ return rsbac_ta_list_add_ttl(ta_number,
++ type_process_handle,
++ 0, &tid.type,
++ &value.name);
++ case RI_type_group_name:
++ /* no empty names */
++ if (!value.name[0])
++ return -RSBAC_EINVALIDVALUE;
++ /* create, if necessary, and set name */
++ value.name[RSBAC_RC_NAME_LEN - 1] = 0;
++ return rsbac_ta_list_add_ttl(ta_number,
++ type_group_handle, 0,
++ &tid.type,
++ &value.name);
++ case RI_type_netdev_name:
++ /* no empty names */
++ if (!value.name[0])
++ return -RSBAC_EINVALIDVALUE;
++ /* create, if necessary, and set name */
++ value.name[RSBAC_RC_NAME_LEN - 1] = 0;
++ return rsbac_ta_list_add_ttl(ta_number,
++ type_netdev_handle, 0,
++ &tid.type,
++ &value.name);
++ case RI_type_nettemp_name:
++ /* no empty names */
++ if (!value.name[0])
++ return -RSBAC_EINVALIDVALUE;
++ /* create, if necessary, and set name */
++ value.name[RSBAC_RC_NAME_LEN - 1] = 0;
++ return rsbac_ta_list_add_ttl(ta_number,
++ type_nettemp_handle,
++ 0, &tid.type,
++ &value.name);
++ case RI_type_netobj_name:
++ /* no empty names */
++ if (!value.name[0])
++ return -RSBAC_EINVALIDVALUE;
++ /* create, if necessary, and set name */
++ value.name[RSBAC_RC_NAME_LEN - 1] = 0;
++ return rsbac_ta_list_add_ttl(ta_number,
++ type_netobj_handle, 0,
++ &tid.type,
++ &value.name);
++
++ case RI_type_fd_remove:
++ if (!tid.type)
++ return -RSBAC_EINVALIDVALUE;
++ rsbac_ta_list_lol_subremove_from_all(ta_number,
++ role_tcfd_handle,
++ &tid.type);
++ rsbac_ta_list_lol_subremove_from_all(ta_number,
++ role_dfdc_handle,
++ &tid.type);
++ return rsbac_ta_list_remove(ta_number,
++ type_fd_handle,
++ &tid.type);
++ case RI_type_dev_remove:
++ if (!tid.type)
++ return -RSBAC_EINVALIDVALUE;
++ rsbac_ta_list_lol_subremove_from_all(ta_number,
++ role_tcdv_handle,
++ &tid.type);
++ return rsbac_ta_list_remove(ta_number,
++ type_dev_handle,
++ &tid.type);
++ case RI_type_user_remove:
++ if (!tid.type)
++ return -RSBAC_EINVALIDVALUE;
++ rsbac_ta_list_lol_subremove_from_all(ta_number,
++ role_tcus_handle,
++ &tid.type);
++ return rsbac_ta_list_remove(ta_number,
++ type_user_handle,
++ &tid.type);
++ case RI_type_process_remove:
++ if (!tid.type)
++ return -RSBAC_EINVALIDVALUE;
++ rsbac_ta_list_lol_subremove_from_all(ta_number,
++ role_tcpr_handle,
++ &tid.type);
++ return rsbac_ta_list_remove(ta_number,
++ type_process_handle,
++ &tid.type);
++ case RI_type_ipc_remove:
++ if (!tid.type)
++ return -RSBAC_EINVALIDVALUE;
++ rsbac_ta_list_lol_subremove_from_all(ta_number,
++ role_tcip_handle,
++ &tid.type);
++ return rsbac_ta_list_remove(ta_number,
++ type_ipc_handle,
++ &tid.type);
++ case RI_type_group_remove:
++ if (!tid.type)
++ return -RSBAC_EINVALIDVALUE;
++ rsbac_ta_list_lol_subremove_from_all(ta_number,
++ role_tcgr_handle,
++ &tid.type);
++ return rsbac_ta_list_remove(ta_number,
++ type_group_handle,
++ &tid.type);
++ case RI_type_netdev_remove:
++ if (!tid.type)
++ return -RSBAC_EINVALIDVALUE;
++ rsbac_ta_list_lol_subremove_from_all(ta_number,
++ role_tcnd_handle,
++ &tid.type);
++ return rsbac_ta_list_remove(ta_number,
++ type_netdev_handle,
++ &tid.type);
++ case RI_type_nettemp_remove:
++ if (!tid.type)
++ return -RSBAC_EINVALIDVALUE;
++ rsbac_ta_list_lol_subremove_from_all(ta_number,
++ role_tcnt_handle,
++ &tid.type);
++ return rsbac_ta_list_remove(ta_number,
++ type_nettemp_handle,
++ &tid.type);
++ case RI_type_netobj_remove:
++ if (!tid.type)
++ return -RSBAC_EINVALIDVALUE;
++ rsbac_ta_list_lol_subremove_from_all(ta_number,
++ role_tcno_handle,
++ &tid.type);
++ return rsbac_ta_list_remove(ta_number,
++ type_netobj_handle,
++ &tid.type);
++
++ default:
++ return -RSBAC_EINVALIDATTR;
++ }
++
++ /* switch target: no valid target */
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++}
+diff --git a/rsbac/data_structures/um_data_structures.c b/rsbac/data_structures/um_data_structures.c
+new file mode 100644
+index 0000000..2088c4d
+--- /dev/null
++++ b/rsbac/data_structures/um_data_structures.c
+@@ -0,0 +1,2042 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of User Management data structures */
++/* Author and (c) 1999-2011: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 12/Jul/2011 */
++/*************************************************** */
++
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/random.h>
++#include <linux/export.h>
++#include <asm/uaccess.h>
++#include <rsbac/types.h>
++#include <rsbac/aci_data_structures.h>
++#include <rsbac/um_types.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/adf.h>
++#include <rsbac/aci.h>
++#include <rsbac/um.h>
++#include <rsbac/lists.h>
++#include <rsbac/proc_fs.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/getname.h>
++#include <linux/string.h>
++#include <linux/delay.h>
++#ifdef CONFIG_RSBAC_UM_DIGEST
++#include <linux/crypto.h>
++#include <linux/scatterlist.h>
++#endif
++#include <linux/seq_file.h>
++/************************************************************************** */
++/* Global Variables */
++/************************************************************************** */
++
++static rsbac_list_handle_t user_handle;
++static rsbac_list_handle_t group_handle;
++#ifdef CONFIG_RSBAC_UM_PWHISTORY
++static rsbac_list_handle_t user_pwhistory_handle;
++#endif
++#ifdef CONFIG_RSBAC_UM_ONETIME
++static rsbac_list_handle_t onetime_handle;
++#endif
++#define EXTRA_ROOM 20
++
++/**************************************************/
++/* Declarations of external functions */
++/**************************************************/
++
++/**************************************************/
++/* Declarations of internal functions */
++/**************************************************/
++
++/************************************************* */
++/* Internal Help functions */
++/************************************************* */
++
++static u_int nr_user_hashes = RSBAC_UM_NR_USER_LISTS;
++static u_int nr_group_hashes = RSBAC_UM_NR_GROUP_LISTS;
++
++#ifdef CONFIG_RSBAC_UM_PWHISTORY
++static u_int nr_user_pwhistory_hashes = RSBAC_UM_NR_USER_PWHISTORY_LISTS;
++#endif
++
++static int user_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_um_user_entry_t * new_aci = new_data;
++ struct rsbac_um_old_user_entry_t * old_aci = old_data;
++
++ memcpy(&new_aci->name, &old_aci->name, RSBAC_UM_OLD_NAME_LEN);
++ memcpy(&new_aci->pass, &old_aci->pass, RSBAC_UM_PASS_LEN);
++ memcpy(&new_aci->fullname, &old_aci->fullname, RSBAC_UM_OLD_FULLNAME_LEN);
++ memcpy(&new_aci->homedir, &old_aci->homedir, RSBAC_UM_OLD_HOMEDIR_LEN);
++ memcpy(&new_aci->shell, &old_aci->shell, RSBAC_UM_OLD_SHELL_LEN);
++ new_aci->group = old_aci->group;
++ new_aci->lastchange = old_aci->lastchange;
++ new_aci->minchange = old_aci->minchange;
++ new_aci->maxchange = old_aci->maxchange;
++ new_aci->warnchange = old_aci->warnchange;
++ new_aci->inactive = old_aci->inactive;
++ new_aci->expire = old_aci->expire;
++ *((rsbac_uid_t *)new_desc) = *((rsbac_uid_t *)old_desc);
++ return 0;
++}
++
++static int user_old_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_um_user_entry_t * new_aci = new_data;
++ struct rsbac_um_old_user_entry_t * old_aci = old_data;
++
++ memcpy(&new_aci->name, &old_aci->name, RSBAC_UM_OLD_NAME_LEN);
++ memcpy(&new_aci->pass, &old_aci->pass, RSBAC_UM_PASS_LEN);
++ memcpy(&new_aci->fullname, &old_aci->fullname, RSBAC_UM_OLD_FULLNAME_LEN);
++ memcpy(&new_aci->homedir, &old_aci->homedir, RSBAC_UM_OLD_HOMEDIR_LEN);
++ memcpy(&new_aci->shell, &old_aci->shell, RSBAC_UM_OLD_SHELL_LEN);
++ new_aci->group = old_aci->group;
++ new_aci->lastchange = old_aci->lastchange;
++ new_aci->minchange = old_aci->minchange;
++ new_aci->maxchange = old_aci->maxchange;
++ new_aci->warnchange = old_aci->warnchange;
++ new_aci->inactive = old_aci->inactive;
++ new_aci->expire = old_aci->expire;
++ *((rsbac_uid_t *)new_desc) = *((rsbac_old_uid_t *)old_desc);
++ return 0;
++}
++
++rsbac_list_conv_function_t *user_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_UM_USER_OLD_LIST_VERSION:
++ return user_conv;
++ case RSBAC_UM_USER_OLD_OLD_LIST_VERSION:
++ return user_old_conv;
++ default:
++ return NULL;
++ }
++}
++
++static int user_subconv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ *((rsbac_gid_num_t *)new_desc) = *((rsbac_gid_num_t *)old_desc);
++ return 0;
++}
++
++static int user_old_subconv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ *((rsbac_gid_num_t *)new_desc) = *((rsbac_old_gid_t *)old_desc);
++ return 0;
++}
++
++rsbac_list_conv_function_t *user_get_subconv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_UM_USER_OLD_LIST_VERSION:
++ return user_subconv;
++ case RSBAC_UM_USER_OLD_OLD_LIST_VERSION:
++ return user_old_subconv;
++ default:
++ return NULL;
++ }
++}
++
++static int group_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_um_group_entry_t * new_aci = new_data;
++ struct rsbac_um_old_group_entry_t * old_aci = old_data;
++
++ memcpy(&new_aci->name, &old_aci->name, RSBAC_UM_OLD_NAME_LEN);
++ memcpy(&new_aci->pass, &old_aci->pass, RSBAC_UM_PASS_LEN);
++ *((rsbac_gid_t *)new_desc) = *((rsbac_gid_t *)old_desc);
++ return 0;
++}
++
++static int group_old_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ struct rsbac_um_group_entry_t * new_aci = new_data;
++ struct rsbac_um_old_group_entry_t * old_aci = old_data;
++
++ memcpy(&new_aci->name, &old_aci->name, RSBAC_UM_OLD_NAME_LEN);
++ memcpy(&new_aci->pass, &old_aci->pass, RSBAC_UM_PASS_LEN);
++ *((rsbac_gid_t *)new_desc) = *((rsbac_old_gid_t *)old_desc);
++ return 0;
++}
++
++rsbac_list_conv_function_t *group_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_UM_GROUP_OLD_LIST_VERSION:
++ return group_conv;
++ case RSBAC_UM_GROUP_OLD_OLD_LIST_VERSION:
++ return group_old_conv;
++ default:
++ return NULL;
++ }
++}
++
++static int user_pwh_conv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ memcpy(new_data, old_data, sizeof(__u8));
++ *((rsbac_uid_t *) new_desc) = *((rsbac_old_uid_t *) old_desc);
++ return 0;
++}
++
++rsbac_list_conv_function_t *user_pwh_get_conv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_UM_USER_PWHISTORY_OLD_LIST_VERSION:
++ return user_pwh_conv;
++ default:
++ return NULL;
++ }
++}
++
++static int user_pwh_subconv(void *old_desc,
++ void *old_data, void *new_desc, void *new_data)
++{
++ memcpy(new_desc, old_desc, sizeof(__u32));
++ memcpy(new_data, old_data, RSBAC_UM_PASS_LEN);
++ return 0;
++}
++
++rsbac_list_conv_function_t *user_pwh_get_subconv(rsbac_version_t old_version)
++{
++ switch (old_version) {
++ case RSBAC_UM_USER_PWHISTORY_OLD_LIST_VERSION:
++ return user_pwh_subconv;
++ default:
++ return NULL;
++ }
++}
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++static int vset_selector(void *desc, void * param)
++{
++ if (RSBAC_UID_SET(*((rsbac_uid_t *) desc)) == *((rsbac_um_set_t *) param))
++ return TRUE;
++ else
++ return FALSE;
++}
++#endif
++
++#if defined(CONFIG_RSBAC_PROC)
++static int
++stats_um_proc_show(struct seq_file *m, void *v)
++{
++ u_long user_count;
++ u_long group_count;
++ u_long member_count;
++
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_is_initialized()) {
++		rsbac_printk(KERN_WARNING "stats_um_proc_show(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++ rsbac_pr_debug(aef_um, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ user_count = rsbac_list_lol_count(user_handle);
++ member_count = rsbac_list_lol_all_subcount(user_handle);
++ group_count = rsbac_list_count(group_handle);
++
++ seq_printf(m, "UM Status\n---------\n");
++
++ seq_printf(m,
++ "%lu user items with sum of %lu group memberships, %lu group items\n",
++ user_count, member_count, group_count);
++ return 0;
++}
++
++static int stats_um_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, stats_um_proc_show, NULL);
++}
++
++static const struct file_operations stats_um_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = stats_um_proc_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *stats_um;
++
++#endif /* CONFIG_RSBAC_PROC */
++
++static int name_compare(void *data1, void *data2)
++{
++ struct rsbac_um_user_entry_t *entry_p = data1;
++ char *name = data2;
++
++ if (!entry_p || !name)
++ return 1;
++
++ return strcmp(entry_p->name, name);
++}
++
++static int group_name_compare(void *data1, void *data2)
++{
++ struct rsbac_um_group_entry_t *entry_p = data1;
++ char *name = data2;
++
++ if (!entry_p || !name)
++ return 1;
++
++ return strcmp(entry_p->name, name);
++}
++
++/************************************************* */
++/* Init functions */
++/************************************************* */
++
++/* All functions return 0 if no error occurred, and a negative error code */
++/* otherwise. The error codes are defined in rsbac/error.h. */
++
++/************************************************************************** */
++/* Initialization of all UM data structures. After this call, all UM */
++/* data is kept in memory for performance reasons, but is written to disk */
++/* on every change. */
++
++/* Because there can be no access to aci data structures before init, */
++/* rsbac_init_um() will initialize all rw-spinlocks to unlocked. */
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++int rsbac_init_um(void)
++#else
++int __init rsbac_init_um(void)
++#endif
++{
++ int err = 0;
++ struct rsbac_list_info_t *list_info_p;
++ struct rsbac_list_lol_info_t *lol_info_p;
++
++ if (rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_init_um(): RSBAC already initialized\n");
++ return (-RSBAC_EREINIT);
++ }
++
++ /* set rw-spinlocks to unlocked status and init data structures */
++ rsbac_printk(KERN_INFO "rsbac_init_um(): Initializing RSBAC: User Management subsystem\n");
++
++ list_info_p = rsbac_kmalloc_unlocked(sizeof(*list_info_p));
++ if (!list_info_p) {
++ return -ENOMEM;
++ }
++ lol_info_p = rsbac_kmalloc_unlocked(sizeof(*lol_info_p));
++ if (!lol_info_p) {
++ rsbac_kfree(list_info_p);
++ return -ENOMEM;
++ }
++
++ lol_info_p->version = RSBAC_UM_USER_LIST_VERSION;
++ lol_info_p->key = RSBAC_UM_USER_LIST_KEY;
++ lol_info_p->desc_size = sizeof(rsbac_uid_t);
++ lol_info_p->data_size = sizeof(struct rsbac_um_user_entry_t);
++ lol_info_p->subdesc_size = sizeof(rsbac_gid_num_t);
++ lol_info_p->subdata_size = 0;
++ lol_info_p->max_age = 0;
++ nr_user_hashes = RSBAC_UM_NR_USER_LISTS;
++
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &user_handle, lol_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_AUTO_HASH_RESIZE |
++ RSBAC_LIST_OWN_SLAB,
++ NULL, NULL,
++ user_get_conv,
++ user_get_subconv,
++ NULL, NULL,
++ RSBAC_UM_USER_LIST_NAME,
++ RSBAC_AUTO_DEV,
++ nr_user_hashes,
++ rsbac_list_hash_uid,
++ RSBAC_UM_OLD_USER_LIST_NAME);
++ if (err) {
++ char *tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_init_um(): Registering user list of lists %s failed with error %s\n",
++ RSBAC_UM_USER_LIST_NAME, get_error_name(tmp, err));
++ rsbac_kfree(tmp);
++ }
++ }
++
++ list_info_p->version = RSBAC_UM_GROUP_LIST_VERSION;
++ list_info_p->key = RSBAC_UM_GROUP_LIST_KEY;
++ list_info_p->desc_size = sizeof(rsbac_gid_t);
++ list_info_p->data_size = sizeof(struct rsbac_um_group_entry_t);
++ list_info_p->max_age = 0;
++ nr_group_hashes = RSBAC_UM_NR_GROUP_LISTS;
++ err = rsbac_list_register_hashed(RSBAC_LIST_VERSION,
++ &group_handle, list_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_REPLICATE |
++ RSBAC_LIST_AUTO_HASH_RESIZE |
++ RSBAC_LIST_OWN_SLAB,
++ NULL,
++ group_get_conv,
++ NULL, RSBAC_UM_GROUP_LIST_NAME,
++ RSBAC_AUTO_DEV,
++ nr_group_hashes,
++ rsbac_list_hash_gid,
++ RSBAC_UM_OLD_GROUP_LIST_NAME);
++ if (err) {
++ char *tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_init_um(): Registering group list %s failed with error %s\n",
++ RSBAC_UM_GROUP_LIST_NAME, get_error_name(tmp, err));
++ rsbac_kfree(tmp);
++ }
++ }
++
++#ifdef CONFIG_RSBAC_UM_PWHISTORY
++ {
++ __u8 def_max_history = CONFIG_RSBAC_UM_PWHISTORY_MAX;
++
++ lol_info_p->version = RSBAC_UM_USER_PWHISTORY_LIST_VERSION;
++ lol_info_p->key = RSBAC_UM_USER_PWHISTORY_LIST_KEY;
++ lol_info_p->desc_size = sizeof(rsbac_uid_t);
++ lol_info_p->data_size = sizeof(__u8);
++ lol_info_p->subdesc_size = sizeof(__u32);
++ lol_info_p->subdata_size = RSBAC_UM_PASS_LEN;
++ lol_info_p->max_age = 0;
++ nr_user_pwhistory_hashes = RSBAC_UM_NR_USER_LISTS;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &user_pwhistory_handle,
++ lol_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA |
++ RSBAC_LIST_AUTO_HASH_RESIZE,
++ NULL, NULL,
++ user_pwh_get_conv, user_pwh_get_subconv,
++ &def_max_history, NULL,
++ RSBAC_UM_USER_PWHISTORY_LIST_NAME,
++ RSBAC_AUTO_DEV,
++ nr_user_pwhistory_hashes,
++ rsbac_list_hash_uid,
++ RSBAC_UM_OLD_USER_PWHISTORY_LIST_NAME);
++ if (err) {
++ char *tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_init_um(): Registering user password history list of lists %s failed with error %s\n",
++ RSBAC_UM_USER_PWHISTORY_LIST_NAME,
++ get_error_name(tmp,
++ err));
++ rsbac_kfree(tmp);
++ }
++ }
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_UM_ONETIME
++ {
++ lol_info_p->version = RSBAC_UM_ONETIME_LIST_VERSION;
++ lol_info_p->key = RSBAC_UM_ONETIME_LIST_KEY;
++ lol_info_p->desc_size = sizeof(rsbac_uid_t);
++ lol_info_p->data_size = 0;
++ lol_info_p->subdesc_size = RSBAC_UM_PASS_LEN;
++ lol_info_p->subdata_size = 0;
++ lol_info_p->max_age = 0;
++ err = rsbac_list_lol_register_hashed(RSBAC_LIST_VERSION,
++ &onetime_handle,
++ lol_info_p,
++#ifdef CONFIG_RSBAC_DEV_USER_BACKUP
++ RSBAC_LIST_BACKUP |
++#endif
++ RSBAC_LIST_PERSIST |
++ RSBAC_LIST_DEF_DATA |
++ RSBAC_LIST_AUTO_HASH_RESIZE |
++ RSBAC_LIST_OWN_SLAB,
++ NULL, NULL,
++ NULL, NULL, /* conv */
++ NULL, NULL,
++ RSBAC_UM_ONETIME_LIST_NAME,
++ RSBAC_AUTO_DEV,
++ 1,
++ rsbac_list_hash_uid,
++ NULL);
++ if (err) {
++ char *tmp = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ rsbac_printk(KERN_WARNING "rsbac_init_um(): Registering user password one-time list of lists %s failed with error %s\n",
++				     RSBAC_UM_ONETIME_LIST_NAME,
++ get_error_name(tmp,
++ err));
++ rsbac_kfree(tmp);
++ }
++ } else {
++ rsbac_list_lol_max_items(onetime_handle,
++ RSBAC_UM_ONETIME_LIST_KEY,
++ RSBAC_LIST_MAX_NR_ITEMS,
++ CONFIG_RSBAC_UM_ONETIME_MAX);
++ }
++ }
++#endif
++
++
++#if defined(CONFIG_RSBAC_PROC)
++ stats_um = proc_create("stats_um", S_IFREG | S_IRUGO,
++ proc_rsbac_root_p, &stats_um_proc_fops);
++#endif
++
++ rsbac_pr_debug(ds_um, "Ready.\n");
++ rsbac_kfree(list_info_p);
++ rsbac_kfree(lol_info_p);
++ return err;
++}
++
++/***************************************************/
++/* We also need some status information... */
++
++int rsbac_stats_um(void)
++{
++ u_long user_count;
++ u_long group_count;
++ u_long member_count;
++
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_stats_um(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++ rsbac_pr_debug(aef_um, "calling ADF\n");
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none, rsbac_attribute_value)) {
++ return -EPERM;
++ }
++
++ user_count = rsbac_list_lol_count(user_handle);
++ member_count = rsbac_list_lol_all_subcount(user_handle);
++ group_count = rsbac_list_count(group_handle);
++ rsbac_printk(KERN_INFO "UM Status\n---------\n");
++
++ rsbac_printk(KERN_INFO "%lu user items with sum of %lu group memberships, %lu group items\n",
++ user_count, member_count, group_count);
++ return 0;
++}
++
++/************************************************* */
++/* Access functions */
++/************************************************* */
++
++/* Trying to access a never created or removed user entry returns an error! */
++#ifndef offset_in_page
++#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
++#endif
++
++static inline void new_salt(__u32 * salt_p)
++{
++ *salt_p = 0;
++ while (!*salt_p)
++ get_random_bytes(salt_p, sizeof(*salt_p));
++}
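++
++/* Note: new_salt() always produces a non-zero random 32-bit salt, so a
++ * stored password whose leading word is zero can be recognized as "no
++ * password set" by rsbac_um_check_pass() and rsbac_um_check_account(). */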
++
++int rsbac_um_hash(char *pass, __u32 salt)
++{
++#ifdef CONFIG_RSBAC_UM_DIGEST
++ char *buffer;
++ struct scatterlist sg[1];
++ struct crypto_hash *tfm;
++ struct hash_desc hd;
++ u_int len;
++ u_int plen;
++ int err = 0;
++
++ plen = strlen(pass);
++ len = rsbac_max(plen + sizeof(salt), RSBAC_UM_PASS_LEN);
++ buffer = rsbac_kmalloc_unlocked(len);
++ if (!buffer)
++ return -RSBAC_ENOMEM;
++
++ if (!crypto_has_hash("sha1", 0, 0)) {
++ rsbac_printk(KERN_WARNING "rsbac_um_hash(): User management configured for crypto API with SHA1, but SHA1 is not available!\n");
++ err = -RSBAC_ENOTFOUND;
++ goto out;
++ }
++
++ tfm = crypto_alloc_hash("sha1", 0, 0);
++	if (IS_ERR(tfm)) {
++ rsbac_printk(KERN_WARNING "pid %u/%.15s: rsbac_um_hash(): Could not allocate tfm for SHA1!\n",
++ current->pid, current->comm);
++ err = -RSBAC_ENOTFOUND;
++ goto out;
++ }
++ memset(buffer, 0, len);
++ memcpy(buffer, &salt, sizeof(salt));
++ strcpy(buffer + sizeof(salt), pass);
++ sg_init_one(sg, buffer, plen + sizeof(salt));
++
++ hd.tfm = tfm;
++ hd.flags = 0;
++ err = crypto_hash_init(&hd);
++ if(err) {
++ rsbac_printk(KERN_WARNING "pid %u/%.15s: rsbac_um_hash(): crypto_hash_init() failed with error %u!\n",
++ current->pid, current->comm, err);
++ goto out;
++ }
++ err = crypto_hash_update(&hd, sg, sg[0].length);
++ if(err) {
++ rsbac_printk(KERN_WARNING "pid %u/%.15s: rsbac_um_hash(): crypto_hash_update() failed with error %u!\n",
++ current->pid, current->comm, err);
++ goto out;
++ }
++ err = crypto_hash_final(&hd, pass);
++ if(err) {
++ rsbac_printk(KERN_WARNING "pid %u/%.15s: rsbac_um_hash(): crypto_hash_final() failed with error %u!\n",
++ current->pid, current->comm, err);
++ goto out;
++ }
++ crypto_free_hash(tfm);
++out:
++ rsbac_kfree(buffer);
++ return err;
++#else
++	/* no crypto: just zero the rest of the string to allow comparison */
++ u_int len;
++
++ len = strlen(pass);
++ if (len < RSBAC_UM_PASS_LEN)
++ memset(pass + len, 0, RSBAC_UM_PASS_LEN - len);
++ return 0;
++#endif
++}
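++
++/* Stored password layout (RSBAC_UM_PASS_LEN bytes): bytes 0..3 hold the
++ * salt, the remaining bytes hold the SHA-1 digest of salt||password (or,
++ * without CONFIG_RSBAC_UM_DIGEST, the zero-padded cleartext). A minimal
++ * verification sketch, as used by rsbac_um_check_pass() below:
++ *
++ *   __u32 salt = *(__u32 *) entry_p->pass;
++ *   rsbac_um_hash(pass_copy, salt);
++ *   ok = !memcmp(pass_copy, entry_p->pass + sizeof(salt),
++ *                RSBAC_UM_PASS_LEN - sizeof(salt));
++ */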
++
++int rsbac_um_get_uid(rsbac_list_ta_number_t ta_number,
++ char *name,
++ rsbac_uid_t * uid_p)
++{
++ int err;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ rsbac_um_set_t vset;
++#endif
++
++ if (!name || !uid_p)
++ return -RSBAC_EINVALIDPOINTER;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ vset = RSBAC_UID_SET(*uid_p);
++ if (vset == RSBAC_UM_VIRTUAL_KEEP) {
++ char * p = name;
++
++ while (*p && (*p != '/'))
++ p++;
++ if (*p) {
++ *p = 0;
++ err = rsbac_get_vset_num(name, &vset);
++ if (err)
++ return err;
++ p++;
++ name = p;
++ if (vset == RSBAC_UM_VIRTUAL_KEEP)
++ vset = rsbac_get_vset();
++ } else
++ vset = rsbac_get_vset();
++ }
++ if (!strcmp(name, "ALL")) {
++ *uid_p = RSBAC_GEN_UID(vset, RSBAC_ALL_USERS);
++ return 0;
++ }
++ if (vset != RSBAC_UM_VIRTUAL_ALL)
++ err = rsbac_ta_list_lol_get_desc_selector(ta_number,
++ user_handle,
++ uid_p,
++ name,
++ name_compare,
++ vset_selector,
++ &vset);
++ else
++#endif
++ err = rsbac_ta_list_lol_get_desc(ta_number,
++ user_handle,
++ uid_p,
++ name,
++ name_compare);
++ if (!err)
++ return 0;
++ else
++ return -RSBAC_ENOTFOUND;
++}
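++
++/* Note on CONFIG_RSBAC_UM_VIRTUAL: when *uid_p carries the set
++ * RSBAC_UM_VIRTUAL_KEEP, the name may carry a virtual set prefix separated
++ * by '/', e.g. the (illustrative) "2/jane" looks up "jane" in virtual set 2,
++ * while a plain "jane" is looked up in the caller's current set. */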
++
++int rsbac_um_get_gid(rsbac_list_ta_number_t ta_number,
++ char *name, rsbac_gid_t * gid_p)
++{
++ int err;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ rsbac_um_set_t vset;
++#endif
++
++ if (!name || !gid_p)
++ return -RSBAC_EINVALIDPOINTER;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ vset = RSBAC_GID_SET(*gid_p);
++ if (vset == RSBAC_UM_VIRTUAL_KEEP) {
++ char * p = name;
++
++ while (*p && (*p != '/'))
++ p++;
++ if (*p) {
++ *p = 0;
++ err = rsbac_get_vset_num(name, &vset);
++ if (err)
++ return err;
++ p++;
++ name = p;
++ if (vset == RSBAC_UM_VIRTUAL_KEEP)
++ vset = rsbac_get_vset();
++ } else
++ vset = rsbac_get_vset();
++ }
++ if (!strcmp(name, "ALL")) {
++ *gid_p = RSBAC_GEN_GID(vset, RSBAC_ALL_GROUPS);
++ return 0;
++ }
++ if (vset != RSBAC_UM_VIRTUAL_ALL)
++ err = rsbac_ta_list_get_desc_selector(ta_number,
++ group_handle,
++ gid_p,
++ name,
++ group_name_compare,
++ vset_selector,
++ &vset);
++ else
++#endif
++ err = rsbac_ta_list_get_desc(ta_number,
++ group_handle,
++ gid_p,
++ name, group_name_compare);
++ if (!err)
++ return 0;
++ else
++ return -RSBAC_ENOTFOUND;
++}
++
++int rsbac_um_add_user(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t * user_p,
++ struct rsbac_um_user_entry_t *entry_p,
++ char *pass, rsbac_time_t ttl)
++{
++ int err;
++ rsbac_uid_t user;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_um_add_user(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++ if (!entry_p || !user_p)
++ return -RSBAC_EINVALIDPOINTER;
++ user = *user_p;
++#ifdef CONFIG_RSBAC_UM_EXCL
++ if (!rsbac_um_no_excl) {
++ rsbac_gid_t gid = RSBAC_GEN_GID(RSBAC_UID_SET(user),
++ entry_p->group);
++ if (!rsbac_ta_list_exist(ta_number,
++ group_handle,
++ &gid)) {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_GID_SET(gid))
++ rsbac_printk(KERN_INFO "rsbac_um_add_user(): gid %u/%u not known to RSBAC User Management!\n",
++ RSBAC_GID_SET(gid), entry_p->group);
++ else
++#endif
++ rsbac_printk(KERN_INFO "rsbac_um_add_user(): gid %u not known to RSBAC User Management!\n",
++ entry_p->group);
++ return -RSBAC_EINVALIDVALUE;
++ }
++ }
++#endif
++ if (RSBAC_UID_NUM(user) == RSBAC_NO_USER) {
++ user = RSBAC_GEN_UID(RSBAC_UID_SET(user),
++ CONFIG_RSBAC_UM_USER_MIN);
++ while (rsbac_ta_list_lol_exist
++ (ta_number, user_handle, &user))
++ user++;
++ } else
++ if (rsbac_ta_list_lol_exist
++ (ta_number, user_handle, &user))
++ return -RSBAC_EEXISTS;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(user))
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: adding user %u/%u\n",
++ current->pid, current->comm,
++ RSBAC_UID_SET(user), RSBAC_UID_NUM(user));
++ else
++#endif
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: adding user %u\n",
++ current->pid, current->comm, RSBAC_UID_NUM(user));
++ if (pass) {
++ __u32 salt;
++
++ new_salt(&salt);
++ err = rsbac_um_hash(pass, salt);
++ if (err)
++ return err;
++ memcpy(entry_p->pass, &salt, sizeof(salt));
++ memcpy(entry_p->pass + sizeof(salt), pass,
++ RSBAC_UM_PASS_LEN - sizeof(salt));
++ } else
++ memset(entry_p->pass, 0, RSBAC_UM_PASS_LEN);
++ err =
++ rsbac_ta_list_lol_add_ttl(ta_number,
++ user_handle, ttl,
++ &user, entry_p);
++ if (!err)
++ *user_p = user;
++ return err;
++}
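++
++/* Illustrative use: passing a uid whose number part is RSBAC_NO_USER makes
++ * rsbac_um_add_user() allocate the first free uid at or above
++ * CONFIG_RSBAC_UM_USER_MIN in the requested set and return it via *user_p;
++ * a NULL pass stores an all-zero password field. */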
++
++int rsbac_um_add_group(rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t * group_p,
++ struct rsbac_um_group_entry_t *entry_p,
++ char *pass, rsbac_time_t ttl)
++{
++ int err;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_um_add_group(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++ if (!entry_p || !group_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (RSBAC_GID_NUM(*group_p) == RSBAC_NO_USER) {
++ *group_p = RSBAC_GEN_GID(RSBAC_GID_SET(*group_p), CONFIG_RSBAC_UM_GROUP_MIN);
++ while (rsbac_ta_list_exist
++ (ta_number, group_handle,
++ group_p))
++ (*group_p)++;
++ } else
++ if (rsbac_ta_list_exist
++ (ta_number, group_handle, group_p))
++ return -RSBAC_EEXISTS;
++ if (RSBAC_GID_SET(*group_p))
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: adding group %u/%u\n",
++ current->pid, current->comm,
++ RSBAC_GID_SET(*group_p), RSBAC_GID_NUM(*group_p));
++ else
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: adding group %u\n",
++ current->pid, current->comm, RSBAC_GID_NUM(*group_p));
++ if (pass) {
++ __u32 salt;
++
++ new_salt(&salt);
++ err = rsbac_um_hash(pass, salt);
++ if (err)
++ return err;
++ memcpy(entry_p->pass, &salt, sizeof(salt));
++ memcpy(entry_p->pass + sizeof(salt), pass,
++ RSBAC_UM_PASS_LEN - sizeof(salt));
++ } else
++ memset(entry_p->pass, 0, RSBAC_UM_PASS_LEN);
++ return rsbac_ta_list_add_ttl(ta_number,
++ group_handle,
++ ttl, group_p, entry_p);
++}
++
++int rsbac_um_add_gm(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user, rsbac_gid_num_t group, rsbac_time_t ttl)
++{
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_um_add_gm(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++#ifdef CONFIG_RSBAC_UM_EXCL
++ if (!rsbac_um_no_excl) {
++ rsbac_gid_t gid = RSBAC_GEN_GID(RSBAC_UID_SET(user),
++ group);
++
++		if (!rsbac_ta_list_lol_exist
++ (ta_number, user_handle, &user)) {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(user))
++ rsbac_printk(KERN_INFO "rsbac_um_add_gm(): uid %u/%u not known to RSBAC User Management!\n",
++ RSBAC_UID_SET(user), RSBAC_UID_NUM(user));
++ else
++#endif
++ rsbac_printk(KERN_INFO "rsbac_um_add_gm(): uid %u not known to RSBAC User Management!\n",
++				     RSBAC_UID_NUM(user));
++ return -RSBAC_ENOTFOUND;
++ }
++ if (!rsbac_ta_list_exist
++ (ta_number, group_handle, &gid)) {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_GID_SET(gid))
++ rsbac_printk(KERN_INFO "rsbac_um_add_gm(): gid %u/%u not known to RSBAC User Management!\n",
++ RSBAC_GID_SET(gid), group);
++ else
++#endif
++ rsbac_printk(KERN_INFO "rsbac_um_add_gm(): gid %u not known to RSBAC User Management!\n",
++ group);
++ return -RSBAC_ENOTFOUND;
++ }
++ }
++#endif
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: adding user %u group %u\n",
++ current->pid, current->comm, user, group);
++ return rsbac_ta_list_lol_subadd_ttl(ta_number,
++ user_handle,
++ ttl, &user, &group, NULL);
++}
++
++int rsbac_um_mod_user(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user,
++ enum rsbac_um_mod_t mod,
++ union rsbac_um_mod_data_t *data_p)
++{
++ int err;
++ struct rsbac_um_user_entry_t *entry_p;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_um_mod_user(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++ if (!data_p && (mod != UM_pass)
++ )
++ return -RSBAC_EINVALIDPOINTER;
++ if (!rsbac_ta_list_lol_exist
++ (ta_number, user_handle, &user))
++ return -RSBAC_ENOTFOUND;
++
++ entry_p = rsbac_kmalloc_unlocked(sizeof(*entry_p));
++ if (!entry_p)
++ return -RSBAC_ENOMEM;
++ err =
++ rsbac_ta_list_lol_get_data_ttl(ta_number,
++ user_handle,
++ NULL, &user, entry_p);
++ if (err) {
++ rsbac_kfree(entry_p);
++ return err;
++ }
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: modifying user %u\n",
++ current->pid, current->comm, user);
++ switch (mod) {
++ case UM_name:
++ {
++ rsbac_uid_t tmp_user = user;
++
++ if (!rsbac_um_get_uid
++ (ta_number, data_p->string, &tmp_user)
++ && (tmp_user != user)
++ )
++ return -RSBAC_EEXISTS;
++ strncpy(entry_p->name, data_p->string,
++ RSBAC_UM_NAME_LEN);
++ entry_p->name[RSBAC_UM_NAME_LEN - 1] = 0;
++ }
++ break;
++
++ case UM_pass:
++ if (data_p) {
++ __u32 salt;
++
++ new_salt(&salt);
++ err = rsbac_um_hash(data_p->string, salt);
++ if (err) {
++ rsbac_kfree(entry_p);
++ return err;
++ }
++ memcpy(entry_p->pass, &salt, sizeof(salt));
++ memcpy(entry_p->pass + sizeof(salt),
++ data_p->string,
++ RSBAC_UM_PASS_LEN - sizeof(salt));
++ } else
++ memset(entry_p->pass, 0, RSBAC_UM_PASS_LEN);
++ entry_p->lastchange = RSBAC_CURRENT_TIME / 86400;
++ break;
++
++ case UM_cryptpass:
++ memcpy(entry_p->pass, data_p->string, RSBAC_UM_PASS_LEN);
++ break;
++
++ case UM_fullname:
++ strncpy(entry_p->fullname, data_p->string,
++ RSBAC_UM_FULLNAME_LEN);
++ entry_p->fullname[RSBAC_UM_FULLNAME_LEN - 1] = 0;
++ break;
++
++ case UM_homedir:
++ strncpy(entry_p->homedir, data_p->string,
++ RSBAC_UM_HOMEDIR_LEN);
++ entry_p->homedir[RSBAC_UM_HOMEDIR_LEN - 1] = 0;
++ break;
++
++ case UM_shell:
++ strncpy(entry_p->shell, data_p->string,
++ RSBAC_UM_SHELL_LEN);
++ entry_p->shell[RSBAC_UM_SHELL_LEN - 1] = 0;
++ break;
++
++ case UM_group:
++#ifdef CONFIG_RSBAC_UM_EXCL
++ {
++ rsbac_gid_t gid = RSBAC_GEN_GID(RSBAC_UID_SET(user),
++ data_p->group);
++ if (!rsbac_um_no_excl
++ && !rsbac_ta_list_exist(ta_number,
++ group_handle,
++ &gid)) {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_GID_SET(gid))
++ rsbac_printk(KERN_INFO "rsbac_um_mod_user(): gid %u/%u not known to RSBAC User Management!\n",
++ RSBAC_GID_SET(gid), RSBAC_GID_NUM(gid));
++ else
++#endif
++ rsbac_printk(KERN_INFO "rsbac_um_mod_user(): gid %u not known to RSBAC User Management!\n",
++ RSBAC_GID_NUM(gid));
++ rsbac_kfree(entry_p);
++ return -RSBAC_EINVALIDVALUE;
++ }
++ }
++#endif
++ entry_p->group = data_p->group;
++ break;
++
++ case UM_lastchange:
++ entry_p->lastchange = data_p->days;
++ break;
++
++ case UM_minchange:
++ entry_p->minchange = data_p->days;
++ break;
++
++ case UM_maxchange:
++ entry_p->maxchange = data_p->days;
++ break;
++
++ case UM_warnchange:
++ entry_p->warnchange = data_p->days;
++ break;
++
++ case UM_inactive:
++ entry_p->inactive = data_p->days;
++ break;
++
++ case UM_expire:
++ entry_p->expire = data_p->days;
++ break;
++
++ case UM_ttl:
++ err =
++ rsbac_ta_list_lol_add_ttl(ta_number,
++ user_handle,
++ data_p->ttl, &user, entry_p);
++ rsbac_kfree(entry_p);
++ return err;
++
++ default:
++ rsbac_kfree(entry_p);
++ return -RSBAC_EINVALIDREQUEST;
++ }
++
++ err =
++ rsbac_ta_list_lol_add_ttl(ta_number,
++ user_handle,
++ RSBAC_LIST_TTL_KEEP, &user, entry_p);
++ rsbac_kfree(entry_p);
++ return err;
++}
++
++int rsbac_um_mod_group(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t group,
++ enum rsbac_um_mod_t mod,
++ union rsbac_um_mod_data_t *data_p)
++{
++ int err;
++ struct rsbac_um_group_entry_t *entry_p;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_um_mod_group(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++ if (!data_p && (mod != UM_pass)
++ )
++ return -RSBAC_EINVALIDPOINTER;
++ if (!rsbac_ta_list_exist
++ (ta_number, group_handle, &group))
++ return -RSBAC_ENOTFOUND;
++
++ entry_p = rsbac_kmalloc_unlocked(sizeof(*entry_p));
++ if (!entry_p)
++ return -RSBAC_ENOMEM;
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ group_handle,
++ NULL, &group, entry_p);
++ if (err) {
++ rsbac_kfree(entry_p);
++ return err;
++ }
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: modifying group %u\n",
++ current->pid, current->comm, group);
++ switch (mod) {
++ case UM_name:
++ {
++ rsbac_gid_t tmp_group = group;
++
++ if (!rsbac_um_get_gid
++ (ta_number, data_p->string, &tmp_group)
++ && (tmp_group != group)
++ )
++ return -RSBAC_EEXISTS;
++ strncpy(entry_p->name, data_p->string,
++ RSBAC_UM_NAME_LEN);
++ entry_p->name[RSBAC_UM_NAME_LEN - 1] = 0;
++ }
++ break;
++
++ case UM_pass:
++ if (data_p) {
++ __u32 salt;
++
++ new_salt(&salt);
++ err = rsbac_um_hash(data_p->string, salt);
++ if (err) {
++ rsbac_kfree(entry_p);
++ return err;
++ }
++ memcpy(entry_p->pass, &salt, sizeof(salt));
++ memcpy(entry_p->pass + sizeof(salt),
++ data_p->string,
++ RSBAC_UM_PASS_LEN - sizeof(salt));
++ } else
++ memset(entry_p->pass, 0, RSBAC_UM_PASS_LEN);
++ break;
++
++ case UM_cryptpass:
++ memcpy(entry_p->pass, data_p->string, RSBAC_UM_PASS_LEN);
++ break;
++
++ case UM_ttl:
++ err =
++ rsbac_ta_list_add_ttl(ta_number,
++ group_handle,
++ data_p->ttl, &group, entry_p);
++ rsbac_kfree(entry_p);
++ return err;
++
++ default:
++ rsbac_kfree(entry_p);
++ return -RSBAC_EINVALIDREQUEST;
++ }
++
++ err =
++ rsbac_ta_list_add_ttl(ta_number,
++ group_handle,
++ RSBAC_LIST_TTL_KEEP, &group, entry_p);
++ rsbac_kfree(entry_p);
++ return err;
++}
++
++int rsbac_um_get_user_item(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user,
++ enum rsbac_um_mod_t mod,
++ union rsbac_um_mod_data_t *data_p)
++{
++ int err;
++ struct rsbac_um_user_entry_t *entry_p;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_um_get_user_item(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++ if (!data_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!rsbac_ta_list_lol_exist
++ (ta_number, user_handle, &user))
++ return -RSBAC_ENOTFOUND;
++ if (mod == UM_ttl)
++ return rsbac_ta_list_lol_get_data_ttl(ta_number,
++ user_handle,
++ &data_p->ttl, &user,
++ NULL);
++
++ entry_p = rsbac_kmalloc_unlocked(sizeof(*entry_p));
++ if (!entry_p)
++ return -RSBAC_ENOMEM;
++ err =
++ rsbac_ta_list_lol_get_data_ttl(ta_number,
++ user_handle,
++ NULL, &user, entry_p);
++ if (err) {
++ rsbac_kfree(entry_p);
++ return err;
++ }
++ switch (mod) {
++ case UM_name:
++ strcpy(data_p->string, entry_p->name);
++ break;
++
++ case UM_pass:
++ memcpy(data_p->string, entry_p->pass, RSBAC_UM_PASS_LEN);
++ break;
++
++ case UM_fullname:
++ strcpy(data_p->string, entry_p->fullname);
++ break;
++
++ case UM_homedir:
++ strcpy(data_p->string, entry_p->homedir);
++ break;
++
++ case UM_shell:
++ strcpy(data_p->string, entry_p->shell);
++ break;
++
++ case UM_group:
++ data_p->group = entry_p->group;
++ break;
++
++ case UM_lastchange:
++ data_p->days = entry_p->lastchange;
++ break;
++
++ case UM_minchange:
++ data_p->days = entry_p->minchange;
++ break;
++
++ case UM_maxchange:
++ data_p->days = entry_p->maxchange;
++ break;
++
++ case UM_warnchange:
++ data_p->days = entry_p->warnchange;
++ break;
++
++ case UM_inactive:
++ data_p->days = entry_p->inactive;
++ break;
++
++ case UM_expire:
++ data_p->days = entry_p->expire;
++ break;
++
++ default:
++ rsbac_kfree(entry_p);
++ return -RSBAC_EINVALIDREQUEST;
++ }
++
++ rsbac_kfree(entry_p);
++ return 0;
++}
++
++int rsbac_um_get_group_item(rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t group,
++ enum rsbac_um_mod_t mod,
++ union rsbac_um_mod_data_t *data_p)
++{
++ int err;
++ struct rsbac_um_group_entry_t *entry_p;
++
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_um_get_group_item(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++ if (!data_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!rsbac_ta_list_exist
++ (ta_number, group_handle, &group))
++ return -RSBAC_ENOTFOUND;
++ if (mod == UM_ttl)
++ return rsbac_ta_list_get_data_ttl(ta_number,
++ group_handle,
++ &data_p->ttl, &group,
++ NULL);
++
++ entry_p = rsbac_kmalloc_unlocked(sizeof(*entry_p));
++ if (!entry_p)
++ return -RSBAC_ENOMEM;
++ err =
++ rsbac_ta_list_get_data_ttl(ta_number,
++ group_handle,
++ NULL, &group, entry_p);
++ if (err) {
++ rsbac_kfree(entry_p);
++ return err;
++ }
++ switch (mod) {
++ case UM_name:
++ strcpy(data_p->string, entry_p->name);
++ break;
++
++ case UM_pass:
++ memcpy(data_p->string, entry_p->pass, RSBAC_UM_PASS_LEN);
++ break;
++
++ default:
++ rsbac_kfree(entry_p);
++ return -RSBAC_EINVALIDREQUEST;
++ }
++
++ rsbac_kfree(entry_p);
++ return 0;
++}
++
++int rsbac_um_user_exists(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user)
++{
++ return rsbac_ta_list_lol_exist(ta_number,
++ user_handle,
++ &user);
++}
++
++int rsbac_um_group_exists(rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t group)
++{
++ return rsbac_ta_list_exist(ta_number,
++ group_handle,
++ &group);
++}
++
++int rsbac_um_remove_user(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user)
++{
++ if (!rsbac_ta_list_lol_exist
++ (ta_number, user_handle, &user))
++ return -RSBAC_ENOTFOUND;
++ return rsbac_ta_list_lol_remove(ta_number,
++ user_handle,
++ &user);
++}
++
++int rsbac_um_remove_group(rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t group)
++{
++ rsbac_gid_num_t group_num;
++
++ if (!rsbac_ta_list_exist
++ (ta_number, group_handle, &group))
++ return -RSBAC_ENOTFOUND;
++ group_num = RSBAC_GID_NUM(group);
++ rsbac_ta_list_lol_subremove_from_all(ta_number,
++ user_handle,
++ &group_num);
++ return rsbac_ta_list_remove(ta_number,
++ group_handle,
++ &group);
++}
++
++int rsbac_um_remove_gm(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user, rsbac_gid_num_t group)
++{
++ if (!rsbac_is_initialized()) {
++ rsbac_printk(KERN_WARNING "rsbac_um_remove_gm(): RSBAC not initialized\n");
++ return (-RSBAC_ENOTINITIALIZED);
++ }
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: removing user %u group %u\n",
++ current->pid, current->comm, user, group);
++ return rsbac_ta_list_lol_subremove(ta_number,
++ user_handle,
++ &user, &group);
++}
++
++int rsbac_um_get_user_entry(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user,
++ struct rsbac_um_user_entry_t *entry_p,
++ rsbac_time_t * ttl_p)
++{
++ return rsbac_ta_list_lol_get_data_ttl(ta_number,
++ user_handle,
++ ttl_p, &user, entry_p);
++}
++
++int rsbac_um_get_next_user(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t old_user, rsbac_uid_t * next_user_p)
++{
++ rsbac_uid_t *old_user_p;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ rsbac_um_set_t vset;
++#endif
++
++ if (old_user == RSBAC_NO_USER)
++ old_user_p = NULL;
++ else
++ old_user_p = &old_user;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ vset = RSBAC_UID_SET(old_user);
++ if (vset != RSBAC_UM_VIRTUAL_ALL)
++ return rsbac_ta_list_lol_get_next_desc_selector(ta_number,
++ user_handle,
++ old_user_p,
++ next_user_p,
++ vset_selector,
++ &vset);
++ else
++#endif
++ return rsbac_ta_list_lol_get_next_desc(ta_number,
++ user_handle,
++ old_user_p,
++ next_user_p);
++}
++
++int rsbac_um_get_user_list(rsbac_list_ta_number_t ta_number,
++ rsbac_um_set_t vset,
++ rsbac_uid_t ** list_pp)
++{
++ if(!list_pp)
++ return rsbac_ta_list_lol_count(ta_number, user_handle);
++ else {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (vset != RSBAC_UM_VIRTUAL_ALL)
++ return rsbac_ta_list_lol_get_all_desc_selector(
++ ta_number,
++ user_handle,
++ (void **) list_pp,
++ vset_selector,
++ &vset);
++ else
++#endif
++ return rsbac_ta_list_lol_get_all_desc(ta_number,
++ user_handle,
++ (void **) list_pp);
++ }
++}
++
++int rsbac_um_get_gm_list(rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user, rsbac_gid_num_t ** list_pp)
++{
++ if (!list_pp)
++ return rsbac_ta_list_lol_subcount(ta_number,
++ user_handle,
++ &user);
++ else
++ return rsbac_ta_list_lol_get_all_subdesc_ttl(ta_number,
++ user_handle,
++ &user,
++ (void **) list_pp,
++ NULL);
++}
++
++int rsbac_um_get_gm_user_list(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t group,
++ rsbac_uid_num_t ** list_pp)
++ {
++ int j;
++ long all_count = 0;
++ long copy_count = 0;
++ long tmp_count;
++ rsbac_uid_t * tmp_list_p;
++ rsbac_uid_num_t * collect_list_p;
++ rsbac_uid_num_t * p;
++ rsbac_um_set_t gid_set;
++ rsbac_gid_num_t gid_num;
++
++#ifdef CONFIG_RSBAC_UM_EXCL
++ if(!rsbac_um_no_excl && !rsbac_ta_list_exist(ta_number, group_handle, &group))
++ {
++ return -RSBAC_ENOTFOUND;
++ }
++#endif
++ all_count = rsbac_ta_list_lol_count(ta_number, user_handle);
++ if(!list_pp || (all_count <= 0))
++ return all_count;
++
++ /* provide some extra room in case new groups have been added during this function run */
++ all_count += EXTRA_ROOM;
++ collect_list_p = rsbac_kmalloc_unlocked(all_count * sizeof(rsbac_uid_num_t));
++ if(!collect_list_p)
++ return -RSBAC_ENOMEM;
++ p = collect_list_p;
++ tmp_count = rsbac_ta_list_lol_get_all_desc(ta_number, user_handle, (void *) &tmp_list_p);
++ if(tmp_count > 0)
++ {
++ gid_set = RSBAC_GID_SET(group);
++ gid_num = RSBAC_GID_NUM(group);
++ for(j=0; j<tmp_count; j++)
++ {
++ if( (RSBAC_UID_SET(tmp_list_p[j]) == gid_set)
++ && rsbac_ta_list_lol_subexist(ta_number, user_handle, &tmp_list_p[j], &gid_num))
++ {
++ *p = RSBAC_UID_NUM(tmp_list_p[j]);
++ p++;
++ copy_count++;
++ }
++ }
++ rsbac_kfree(tmp_list_p);
++ }
++ if(!copy_count)
++ rsbac_kfree(collect_list_p);
++ else
++ *list_pp = collect_list_p;
++ return copy_count;
++ }
++
++int rsbac_um_get_group_list(rsbac_list_ta_number_t ta_number,
++ rsbac_um_set_t vset,
++ rsbac_gid_t ** list_pp)
++{
++ if(!list_pp)
++ return rsbac_ta_list_count(ta_number, group_handle);
++ else {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (vset != RSBAC_UM_VIRTUAL_ALL)
++ return rsbac_ta_list_get_all_desc_selector(
++ ta_number,
++ group_handle,
++ (void **) list_pp,
++ vset_selector,
++ &vset);
++ else
++#endif
++ return rsbac_ta_list_get_all_desc(ta_number,
++ group_handle,
++ (void **) list_pp);
++ }
++}
++
++int rsbac_um_check_pass(rsbac_uid_t uid, char *pass)
++{
++ int err;
++ struct rsbac_um_user_entry_t *entry_p;
++ __u32 salt;
++ u_long curdays;
++ char * pass_copy;
++
++ if (!pass)
++ return -RSBAC_EINVALIDPOINTER;
++ entry_p = rsbac_kmalloc_unlocked(sizeof(*entry_p));
++ if (!entry_p)
++ return -RSBAC_ENOMEM;
++ err = rsbac_ta_list_lol_get_data_ttl(0, user_handle,
++ NULL, &uid, entry_p);
++ if (err)
++ goto out_free;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(uid))
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: checking password for user %u/%u\n",
++ current->pid, current->comm, RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ else
++#endif
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: checking password for user %u\n",
++ current->pid, current->comm, RSBAC_UID_NUM(uid));
++ /* check whether account or password has expired */
++ curdays = RSBAC_CURRENT_TIME / 86400;
++ if ((curdays > entry_p->expire) && (entry_p->expire != -1)
++ && (entry_p->expire != 0) && (entry_p->lastchange != 0)) {
++ err = -RSBAC_EEXPIRED;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(uid))
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: account for user %u/%u has expired\n",
++ current->pid, current->comm, RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ else
++#endif
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: account for user %u has expired\n",
++ current->pid, current->comm, RSBAC_UID_NUM(uid));
++ goto out_free;
++ }
++ if ((curdays >
++ (entry_p->lastchange + entry_p->maxchange +
++ entry_p->inactive))
++ && (entry_p->maxchange != -1)
++ && (entry_p->maxchange)
++ && (entry_p->inactive != -1)
++ && (entry_p->inactive)
++ && (entry_p->lastchange)
++ ) {
++ err = -RSBAC_EEXPIRED;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(uid))
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: password for user %u/%u has expired\n",
++ current->pid, current->comm, RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ else
++#endif
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: password for user %u has expired\n",
++ current->pid, current->comm, RSBAC_UID_NUM(uid));
++ goto out_free;
++ }
++
++/* rsbac_um_hash destroys old pass, so make a copy */
++ pass_copy = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if (!pass_copy) {
++ err = -RSBAC_ENOMEM;
++ goto out_free;
++ }
++ strncpy(pass_copy, pass, RSBAC_MAXNAMELEN);
++ pass_copy[RSBAC_MAXNAMELEN - 1] = 0;
++ salt = *((__u32 *) entry_p->pass);
++ if ( !salt
++ || rsbac_um_hash(pass_copy, salt)
++ || memcmp (pass_copy, entry_p->pass + sizeof(salt),
++ RSBAC_UM_PASS_LEN - sizeof(salt))) {
++#ifdef CONFIG_RSBAC_UM_ONETIME
++ rsbac_um_password_t * pw_array;
++ int count;
++
++ count = rsbac_list_lol_get_all_subdesc(onetime_handle,
++ &uid, (void **) &pw_array);
++ if (count > 0) {
++ u_int i;
++
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: check %u one-time passwords for user %u/%u\n",
++ current->pid, current->comm, count, RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ err = -EPERM;
++ for (i=0; i<count ;i++) {
++ salt = *((__u32 *) pw_array[i]);
++ strncpy(pass_copy, pass, RSBAC_MAXNAMELEN);
++ pass_copy[RSBAC_MAXNAMELEN - 1] = 0;
++ if (!salt || rsbac_um_hash(pass_copy, salt))
++ continue;
++ if (!memcmp
++ (pass_copy, pw_array[i] + sizeof(salt),
++ RSBAC_UM_PASS_LEN - sizeof(salt))) {
++ /* found pw: remove and success */
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: one-time password %u for user %u/%u matched, removing\n",
++ current->pid, current->comm, i, RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ rsbac_list_lol_subremove(onetime_handle,
++ &uid, pw_array[i]);
++ err = 0;
++ break;
++ }
++ }
++ rsbac_kfree(pw_array);
++ } else
++#endif
++ err = -EPERM;
++ } else
++ err = 0;
++
++ rsbac_kfree(pass_copy);
++out_free:
++ rsbac_kfree(entry_p);
++ if (err)
++ ssleep(1);
++ return err;
++}
++
++int rsbac_um_good_pass(rsbac_uid_t uid, char *pass)
++{
++#ifdef CONFIG_RSBAC_UM_NON_ALPHA
++ char *p;
++#endif
++#ifdef CONFIG_RSBAC_UM_PWHISTORY
++ int i;
++ long count;
++ char *hist_pass;
++ char *tmp;
++ __u8 *pwhistory_array;
++ __u32 salt;
++#endif
++
++ if (!pass)
++ return -RSBAC_EINVALIDPOINTER;
++ if (strlen(pass) < CONFIG_RSBAC_UM_MIN_PASS_LEN)
++ return -RSBAC_EWEAKPASSWORD;
++
++#ifdef CONFIG_RSBAC_UM_NON_ALPHA
++ p = pass;
++ while (*p && (((*p >= 'a')
++ && (*p <= 'z')
++ )
++ || ((*p >= 'A')
++ && (*p <= 'Z')
++ )
++ )
++ )
++ p++;
++ if (!(*p))
++ return -RSBAC_EWEAKPASSWORD;
++#endif
++
++#ifdef CONFIG_RSBAC_UM_PWHISTORY
++ count = rsbac_ta_list_lol_get_all_subdata(0,
++ user_pwhistory_handle,
++ &uid,
++ (void **) &pwhistory_array);
++ if (count > 0) {
++		tmp = rsbac_kmalloc_unlocked(rsbac_max(strlen(pass),
++						       RSBAC_UM_PASS_LEN));
++		if (!tmp) {
++			rsbac_kfree(pwhistory_array);
++			return -RSBAC_ENOMEM;
++		}
++		hist_pass = pwhistory_array;
++
++ for (i = 0; i < count; i++) {
++ salt = *((__u32 *) hist_pass);
++ memcpy(tmp, pass,
++ rsbac_max(strlen(pass), RSBAC_UM_PASS_LEN));
++ rsbac_um_hash(tmp, salt);
++
++ if (memcmp
++ (tmp, hist_pass + sizeof(salt),
++ RSBAC_UM_PASS_LEN - sizeof(salt)) == 0) {
++ rsbac_kfree(tmp);
++ rsbac_kfree(pwhistory_array);
++ return -RSBAC_EWEAKPASSWORD;
++ }
++ hist_pass += RSBAC_UM_PASS_LEN;
++ }
++ rsbac_kfree(tmp);
++ rsbac_kfree(pwhistory_array);
++ }
++#endif
++
++ return 0;
++}
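++
++/* Summary of rsbac_um_good_pass() checks: the password must be at least
++ * CONFIG_RSBAC_UM_MIN_PASS_LEN characters long, must contain at least one
++ * non-alphabetic character if CONFIG_RSBAC_UM_NON_ALPHA is set, and must
++ * not match any entry in the user's password history if
++ * CONFIG_RSBAC_UM_PWHISTORY is enabled. */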
++
++#ifdef CONFIG_RSBAC_UM_ONETIME
++int rsbac_um_add_onetime(rsbac_uid_t uid, char *pass, rsbac_time_t ttl)
++{
++ int err;
++ __u32 salt;
++ char pass_entry[RSBAC_UM_PASS_LEN];
++
++ if (!pass)
++ return -RSBAC_EINVALIDPOINTER;
++
++ if (RSBAC_UID_SET(uid))
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: add one-time password for user %u/%u with ttl %lu\n",
++ current->pid, current->comm, RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid), ttl);
++ else
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: add one-time password for user %u with ttl %lu\n",
++ current->pid, current->comm, RSBAC_UID_NUM(uid), ttl);
++ new_salt(&salt);
++ err = rsbac_um_hash(pass, salt);
++ if (err)
++ return err;
++ memcpy(pass_entry, &salt, sizeof(salt));
++ memcpy(pass_entry + sizeof(salt), pass,
++ RSBAC_UM_PASS_LEN - sizeof(salt));
++
++ return rsbac_list_lol_subadd_ttl(onetime_handle, ttl, &uid, pass_entry, NULL);
++}
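++
++/* One-time passwords are salted and hashed like regular ones; a matching
++ * entry is removed by rsbac_um_check_pass() once it has been used. */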
++
++int rsbac_um_remove_all_onetime(rsbac_uid_t uid)
++{
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: remove all one-time passwords for user %u/%u\n",
++ current->pid, current->comm, RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ return rsbac_list_lol_subremove_all(onetime_handle, &uid);
++}
++
++int rsbac_um_count_onetime(rsbac_uid_t uid)
++{
++ int err;
++
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: counting one-time passwords for user %u/%u\n",
++ current->pid, current->comm, RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ err = rsbac_list_lol_subcount(onetime_handle, &uid);
++ if (err == -RSBAC_ENOTFOUND)
++ err = 0;
++ return err;
++}
++#endif
++
++int rsbac_um_set_pass(rsbac_uid_t uid, char *pass)
++{
++ int err;
++ struct rsbac_um_user_entry_t *entry_p;
++ __u32 salt;
++
++ entry_p = rsbac_kmalloc_unlocked(sizeof(*entry_p));
++ if (!entry_p)
++ return -RSBAC_ENOMEM;
++ err =
++ rsbac_ta_list_lol_get_data_ttl(0, user_handle,
++ NULL, &uid, entry_p);
++ if (err)
++ goto out_free;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(uid))
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: setting password for user %u/%u\n",
++ current->pid, current->comm, RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ else
++#endif
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: setting password for user %u\n",
++ current->pid, current->comm, RSBAC_UID_NUM(uid));
++ if (pass) {
++#ifdef CONFIG_RSBAC_UM_PWHISTORY
++ __u32 max_index = 0;
++ __u8 max_history = CONFIG_RSBAC_UM_PWHISTORY_MAX;
++ long count;
++#endif
++ new_salt(&salt);
++ err = rsbac_um_hash(pass, salt);
++ if (err)
++ goto out_free;
++ memcpy(entry_p->pass, &salt, sizeof(salt));
++ memcpy(entry_p->pass + sizeof(salt), pass,
++ RSBAC_UM_PASS_LEN - sizeof(salt));
++#ifdef CONFIG_RSBAC_UM_PWHISTORY
++ rsbac_list_lol_get_data(user_pwhistory_handle,
++ &uid,
++ &max_history);
++ if (max_history > 0) {
++ rsbac_ta_list_lol_get_max_subdesc(0,
++ user_pwhistory_handle,
++ &uid,
++ &max_index);
++ max_index++;
++
++ if (max_index != 0)
++ rsbac_list_lol_subadd(user_pwhistory_handle,
++ &uid, &max_index,
++ entry_p->pass);
++ else {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(uid))
++ rsbac_printk(KERN_WARNING "rsbac_um_set_pass(): maximum password history index reached for user %u/%u, password will not be stored!\n",
++ RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ else
++#endif
++ rsbac_printk(KERN_WARNING "rsbac_um_set_pass(): maximum password history index reached for user %u, password will not be stored!\n",
++ RSBAC_UID_NUM(uid));
++ }
++ count =
++ rsbac_list_lol_subcount(user_pwhistory_handle,
++ &uid);
++ if (count > max_history)
++ rsbac_ta_list_lol_subremove_count(0,
++ user_pwhistory_handle,
++ &uid,
++ (count - max_history));
++ }
++#endif
++ } else
++ memset(entry_p->pass, 0, RSBAC_UM_PASS_LEN);
++ entry_p->lastchange = RSBAC_CURRENT_TIME / 86400;
++ err = rsbac_ta_list_lol_add_ttl(0, user_handle,
++ 0, &uid, entry_p);
++
++ out_free:
++ rsbac_kfree(entry_p);
++ return err;
++}
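++
++/* With CONFIG_RSBAC_UM_PWHISTORY, each successful password change appends
++ * the new salted hash to the per-user history (bounded by the per-user
++ * max_history value, default CONFIG_RSBAC_UM_PWHISTORY_MAX) and trims the
++ * surplus entries beyond that bound. */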
++
++int rsbac_um_set_group_pass(rsbac_gid_t gid, char *pass)
++{
++ int err;
++ struct rsbac_um_group_entry_t *entry_p;
++ __u32 salt;
++
++ entry_p = rsbac_kmalloc_unlocked(sizeof(*entry_p));
++ if (!entry_p)
++ return -RSBAC_ENOMEM;
++ err = rsbac_ta_list_get_data_ttl(0, group_handle,
++ NULL, &gid, entry_p);
++ if (err)
++ goto out_free;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_GID_SET(gid))
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: setting password for group %u/%u\n",
++ current->pid, current->comm, RSBAC_GID_SET(gid), RSBAC_GID_NUM(gid));
++ else
++#endif
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: setting password for group %u\n",
++ current->pid, current->comm, RSBAC_GID_NUM(gid));
++ if (pass) {
++ new_salt(&salt);
++ err = rsbac_um_hash(pass, salt);
++ if (err)
++ goto out_free;
++ memcpy(entry_p->pass, &salt, sizeof(salt));
++ memcpy(entry_p->pass + sizeof(salt), pass,
++ RSBAC_UM_PASS_LEN - sizeof(salt));
++ } else
++ memset(entry_p->pass, 0, RSBAC_UM_PASS_LEN);
++ err =
++ rsbac_ta_list_add_ttl(0, group_handle, 0,
++ &gid, entry_p);
++
++ out_free:
++ rsbac_kfree(entry_p);
++ return err;
++}
++
++int rsbac_um_check_account(rsbac_uid_t uid)
++{
++ int err;
++ struct rsbac_um_user_entry_t *entry_p;
++ u_long curdays;
++
++ entry_p = rsbac_kmalloc_unlocked(sizeof(*entry_p));
++ if (!entry_p)
++ return -RSBAC_ENOMEM;
++ err =
++ rsbac_ta_list_lol_get_data_ttl(0, user_handle,
++ NULL, &uid, entry_p);
++ if (err)
++ goto out_free;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(uid))
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: checking account for user %u/%u\n",
++ current->pid, current->comm, RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ else
++#endif
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: checking account for user %u\n",
++ current->pid, current->comm, RSBAC_UID_NUM(uid));
++ /* check whether account or password has expired */
++ curdays = RSBAC_CURRENT_TIME / 86400;
++ if (*((__u32 *) entry_p->pass)
++ && !entry_p->lastchange) {
++ err = -RSBAC_EMUSTCHANGE;
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: user %u must change password, "
++ "lastchange = 0\n", current->pid, current->comm, uid);
++ goto out_free;
++ }
++ if ((curdays > entry_p->expire)
++ && (entry_p->expire != -1)
++ && (entry_p->expire)
++ ) {
++ err = -RSBAC_EEXPIRED;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(uid))
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: account for user %u/%u has expired\n",
++ current->pid, current->comm, RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ else
++#endif
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: account for user %u has expired\n",
++ current->pid, current->comm, RSBAC_UID_NUM(uid));
++ goto out_free;
++ }
++ if ((curdays >
++ (entry_p->lastchange + entry_p->maxchange +
++ entry_p->inactive))
++ && (entry_p->maxchange != -1)
++ && (entry_p->maxchange)
++ && (entry_p->inactive != -1)
++ && (entry_p->inactive)
++ ) {
++ err = -RSBAC_EEXPIRED;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(uid))
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: password for user %u/%u has expired\n",
++ current->pid, current->comm, RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ else
++#endif
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: password for user %u has expired\n",
++ current->pid, current->comm, RSBAC_UID_NUM(uid));
++ goto out_free;
++ }
++ if (((entry_p->lastchange + entry_p->maxchange) < curdays)
++ && entry_p->maxchange && (entry_p->maxchange != -1)
++ ) {
++ err = -RSBAC_EMUSTCHANGE;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(uid))
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: user %u/%u must change password, "
++ "lastchange too old\n",
++ current->pid, current->comm, RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ else
++#endif
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: user %u must change password, "
++ "lastchange too old\n",
++ current->pid, current->comm, RSBAC_UID_NUM(uid));
++ goto out_free;
++ }
++ if ((curdays >
++ (entry_p->lastchange + entry_p->maxchange -
++ entry_p->warnchange))
++ && (entry_p->maxchange != -1)
++ && (entry_p->warnchange != -1)
++ && entry_p->maxchange && entry_p->warnchange) {
++ err = (entry_p->lastchange + entry_p->maxchange) - curdays;
++ } else
++ err = 0;
++
++ out_free:
++ rsbac_kfree(entry_p);
++ return err;
++}
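++
++/* Return value of rsbac_um_check_account(): a negative RSBAC error such as
++ * -RSBAC_EMUSTCHANGE or -RSBAC_EEXPIRED on failure, the number of days left
++ * until password expiry while inside the warn window, and 0 otherwise. */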
++
++#ifdef CONFIG_RSBAC_UM_PWHISTORY
++int rsbac_um_get_max_history(rsbac_list_ta_number_t ta_number, rsbac_uid_t uid)
++{
++ int err;
++ __u8 max_history;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(uid))
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: getting max_history for user %u/%u\n",
++ current->pid, current->comm, RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ else
++#endif
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: getting max_history for user %u\n",
++ current->pid, current->comm, RSBAC_UID_NUM(uid));
++ err = rsbac_ta_list_lol_get_data_ttl(ta_number, user_pwhistory_handle,
++ NULL,
++ &uid,
++ &max_history);
++ if (err)
++ return err;
++ else
++ return max_history;
++}
++
++int rsbac_um_set_max_history(rsbac_list_ta_number_t ta_number, rsbac_uid_t uid, __u8 max_history)
++{
++ int err;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(uid))
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: setting max_history for user %u/%u to %u\n",
++ current->pid, current->comm, RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid), max_history);
++ else
++#endif
++ rsbac_pr_debug(aef_um, "pid %u/%.15s: setting max_history for user %u to %u\n",
++ current->pid, current->comm, RSBAC_UID_NUM(uid), max_history);
++ err = rsbac_ta_list_lol_add_ttl(ta_number, user_pwhistory_handle,
++ 0,
++ &uid,
++ &max_history);
++ if (err)
++ return err;
++ if (max_history > 0) {
++ long count;
++
++ count = rsbac_ta_list_lol_subcount(ta_number,
++ user_pwhistory_handle,
++ &uid);
++ if (count > max_history)
++ rsbac_ta_list_lol_subremove_count(ta_number,
++ user_pwhistory_handle,
++ &uid,
++ (count - max_history));
++ } else {
++ rsbac_ta_list_lol_subremove_all(ta_number,
++ user_pwhistory_handle,
++ &uid);
++ }
++ return 0;
++}
++#endif
+diff --git a/rsbac/help/Makefile b/rsbac/help/Makefile
+new file mode 100644
+index 0000000..90a8d15
+--- /dev/null
++++ b/rsbac/help/Makefile
+@@ -0,0 +1,14 @@
++#
++# Makefile for the Rule Set Based Access Control helpers.
++#
++# Author and (c) 1999-2012 Amon Ott <ao@rsbac.org>
++obj-y := syscalls.o helpers.o getname.o debug.o rkmem.o net_getname.o
++#lsm.o
++
++obj-$(CONFIG_RSBAC_PM) += pm_getname.o
++obj-$(CONFIG_RSBAC_RC) += rc_getname.o
++obj-$(CONFIG_RSBAC_ACL) += acl_getname.o
++obj-$(CONFIG_RSBAC_PAX) += pax_getname.o
++obj-$(CONFIG_RSBAC_CAP_LOG_MISSING) += cap_getname.o
++obj-$(CONFIG_RSBAC_JAIL_LOG_MISSING) += jail_getname.o
++obj-$(CONFIG_RSBAC_NET_OBJ) += net_helpers.o
+diff --git a/rsbac/help/acl_getname.c b/rsbac/help/acl_getname.c
+new file mode 100644
+index 0000000..dad613f
+--- /dev/null
++++ b/rsbac/help/acl_getname.c
+@@ -0,0 +1,184 @@
++/************************************ */
++/* Rule Set Based Access Control */
++/* */
++/* Author and (c) 1999,2000: Amon Ott */
++/* */
++/* Getname functions for ACL module */
++/* Last modified: 19/Sep/2000 */
++/************************************ */
++
++#include <rsbac/types.h>
++#include <rsbac/getname.h>
++#include <rsbac/acl_getname.h>
++#include <rsbac/helpers.h>
++#include <rsbac/error.h>
++
++#ifdef __KERNEL__
++#include <linux/string.h>
++#else
++#include <string.h>
++#endif
++
++static char acl_subject_type_list[ACLS_NONE+1][6] = {
++ "USER",
++ "ROLE",
++ "GROUP",
++ "NONE" };
++
++static char acl_group_syscall_list[ACLGS_none+1][18] = {
++ "add_group",
++ "change_group",
++ "remove_group",
++ "get_group_entry",
++ "list_groups",
++ "add_member",
++ "remove_member",
++ "get_user_groups",
++ "get_group_members",
++ "none" };
++
++static char acl_scd_type_list[AST_none-32+1][20] = {
++ "auth_administration",
++ "none" };
++
++static char acl_special_right_list[ACLR_NONE-32+1][20] = {
++ "FORWARD",
++ "ACCESS_CONTROL",
++ "SUPERVISOR",
++ "NONE" };
++
++/*****************************************/
++
++char * get_acl_subject_type_name(char * name,
++ enum rsbac_acl_subject_type_t value)
++ {
++ if(!name)
++ return(NULL);
++ if(value > ACLS_NONE)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, acl_subject_type_list[value]);
++ return(name);
++ };
++
++#ifndef __KERNEL__
++enum rsbac_acl_subject_type_t get_acl_subject_type_nr(const char * name)
++ {
++ enum rsbac_acl_subject_type_t i;
++
++ if(!name)
++ return(ACLS_NONE);
++ for (i = 0; i < ACLS_NONE; i++)
++ {
++ if (!strcmp(name, acl_subject_type_list[i]))
++ {
++ return(i);
++ }
++ }
++ return(ACLS_NONE);
++ };
++#endif
++
++char * get_acl_group_syscall_name(char * name,
++ enum rsbac_acl_group_syscall_type_t value)
++ {
++ if(!name)
++ return(NULL);
++ if(value > ACLGS_none)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, acl_group_syscall_list[value]);
++ return(name);
++ };
++
++#ifndef __KERNEL__
++enum rsbac_acl_group_syscall_type_t get_acl_group_syscall_nr(const char * name)
++ {
++ enum rsbac_acl_group_syscall_type_t i;
++
++ if(!name)
++ return(ACLGS_none);
++ for (i = 0; i < ACLGS_none; i++)
++ {
++ if (!strcmp(name, acl_group_syscall_list[i]))
++ {
++ return(i);
++ }
++ }
++ return(ACLGS_none);
++ };
++#endif
++
++char * get_acl_scd_type_name(char * name,
++ enum rsbac_acl_scd_type_t value)
++ {
++ if(!name)
++ return(NULL);
++ if(value < AST_min)
++ {
++ return(get_scd_type_name(name, value));
++ }
++ value -= AST_min;
++ if(value > AST_none)
++ {
++ strcpy(name, "ERROR!");
++ return(name);
++ }
++ strcpy(name, acl_scd_type_list[value]);
++ return(name);
++ };
++
++#ifndef __KERNEL__
++enum rsbac_acl_scd_type_t get_acl_scd_type_nr(const char * name)
++ {
++ enum rsbac_acl_scd_type_t i;
++
++ if(!name)
++ return(AST_none);
++ for (i = 0; i < AST_none-32; i++)
++ {
++ if (!strcmp(name, acl_scd_type_list[i]))
++ {
++ return(i+32);
++ }
++ }
++ return(get_scd_type_nr(name));
++ };
++#endif
++
++char * get_acl_special_right_name(char * name,
++ enum rsbac_acl_special_rights_t value)
++ {
++ if(!name)
++ return(NULL);
++ if(value < RSBAC_ACL_SPECIAL_RIGHT_BASE)
++ {
++ return(get_request_name(name, value));
++ }
++ value -= RSBAC_ACL_SPECIAL_RIGHT_BASE;
++ if(value > ACLR_NONE)
++ {
++ strcpy(name, "ERROR!");
++ return(name);
++ }
++ strcpy(name, acl_special_right_list[value]);
++ return(name);
++ };
++
++#ifndef __KERNEL__
++enum rsbac_acl_special_rights_t get_acl_special_right_nr(const char * name)
++ {
++ enum rsbac_acl_special_rights_t i;
++
++ if(!name)
++ return(ACLR_NONE);
++ for (i = 0; i < (ACLR_NONE - RSBAC_ACL_SPECIAL_RIGHT_BASE); i++)
++ {
++ if (!strcmp(name, acl_special_right_list[i]))
++ {
++ return(i + RSBAC_ACL_SPECIAL_RIGHT_BASE);
++ }
++ }
++ return(get_request_nr(name));
++ };
++#endif
+diff --git a/rsbac/help/cap_getname.c b/rsbac/help/cap_getname.c
+new file mode 100644
+index 0000000..b850f8b
+--- /dev/null
++++ b/rsbac/help/cap_getname.c
+@@ -0,0 +1,450 @@
++/********************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2011: */
++/* Amon Ott <ao@rsbac.org> */
++/* Getname functions for CAP module */
++/* Last modified: 12/Jul/2011 */
++/********************************** */
++
++#include <rsbac/getname.h>
++#include <rsbac/cap_getname.h>
++#include <rsbac/helpers.h>
++#include <rsbac/error.h>
++
++#ifdef __KERNEL__
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/debug.h>
++#include <rsbac/aci.h>
++#include <rsbac/lists.h>
++#else
++#include <string.h>
++#endif
++
++/*****************************************/
++
++#ifdef CONFIG_RSBAC_CAP_LEARN_TA
++rsbac_list_ta_number_t cap_learn_ta = CONFIG_RSBAC_CAP_LEARN_TA;
++#else
++#define cap_learn_ta 0
++#endif
++
++#ifdef __KERNEL__
++#if defined(CONFIG_RSBAC_CAP_LOG_MISSING) || defined(CONFIG_RSBAC_CAP_LEARN)
++void rsbac_cap_log_missing_cap(int cap)
++ {
++ char * tmp;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++#ifdef CONFIG_RSBAC_CAP_LEARN_TA
++ if (!rsbac_list_ta_exist(cap_learn_ta))
++ rsbac_list_ta_begin(CONFIG_RSBAC_LIST_TRANS_MAX_TTL,
++ &cap_learn_ta,
++ RSBAC_ALL_USERS,
++ RSBAC_CAP_LEARN_TA_NAME,
++ NULL);
++#endif
++ i_tid.process = task_pid(current);
++ if (rsbac_ta_get_attr(cap_learn_ta,
++ SW_CAP,
++ T_PROCESS,
++ i_tid,
++ A_max_caps_user,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_cap_log_missing_cap()", A_max_caps_user);
++ }
++ else
++ {
++ if(cap < 32)
++ {
++ if(!(i_attr_val1.max_caps_user.cap[0] & (1 << cap)))
++ {
++#if defined(CONFIG_RSBAC_CAP_LEARN)
++ if (rsbac_cap_learn)
++ {
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if(tmp)
++ {
++ get_cap_name(tmp, cap);
++ rsbac_printk(KERN_INFO
++ "capable(): pid %u(%.15s), uid %u: add missing user max_cap %s to transaction %u!\n",
++ current->pid, current->comm,
++ current_uid(),
++ tmp,
++ cap_learn_ta);
++ rsbac_kfree(tmp);
++ }
++ i_attr_val1.max_caps_user.cap[0] |= (1 << cap);
++ if (rsbac_ta_set_attr(cap_learn_ta,
++ SW_CAP,
++ T_PROCESS,
++ i_tid,
++ A_max_caps_user,
++ i_attr_val1))
++ {
++ rsbac_pr_set_error (A_max_caps_user);
++ }
++ i_tid.user = current_uid();
++ if (rsbac_ta_get_attr(cap_learn_ta,
++ SW_CAP,
++ T_USER,
++ i_tid,
++ A_max_caps,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_max_caps);
++ }
++ else
++ {
++ struct cred *override_cred;
++
++ i_attr_val1.max_caps.cap[0] |= (1 << cap);
++ if (rsbac_ta_set_attr(cap_learn_ta,
++ SW_CAP,
++ T_USER,
++ i_tid,
++ A_max_caps,
++ i_attr_val1))
++ {
++ rsbac_pr_set_error (A_max_caps);
++ }
++ /* set effective cap for process */
++ override_cred = prepare_creds();
++ if (override_cred)
++ {
++ override_cred->cap_effective.cap[0] |= (1 << cap);
++ commit_creds(override_cred);
++ }
++ }
++ }
++ else
++#endif
++ if(rsbac_cap_log_missing)
++ {
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if(tmp)
++ {
++ get_cap_name(tmp, cap);
++ rsbac_printk(KERN_DEBUG
++ "capable(): pid %u(%.15s), uid %u: missing user max_cap %s!\n",
++ current->pid, current->comm,
++ current_uid(),
++ tmp);
++ rsbac_kfree(tmp);
++ }
++ }
++ }
++ }
++ else
++ {
++ if(!(i_attr_val1.max_caps_user.cap[1] & (1 << (cap - 32))))
++ {
++#if defined(CONFIG_RSBAC_CAP_LEARN)
++ if (rsbac_cap_learn)
++ {
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if(tmp)
++ {
++ get_cap_name(tmp, cap);
++ rsbac_printk(KERN_INFO
++ "capable(): pid %u(%.15s), uid %u: add missing user max_cap %s to transaction %u!\n",
++ current->pid, current->comm,
++ current_uid(),
++ tmp,
++ cap_learn_ta);
++ rsbac_kfree(tmp);
++ }
++ i_attr_val1.max_caps_user.cap[1] |= (1 << (cap - 32));
++ if (rsbac_ta_set_attr(cap_learn_ta,
++ SW_CAP,
++ T_PROCESS,
++ i_tid,
++ A_max_caps_user,
++ i_attr_val1)) {
++                            rsbac_pr_set_error (A_max_caps_user);
++ }
++ i_tid.user = current_uid();
++ if (rsbac_ta_get_attr(cap_learn_ta,
++ SW_CAP,
++ T_USER,
++ i_tid,
++ A_max_caps,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_max_caps);
++ }
++ else
++ {
++ struct cred *override_cred;
++
++ i_attr_val1.max_caps.cap[1] |= (1 << (cap - 32));
++ if (rsbac_ta_set_attr(cap_learn_ta,
++ SW_CAP,
++ T_USER,
++ i_tid,
++ A_max_caps,
++ i_attr_val1))
++ {
++ rsbac_pr_set_error (A_max_caps);
++ }
++ /* set effective cap for process */
++ override_cred = prepare_creds();
++ if (override_cred)
++ {
++                              override_cred->cap_effective.cap[1] |= (1 << (cap - 32));
++ commit_creds(override_cred);
++ }
++ }
++ }
++ else
++#endif
++ if(rsbac_cap_log_missing)
++ {
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if(tmp)
++ {
++ get_cap_name(tmp, cap);
++ rsbac_printk(KERN_DEBUG
++ "capable(): pid %u(%.15s), uid %u: missing user max_cap %s!\n",
++ current->pid, current->comm,
++ current_uid(),
++ tmp);
++ rsbac_kfree(tmp);
++ }
++ }
++ }
++ }
++ }
++
++
++ if (rsbac_ta_get_attr(cap_learn_ta,
++ SW_CAP,
++ T_PROCESS,
++ i_tid,
++ A_max_caps_program,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_max_caps_program);
++ }
++ else
++ {
++ if(cap < 32)
++ {
++ if(!(i_attr_val1.max_caps_program.cap[0] & (1 << cap)))
++ {
++#if defined(CONFIG_RSBAC_CAP_LEARN)
++ if (rsbac_cap_learn)
++ {
++ i_attr_val1.max_caps_program.cap[0] |= (1 << cap);
++ if (rsbac_ta_set_attr(cap_learn_ta,
++ SW_CAP,
++ T_PROCESS,
++ i_tid,
++ A_max_caps_program,
++ i_attr_val1)) {
++ rsbac_pr_set_error (A_max_caps_program);
++ }
++ if (rsbac_get_attr(SW_GEN,
++ T_PROCESS,
++ i_tid,
++ A_program_file,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_program_file);
++ }
++ else
++ {
++ i_tid.file = i_attr_val1.program_file;
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if(tmp)
++ {
++ char * target_id_name;
++
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ target_id_name = rsbac_kmalloc_unlocked(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++#else
++ target_id_name = rsbac_kmalloc_unlocked(2 * RSBAC_MAXNAMELEN);
++#endif
++ if(target_id_name)
++ {
++ get_cap_name(tmp, cap);
++ rsbac_printk(KERN_INFO
++ "capable(): pid %u(%.15s), uid %u: add missing program max_cap %s to FILE %s to transaction %u!\n",
++ current->pid, current->comm,
++ current_uid(),
++ tmp,
++ get_target_name(NULL, T_FILE, target_id_name, i_tid),
++ cap_learn_ta);
++ rsbac_kfree(target_id_name);
++ }
++ rsbac_kfree(tmp);
++ }
++ if (rsbac_ta_get_attr(cap_learn_ta,
++ SW_CAP,
++ T_FILE,
++ i_tid,
++ A_max_caps,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_max_caps);
++ }
++ else
++ {
++ struct cred *override_cred;
++
++ i_attr_val1.max_caps.cap[0] |= (1 << cap);
++ if (rsbac_ta_set_attr(cap_learn_ta,
++ SW_CAP,
++ T_FILE,
++ i_tid,
++ A_max_caps,
++ i_attr_val1))
++ {
++ rsbac_pr_set_error (A_max_caps);
++ }
++ /* set effective cap for process */
++ override_cred = prepare_creds();
++ if (override_cred)
++ {
++ override_cred->cap_effective.cap[0] |= (1 << cap);
++ commit_creds(override_cred);
++ }
++ }
++ }
++ }
++ else
++#endif
++ if(rsbac_cap_log_missing)
++ {
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if(tmp)
++ {
++ get_cap_name(tmp, cap);
++ rsbac_printk(KERN_DEBUG
++ "capable(): pid %u(%.15s), uid %u: missing program max_cap %s!\n",
++ current->pid, current->comm,
++ current_uid(),
++ tmp);
++ rsbac_kfree(tmp);
++ }
++ }
++ }
++ }
++ else
++ {
++ if(!(i_attr_val1.max_caps_program.cap[1] & (1 << (cap - 32))))
++ {
++#if defined(CONFIG_RSBAC_CAP_LEARN)
++ if (rsbac_cap_learn)
++ {
++ i_attr_val1.max_caps_program.cap[1] |= (1 << (cap - 32));
++ if (rsbac_ta_set_attr(cap_learn_ta,
++ SW_CAP,
++ T_PROCESS,
++ i_tid,
++ A_max_caps_program,
++ i_attr_val1)) {
++ rsbac_pr_set_error (A_max_caps_program);
++ }
++ if (rsbac_get_attr(SW_GEN,
++ T_PROCESS,
++ i_tid,
++ A_program_file,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_program_file);
++ }
++ else
++ {
++ i_tid.file = i_attr_val1.program_file;
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if(tmp)
++ {
++ char * target_id_name;
++
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ target_id_name = rsbac_kmalloc_unlocked(CONFIG_RSBAC_MAX_PATH_LEN + RSBAC_MAXNAMELEN);
++#else
++ target_id_name = rsbac_kmalloc_unlocked(2 * RSBAC_MAXNAMELEN);
++#endif
++ if(target_id_name)
++ {
++ get_cap_name(tmp, cap);
++ rsbac_printk(KERN_INFO
++ "capable(): pid %u(%.15s), uid %u: add missing program max_cap %s to FILE %s to transaction %u!\n",
++ current->pid, current->comm,
++ current_uid(),
++ tmp,
++ get_target_name(NULL, T_FILE, target_id_name, i_tid),
++ cap_learn_ta);
++ rsbac_kfree(target_id_name);
++ }
++ rsbac_kfree(tmp);
++ }
++ if (rsbac_ta_get_attr(cap_learn_ta,
++ SW_CAP,
++ T_FILE,
++ i_tid,
++ A_max_caps,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_pr_get_error(A_max_caps);
++ }
++ else
++ {
++ struct cred *override_cred;
++
++ i_attr_val1.max_caps.cap[1] |= (1 << (cap - 32));
++ if (rsbac_ta_set_attr(cap_learn_ta,
++ SW_CAP,
++ T_FILE,
++ i_tid,
++ A_max_caps,
++ i_attr_val1))
++ {
++ rsbac_pr_set_error (A_max_caps);
++ }
++ /* set effective cap for process */
++ override_cred = prepare_creds();
++ if (override_cred)
++ {
++                              override_cred->cap_effective.cap[1] |= (1 << (cap - 32));
++ commit_creds(override_cred);
++ }
++ }
++ }
++ }
++ else
++#endif
++ if(rsbac_cap_log_missing)
++ {
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if(tmp)
++ {
++ get_cap_name(tmp, cap);
++ rsbac_printk(KERN_DEBUG
++ "capable(): pid %u(%.15s), uid %u: missing program max_cap %s!\n",
++ current->pid, current->comm,
++ current_uid(),
++ tmp);
++ rsbac_kfree(tmp);
++ }
++ }
++ }
++ }
++ }
++ }
++#endif
++#endif
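
The learning and logging code above repeatedly branches on cap < 32 versus cap >= 32 because the capability sets are stored as two 32-bit words (cap[0] and cap[1]). The word/bit selection can be expressed compactly as in the following illustrative helper (not part of the patch; the two-word layout is the only assumption made here):

/* Illustrative: set capability bit `cap` in a two-word (2 x 32-bit) mask,
 * equivalent to the cap[0]/cap[1] branches used above. */
static inline void set_cap_bit(unsigned int caps[2], int cap)
{
        caps[cap >> 5] |= 1U << (cap & 31);
}
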
+diff --git a/rsbac/help/debug.c b/rsbac/help/debug.c
+new file mode 100644
+index 0000000..738fb52
+--- /dev/null
++++ b/rsbac/help/debug.c
+@@ -0,0 +1,4667 @@
++/******************************************* */
++/* Rule Set Based Access Control */
++/* */
++/* Author and (c) 1999-2011: */
++/* Amon Ott <ao@rsbac.org> */
++/* */
++/* Debug and logging functions for all parts */
++/* */
++/* Last modified: 12/Jul/2011 */
++/******************************************* */
++
++#include <asm/uaccess.h>
++#include <rsbac/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/aci_data_structures.h>
++#include <rsbac/debug.h>
++#include <rsbac/error.h>
++#include <rsbac/proc_fs.h>
++#include <rsbac/getname.h>
++#include <rsbac/net_getname.h>
++#include <rsbac/adf.h>
++#include <rsbac/rkmem.h>
++#if defined(CONFIG_RSBAC_DAZ)
++#include <rsbac/daz.h>
++#endif
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/console.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/moduleparam.h>
++#include <linux/syscalls.h>
++#include <linux/kthread.h>
++#include <linux/freezer.h>
++#include <linux/seq_file.h>
++
++extern u_int rsbac_list_rcu_rate;
++
++unsigned long int rsbac_flags;
++
++/* Boolean debug switch for NO_WRITE (global) */
++int rsbac_debug_no_write = 0;
++
++static rsbac_boolean_t debug_initialized = FALSE;
++
++#ifdef CONFIG_RSBAC_FD_CACHE
++rsbac_time_t rsbac_fd_cache_ttl = CONFIG_RSBAC_FD_CACHE_TTL;
++u_int rsbac_fd_cache_disable = 0;
++#endif
++
++#if defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)
++rsbac_time_t rsbac_list_check_interval = CONFIG_RSBAC_LIST_CHECK_INTERVAL;
++#endif
++
++#ifdef CONFIG_RSBAC_DEBUG
++/* Boolean debug switch for data structures */
++int rsbac_debug_ds = 0;
++
++/* Boolean debug switch for writing of data structures */
++int rsbac_debug_write = 0;
++
++/* Boolean debug switch for AEF */
++EXPORT_SYMBOL(rsbac_debug_aef);
++int rsbac_debug_aef = 0;
++
++/* Boolean debug switch for stack debugging */
++int rsbac_debug_stack = 0;
++
++/* Boolean debug switch for generic lists */
++int rsbac_debug_lists = 0;
++
++#ifdef CONFIG_RSBAC_NET
++int rsbac_debug_ds_net = 0;
++int rsbac_debug_adf_net = 0;
++int rsbac_debug_aef_net = 0;
++#endif
++
++#if defined(CONFIG_RSBAC_MAC)
++/* Boolean debug switch for MAC data structures */
++int rsbac_debug_ds_mac = 0;
++/* Boolean debug switch for MAC syscalls / AEF */
++int rsbac_debug_aef_mac = 0;
++/* Boolean debug switch for MAC decisions / ADF */
++int rsbac_debug_adf_mac = 0;
++#endif
++
++#if defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_PM_MAINT)
++/* Boolean debug switch for PM data structures */
++int rsbac_debug_ds_pm = 0;
++/* Boolean debug switch for PM syscalls / AEF */
++int rsbac_debug_aef_pm = 0;
++/* Boolean debug switch for PM decisions / ADF */
++int rsbac_debug_adf_pm = 0;
++#endif
++
++#if defined(CONFIG_RSBAC_DAZ)
++/* Boolean debug switch for DAZ decisions / ADF */
++int rsbac_debug_adf_daz = 0;
++#endif
++
++#if defined(CONFIG_RSBAC_RC) || defined(CONFIG_RSBAC_RC_MAINT)
++/* Boolean debug switch for RC data structures */
++int rsbac_debug_ds_rc = 0;
++/* Boolean debug switch for RC syscalls / AEF */
++int rsbac_debug_aef_rc = 0;
++/* Boolean debug switch for RC decisions / ADF */
++int rsbac_debug_adf_rc = 0;
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH) || defined(CONFIG_RSBAC_AUTH_MAINT)
++/* Boolean debug switch for AUTH data structures */
++int rsbac_debug_ds_auth = 0;
++/* Boolean debug switch for AUTH syscalls / AEF */
++int rsbac_debug_aef_auth = 0;
++/* Boolean debug switch for AUTH decisions / ADF */
++int rsbac_debug_adf_auth = 0;
++#endif
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++/* Boolean debug switch for REG */
++int rsbac_debug_reg = 0;
++#endif
++
++#if defined(CONFIG_RSBAC_ACL) || defined(CONFIG_RSBAC_ACL_MAINT)
++/* Boolean debug switch for ACL data structures */
++int rsbac_debug_ds_acl = 0;
++/* Boolean debug switch for ACL syscalls / AEF */
++int rsbac_debug_aef_acl = 0;
++/* Boolean debug switch for ACL decisions / ADF */
++int rsbac_debug_adf_acl = 0;
++#endif
++
++#if defined(CONFIG_RSBAC_JAIL)
++/* Boolean debug switch for JAIL syscalls / AEF */
++int rsbac_debug_aef_jail = 0;
++/* Boolean debug switch for JAIL decisions / ADF */
++int rsbac_debug_adf_jail = 0;
++#endif
++
++#if defined(CONFIG_RSBAC_PAX)
++/* Boolean debug switch for PAX decisions / ADF */
++int rsbac_debug_adf_pax = 0;
++#endif
++
++#if defined(CONFIG_RSBAC_UM)
++/* Boolean debug switch for UM data structures */
++int rsbac_debug_ds_um = 0;
++/* Boolean debug switch for UM syscalls / AEF */
++int rsbac_debug_aef_um = 0;
++/* Boolean debug switch for UM decisions / ADF */
++int rsbac_debug_adf_um = 0;
++#endif
++
++#if defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)
++ int rsbac_debug_auto = 0;
++#endif
++
++#endif /* DEBUG */
++
++#if defined(CONFIG_RSBAC_UM_EXCL)
++int rsbac_um_no_excl = 0;
++#endif
++
++#if defined(CONFIG_RSBAC_RC_LEARN)
++int rsbac_rc_learn = 0;
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH) || defined(CONFIG_RSBAC_AUTH_MAINT)
++/* Boolean switch for AUTH init: set may_setuid for /bin/login */
++int rsbac_auth_enable_login = 0;
++#if defined(CONFIG_RSBAC_AUTH_LEARN)
++int rsbac_auth_learn = 0;
++#endif
++#endif
++
++#if defined(CONFIG_RSBAC_CAP_LEARN)
++int rsbac_cap_learn = 0;
++#endif
++
++#if defined(CONFIG_RSBAC_ACL_LEARN)
++int rsbac_acl_learn_fd = 0;
++#endif
++
++/* Suppress default list creation for complete restore */
++int rsbac_no_defaults = 0;
++
++static rsbac_list_handle_t log_levels_handle = NULL;
++
++#ifdef CONFIG_RSBAC_SOFTMODE
++/* Boolean switch for RSBAC soft mode */
++int rsbac_softmode = 0;
++int rsbac_softmode_prohibit = 0;
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++int rsbac_ind_softmode[SW_NONE] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0};
++#endif
++#endif
++
++int rsbac_list_recover = 0;
++
++#ifdef CONFIG_RSBAC_FREEZE
++int rsbac_freeze = 0;
++#endif
++
++#if defined(CONFIG_RSBAC_CAP_PROC_HIDE)
++int rsbac_cap_process_hiding = 0;
++#endif
++#ifdef CONFIG_RSBAC_CAP_LOG_MISSING
++int rsbac_cap_log_missing = 0;
++#endif
++#ifdef CONFIG_RSBAC_JAIL_LOG_MISSING
++int rsbac_jail_log_missing = 0;
++#endif
++
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_FULL
++/* Boolean switch for disabling Linux DAC */
++int rsbac_dac_disable = 0;
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_dac_is_disabled);
++#endif
++int rsbac_dac_is_disabled(void)
++ {
++ return rsbac_dac_disable;
++ }
++#endif
++
++static u_int log_seq = 0;
++
++/* Boolean switch for no syslog option */
++#ifdef CONFIG_RSBAC_RMSG_NOSYSLOG
++int rsbac_nosyslog = 0;
++#endif
++
++#ifdef CONFIG_RSBAC_SYSLOG_RATE
++static u_int rsbac_syslog_rate = CONFIG_RSBAC_SYSLOG_RATE_DEF;
++static u_int syslog_count = 0;
++#endif
++
++/* Boolean switch for delayed init option*/
++#ifdef CONFIG_RSBAC_INIT_DELAY
++int rsbac_no_delay_init = 0;
++kdev_t rsbac_delayed_root = RSBAC_MKDEV(0,0);
++#endif
++
++/* Array of Boolean debug switches for ADF */
++int rsbac_debug_adf_default = 1;
++rsbac_log_entry_t rsbac_log_levels[R_NONE+1];
++
++rsbac_boolean_t rsbac_debug_adf_dirty = FALSE;
++
++/* variables for rsbac_logging */
++#if defined(CONFIG_RSBAC_RMSG)
++#include <linux/poll.h>
++#include <linux/smp.h>
++DECLARE_WAIT_QUEUE_HEAD(rlog_wait);
++struct rsbac_log_list_head_t log_list_head = {NULL, NULL, 0, 0};
++static u_int rsbac_rmsg_maxentries = CONFIG_RSBAC_RMSG_MAXENTRIES;
++#if defined(CONFIG_RSBAC_LOG_REMOTE)
++struct rsbac_log_list_head_t remote_log_list_head = {NULL, NULL, 0, 0};
++static DECLARE_WAIT_QUEUE_HEAD(rsbaclogd_wait);
++static u_int rsbac_log_remote_maxentries = CONFIG_RSBAC_LOG_REMOTE_MAXENTRIES;
++#ifndef CONFIG_RSBAC_LOG_REMOTE_SYNC
++static struct timer_list rsbac_log_remote_timer;
++u_int rsbac_log_remote_interval = CONFIG_RSBAC_LOG_INTERVAL;
++#endif
++rsbac_pid_t rsbaclogd_pid=0;
++#define REMOTE_SEND_BUF_LEN 1024
++static __u16 rsbac_log_remote_port = 0;
++static __u32 rsbac_log_remote_addr = 0;
++static char rsbac_log_remote_addr_string[RSBAC_MAXNAMELEN] = CONFIG_RSBAC_LOG_REMOTE_ADDR;
++#endif
++
++#endif /* RMSG */
++
++#ifdef CONFIG_RSBAC_SYSLOG_RATE
++static struct timer_list rsbac_syslog_rate_timer;
++#endif
++
++void rsbac_adf_log_switch(rsbac_adf_request_int_t request,
++ enum rsbac_target_t target,
++ rsbac_enum_t value)
++ {
++ if( (request < R_NONE)
++ && (target <= T_NONE)
++ && (value <= LL_full)
++ )
++ {
++ rsbac_log_levels[request][target] = value;
++ if(log_levels_handle)
++ rsbac_list_add(log_levels_handle, &request, rsbac_log_levels[request]);
++ }
++ };
++
++int rsbac_get_adf_log(rsbac_adf_request_int_t request,
++ enum rsbac_target_t target,
++ u_int * value_p)
++ {
++ if( (request < R_NONE)
++ && (target <= T_NONE)
++ )
++ {
++ *value_p = rsbac_log_levels[request][target];
++ return 0;
++ }
++ else
++ return -RSBAC_EINVALIDVALUE;
++ }
++
++static int R_INIT rsbac_flags_setup(char * line)
++{
++ rsbac_flags = simple_strtoul(line, NULL, 0);
++ rsbac_flags_set(rsbac_flags);
++ return 1;
++}
++__setup("rsbac_flags=", rsbac_flags_setup);
++
++// module_param(rsbac_no_defaults, bool, S_IRUGO);
++ static int R_INIT no_defaults_setup(char *line)
++ {
++ rsbac_no_defaults = 1;
++ return 1;
++ }
++__setup("rsbac_no_defaults", no_defaults_setup);
++
++ #if defined(CONFIG_RSBAC_UM_EXCL)
++ static int R_INIT um_no_excl_setup(char *line)
++ {
++ rsbac_um_no_excl = 1;
++ return 1;
++ }
++ __setup("rsbac_um_no_excl", um_no_excl_setup);
++ #endif
++ #if defined(CONFIG_RSBAC_DAZ_CACHE)
++ /* RSBAC: DAZ - set cache ttl */
++// module_param(rsbac_daz_ttl,
++// int,
++// S_IRUGO);
++ static int R_INIT daz_ttl_setup(char *line)
++ {
++ rsbac_daz_set_ttl(simple_strtoul(line, NULL, 0));
++ return 1;
++ }
++ __setup("rsbac_daz_ttl=", daz_ttl_setup);
++ #endif
++ #if defined(CONFIG_RSBAC_RC_LEARN)
++ static int R_INIT rc_learn_setup(char *line)
++ {
++ rsbac_rc_learn = 1;
++ rsbac_debug_adf_rc = 1;
++ return 1;
++ }
++ __setup("rsbac_rc_learn", rc_learn_setup);
++ #endif
++ #if defined(CONFIG_RSBAC_AUTH) || defined(CONFIG_RSBAC_AUTH_MAINT)
++ /* RSBAC: AUTH - set auth_may_setuid for /bin/login? */
++// module_param(rsbac_auth_enable_login, int, S_IRUGO);
++ static int R_INIT auth_enable_login_setup(char *line)
++ {
++ rsbac_auth_enable_login = 1;
++ return 1;
++ }
++ __setup("rsbac_auth_enable_login", auth_enable_login_setup);
++ #if defined(CONFIG_RSBAC_AUTH_LEARN)
++ static int R_INIT auth_learn_setup(char *line)
++ {
++ rsbac_auth_learn = 1;
++ return 1;
++ }
++ __setup("rsbac_auth_learn", auth_learn_setup);
++ #endif
++ #endif
++ #if defined(CONFIG_RSBAC_CAP_LEARN)
++ static int R_INIT cap_learn_setup(char *line)
++ {
++ rsbac_cap_learn = 1;
++ return 1;
++ }
++ __setup("rsbac_cap_learn", cap_learn_setup);
++ #endif
++ #if defined(CONFIG_RSBAC_ACL_LEARN)
++ /* learn all target types */
++ static int R_INIT acl_learn_setup(char *line)
++ {
++ rsbac_acl_learn_fd = 1;
++ return 1;
++ }
++ __setup("rsbac_acl_learn", acl_learn_setup);
++ static int R_INIT acl_learn_fd_setup(char *line)
++ {
++ rsbac_acl_learn_fd = 1;
++ return 1;
++ }
++ __setup("rsbac_acl_learn_fd", acl_learn_fd_setup);
++ #endif
++ #if defined(CONFIG_RSBAC_RC_LEARN) || defined(CONFIG_RSBAC_AUTH_LEARN) || defined(CONFIG_RSBAC_ACL_LEARN) || defined(CONFIG_RSBAC_CAP_LEARN)
++ static int R_INIT learn_all_setup(char *line)
++ {
++ #if defined(CONFIG_RSBAC_RC_LEARN)
++ rsbac_rc_learn = 1;
++ rsbac_debug_adf_rc = 1;
++ #endif
++ #if defined(CONFIG_RSBAC_AUTH_LEARN)
++ rsbac_auth_learn = 1;
++ #endif
++ #if defined(CONFIG_RSBAC_ACL_LEARN)
++ rsbac_acl_learn_fd = 1;
++ #endif
++ #if defined(CONFIG_RSBAC_CAP_LEARN)
++ rsbac_cap_learn = 1;
++ #endif
++ return 1;
++ }
++ __setup("rsbac_learn_all", learn_all_setup);
++ #endif
++
++ #if defined(CONFIG_RSBAC_SOFTMODE)
++ /* RSBAC: softmode on? */
++// module_param(rsbac_softmode_once, bool, S_IRUGO);
++// module_param(rsbac_softmode, bool, S_IRUGO);
++ static int R_INIT softmode_setup(char *line)
++ {
++ rsbac_softmode = 1;
++ return 1;
++ }
++ __setup("rsbac_softmode", softmode_setup);
++ static int R_INIT softmode_once_setup(char *line)
++ {
++ rsbac_softmode = 1;
++ rsbac_softmode_prohibit = 1;
++ return 1;
++ }
++ __setup("rsbac_softmode_once", softmode_once_setup);
++// module_param(rsbac_softmode_never, bool, S_IRUGO);
++ static int R_INIT softmode_never_setup(char *line)
++ {
++ rsbac_softmode_prohibit = 1;
++ return 1;
++ }
++ __setup("rsbac_softmode_never", softmode_never_setup);
++
++ #if defined(CONFIG_RSBAC_SOFTMODE_IND)
++ /* RSBAC: softmode on for a module? */
++// module_param_named(rsbac_softmode_mac, rsbac_ind_softmode[MAC], bool, S_IRUGO);
++ static int R_INIT softmode_mac_setup(char *line)
++ {
++ rsbac_ind_softmode[SW_MAC] = 1;
++ return 1;
++ }
++ __setup("rsbac_softmode_mac", softmode_mac_setup);
++// module_param_named(rsbac_softmode_pm, rsbac_ind_softmode[SW_PM], bool, S_IRUGO);
++ static int R_INIT softmode_pm_setup(char *line)
++ {
++ rsbac_ind_softmode[SW_PM] = 1;
++ return 1;
++ }
++ __setup("rsbac_softmode_pm", softmode_pm_setup);
++// module_param_named(rsbac_softmode_daz, rsbac_ind_softmode[SW_DAZ], bool, S_IRUGO);
++ static int R_INIT softmode_daz_setup(char *line)
++ {
++ rsbac_ind_softmode[SW_DAZ] = 1;
++ return 1;
++ }
++ __setup("rsbac_softmode_daz", softmode_daz_setup);
++// module_param_named(rsbac_softmode_ff, rsbac_ind_softmode[SW_FF], bool, S_IRUGO);
++ static int R_INIT softmode_ff_setup(char *line)
++ {
++ rsbac_ind_softmode[SW_FF] = 1;
++ return 1;
++ }
++ __setup("rsbac_softmode_ff", softmode_ff_setup);
++// module_param_named(rsbac_softmode_rc, rsbac_ind_softmode[SW_RC], bool, S_IRUGO);
++ static int R_INIT softmode_rc_setup(char *line)
++ {
++ rsbac_ind_softmode[SW_RC] = 1;
++ return 1;
++ }
++ __setup("rsbac_softmode_rc", softmode_rc_setup);
++// module_param_named(rsbac_softmode_auth, rsbac_ind_softmode[SW_AUTH], bool, S_IRUGO);
++ static int R_INIT softmode_auth_setup(char *line)
++ {
++ rsbac_ind_softmode[SW_AUTH] = 1;
++ return 1;
++ }
++ __setup("rsbac_softmode_auth", softmode_auth_setup);
++// module_param_named(rsbac_softmode_reg, rsbac_ind_softmode[SW_REG], bool, S_IRUGO);
++ static int R_INIT softmode_reg_setup(char *line)
++ {
++ rsbac_ind_softmode[SW_REG] = 1;
++ return 1;
++ }
++ __setup("rsbac_softmode_reg", softmode_reg_setup);
++// module_param_named(rsbac_softmode_acl, rsbac_ind_softmode[SW_ACL], bool, S_IRUGO);
++ static int R_INIT softmode_acl_setup(char *line)
++ {
++ rsbac_ind_softmode[SW_ACL] = 1;
++ return 1;
++ }
++ __setup("rsbac_softmode_acl", softmode_acl_setup);
++// module_param_named(rsbac_softmode_cap, rsbac_ind_softmode[SW_CAP], bool, S_IRUGO);
++ static int R_INIT softmode_cap_setup(char *line)
++ {
++ rsbac_ind_softmode[SW_CAP] = 1;
++ return 1;
++ }
++ __setup("rsbac_softmode_cap", softmode_cap_setup);
++// module_param_named(rsbac_softmode_jail, rsbac_ind_softmode[SW_JAIL], bool, S_IRUGO);
++ static int R_INIT softmode_jail_setup(char *line)
++ {
++ rsbac_ind_softmode[SW_JAIL] = 1;
++ return 1;
++ }
++ __setup("rsbac_softmode_jail", softmode_jail_setup);
++// module_param_named(rsbac_softmode_res, rsbac_ind_softmode[SW_RES], bool, S_IRUGO);
++ static int R_INIT softmode_res_setup(char *line)
++ {
++ rsbac_ind_softmode[SW_RES] = 1;
++ return 1;
++ }
++ __setup("rsbac_softmode_res", softmode_res_setup);
++ #endif
++ #endif
++
++ #if defined(CONFIG_RSBAC_CAP_PROC_HIDE)
++ /* RSBAC: hide processes? */
++// module_param(rsbac_cap_process_hiding, bool, S_IRUGO);
++ static int R_INIT cap_process_hiding_setup(char *line)
++ {
++ rsbac_cap_process_hiding = 1;
++ return 1;
++ }
++ __setup("rsbac_cap_process_hiding", cap_process_hiding_setup);
++ #endif
++ #ifdef CONFIG_RSBAC_CAP_LOG_MISSING
++ /* RSBAC: log missing caps? */
++// module_param(rsbac_cap_log_missing, bool, S_IRUGO);
++ static int R_INIT cap_log_missing_setup(char *line)
++ {
++ rsbac_cap_log_missing = 1;
++ return 1;
++ }
++ __setup("rsbac_cap_log_missing", cap_log_missing_setup);
++ #endif
++ #ifdef CONFIG_RSBAC_JAIL_LOG_MISSING
++ /* RSBAC: log missing jail caps? */
++// module_param(rsbac_jail_log_missing, bool, S_IRUGO);
++ static int R_INIT jail_log_missing_setup(char *line)
++ {
++ rsbac_jail_log_missing = 1;
++ return 1;
++ }
++ __setup("rsbac_jail_log_missing", jail_log_missing_setup);
++ #endif
++ #if defined(CONFIG_RSBAC_FREEZE)
++ /* RSBAC: freeze config? */
++// module_param(rsbac_freeze, bool, S_IRUGO);
++ static int R_INIT freeze_setup(char *line)
++ {
++ rsbac_freeze = 1;
++ return 1;
++ }
++ __setup("rsbac_freeze", freeze_setup);
++ #endif
++ /* RSBAC: recover lists? */
++// module_param(rsbac_list_recover, bool, S_IRUGO);
++ static int R_INIT list_recover_setup(char *line)
++ {
++ rsbac_list_recover = 1;
++ return 1;
++ }
++ __setup("rsbac_list_recover", list_recover_setup);
++ static int R_INIT list_rcu_rate_setup(char *line)
++ {
++ rsbac_list_rcu_rate = simple_strtoul(line, NULL, 0);
++ if (rsbac_list_rcu_rate < 1)
++ rsbac_list_rcu_rate = 1;
++ else
++ if (rsbac_list_rcu_rate > 100000)
++ rsbac_list_rcu_rate = 100000;
++ return 1;
++ }
++ __setup("rsbac_list_rcu_rate=", list_rcu_rate_setup);
++ #ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_FULL
++ /* RSBAC: disable Linux DAC? */
++// module_param(rsbac_dac_disable, bool, S_IRUGO);
++ static int R_INIT dac_disable_setup(char *line)
++ {
++ rsbac_dac_disable = 1;
++ return 1;
++ }
++ __setup("rsbac_dac_disable", dac_disable_setup);
++ #endif
++ #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG
++// module_param(rsbac_nosyslog, bool, S_IRUGO);
++ static int R_INIT nosyslog_setup(char *line)
++ {
++ rsbac_nosyslog = 1;
++ return 1;
++ }
++ __setup("rsbac_nosyslog", nosyslog_setup);
++// module_param_named(rsbac_no_syslog, rsbac_nosyslog, bool, S_IRUGO);
++ static int R_INIT no_syslog_setup(char *line)
++ {
++ rsbac_nosyslog = 1;
++ return 1;
++ }
++ __setup("rsbac_no_syslog", no_syslog_setup);
++ #endif
++ #if defined(CONFIG_RSBAC_RMSG)
++ static int R_INIT rmsg_maxentries_setup(char *line)
++ {
++ rsbac_rmsg_maxentries = simple_strtoul(line, NULL, 0);
++ return 1;
++ }
++ __setup("rsbac_rmsg_maxentries=", rmsg_maxentries_setup);
++ #endif
++ #if defined(CONFIG_RSBAC_LOG_REMOTE)
++// module_param_string(rsbac_log_remote_addr,
++// rsbac_log_remote_addr_string,
++// sizeof(rsbac_log_remote_addr_string),
++// S_IRUGO);
++ static int R_INIT log_remote_addr_setup(char *line)
++ {
++ strncpy(rsbac_log_remote_addr_string, line, RSBAC_MAXNAMELEN - 1);
++ rsbac_log_remote_addr_string[RSBAC_MAXNAMELEN - 1]=0;
++ return 1;
++ }
++ __setup("rsbac_log_remote_addr=", log_remote_addr_setup);
++// module_param(rsbac_log_remote_port,
++// int,
++// S_IRUGO);
++ static int R_INIT log_remote_port_setup(char *line)
++ {
++ __u16 tmp_port;
++
++ tmp_port = simple_strtoul(line, NULL, 0);
++ rsbac_log_remote_port = htons(tmp_port);
++ return 1;
++ }
++ __setup("rsbac_log_remote_port=", log_remote_port_setup);
++ static int R_INIT log_remote_maxentries_setup(char *line)
++ {
++ rsbac_log_remote_maxentries = simple_strtoul(line, NULL, 0);
++ return 1;
++ }
++ __setup("rsbac_log_remote_maxentries=", log_remote_maxentries_setup);
++ #endif
++ #ifdef CONFIG_RSBAC_INIT_DELAY
++// module_param(rsbac_no_delay_init, bool, S_IRUGO);
++ static int R_INIT no_delay_init_setup(char *line)
++ {
++ rsbac_no_delay_init = 1;
++ return 1;
++ }
++ __setup("rsbac_no_delay_init", no_delay_init_setup);
++// module_param_named(rsbac_no_init_delay, rsbac_no_delay_init, bool, S_IRUGO);
++ static int R_INIT no_init_delay_setup(char *line)
++ {
++ rsbac_no_delay_init = 1;
++ return 1;
++ }
++ __setup("rsbac_no_init_delay", no_init_delay_setup);
++ char rsbac_delayed_root_str[20] = "";
++// module_param_string(rsbac_delayed_root,
++// rsbac_delayed_root_str,
++// sizeof(rsbac_delayed_root_str),
++// S_IRUGO);
++ static int R_INIT delayed_root_setup(char *line)
++ {
++ strncpy(rsbac_delayed_root_str, line, 19);
++ rsbac_delayed_root_str[19]=0;
++ return 1;
++ }
++ __setup("rsbac_delayed_root=", delayed_root_setup);
++ #endif
++ #ifdef CONFIG_RSBAC_SYSLOG_RATE
++// module_param(rsbac_syslog_rate,
++// int,
++// S_IRUGO);
++ static int R_INIT syslog_rate_setup(char *line)
++ {
++ rsbac_syslog_rate = simple_strtoul(line, NULL, 0);
++ return 1;
++ }
++ __setup("rsbac_syslog_rate=", syslog_rate_setup);
++ #endif
++#ifdef CONFIG_RSBAC_FD_CACHE
++// module_param(rsbac_fd_cache_ttl,
++// int,
++// S_IRUGO);
++ static int R_INIT fd_cache_ttl_setup(char *line)
++ {
++ rsbac_fd_cache_ttl = simple_strtoul(line, NULL, 0);
++ return 1;
++ }
++ __setup("rsbac_fd_cache_ttl=", fd_cache_ttl_setup);
++// module_param(rsbac_fd_cache_disable, bool, S_IRUGO);
++ static int R_INIT fd_cache_disable_setup(char *line)
++ {
++ rsbac_fd_cache_disable = 1;
++ return 1;
++ }
++ __setup("rsbac_fd_cache_disable", fd_cache_disable_setup);
++#endif
++#if defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)
++// module_param(rsbac_list_check_interval,
++// int,
++// S_IRUGO);
++ static int R_INIT list_check_interval_setup(char *line)
++ {
++ rsbac_list_check_interval = simple_strtoul(line, NULL, 0);
++ return 1;
++ }
++ __setup("rsbac_list_check_interval=", list_check_interval_setup);
++#endif
++
++#ifdef CONFIG_RSBAC_DEBUG
++ #ifdef CONFIG_RSBAC_NET
++ /* RSBAC: debug for net data structures? */
++// module_param(rsbac_debug_ds_net, bool, S_IRUGO);
++ static int R_INIT debug_ds_net_setup(char *line)
++ {
++ rsbac_debug_ds_net = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_ds_net", debug_ds_net_setup);
++ /* RSBAC: debug for net syscalls/AEF? */
++// module_param(rsbac_debug_aef_net, bool, S_IRUGO);
++ static int R_INIT debug_aef_net_setup(char *line)
++ {
++ rsbac_debug_aef_net = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_aef_net", debug_aef_net_setup);
++ /* RSBAC: debug for net decisions/ADF? */
++// module_param(rsbac_debug_adf_net, bool, S_IRUGO);
++ static int R_INIT debug_adf_net_setup(char *line)
++ {
++ rsbac_debug_adf_net = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_adf_net", debug_adf_net_setup);
++ #endif
++
++ #if defined(CONFIG_RSBAC_MAC)
++// module_param(rsbac_debug_ds_mac, bool, S_IRUGO);
++ static int R_INIT debug_ds_mac_setup(char *line)
++ {
++ rsbac_debug_ds_mac = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_ds_mac", debug_ds_mac_setup);
++// module_param(rsbac_debug_aef_mac, bool, S_IRUGO);
++ static int R_INIT debug_aef_mac_setup(char *line)
++ {
++ rsbac_debug_aef_mac = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_aef_mac", debug_aef_mac_setup);
++// module_param(rsbac_debug_adf_mac, bool, S_IRUGO);
++ static int R_INIT debug_adf_mac_setup(char *line)
++ {
++ rsbac_debug_adf_mac = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_adf_mac", debug_adf_mac_setup);
++ #if defined(CONFIG_RSBAC_SWITCH_MAC) && defined(RSBAC_SWITCH_BOOT_OFF)
++// module_param(rsbac_switch_off_mac, bool, S_IRUGO);
++ static int R_INIT switch_off_mac_setup(char *line)
++ {
++ rsbac_switch_mac = 0;
++ return 1;
++ }
++ __setup("rsbac_switch_off_mac", switch_off_mac_setup);
++ #endif
++ #endif
++ #if defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_PM_MAINT)
++// module_param(rsbac_debug_ds_pm, bool, S_IRUGO);
++ static int R_INIT debug_ds_pm_setup(char *line)
++ {
++ rsbac_debug_ds_pm = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_ds_pm", debug_ds_pm_setup);
++// module_param(rsbac_debug_aef_pm, bool, S_IRUGO);
++ static int R_INIT debug_aef_pm_setup(char *line)
++ {
++ rsbac_debug_aef_pm = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_aef_pm", debug_aef_pm_setup);
++// module_param(rsbac_debug_adf_pm, bool, S_IRUGO);
++ static int R_INIT debug_adf_pm_setup(char *line)
++ {
++ rsbac_debug_adf_pm = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_adf_pm", debug_adf_pm_setup);
++ #if defined(CONFIG_RSBAC_SWITCH_PM) && defined(RSBAC_SWITCH_BOOT_OFF)
++// module_param(rsbac_switch_off_mac, bool, S_IRUGO);
++ static int R_INIT switch_off_pm_setup(char *line)
++ {
++ rsbac_switch_pm = 0;
++ return 1;
++ }
++ __setup("rsbac_switch_off_pm", switch_off_pm_setup);
++ #endif
++ #endif
++ #if defined(CONFIG_RSBAC_DAZ)
++// module_param(rsbac_debug_adf_daz, bool, S_IRUGO);
++ static int R_INIT debug_adf_daz_setup(char *line)
++ {
++ rsbac_debug_adf_daz = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_adf_daz", debug_adf_daz_setup);
++ #if defined(CONFIG_RSBAC_SWITCH_DAZ) && defined(RSBAC_SWITCH_BOOT_OFF)
++// module_param(rsbac_switch_off_mac, bool, S_IRUGO);
++ static int R_INIT switch_off_daz_setup(char *line)
++ {
++ rsbac_switch_daz = 0;
++ return 1;
++ }
++ __setup("rsbac_switch_off_daz", switch_off_daz_setup);
++ #endif
++ #endif
++ #if defined(CONFIG_RSBAC_RC) || defined(CONFIG_RSBAC_RC_MAINT)
++// module_param(rsbac_debug_ds_rc, bool, S_IRUGO);
++ static int R_INIT debug_ds_rc_setup(char *line)
++ {
++ rsbac_debug_ds_rc = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_ds_rc", debug_ds_rc_setup);
++// module_param(rsbac_debug_aef_rc, bool, S_IRUGO);
++ static int R_INIT debug_aef_rc_setup(char *line)
++ {
++ rsbac_debug_aef_rc = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_aef_rc", debug_aef_rc_setup);
++// module_param(rsbac_debug_adf_rc, bool, S_IRUGO);
++ static int R_INIT debug_adf_rc_setup(char *line)
++ {
++ rsbac_debug_adf_rc = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_adf_rc", debug_adf_rc_setup);
++ #if defined(CONFIG_RSBAC_SWITCH_RC) && defined(RSBAC_SWITCH_BOOT_OFF)
++// module_param(rsbac_switch_off_rc, bool, S_IRUGO);
++ static int R_INIT switch_off_rc_setup(char *line)
++ {
++ rsbac_switch_rc = 0;
++ return 1;
++ }
++ __setup("rsbac_switch_off_rc", switch_off_rc_setup);
++ #endif
++ #endif
++ #if defined(CONFIG_RSBAC_AUTH) || defined(CONFIG_RSBAC_AUTH_MAINT)
++// module_param(rsbac_debug_ds_auth, bool, S_IRUGO);
++ static int R_INIT debug_ds_auth_setup(char *line)
++ {
++ rsbac_debug_ds_auth = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_ds_auth", debug_ds_auth_setup);
++// module_param(rsbac_debug_aef_auth, bool, S_IRUGO);
++ static int R_INIT debug_aef_auth_setup(char *line)
++ {
++ rsbac_debug_aef_auth = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_aef_auth", debug_aef_auth_setup);
++// module_param(rsbac_debug_adf_auth, bool, S_IRUGO);
++ static int R_INIT debug_adf_auth_setup(char *line)
++ {
++ rsbac_debug_adf_auth = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_adf_auth", debug_adf_auth_setup);
++ #if defined(CONFIG_RSBAC_SWITCH_AUTH) && defined(RSBAC_SWITCH_BOOT_OFF)
++// module_param(rsbac_switch_off_auth, bool, S_IRUGO);
++ static int R_INIT switch_off_auth_setup(char *line)
++ {
++ rsbac_switch_auth = 0;
++ return 1;
++ }
++ __setup("rsbac_switch_off_auth", switch_off_auth_setup);
++ #endif
++ #endif
++ #if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++// module_param(rsbac_debug_reg, bool, S_IRUGO);
++ static int R_INIT debug_reg_setup(char *line)
++ {
++ rsbac_debug_reg = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_reg", debug_reg_setup);
++ #endif
++ #if defined(CONFIG_RSBAC_ACL) || defined(CONFIG_RSBAC_ACL_MAINT)
++// module_param(rsbac_debug_ds_acl, bool, S_IRUGO);
++ static int R_INIT debug_ds_acl_setup(char *line)
++ {
++ rsbac_debug_ds_acl = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_ds_acl", debug_ds_acl_setup);
++// module_param(rsbac_debug_aef_acl, bool, S_IRUGO);
++ static int R_INIT debug_aef_acl_setup(char *line)
++ {
++ rsbac_debug_aef_acl = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_aef_acl", debug_aef_acl_setup);
++// module_param(rsbac_debug_adf_acl, bool, S_IRUGO);
++ static int R_INIT debug_adf_acl_setup(char *line)
++ {
++ rsbac_debug_adf_acl = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_adf_acl", debug_adf_acl_setup);
++ #if defined(CONFIG_RSBAC_SWITCH_ACL) && defined(RSBAC_SWITCH_BOOT_OFF)
++// module_param(rsbac_switch_off_acl, bool, S_IRUGO);
++ static int R_INIT switch_off_acl_setup(char *line)
++ {
++ rsbac_switch_acl = 0;
++ return 1;
++ }
++ __setup("rsbac_switch_off_acl", switch_off_acl_setup);
++ #endif
++ #endif
++ #if defined(CONFIG_RSBAC_JAIL)
++// module_param(rsbac_debug_aef_jail, bool, S_IRUGO);
++ static int R_INIT debug_aef_jail_setup(char *line)
++ {
++ rsbac_debug_aef_jail = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_aef_jail", debug_aef_jail_setup);
++// module_param(rsbac_debug_adf_jail, bool, S_IRUGO);
++ static int R_INIT debug_adf_jail_setup(char *line)
++ {
++ rsbac_debug_adf_jail = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_adf_jail", debug_adf_jail_setup);
++ #if defined(CONFIG_RSBAC_SWITCH_JAIL) && defined(RSBAC_SWITCH_BOOT_OFF)
++// module_param(rsbac_switch_off_jail, bool, S_IRUGO);
++ static int R_INIT switch_off_jail_setup(char *line)
++ {
++ rsbac_switch_jail = 0;
++ return 1;
++ }
++ __setup("rsbac_switch_off_jail", switch_off_jail_setup);
++ #endif
++ #endif
++ #if defined(CONFIG_RSBAC_PAX)
++// module_param(rsbac_debug_adf_pax, bool, S_IRUGO);
++ static int R_INIT debug_adf_pax_setup(char *line)
++ {
++ rsbac_debug_adf_pax = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_adf_pax", debug_adf_pax_setup);
++ #if defined(CONFIG_RSBAC_SWITCH_PAX) && defined(RSBAC_SWITCH_BOOT_OFF)
++// module_param(rsbac_switch_off_pax, bool, S_IRUGO);
++ static int R_INIT switch_off_pax_setup(char *line)
++ {
++ rsbac_switch_pax = 0;
++ return 1;
++ }
++ __setup("rsbac_switch_off_pax", switch_off_pax_setup);
++ #endif
++ #endif
++ #if defined(CONFIG_RSBAC_SWITCH_FF) && defined(RSBAC_SWITCH_BOOT_OFF)
++// module_param(rsbac_switch_off_ff, bool, S_IRUGO);
++ static int R_INIT switch_off_ff_setup(char *line)
++ {
++ rsbac_switch_ff = 0;
++ return 1;
++ }
++ __setup("rsbac_switch_off_ff", switch_off_ff_setup);
++ #endif
++ #if defined(CONFIG_RSBAC_SWITCH_RES) && defined(RSBAC_SWITCH_BOOT_OFF)
++// module_param(rsbac_switch_off_res, bool, S_IRUGO);
++ static int R_INIT switch_off_res_setup(char *line)
++ {
++ rsbac_switch_res = 0;
++ return 1;
++ }
++ __setup("rsbac_switch_off_res", switch_off_res_setup);
++ #endif
++ #if defined(CONFIG_RSBAC_SWITCH_CAP) && defined(RSBAC_SWITCH_BOOT_OFF)
++// module_param(rsbac_switch_off_cap, bool, S_IRUGO);
++ static int R_INIT switch_off_cap_setup(char *line)
++ {
++ rsbac_switch_cap = 0;
++ return 1;
++ }
++ __setup("rsbac_switch_off_cap", switch_off_cap_setup);
++ #endif
++ #if defined(CONFIG_RSBAC_UM)
++// module_param(rsbac_debug_ds_um, bool, S_IRUGO);
++ static int R_INIT debug_ds_um_setup(char *line)
++ {
++ rsbac_debug_ds_um = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_ds_um", debug_ds_um_setup);
++// module_param(rsbac_debug_aef_um, bool, S_IRUGO);
++ static int R_INIT debug_aef_um_setup(char *line)
++ {
++ rsbac_debug_aef_um = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_aef_um", debug_aef_um_setup);
++// module_param(rsbac_debug_adf_um, bool, S_IRUGO);
++ static int R_INIT debug_adf_um_setup(char *line)
++ {
++ rsbac_debug_adf_um = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_adf_um", debug_adf_um_setup);
++ #endif
++ #if defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)
++// module_param(rsbac_debug_auto, bool, S_IRUGO);
++ static int R_INIT debug_auto_setup(char *line)
++ {
++ rsbac_debug_auto = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_auto", debug_auto_setup);
++ #endif
++ /* RSBAC: debug_lists */
++// module_param(rsbac_debug_lists, bool, S_IRUGO);
++ static int R_INIT debug_lists_setup(char *line)
++ {
++ rsbac_debug_lists = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_lists", debug_lists_setup);
++ /* RSBAC: debug_stack */
++// module_param(rsbac_debug_stack, bool, S_IRUGO);
++ static int R_INIT debug_stack_setup(char *line)
++ {
++ rsbac_debug_stack = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_stack", debug_stack_setup);
++ /* RSBAC: debug for data structures? */
++// module_param(rsbac_debug_ds, bool, S_IRUGO);
++ static int R_INIT debug_ds_setup(char *line)
++ {
++ rsbac_debug_ds = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_ds", debug_ds_setup);
++ /* RSBAC: debug for writing of data structures? */
++// module_param(rsbac_debug_write, bool, S_IRUGO);
++ static int R_INIT debug_write_setup(char *line)
++ {
++ rsbac_debug_write = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_write", debug_write_setup);
++ /* RSBAC: debug for AEF? */
++// module_param(rsbac_debug_aef, bool, S_IRUGO);
++ static int R_INIT debug_aef_setup(char *line)
++ {
++ rsbac_debug_aef = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_aef", debug_aef_setup);
++ /* RSBAC: debug_no_write for ds */
++// module_param(rsbac_debug_no_write, bool, S_IRUGO);
++ static int R_INIT debug_no_write_setup(char *line)
++ {
++ rsbac_debug_no_write = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_no_write", debug_no_write_setup);
++ /* RSBAC: debug default for ADF */
++// module_param(rsbac_debug_adf_default, int, S_IRUGO);
++ static int R_INIT debug_adf_default_setup(char *line)
++ {
++ rsbac_debug_adf_default = 1;
++ return 1;
++ }
++ __setup("rsbac_debug_adf_default", debug_adf_default_setup);
++#endif /* DEBUG */
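
All of the handlers above are registered with __setup(), so they act as plain kernel command line parameters (the corresponding module_param() calls are left commented out). Assuming the matching CONFIG_RSBAC_* options are enabled, a boot line combining a few of them could look like this (values are examples only):

    rsbac_softmode_once rsbac_debug_aef_um rsbac_syslog_rate=200 rsbac_log_remote_addr=192.0.2.10 rsbac_log_remote_port=514
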
++
++#if defined(CONFIG_RSBAC_RMSG)
++static DEFINE_SPINLOCK(rsbac_log_lock);
++
++#if defined(CONFIG_RSBAC_LOG_REMOTE)
++static DEFINE_SPINLOCK(rsbac_log_remote_lock);
++#endif
++
++/*
++ * Commands to do_syslog:
++ *
++ * 0 -- Close the log. Currently a NOP.
++ * 1 -- Open the log. Currently a NOP.
++ * 2 -- Read from the log.
++ * 3 -- Read all messages remaining in the ring buffer.
++ * 4 -- Read and clear all messages remaining in the ring buffer
++ * 5 -- Clear ring buffer.
++ * 9 -- Return number of unread characters in the log buffer
++ */
++int rsbac_log(int type, char * buf, int len)
++{
++ unsigned long count;
++ int do_clear = 0;
++ int error = 0;
++ char * k_buf;
++
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ struct rsbac_log_list_item_t * log_item;
++
++ /* RSBAC */
++ rsbac_target_id.scd = ST_rsbac_log;
++ rsbac_attribute_value.dummy = 0;
++ if ((type == 4) || (type == 5))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "rsbac_log(): function %u, calling ADF for MODIFY_SYSTEM_DATA\n", type);
++ }
++#endif
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ error = -EPERM;
++ goto out;
++ }
++ }
++ else
++ if(type >= 1)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "rsbac_log(): function %u, calling ADF for GET_STATUS_DATA\n", type);
++ }
++#endif
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ error = -EPERM;
++ goto out;
++ }
++ }
++
++ switch (type) {
++ case 0: /* Close log */
++ break;
++ case 1: /* Open log */
++ break;
++ case 2: /* Read from log */
++ error = -EINVAL;
++ if (!buf || len < 0)
++ goto out;
++ error = 0;
++ if (!len)
++ goto out;
++			if (!access_ok(VERIFY_WRITE,buf,len)) {
++				error = -EFAULT;
++				goto out;
++			}
++ error = wait_event_interruptible(rlog_wait, log_list_head.count);
++ if (error)
++ goto out;
++ if (len > RSBAC_LOG_MAXREADBUF)
++ len = RSBAC_LOG_MAXREADBUF;
++			k_buf = rsbac_kmalloc(len);
++			if (!k_buf) {
++				error = -ENOMEM;
++				goto out;
++			}
++ count = 0;
++ spin_lock(&rsbac_log_lock);
++ log_item = log_list_head.head;
++ while (log_item && (count + log_item->size < len)) {
++ memcpy(k_buf + count, log_item->buffer, log_item->size);
++ count += log_item->size;
++ log_item = log_item->next;
++ kfree(log_list_head.head);
++ log_list_head.head = log_item;
++ if(!log_item)
++ log_list_head.tail = NULL;
++ log_list_head.count--;
++ }
++ spin_unlock(&rsbac_log_lock);
++			if (copy_to_user(buf, k_buf, count))
++				error = -EFAULT;
++			else
++				error = count;
++ rsbac_kfree(k_buf);
++ break;
++ case 4: /* Read/clear last kernel messages */
++ do_clear = 1;
++ /* FALL THRU */
++ case 3: /* Read last kernel messages */
++ error = -EINVAL;
++ if (!buf || len < 0)
++ goto out;
++ error = 0;
++ if (!len)
++ goto out;
++			if (!access_ok(VERIFY_WRITE,buf,len)) {
++				error = -EFAULT;
++				goto out;
++			}
++ if (len > RSBAC_LOG_MAXREADBUF)
++ len = RSBAC_LOG_MAXREADBUF;
++			k_buf = rsbac_kmalloc(len);
++			if (!k_buf) {
++				error = -ENOMEM;
++				goto out;
++			}
++ count = 0;
++ spin_lock(&rsbac_log_lock);
++ log_item = log_list_head.head;
++ while (log_item && (count + log_item->size < len)) {
++ memcpy(k_buf + count, log_item->buffer, log_item->size);
++ count += log_item->size;
++ log_item = log_item->next;
++ if(do_clear) {
++ kfree(log_list_head.head);
++ log_list_head.head = log_item;
++ if(!log_item)
++ log_list_head.tail = NULL;
++ log_list_head.count--;
++ }
++ }
++ spin_unlock(&rsbac_log_lock);
++			if (copy_to_user(buf, k_buf, count))
++				error = -EFAULT;
++			else
++				error = count;
++ rsbac_kfree(k_buf);
++ break;
++ case 5: /* Clear ring buffer */
++ spin_lock(&rsbac_log_lock);
++ log_item = log_list_head.head;
++ while (log_item) {
++ log_item = log_item->next;
++ kfree(log_list_head.head);
++ log_list_head.head = log_item;
++ }
++ log_list_head.tail = NULL;
++ log_list_head.count = 0;
++ spin_unlock(&rsbac_log_lock);
++ error = 0;
++ break;
++ case 9: /* Number of chars in the log buffer */
++ error = 0;
++ spin_lock(&rsbac_log_lock);
++ log_item = log_list_head.head;
++ while (log_item) {
++ error += log_item->size;
++ log_item = log_item->next;
++ }
++ spin_unlock(&rsbac_log_lock);
++ break;
++ default:
++ error = -EINVAL;
++ break;
++ }
++out:
++ return error;
++}
++#endif /* RMSG */
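
rsbac_log() deliberately mirrors the do_syslog() command numbers listed in the comment above it, and the rmsg proc file further down wires read() to command 2 (blocking read) and poll() to command 9. A minimal user-space reader might look like the sketch below; the proc path is an assumption (it depends on where the RSBAC proc directory is registered, commonly /proc/rsbac-info/rmsg):

/* Illustrative RSBAC log reader; the path is an assumption, see above. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd = open("/proc/rsbac-info/rmsg", O_RDONLY);

        if (fd < 0)
                return 1;
        /* read() ends up in rsbac_log(2, ...) and blocks until entries arrive */
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);
        close(fd);
        return 0;
}
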
++
++#ifdef CONFIG_RSBAC_SYSLOG_RATE
++static void syslog_rate_reset(u_long dummy)
++ {
++ if(syslog_count > rsbac_syslog_rate)
++ printk(KERN_INFO "syslog_rate_reset: resetting syslog_count at %u, next message is %u\n",
++ syslog_count, log_seq);
++ syslog_count = 0;
++ mod_timer(&rsbac_syslog_rate_timer, jiffies + HZ);
++ }
++#endif
++
++EXPORT_SYMBOL(rsbac_printk);
++int rsbac_printk(const char *fmt, ...)
++{
++ va_list args;
++ int printed_len;
++ char * buf;
++#if defined(CONFIG_RSBAC_RMSG)
++ struct rsbac_log_list_item_t * log_item;
++#endif
++
++ if (rsbac_is_initialized())
++ buf = rsbac_kmalloc(RSBAC_LOG_MAXLINE);
++ else
++ buf = kmalloc(RSBAC_LOG_MAXLINE, GFP_ATOMIC);
++ if (!buf)
++ return -ENOMEM;
++ /* Emit the output into the buffer */
++ va_start(args, fmt);
++ printed_len = vsnprintf(buf + 11, RSBAC_LOG_MAXLINE - 14, fmt, args);
++ va_end(args);
++ if(printed_len < 4) {
++ kfree(buf);
++ return printed_len;
++ }
++ buf[0] = '<';
++ buf[1] = buf[12];
++ buf[2] = '>';
++ sprintf(buf + 3, "%010u", log_seq++);
++ buf[13] = '|';
++ /* Terminate string */
++ buf[printed_len + 11] = 0;
++
++ /* copy to printk */
++#ifdef CONFIG_RSBAC_RMSG_NOSYSLOG
++ if (!rsbac_nosyslog)
++#endif
++ {
++#ifdef CONFIG_RSBAC_SYSLOG_RATE
++ syslog_count++;
++ if(syslog_count < rsbac_syslog_rate)
++#endif
++ printk("%s", buf);
++#ifdef CONFIG_RSBAC_SYSLOG_RATE
++ else
++ if(syslog_count == rsbac_syslog_rate)
++ printk(KERN_INFO "rsbac_printk: Applying syslog rate limit at count %u, message %u!\n",
++ syslog_count, log_seq - 1);
++#endif
++ }
++ /* Buffer is ready, now link into log list */
++#if defined(CONFIG_RSBAC_RMSG)
++ if (rsbac_is_initialized())
++ log_item = rsbac_kmalloc(sizeof(*log_item) + printed_len + 12);
++ else
++ log_item = kmalloc(sizeof(*log_item) + printed_len + 12, GFP_ATOMIC);
++ if(log_item) {
++ memcpy(log_item->buffer, buf, printed_len + 11);
++ log_item->size = printed_len + 11;
++ log_item->next = NULL;
++ spin_lock(&rsbac_log_lock);
++ if (log_list_head.tail) {
++ log_list_head.tail->next = log_item;
++ } else {
++ log_list_head.head = log_item;
++ }
++ log_list_head.tail = log_item;
++ log_list_head.count++;
++ while(log_list_head.count > rsbac_rmsg_maxentries) {
++ log_item = log_list_head.head;
++ log_list_head.head = log_item->next;
++ log_list_head.count--;
++ log_list_head.lost++;
++ kfree(log_item);
++ }
++ spin_unlock(&rsbac_log_lock);
++ wake_up_interruptible(&rlog_wait);
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_LOG_REMOTE)
++ /* Link into remote log list */
++ if (rsbac_is_initialized())
++ log_item = rsbac_kmalloc(sizeof(*log_item) + printed_len + 12);
++ else
++ log_item = kmalloc(sizeof(*log_item) + printed_len + 12, GFP_ATOMIC);
++ if(log_item) {
++ memcpy(log_item->buffer, buf, printed_len + 11);
++ log_item->size = printed_len + 11;
++ log_item->next = NULL;
++ spin_lock(&rsbac_log_remote_lock);
++ if (remote_log_list_head.tail) {
++ remote_log_list_head.tail->next = log_item;
++ } else {
++ remote_log_list_head.head = log_item;
++ }
++ remote_log_list_head.tail = log_item;
++ remote_log_list_head.count++;
++ while(remote_log_list_head.count > rsbac_log_remote_maxentries) {
++ log_item = remote_log_list_head.head;
++ remote_log_list_head.head = log_item->next;
++ remote_log_list_head.count--;
++ remote_log_list_head.lost++;
++ kfree(log_item);
++ }
++ spin_unlock(&rsbac_log_remote_lock);
++#ifdef CONFIG_RSBAC_LOG_REMOTE_SYNC
++ wake_up_interruptible(&rsbaclogd_wait);
++#endif
++ }
++#endif
++
++ kfree(buf);
++ return printed_len;
++}
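
Each line emitted by rsbac_printk() above is reframed before it is queued: the priority digit from the KERN_* prefix is kept as "<L>", followed by the zero-padded 10-character sequence number and a '|' separator, then the original message text. A small parsing sketch for that framing (illustrative, user-space style):

#include <stdio.h>

/* Parse "<L>SSSSSSSSSS|message" as produced by rsbac_printk() above. */
static int parse_rsbac_line(const char *line, char *level,
                            unsigned int *seq, const char **msg)
{
        if (sscanf(line, "<%c>%10u|", level, seq) != 2)
                return -1;
        *msg = line + 14;       /* 3 (prefix) + 10 (sequence) + 1 ('|') */
        return 0;
}
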
++
++#if defined(CONFIG_RSBAC_RMSG)
++#if defined(CONFIG_RSBAC_PROC)
++static int rmsg_open(struct inode * inode, struct file * file)
++{
++ return rsbac_log(1,NULL,0);
++}
++
++static int rmsg_release(struct inode * inode, struct file * file)
++{
++ (void) rsbac_log(0,NULL,0);
++ return 0;
++}
++
++static ssize_t rmsg_read(struct file * file, char * buf,
++ size_t count, loff_t *ppos)
++{
++ return rsbac_log(2,buf,count);
++}
++
++static unsigned int rmsg_poll(struct file *file, poll_table * wait)
++{
++ poll_wait(file, &rlog_wait, wait);
++ if (rsbac_log(9, 0, 0))
++ return POLLIN | POLLRDNORM;
++ return 0;
++}
++
++static struct file_operations rmsg_proc_fops = {
++ .read = rmsg_read,
++ .poll = rmsg_poll, /* rmsg_poll */
++ .open = rmsg_open,
++ .release = rmsg_release
++};
++
++static struct proc_dir_entry *rmsg;
++
++#endif /* PROC */
++#endif /* RMSG */
++
++#if defined(CONFIG_RSBAC_PROC)
++#ifndef PROC_BLOCK_SIZE
++#define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */
++#endif
++
++static int
++log_levels_proc_show(struct seq_file *m, void *v)
++{
++ int i,j;
++ char * name;
++ char * name2;
++
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_is_initialized())
++ return (-ENOSYS);
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++	  rsbac_printk(KERN_DEBUG "log_levels_proc_show(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++
++ name = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if(!name)
++ return -ENOMEM;
++ name2 = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if(!name2)
++ {
++ rsbac_kfree(name);
++ return -ENOMEM;
++ }
++
++ seq_printf(m, "RSBAC Log Levels\n----------------\n");
++ seq_printf(m, "Name\t\t\tFILE\tDIR\tFIFO\tSYMLINK\tDEV\tIPC\tSCD\tUSER\tPROCESS\tNETDEV\tNETTEMP\tNETOBJ\tNETT_NT\tNONE\n");
++
++ for (i = 0; i<R_NONE; i++)
++ {
++ seq_printf(m, "%-23s",
++ get_request_name(name, i));
++ for(j = 0; j<=T_NONE; j++)
++ {
++ if(j != T_FD)
++ seq_printf(m, "\t%u",
++ rsbac_log_levels[i][j]);
++ }
++ seq_printf(m, "\n");
++ }
++
++ rsbac_kfree(name);
++ rsbac_kfree(name2);
++
++ return 0;
++}
++
++static ssize_t log_levels_proc_write(struct file * file, const char __user * buf,
++ size_t count, loff_t *ppos)
++{
++ ssize_t err;
++ char * k_buf;
++ char * p;
++ unsigned int log_level;
++ char rname[RSBAC_MAXNAMELEN];
++ int i,j;
++
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if(count > PROC_BLOCK_SIZE) {
++ return(-EOVERFLOW);
++ }
++
++ if (!(k_buf = (char *) __get_free_page(GFP_KERNEL)))
++ return(-ENOMEM);
++        err = copy_from_user(k_buf, buf, count);
++        if(err)
++          {
++            free_page((ulong) k_buf);
++            return -EFAULT;
++          }
++        k_buf[count] = 0;
++
++ err = count;
++ if(count < 15 || strncmp("log_levels", k_buf, 10))
++ {
++ goto out;
++ }
++ if (!rsbac_is_initialized())
++ {
++ err = -ENOSYS;
++ goto out;
++ }
++
++ /*
++ * Usage: echo "log_levels request #N" > /proc/rsbac_info/log_levels
++ * to set log level for request to given value
++ */
++ for(i=0; i<R_NONE; i++)
++ {
++ get_request_name(rname,i);
++ if(!strncmp(rname, k_buf + 11, strlen(rname)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "log_levels_proc_write(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.dummy = 0;
++ rsbac_attribute_value.request = i;
++ if (!rsbac_adf_request(R_SWITCH_LOG,
++ task_pid(current),
++ T_NONE,
++ rsbac_target_id,
++ A_request,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ p = k_buf + 11 + strlen(rname)+1;
++
++ if( *p == '\0' )
++ goto out;
++
++ log_level = simple_strtoul(p, NULL, 0);
++ /* only accept the valid log levels LL_none, LL_denied or LL_full */
++ if( (log_level == LL_none)
++ || (log_level == LL_denied)
++ || (log_level == LL_full)
++ )
++ {
++ rsbac_printk(KERN_INFO
++ "log_levels_proc_write(): setting %s log level for all target types to %u\n",
++ rname, log_level);
++ for(j = 0; j<=T_NONE; j++)
++ {
++ rsbac_log_levels[i][j] = log_level;
++ }
++ err = count;
++ goto out;
++ }
++ else
++ {
++ rsbac_printk(KERN_INFO
++ "log_levels_proc_write(): rejecting invalid log level (should be %u, %u or %u)\n",
++ LL_none, LL_denied, LL_full);
++ goto out;
++ }
++ }
++ }
++
++out:
++ free_page((ulong) k_buf);
++ return(err);
++}
++
++static int log_levels_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, log_levels_proc_show, NULL);
++}
++
++static const struct file_operations log_levels_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = log_levels_proc_open,
++ .read = seq_read,
++ .write = log_levels_proc_write,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *log_levels;
++
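++/* /proc/rsbac_info/debug, read side: after an ADF GET_STATUS_DATA check,
++ * dump the current settings of all compiled-in softmode, freeze, cache,
++ * remote logging and debug switches. */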
++static int
++debug_proc_show(struct seq_file *m, void *v)
++{
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (!rsbac_is_initialized())
++ return (-ENOSYS);
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "debug_proc_info(): calling ADF\n");
++#endif
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++ seq_printf(m, "RSBAC Debug Settings\n--------------------\n");
++
++#ifdef CONFIG_RSBAC_SOFTMODE
++ seq_printf(m, "rsbac_softmode is %i\n",
++ rsbac_softmode);
++ seq_printf(m, "rsbac_softmode_prohibit is %i\n",
++ rsbac_softmode_prohibit);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++#ifdef CONFIG_RSBAC_MAC
++ seq_printf(m, "rsbac_ind_softmode[MAC] is %i\n",
++ rsbac_ind_softmode[SW_MAC]);
++#endif
++#ifdef CONFIG_RSBAC_PM
++ seq_printf(m, "rsbac_ind_softmode[PM] is %i\n",
++ rsbac_ind_softmode[SW_PM]);
++#endif
++#ifdef CONFIG_RSBAC_DAZ
++ seq_printf(m, "rsbac_ind_softmode[DAZ] is %i\n",
++ rsbac_ind_softmode[SW_DAZ]);
++#endif
++#ifdef CONFIG_RSBAC_FF
++ seq_printf(m, "rsbac_ind_softmode[FF] is %i\n",
++ rsbac_ind_softmode[SW_FF]);
++#endif
++#ifdef CONFIG_RSBAC_RC
++ seq_printf(m, "rsbac_ind_softmode[RC] is %i\n",
++ rsbac_ind_softmode[SW_RC]);
++#endif
++#ifdef CONFIG_RSBAC_AUTH
++ seq_printf(m, "rsbac_ind_softmode[AUTH] is %i\n",
++ rsbac_ind_softmode[SW_AUTH]);
++#endif
++#ifdef CONFIG_RSBAC_REG
++ seq_printf(m, "rsbac_ind_softmode[REG] is %i\n",
++ rsbac_ind_softmode[SW_REG]);
++#endif
++#ifdef CONFIG_RSBAC_ACL
++ seq_printf(m, "rsbac_ind_softmode[ACL] is %i\n",
++ rsbac_ind_softmode[SW_ACL]);
++#endif
++#ifdef CONFIG_RSBAC_CAP
++ seq_printf(m, "rsbac_ind_softmode[CAP] is %i\n",
++ rsbac_ind_softmode[SW_CAP]);
++#endif
++#ifdef CONFIG_RSBAC_JAIL
++ seq_printf(m, "rsbac_ind_softmode[JAIL] is %i\n",
++ rsbac_ind_softmode[SW_JAIL]);
++#endif
++#ifdef CONFIG_RSBAC_RES
++ seq_printf(m, "rsbac_ind_softmode[RES] is %i\n",
++ rsbac_ind_softmode[SW_RES]);
++#endif
++#endif
++#endif
++#ifdef CONFIG_RSBAC_FREEZE
++ seq_printf(m, "rsbac_freeze is %i\n",
++ rsbac_freeze);
++#endif
++ seq_printf(m, "rsbac_list_recover is %i (read-only)\n",
++ rsbac_list_recover);
++ seq_printf(m, "rsbac_list_rcu_rate is %u\n",
++ rsbac_list_rcu_rate);
++#if defined(CONFIG_RSBAC_DAZ_CACHE)
++ /* RSBAC: DAZ - set cache ttl */
++ seq_printf(m, "rsbac_daz_ttl is %u\n",
++ rsbac_daz_get_ttl());
++#endif
++#ifdef CONFIG_RSBAC_CAP_PROC_HIDE
++ seq_printf(m, "rsbac_cap_process_hiding is %i\n",
++ rsbac_cap_process_hiding);
++#endif
++#ifdef CONFIG_RSBAC_CAP_LOG_MISSING
++ seq_printf(m, "rsbac_cap_log_missing is %i\n",
++ rsbac_cap_log_missing);
++#endif
++#ifdef CONFIG_RSBAC_JAIL_LOG_MISSING
++ seq_printf(m, "rsbac_jail_log_missing is %i\n",
++ rsbac_jail_log_missing);
++#endif
++
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_FULL
++ seq_printf(m, "rsbac_dac_disable is %i\n",
++ rsbac_dac_disable);
++#endif
++
++#ifdef CONFIG_RSBAC_RMSG_NOSYSLOG
++ seq_printf(m, "rsbac_nosyslog is %i\n",
++ rsbac_nosyslog);
++#endif
++
++#ifdef CONFIG_RSBAC_SYSLOG_RATE
++ seq_printf(m, "rsbac_syslog_rate is %u\n",
++ rsbac_syslog_rate);
++#endif
++
++#ifdef CONFIG_RSBAC_FD_CACHE
++ if (!rsbac_fd_cache_disable)
++ seq_printf(m, "rsbac_fd_cache_ttl is %u\n",
++ rsbac_fd_cache_ttl);
++#endif
++#if defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)
++ seq_printf(m, "rsbac_list_check_interval is %u\n",
++ rsbac_list_check_interval);
++#endif
++
++#if defined(CONFIG_RSBAC_LOG_REMOTE)
++#if defined(CONFIG_RSBAC_LOG_REMOTE_TCP)
++ seq_printf(m, "rsbac_log_remote_addr (TCP) is %u.%u.%u.%u\n",
++ NIPQUAD(rsbac_log_remote_addr));
++#else
++ seq_printf(m, "rsbac_log_remote_addr (UDP) is %u.%u.%u.%u\n",
++ NIPQUAD(rsbac_log_remote_addr));
++#endif
++ seq_printf(m, "rsbac_log_remote_port is %u\n",
++ ntohs(rsbac_log_remote_port));
++#endif
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++ seq_printf(m, "rsbac_no_delay_init is %i\n",
++ rsbac_no_delay_init);
++ seq_printf(m, "rsbac_delayed_root is %02u:%02u\n",
++ RSBAC_MAJOR(rsbac_delayed_root), RSBAC_MINOR(rsbac_delayed_root));
++#endif
++
++#if defined(CONFIG_RSBAC_UM_EXCL)
++ seq_printf(m, "rsbac_um_no_excl is %i\n",
++ rsbac_um_no_excl);
++#endif
++
++#if defined(CONFIG_RSBAC_RC_LEARN)
++ seq_printf(m, "rsbac_rc_learn is %i\n",
++ rsbac_rc_learn);
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH)
++ seq_printf(m, "rsbac_auth_enable_login is %i\n",
++ rsbac_auth_enable_login);
++
++#if defined(CONFIG_RSBAC_AUTH_LEARN)
++ seq_printf(m, "rsbac_auth_learn is %i\n",
++ rsbac_auth_learn);
++#endif
++#endif
++
++#if defined(CONFIG_RSBAC_CAP_LEARN)
++ seq_printf(m, "rsbac_cap_learn is %i\n",
++ rsbac_cap_learn);
++#endif
++
++#if defined(CONFIG_RSBAC_ACL_LEARN)
++ seq_printf(m, "rsbac_acl_learn_fd is %i\n",
++ rsbac_acl_learn_fd);
++#endif
++
++ seq_printf(m, "rsbac_no_defaults is %i\n",
++ rsbac_no_defaults);
++#ifdef CONFIG_RSBAC_DEBUG
++ seq_printf(m, "rsbac_debug_write is %i\n",
++ rsbac_debug_write);
++ seq_printf(m, "rsbac_debug_stack is %i\n",
++ rsbac_debug_stack);
++ seq_printf(m, "rsbac_debug_lists is %i\n",
++ rsbac_debug_lists);
++ seq_printf(m, "rsbac_debug_ds is %i\n",
++ rsbac_debug_ds);
++ seq_printf(m, "rsbac_debug_aef is %i\n",
++ rsbac_debug_aef);
++ seq_printf(m, "rsbac_debug_no_write is %i\n",
++ rsbac_debug_no_write);
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++/* Boolean debug switch for REG */
++ seq_printf(m, "rsbac_debug_reg is %i\n",
++ rsbac_debug_reg);
++#endif
++
++#if defined(CONFIG_RSBAC_NET)
++/* Boolean debug switch for NET data structures */
++ seq_printf(m, "rsbac_debug_ds_net is %i\n",
++ rsbac_debug_ds_net);
++/* Boolean debug switch for NET syscalls / AEF */
++ seq_printf(m, "rsbac_debug_aef_net is %i\n",
++ rsbac_debug_aef_net);
++/* Boolean debug switch for NET decisions / ADF */
++ seq_printf(m, "rsbac_debug_adf_net is %i\n",
++ rsbac_debug_adf_net);
++#endif
++
++#if defined(CONFIG_RSBAC_MAC)
++/* Boolean debug switch for MAC data structures */
++ seq_printf(m, "rsbac_debug_ds_mac is %i\n",
++ rsbac_debug_ds_mac);
++/* Boolean debug switch for MAC syscalls / AEF */
++ seq_printf(m, "rsbac_debug_aef_mac is %i\n",
++ rsbac_debug_aef_mac);
++/* Boolean debug switch for MAC decisions / ADF */
++ seq_printf(m, "rsbac_debug_adf_mac is %i\n",
++ rsbac_debug_adf_mac);
++#endif
++
++#if defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_PM_MAINT)
++/* Boolean debug switch for PM data structures */
++ seq_printf(m, "rsbac_debug_ds_pm is %i\n",
++ rsbac_debug_ds_pm);
++/* Boolean debug switch for PM syscalls / AEF */
++ seq_printf(m, "rsbac_debug_aef_pm is %i\n",
++ rsbac_debug_aef_pm);
++/* Boolean debug switch for PM decisions / ADF */
++ seq_printf(m, "rsbac_debug_adf_pm is %i\n",
++ rsbac_debug_adf_pm);
++#endif
++
++#if defined(CONFIG_RSBAC_DAZ)
++/* Boolean debug switch for DAZ decisions / ADF */
++ seq_printf(m, "rsbac_debug_adf_daz is %i\n",
++ rsbac_debug_adf_daz);
++#endif
++
++#if defined(CONFIG_RSBAC_RC) || defined(CONFIG_RSBAC_RC_MAINT)
++/* Boolean debug switch for RC data structures */
++ seq_printf(m, "rsbac_debug_ds_rc is %i\n",
++ rsbac_debug_ds_rc);
++/* Boolean debug switch for RC syscalls / AEF */
++ seq_printf(m, "rsbac_debug_aef_rc is %i\n",
++ rsbac_debug_aef_rc);
++/* Boolean debug switch for RC decisions / ADF */
++ seq_printf(m, "rsbac_debug_adf_rc is %i\n",
++ rsbac_debug_adf_rc);
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH)
++/* Boolean debug switch for AUTH data structures */
++ seq_printf(m, "rsbac_debug_ds_auth is %i\n",
++ rsbac_debug_ds_auth);
++
++/* Boolean debug switch for AUTH syscalls / AEF */
++ seq_printf(m, "rsbac_debug_aef_auth is %i\n",
++ rsbac_debug_aef_auth);
++
++/* Boolean debug switch for AUTH decisions / ADF */
++ seq_printf(m, "rsbac_debug_adf_auth is %i\n",
++ rsbac_debug_adf_auth);
++#endif
++
++#if defined(CONFIG_RSBAC_ACL)
++/* Boolean debug switch for ACL data structures */
++ seq_printf(m, "rsbac_debug_ds_acl is %i\n",
++ rsbac_debug_ds_acl);
++
++/* Boolean debug switch for ACL syscalls / AEF */
++ seq_printf(m, "rsbac_debug_aef_acl is %i\n",
++ rsbac_debug_aef_acl);
++
++/* Boolean debug switch for ACL decisions / ADF */
++ seq_printf(m, "rsbac_debug_adf_acl is %i\n",
++ rsbac_debug_adf_acl);
++#endif
++
++#if defined(CONFIG_RSBAC_JAIL)
++/* Boolean debug switch for JAIL syscalls / AEF */
++ seq_printf(m, "rsbac_debug_aef_jail is %i\n",
++ rsbac_debug_aef_jail);
++/* Boolean debug switch for JAIL decisions / ADF */
++ seq_printf(m, "rsbac_debug_adf_jail is %i\n",
++ rsbac_debug_adf_jail);
++#endif
++#if defined(CONFIG_RSBAC_PAX)
++/* Boolean debug switch for PAX decisions / ADF */
++ seq_printf(m, "rsbac_debug_adf_pax is %i\n",
++ rsbac_debug_adf_pax);
++#endif
++#if defined(CONFIG_RSBAC_UM)
++/* Boolean debug switch for UM data structures */
++ seq_printf(m, "rsbac_debug_ds_um is %i\n",
++ rsbac_debug_ds_um);
++/* Boolean debug switch for UM syscalls / AEF */
++ seq_printf(m, "rsbac_debug_aef_um is %i\n",
++ rsbac_debug_aef_um);
++/* Boolean debug switch for UM decisions / ADF */
++ seq_printf(m, "rsbac_debug_adf_um is %i\n",
++ rsbac_debug_adf_um);
++#endif
++
++#if defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)
++ seq_printf(m, "rsbac_debug_auto is %i\n",
++ rsbac_debug_auto);
++#endif /* CONFIG_RSBAC_AUTO_WRITE > 0 */
++#endif /* DEBUG */
++
++#if defined(CONFIG_RSBAC_RMSG)
++ seq_printf(m, "rsbac_rmsg_maxentries is %u\n",
++ rsbac_rmsg_maxentries);
++ seq_printf(m, "%u messages in log buffer, %lu messages lost, sequence is %u\n",
++ log_list_head.count, log_list_head.lost, log_seq);
++#if defined(CONFIG_RSBAC_LOG_REMOTE)
++ seq_printf(m, "rsbac_log_remote_maxentries is %u\n",
++ rsbac_log_remote_maxentries);
++ seq_printf(m, "%u messages in remote log buffer, %lu messages lost\n",
++ remote_log_list_head.count, remote_log_list_head.lost);
++#endif
++#endif
++
++ return 0;
++}
++
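++/* /proc/rsbac_info/debug, write side: parse "debug <keyword> <value>"
++ * commands (a "rsbac" or "rsbac_debug" prefix is accepted as well) and
++ * update the matching runtime switch. */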
++static ssize_t debug_proc_write(struct file * file, const char __user * buf, size_t count, loff_t *ppos)
++{
++ ssize_t err;
++ char * k_buf;
++ char * p;
++ unsigned int debug_level;
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ enum rsbac_switch_target_t sw_target;
++#endif
++
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if(count > PROC_BLOCK_SIZE) {
++ return(-EOVERFLOW);
++ }
++ if(count < 10)
++ return -EINVAL;
++
++ if (!(k_buf = (char *) __get_free_page(GFP_KERNEL)))
++ return(-ENOMEM);
++ if(copy_from_user(k_buf, buf, count))
++ {
++ free_page((ulong) k_buf);
++ return -EFAULT;
++ }
++ k_buf[count] = '\0';
++
++ err = count;
++
++ if(!strncmp("debug", k_buf, 5) || !strncmp("rsbac", k_buf, 5))
++ {
++ p = k_buf + 6;
++ }
++ else
++ if(!strncmp("rsbac_debug", k_buf, 11))
++ {
++ p = k_buf + 12;
++ }
++ else
++ goto out;
++
++ if (!rsbac_is_initialized())
++ {
++ err = -ENOSYS;
++ goto out;
++ }
++
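++/* Each keyword handler below follows the same pattern: advance p past
++ * "<keyword> ", parse the value with simple_strtoul(), ask the ADF for
++ * permission where required, then update the switch and log the change. */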
++#ifdef CONFIG_RSBAC_SOFTMODE
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++/* Boolean switch for RSBAC individual soft mode */
++ /*
++ * Usage: echo "debug ind_softmode modname #N" > /proc/rsbac_info/debug
++ * to set rsbac_ind_softmode[module] to given value
++ */
++ if(!strncmp("ind_softmode", k_buf + 6, 12))
++ {
++ char tmp[20];
++
++ p += 13;
++
++ if( *p == '\0' )
++ goto out;
++
++ sw_target = get_switch_target_nr(p);
++ if(sw_target == SW_NONE)
++ goto out;
++ get_switch_target_name(tmp, sw_target);
++ p += strlen(tmp)+1;
++ if( *p == '\0' )
++ goto out;
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ if(debug_level && rsbac_softmode_prohibit)
++ {
++ rsbac_printk(KERN_WARNING
++ "debug_proc_write(): setting of softmode prohibited!\n");
++ err = -EPERM;
++ goto out;
++ }
++#if defined(CONFIG_RSBAC_DEBUG)
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "debug_proc_write(): calling ADF for switching\n");
++ }
++#endif
++ rsbac_target_id.dummy = 0;
++ rsbac_attribute_value.switch_target = sw_target;
++ if (!rsbac_adf_request(R_SWITCH_MODULE,
++ task_pid(current),
++ T_NONE,
++ rsbac_target_id,
++ A_switch_target,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_ind_softmode[%s] to %u\n",
++ tmp,
++ debug_level);
++ rsbac_ind_softmode[sw_target] = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): rejecting invalid ind_softmode value (should be 0 or 1)\n");
++ err = -EINVAL;
++ goto out;
++ }
++ }
++#endif
++
++/* Boolean switch for RSBAC soft mode prohibit */
++ /*
++ * Usage: echo "debug softmode_prohibit #N" > /proc/rsbac_info/debug
++ * to set rsbac_softmode to given value
++ */
++ if(!strncmp("softmode_prohibit", k_buf + 6, 17))
++ {
++ p += 18;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ if(!debug_level && rsbac_softmode_prohibit)
++ {
++ rsbac_printk(KERN_WARNING
++ "debug_proc_write(): setting of softmode prohibited!\n");
++ err = -EPERM;
++ goto out;
++ }
++#if defined(CONFIG_RSBAC_DEBUG)
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "debug_proc_write(): calling ADF for softmode\n");
++ }
++#endif
++ rsbac_target_id.dummy = 0;
++ rsbac_attribute_value.switch_target = SW_SOFTMODE;
++ if (!rsbac_adf_request(R_SWITCH_MODULE,
++ task_pid(current),
++ T_NONE,
++ rsbac_target_id,
++ A_switch_target,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_softmode_prohibit to %u\n",
++ debug_level);
++ rsbac_softmode_prohibit = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): rejecting invalid softmode_prohibit value (should be 0 or 1)\n");
++ err = -EINVAL;
++ goto out;
++ }
++ }
++/* Boolean switch for RSBAC soft mode */
++ /*
++ * Usage: echo "debug softmode #N" > /proc/rsbac_info/debug
++ * to set rsbac_softmode to given value
++ */
++ if(!strncmp("softmode", k_buf + 6, 8))
++ {
++ p += 9;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ if(debug_level && rsbac_softmode_prohibit)
++ {
++ rsbac_printk(KERN_WARNING
++ "debug_proc_write(): setting of softmode prohibited!\n");
++ err = -EPERM;
++ goto out;
++ }
++#if defined(CONFIG_RSBAC_DEBUG)
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "debug_proc_write(): calling ADF for softmode\n");
++ }
++#endif
++ rsbac_target_id.dummy = 0;
++ rsbac_attribute_value.switch_target = SW_SOFTMODE;
++ if (!rsbac_adf_request(R_SWITCH_MODULE,
++ task_pid(current),
++ T_NONE,
++ rsbac_target_id,
++ A_switch_target,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_softmode to %u\n",
++ debug_level);
++ rsbac_softmode = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): rejecting invalid softmode value (should be 0 or 1)\n");
++ err = -EINVAL;
++ goto out;
++ }
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_FULL
++/* Boolean switch for disabling Linux DAC */
++ /*
++ * Usage: echo "debug dac_disable #N" > /proc/rsbac_info/debug
++ * to set dac_disable to given value
++ */
++ if(!strncmp("dac_disable", k_buf + 6, 11))
++ {
++ p += 12;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++#if defined(CONFIG_RSBAC_DEBUG)
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "debug_proc_write(): calling ADF for dac_disable\n");
++ }
++#endif
++ rsbac_target_id.dummy = 0;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_PERMISSIONS_DATA,
++ task_pid(current),
++ T_NONE,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_dac_disable to %u\n",
++ debug_level);
++ rsbac_dac_disable = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): rejecting invalid dac_disabled value (should be 0 or 1)\n");
++ err = -EINVAL;
++ goto out;
++ }
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_FREEZE
++/* Boolean switch to enable freezing */
++ /*
++ * Usage: echo "debug freeze #N" > /proc/rsbac_info/debug
++ * to set freeze to given value
++ */
++ if(!strncmp("freeze", k_buf + 6, 6))
++ {
++ p += 7;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ if(!debug_level && rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "debug_proc_write(): RSBAC configuration frozen, no administration allowed!\n");
++ err = -EPERM;
++ goto out;
++ }
++
++#if defined(CONFIG_RSBAC_DEBUG)
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "debug_proc_write(): calling ADF for freeze\n");
++ }
++#endif
++ rsbac_target_id.dummy = 0;
++ rsbac_attribute_value.switch_target = SW_FREEZE;
++ if (!rsbac_adf_request(R_SWITCH_MODULE,
++ task_pid(current),
++ T_NONE,
++ rsbac_target_id,
++ A_switch_target,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_freeze to %u\n",
++ debug_level);
++ rsbac_freeze = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): rejecting invalid freeze value (should be 0 or 1)\n");
++ err = -EINVAL;
++ goto out;
++ }
++ }
++#endif
++
++/* Set list rcu rate limit */
++ /*
++ * Usage: echo "debug list_rcu_rate #n" > /proc/rsbac_info/debug
++ * to set rate limit to given value
++ */
++ if(!strncmp("list_rcu_rate", k_buf + 6, 13))
++ {
++ u_int tmp_rate;
++
++ p += 14;
++ if( *p == '\0' )
++ goto out;
++
++ tmp_rate = simple_strtoul(p, NULL, 0);
++ if (tmp_rate < 100)
++ tmp_rate = 100;
++ else
++ if (tmp_rate > 100000)
++ tmp_rate = 100000;
++
++#if defined(CONFIG_RSBAC_DEBUG)
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "debug_proc_write(): calling ADF for list_rcu_rate\n");
++ }
++#endif
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_list_rcu_rate to %u\n",
++ tmp_rate);
++ rsbac_list_rcu_rate = tmp_rate;
++ err = count;
++ goto out;
++ }
++
++#ifdef CONFIG_RSBAC_DAZ_CACHE
++/* Set DAZ cache ttl */
++ /*
++ * Usage: echo "debug daz_ttl #n" > /proc/rsbac_info/debug
++ * to set daz cache ttl to given value
++ */
++ if(!strncmp("daz_ttl", k_buf + 6, 7))
++ {
++ rsbac_time_t tmp_ttl;
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++#endif
++
++ p += 8;
++ if( *p == '\0' )
++ goto out;
++
++ tmp_ttl = simple_strtoul(p, NULL, 0);
++#if defined(CONFIG_RSBAC_DEBUG)
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "debug_proc_write(): calling ADF for daz_ttl\n");
++ }
++#endif
++#ifndef CONFIG_RSBAC_MAINT
++ /* Security Officer? */
++ i_tid.user = current_uid();
++ if (rsbac_get_attr(SW_DAZ,
++ T_USER,
++ i_tid,
++ A_daz_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "debug_proc_write(): rsbac_get_attr() returned error!\n");
++ return -EPERM;
++ }
++ /* if not sec_officer or admin, deny */
++ if (i_attr_val1.system_role != SR_security_officer)
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_DAZ]
++ #endif
++ )
++ #endif
++ return -EPERM;
++#endif
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_daz_ttl to %u\n",
++ tmp_ttl);
++ rsbac_daz_set_ttl(tmp_ttl);
++ err = count;
++ goto out;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_LOG_REMOTE)
++/* Set remote address for remote logging */
++ /*
++ * Usage: echo "debug log_remote_addr a.b.c.d" > /proc/rsbac_info/debug
++ * to set log_remote_addr to given value
++ */
++ if(!strncmp("log_remote_addr", k_buf + 6, 15))
++ {
++ __u32 tmp_addr;
++ char * tmp;
++
++ p += 16;
++ if( *p == '\0' )
++ goto out;
++
++ tmp=p;
++ while(*tmp)
++ {
++ if( (*tmp != '.')
++ && ( (*tmp < '0')
++ || (*tmp > '9')
++ )
++ )
++ {
++ *tmp = 0;
++ break;
++ }
++ tmp++;
++ }
++ err = rsbac_net_str_to_inet(p, &tmp_addr);
++ if(!err)
++ {
++#if defined(CONFIG_RSBAC_DEBUG)
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "debug_proc_write(): calling ADF for remote_log_addr\n");
++ }
++#endif
++ rsbac_target_id.scd = ST_rsbac_remote_log;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_log_remote_addr to %u.%u.%u.%u\n",
++ NIPQUAD(tmp_addr));
++ rsbac_log_remote_addr = tmp_addr;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ get_error_name(tmp, err);
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): converting remote socket address %s failed with error %s, exiting!\n",
++ p,
++ tmp);
++ rsbac_kfree(tmp);
++ }
++ err = -EINVAL;
++ goto out;
++ }
++ }
++/* Set remote port for remote logging */
++ /*
++ * Usage: echo "debug log_remote_port #n" > /proc/rsbac_info/debug
++ * to set log_remote_port to given value
++ */
++ if(!strncmp("log_remote_port", k_buf + 6, 15))
++ {
++ __u16 tmp_port;
++
++ p += 16;
++ if( *p == '\0' )
++ goto out;
++
++ tmp_port = simple_strtoul(p, NULL, 0);
++#if defined(CONFIG_RSBAC_DEBUG)
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "debug_proc_write(): calling ADF for remote_log_port\n");
++ }
++#endif
++ rsbac_target_id.scd = ST_rsbac_remote_log;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_log_remote_port to %u\n",
++ tmp_port);
++ rsbac_log_remote_port = htons(tmp_port);
++ err = count;
++ goto out;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_SYSLOG_RATE
++/* Set syslog rate limit */
++ /*
++ * Usage: echo "debug syslog_rate #n" > /proc/rsbac_info/debug
++ * to set rate limit to given value
++ */
++ if(!strncmp("syslog_rate", k_buf + 6, 11))
++ {
++ u_int tmp_rate;
++
++ p += 12;
++ if( *p == '\0' )
++ goto out;
++
++ tmp_rate = simple_strtoul(p, NULL, 0);
++#if defined(CONFIG_RSBAC_DEBUG)
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "debug_proc_write(): calling ADF for syslog_rate\n");
++ }
++#endif
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_syslog_rate to %u\n",
++ tmp_rate);
++ rsbac_syslog_rate = tmp_rate;
++ err = count;
++ goto out;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_FD_CACHE
++/* Set fd_cache_ttl */
++ /*
++ * Usage: echo "debug fd_cache_ttl #n" > /proc/rsbac_info/debug
++ * to set ttl to given value
++ */
++ if(!strncmp("fd_cache_ttl", k_buf + 6, 12))
++ {
++ u_int tmp_ttl;
++
++ p += 13;
++ if( *p == '\0' )
++ goto out;
++
++ tmp_ttl = simple_strtoul(p, NULL, 0);
++#if defined(CONFIG_RSBAC_DEBUG)
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "debug_proc_write(): calling ADF for fd_cache_ttl\n");
++ }
++#endif
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_fd_cache_ttl to %u\n",
++ tmp_ttl);
++ rsbac_fd_cache_ttl = tmp_ttl;
++ err = count;
++ goto out;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)
++/* Set rsbac_list_check_interval */
++ /*
++ * Usage: echo "debug list_check_interval #n" > /proc/rsbac_info/debug
++ * to set ttl to given value
++ */
++ if(!strncmp("list_check_interval", k_buf + 6, 19))
++ {
++ u_int tmp_ttl;
++
++ p += 20;
++ if( *p == '\0' )
++ goto out;
++
++ tmp_ttl = simple_strtoul(p, NULL, 0);
++#if defined(CONFIG_RSBAC_DEBUG)
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "debug_proc_write(): calling ADF for list_check_interval\n");
++ }
++#endif
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_list_check_interval to %u\n",
++ tmp_ttl);
++ rsbac_list_check_interval = tmp_ttl;
++ err = count;
++ goto out;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_RMSG_NOSYSLOG
++/* Boolean switch for disabling logging to syslog */
++ /*
++ * Usage: echo "debug nosyslog #N" > /proc/rsbac_info/debug
++ * to set rsbac_nosyslog to given value
++ */
++ if(!strncmp("nosyslog", k_buf + 6, 8))
++ {
++ p += 9;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++#if defined(CONFIG_RSBAC_DEBUG)
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "debug_proc_write(): calling ADF for nosyslog\n");
++ }
++#endif
++ rsbac_target_id.dummy = 0;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_SWITCH_LOG,
++ task_pid(current),
++ T_NONE,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_nosyslog to %u\n",
++ debug_level);
++ rsbac_nosyslog = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): rejecting invalid nosyslog value (should be 0 or 1)\n");
++ err = -EINVAL;
++ goto out;
++ }
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_RMSG
++/* Set rsbac log messages limit */
++ /*
++ * Usage: echo "debug rmsg_maxentries #n" > /proc/rsbac_info/debug
++ * to set limit to given value
++ */
++ if(!strncmp("rmsg_maxentries", k_buf + 6, 15))
++ {
++ u_int tmp_rate;
++
++ p += 16;
++ if( *p == '\0' )
++ goto out;
++
++ tmp_rate = simple_strtoul(p, NULL, 0);
++#if defined(CONFIG_RSBAC_DEBUG)
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "debug_proc_write(): calling ADF for rmsg_maxentries\n");
++ }
++#endif
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rmsg_maxentries to %u\n",
++ tmp_rate);
++ rsbac_rmsg_maxentries = tmp_rate;
++ err = count;
++ goto out;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_LOG_REMOTE
++/* Set rsbac remote log messages limit */
++ /*
++ * Usage: echo "debug log_remote_maxentries #n" > /proc/rsbac_info/debug
++ * to set limit to given value
++ */
++ if(!strncmp("log_remote_maxentries", k_buf + 6, 21))
++ {
++ u_int tmp_rate;
++
++ p += 22;
++ if( *p == '\0' )
++ goto out;
++
++ tmp_rate = simple_strtoul(p, NULL, 0);
++#if defined(CONFIG_RSBAC_DEBUG)
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "debug_proc_write(): calling ADF for log_remote_maxentries\n");
++ }
++#endif
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting log_remote_maxentries to %u\n",
++ tmp_rate);
++ rsbac_log_remote_maxentries = tmp_rate;
++ err = count;
++ goto out;
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_RC_LEARN)
++/* Boolean switch for RC learning mode */
++ /*
++ * Usage: echo "debug rc_learn #N" > /proc/rsbac_info/debug
++ * to set rsbac_rc_learn to given value
++ */
++ if(!strncmp("rc_learn", k_buf + 6, 8))
++ {
++ p += 9;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_target_id.dummy = 0;
++ rsbac_attribute_value.rc_learn = debug_level;
++ if (!rsbac_adf_request(R_MODIFY_ATTRIBUTE,
++ task_pid(current),
++ T_NONE,
++ rsbac_target_id,
++ A_rc_learn,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_rc_learn to %u\n",
++ debug_level);
++ rsbac_rc_learn = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH_LEARN)
++/* Boolean switch for AUTH learning mode */
++ /*
++ * Usage: echo "debug auth_learn #N" > /proc/rsbac_info/debug
++ * to set rsbac_auth_learn to given value
++ */
++ if(!strncmp("auth_learn", k_buf + 6, 10))
++ {
++ p += 11;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_target_id.dummy = 0;
++ rsbac_attribute_value.auth_learn = debug_level;
++ if (!rsbac_adf_request(R_MODIFY_ATTRIBUTE,
++ task_pid(current),
++ T_NONE,
++ rsbac_target_id,
++ A_auth_learn,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_auth_learn to %u\n",
++ debug_level);
++ rsbac_auth_learn = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_CAP_LEARN)
++/* Boolean switch for CAP learning mode */
++ /*
++ * Usage: echo "debug cap_learn #N" > /proc/rsbac_info/debug
++ * to set rsbac_cap_learn to given value
++ */
++ if(!strncmp("cap_learn", k_buf + 6, 9))
++ {
++ p += 10;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_target_id.dummy = 0;
++ rsbac_attribute_value.cap_learn = debug_level;
++ if (!rsbac_adf_request(R_MODIFY_ATTRIBUTE,
++ task_pid(current),
++ T_NONE,
++ rsbac_target_id,
++ A_cap_learn,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_cap_learn to %u\n",
++ debug_level);
++ rsbac_cap_learn = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_CAP_LOG_MISSING
++/* Boolean switch for CAP logging of missing caps */
++ /*
++ * Usage: echo "debug cap_log_missing #N" > /proc/rsbac_info/debug
++ * to set rsbac_cap_log_missing to given value
++ */
++ if(!strncmp("cap_log_missing", k_buf + 6, 15))
++ {
++ p += 16;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_cap_log_missing to %u\n",
++ debug_level);
++ rsbac_cap_log_missing = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_JAIL_LOG_MISSING
++/* Boolean switch for JAIL logging of missing caps */
++ /*
++ * Usage: echo "debug jail_log_missing #N" > /proc/rsbac_info/debug
++ * to set rsbac_jail_log_missing to given value
++ */
++ if(!strncmp("jail_log_missing", k_buf + 6, 16))
++ {
++ p += 17;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_jail_log_missing to %u\n",
++ debug_level);
++ rsbac_jail_log_missing = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++#endif
++
++
++#if defined(CONFIG_RSBAC_ACL_LEARN)
++/* Boolean switch for ACL FD learning mode */
++ /*
++ * Usage: echo "debug acl_learn_fd #N" > /proc/rsbac_info/debug
++ * to set rsbac_acl_learn_fd to given value
++ */
++ if(!strncmp("acl_learn_fd", k_buf + 6, 12))
++ {
++ p += 13;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ /* use default acls */
++ rsbac_target_id.file.device = RSBAC_ZERO_DEV;
++ rsbac_target_id.file.inode = 0;
++ rsbac_target_id.file.dentry_p = NULL;
++ rsbac_attribute_value.acl_learn = debug_level;
++
++ if (!rsbac_adf_request(R_MODIFY_ATTRIBUTE,
++ task_pid(current),
++ T_FILE,
++ rsbac_target_id,
++ A_acl_learn,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_acl_learn_fd to %u\n",
++ debug_level);
++ rsbac_acl_learn_fd = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_DEBUG)
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "debug_proc_write(): calling ADF\n");
++ }
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_SYSTEM_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out;
++ }
++
++#if defined(CONFIG_RSBAC_NET)
++/* Boolean debug switch for NET data structures */
++ /*
++ * Usage: echo "debug ds_net #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_ds_net to given value
++ */
++ if(!strncmp("ds_net", k_buf + 6, 6))
++ {
++ p += 7;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_ds_net to %u\n",
++ debug_level);
++ rsbac_debug_ds_net = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++/* Boolean debug switch for NET syscalls / AEF */
++ /*
++ * Usage: echo "debug aef_net #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_aef_net to given value
++ */
++ if(!strncmp("aef_net", k_buf + 6, 7))
++ {
++ p += 8;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_aef_net to %u\n",
++ debug_level);
++ rsbac_debug_aef_net = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++
++/* Boolean debug switch for NET decisions / ADF */
++ /*
++ * Usage: echo "debug adf_net #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_adf_net to given value
++ */
++ if(!strncmp("adf_net", k_buf + 6, 7))
++ {
++ p += 8;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_adf_net to %u\n",
++ debug_level);
++ rsbac_debug_adf_net = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_MAC)
++/* Boolean debug switch for MAC data structures */
++ /*
++ * Usage: echo "debug ds_mac #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_ds_mac to given value
++ */
++ if(!strncmp("ds_mac", k_buf + 6, 6))
++ {
++ p += 7;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_ds_mac to %u\n",
++ debug_level);
++ rsbac_debug_ds_mac = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++/* Boolean debug switch for MAC syscalls / AEF */
++ /*
++ * Usage: echo "debug aef_mac #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_aef_mac to given value
++ */
++ if(!strncmp("aef_mac", k_buf + 6, 7))
++ {
++ p += 8;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_aef_mac to %u\n",
++ debug_level);
++ rsbac_debug_aef_mac = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++
++/* Boolean debug switch for MAC decisions / ADF */
++ /*
++ * Usage: echo "debug adf_mac #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_adf_mac to given value
++ */
++ if(!strncmp("adf_mac", k_buf + 6, 7))
++ {
++ p += 8;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_adf_mac to %u\n",
++ debug_level);
++ rsbac_debug_adf_mac = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_PM) || defined(CONFIG_RSBAC_PM_MAINT)
++/* Boolean debug switch for PM data structures */
++ /*
++ * Usage: echo "debug ds_pm #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_ds_pm to given value
++ */
++ if(!strncmp("ds_pm", k_buf + 6, 5))
++ {
++ p += 6;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_ds_pm to %u\n",
++ debug_level);
++ rsbac_debug_ds_pm = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++/* Boolean debug switch for PM syscalls / AEF */
++ /*
++ * Usage: echo "debug aef_pm #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_aef_pm to given value
++ */
++ if(!strncmp("aef_pm", k_buf + 6, 6))
++ {
++ p += 7;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_aef_pm to %u\n",
++ debug_level);
++ rsbac_debug_aef_pm = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++
++/* Boolean debug switch for PM decisions / ADF */
++ /*
++ * Usage: echo "debug adf_pm #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_adf_pm to given value
++ */
++ if(!strncmp("adf_pm", k_buf + 6, 6))
++ {
++ p += 7;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_adf_pm to %u\n",
++ debug_level);
++ rsbac_debug_adf_pm = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_DAZ)
++/* Boolean debug switch for DAZ decisions / ADF */
++ /*
++ * Usage: echo "debug adf_daz #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_adf_daz to given value
++ */
++ if(!strncmp("adf_daz", k_buf + 6, 7))
++ {
++ p += 8;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_adf_daz to %u\n",
++ debug_level);
++ rsbac_debug_adf_daz = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_RC) || defined(CONFIG_RSBAC_RC_MAINT)
++/* Boolean debug switch for RC data structures */
++ /*
++ * Usage: echo "debug ds_rc #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_ds_rc to given value
++ */
++ if(!strncmp("ds_rc", k_buf + 6, 5))
++ {
++ p += 6;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_ds_rc to %u\n",
++ debug_level);
++ rsbac_debug_ds_rc = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++/* Boolean debug switch for RC syscalls / AEF */
++ /*
++ * Usage: echo "debug aef_rc #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_aef_rc to given value
++ */
++ if(!strncmp("aef_rc", k_buf + 6, 6))
++ {
++ p += 7;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_aef_rc to %u\n",
++ debug_level);
++ rsbac_debug_aef_rc = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++
++/* Boolean debug switch for RC decisions / ADF */
++ /*
++ * Usage: echo "debug adf_rc #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_adf_rc to given value
++ */
++ if(!strncmp("adf_rc", k_buf + 6, 6))
++ {
++ p += 7;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_adf_rc to %u\n",
++ debug_level);
++ rsbac_debug_adf_rc = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_AUTH)
++/* Boolean debug switch for AUTH data structures */
++ /*
++ * Usage: echo "debug ds_auth #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_ds_auth to given value
++ */
++ if(!strncmp("ds_auth", k_buf + 6, 7))
++ {
++ p += 8;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_ds_auth to %u\n",
++ debug_level);
++ rsbac_debug_ds_auth = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++/* Boolean debug switch for AUTH syscalls / AEF */
++ /*
++ * Usage: echo "debug aef_auth #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_aef_auth to given value
++ */
++ if(!strncmp("aef_auth", k_buf + 6, 8))
++ {
++ p += 9;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_aef_auth to %u\n",
++ debug_level);
++ rsbac_debug_aef_auth = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++
++/* Boolean debug switch for AUTH decisions / ADF */
++ /*
++ * Usage: echo "debug adf_auth #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_adf_auth to given value
++ */
++ if(!strncmp("adf_auth", k_buf + 6, 8))
++ {
++ p += 9;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_adf_auth to %u\n",
++ debug_level);
++ rsbac_debug_adf_auth = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++
++#endif
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++/* Boolean debug switch for REG */
++ /*
++ * Usage: echo "debug reg #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_reg to given value
++ */
++ if(!strncmp("reg", k_buf + 6, 3))
++ {
++ p += 4;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_reg to %u\n",
++ debug_level);
++ rsbac_debug_reg = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_ACL)
++/* Boolean debug switch for ACL data structures */
++ /*
++ * Usage: echo "debug ds_acl #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_ds_acl to given value
++ */
++ if(!strncmp("ds_acl", k_buf + 6, 6))
++ {
++ p += 7;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_ds_acl to %u\n",
++ debug_level);
++ rsbac_debug_ds_acl = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++/* Boolean debug switch for ACL syscalls / AEF */
++ /*
++ * Usage: echo "debug aef_acl #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_aef_acl to given value
++ */
++ if(!strncmp("aef_acl", k_buf + 6, 7))
++ {
++ p += 8;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_aef_acl to %u\n",
++ debug_level);
++ rsbac_debug_aef_acl = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++
++/* Boolean debug switch for ACL decisions / ADF */
++ /*
++ * Usage: echo "debug adf_acl #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_adf_acl to given value
++ */
++ if(!strncmp("adf_acl", k_buf + 6, 7))
++ {
++ p += 8;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_adf_acl to %u\n",
++ debug_level);
++ rsbac_debug_adf_acl = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_JAIL)
++/* Boolean debug switch for JAIL syscalls / AEF */
++ /*
++ * Usage: echo "debug aef_jail #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_aef_jail to given value
++ */
++ if(!strncmp("aef_jail", k_buf + 6, 8))
++ {
++ p += 9;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_aef_jail to %u\n",
++ debug_level);
++ rsbac_debug_aef_jail = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++
++/* Boolean debug switch for JAIL decisions / ADF */
++ /*
++ * Usage: echo "debug adf_jail #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_adf_jail to given value
++ */
++ if(!strncmp("adf_jail", k_buf + 6, 8))
++ {
++ p += 9;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_adf_jail to %u\n",
++ debug_level);
++ rsbac_debug_adf_jail = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_PAX)
++/* Boolean debug switch for PAX decisions / ADF */
++ /*
++ * Usage: echo "debug adf_pax #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_adf_pax to given value
++ */
++ if(!strncmp("adf_pax", k_buf + 6, 7))
++ {
++ p += 8;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_adf_pax to %u\n",
++ debug_level);
++ rsbac_debug_adf_pax = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++#endif
++
++#if defined(CONFIG_RSBAC_UM)
++/* Boolean debug switch for UM data structures */
++ /*
++ * Usage: echo "debug ds_um #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_ds_um to given value
++ */
++ if(!strncmp("ds_um", k_buf + 6, 5))
++ {
++ p += 6;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_ds_um to %u\n",
++ debug_level);
++ rsbac_debug_ds_um = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++/* Boolean debug switch for UM syscalls / AEF */
++ /*
++ * Usage: echo "debug aef_um #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_aef_um to given value
++ */
++ if(!strncmp("aef_um", k_buf + 6, 6))
++ {
++ p += 7;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_aef_um to %u\n",
++ debug_level);
++ rsbac_debug_aef_um = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++
++/* Boolean debug switch for UM decisions / ADF */
++ /*
++ * Usage: echo "debug adf_um #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_adf_um to given value
++ */
++ if(!strncmp("adf_um", k_buf + 6, 6))
++ {
++ p += 7;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_adf_um to %u\n",
++ debug_level);
++ rsbac_debug_adf_um = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++#endif
++
++ /*
++ * Usage: echo "debug ds #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_ds to given value
++ */
++ if(!strncmp("ds", k_buf + 6, 2))
++ {
++ p += 3;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_ds to %u\n",
++ debug_level);
++ rsbac_debug_ds = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++
++ /*
++ * Usage: echo "debug write #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_write to given value
++ */
++ if(!strncmp("write", k_buf + 6, 5))
++ {
++ p += 6;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_write to %u\n",
++ debug_level);
++ rsbac_debug_write = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++
++ /*
++ * Usage: echo "debug stack #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_stack to given value
++ */
++ if(!strncmp("stack", k_buf + 6, 5))
++ {
++ p += 6;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_stack to %u\n",
++ debug_level);
++ rsbac_debug_stack = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++
++ /*
++ * Usage: echo "debug lists #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_lists to given value
++ */
++ if(!strncmp("lists", k_buf + 6, 5))
++ {
++ p += 6;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_lists to %u\n",
++ debug_level);
++ rsbac_debug_lists = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++
++ /* Boolean debug switch for AEF */
++ /*
++ * Usage: echo "debug aef #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_aef to given value
++ */
++ if(!strncmp("aef", k_buf + 6, 3))
++ {
++ p += 4;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_aef to %u\n",
++ debug_level);
++ rsbac_debug_aef = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++
++/* Boolean debug switch for NO_WRITE */
++ /*
++ * Usage: echo "debug no_write #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_no_write to given value
++ */
++ if(!strncmp("no_write", k_buf + 6, 8))
++ {
++ p += 9;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_no_write to %u\n",
++ debug_level);
++ rsbac_debug_no_write = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++
++#if defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)
++ /*
++ * Usage: echo "debug auto #N" > /proc/rsbac_info/debug
++ * to set rsbac_debug_auto to given value
++ */
++ if(!strncmp("auto", k_buf + 6, 4))
++ {
++ p += 5;
++
++ if( *p == '\0' )
++ goto out;
++
++ debug_level = simple_strtoul(p, NULL, 0);
++ /* only accept 0 or 1 */
++ if(!debug_level || (debug_level == 1))
++ {
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): setting rsbac_debug_auto to %u\n",
++ debug_level);
++ rsbac_debug_auto = debug_level;
++ err = count;
++ goto out;
++ }
++ else
++ {
++ goto out_inv;
++ }
++ }
++#endif /* CONFIG_RSBAC_AUTO_WRITE > 0 */
++#endif /* DEBUG */
++
++out:
++ free_page((ulong) k_buf);
++ return(err);
++
++out_inv:
++ rsbac_printk(KERN_INFO
++ "debug_proc_write(): rejecting invalid debug level (should be 0 or 1)\n");
++ err = -EINVAL;
++ goto out;
++}
++
++static int debug_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, debug_proc_show, NULL);
++}
++
++static const struct file_operations debug_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = debug_proc_open,
++ .read = seq_read,
++ .write = debug_proc_write,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static struct proc_dir_entry *debug;
++#endif /* defined(CONFIG_RSBAC_PROC) */
++
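++/* Remote logging: rsbaclogd is a kernel thread that is woken by a timer
++ * (or synchronously with CONFIG_RSBAC_LOG_REMOTE_SYNC) and sends buffered
++ * log messages to rsbac_log_remote_addr:rsbac_log_remote_port via UDP, or
++ * via TCP with CONFIG_RSBAC_LOG_REMOTE_TCP. */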
++#if defined(CONFIG_RSBAC_LOG_REMOTE)
++
++#ifndef CONFIG_RSBAC_LOG_REMOTE_SYNC
++/* rsbac kernel timer for auto-write */
++static void wakeup_rsbaclogd(u_long dummy)
++ {
++ wake_up(&rsbaclogd_wait);
++ }
++#endif
++
++/* rsbac kernel daemon for remote logging */
++static int rsbaclogd(void * dummy)
++ {
++ struct task_struct *tsk = current;
++ int err;
++ int sock_fd;
++ struct rsbac_log_list_item_t * log_item;
++ struct sockaddr_in addr;
++ char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ mm_segment_t oldfs;
++
++ rsbac_printk(KERN_INFO "rsbaclogd(): Initializing.\n");
++
++#ifdef CONFIG_RSBAC_DEBUG
++ rsbac_printk(KERN_DEBUG "rsbaclogd(): Setting auto timer.\n");
++#endif
++#ifndef CONFIG_RSBAC_LOG_REMOTE_SYNC
++ init_timer(&rsbac_log_remote_timer);
++ rsbac_log_remote_timer.function = wakeup_rsbaclogd;
++ rsbac_log_remote_timer.data = 0;
++ rsbac_log_remote_timer.expires = jiffies + rsbac_log_remote_interval;
++ add_timer(&rsbac_log_remote_timer);
++#endif
++ interruptible_sleep_on(&rsbaclogd_wait);
++
++ /* create a socket */
++#ifndef CONFIG_RSBAC_LOG_REMOTE_TCP
++ sock_fd = sys_socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP);
++ if(sock_fd < 0)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbaclogd(): creating local log socket failed with error %s, exiting!\n",
++ get_error_name(tmp, sock_fd));
++ rsbaclogd_pid = 0;
++ return -RSBAC_EWRITEFAILED;
++ }
++ /* bind local address */
++ addr.sin_family = PF_INET;
++ addr.sin_port = htons(CONFIG_RSBAC_LOG_LOCAL_PORT);
++ err = rsbac_net_str_to_inet(CONFIG_RSBAC_LOG_LOCAL_ADDR,
++ &addr.sin_addr.s_addr);
++ if(err < 0)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbaclogd(): converting local socket address %s failed with error %s, exiting!\n",
++ CONFIG_RSBAC_LOG_LOCAL_ADDR,
++ get_error_name(tmp, err));
++ sys_close(sock_fd);
++ rsbaclogd_pid = 0;
++ return -RSBAC_EINVALIDVALUE;
++ }
++ /* change data segment - sys_bind reads address from user space */
++ oldfs = get_fs();
++ set_fs(KERNEL_DS);
++ err = sys_bind(sock_fd, (struct sockaddr *)&addr, sizeof(addr));
++ set_fs(oldfs);
++ if(err < 0)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbaclogd(): binding local socket address %u.%u.%u.%u:%u failed with error %s, exiting!\n",
++ NIPQUAD(addr.sin_addr.s_addr),
++ CONFIG_RSBAC_LOG_LOCAL_PORT,
++ get_error_name(tmp, err));
++ sys_close(sock_fd);
++ rsbaclogd_pid = 0;
++ return -RSBAC_EWRITEFAILED;
++ }
++#endif /* ifndef CONFIG_RSBAC_LOG_REMOTE_TCP */
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_stack)
++ {
++ unsigned long * n = (unsigned long *) (current+1);
++
++ while (!*n)
++ n++;
++ rsbac_printk(KERN_DEBUG "rsbaclogd: free stack: %lu\n",
++ (unsigned long) n - (unsigned long)(current+1));
++ }
++#endif
++ for(;;)
++ {
++ /* wait */
++#ifndef CONFIG_RSBAC_LOG_REMOTE_SYNC
++          /* set new timer (only if not woken up by rsbac_printk()) */
++ mod_timer(&rsbac_log_remote_timer, jiffies + rsbac_log_remote_interval);
++#endif
++ interruptible_sleep_on(&rsbaclogd_wait);
++#ifdef CONFIG_PM
++ if (try_to_freeze())
++ continue;
++ /* sleep */
++#endif
++
++ /* Unblock all signals. */
++ flush_signals(tsk);
++ spin_lock_irq(&tsk->sighand->siglock);
++ flush_signal_handlers(tsk, 1);
++ sigemptyset(&tsk->blocked);
++ recalc_sigpending();
++ spin_unlock_irq(&tsk->sighand->siglock);
++
++ /* Do nothing without remote address */
++ if(!rsbac_log_remote_addr || !rsbac_log_remote_port || !remote_log_list_head.head)
++ continue;
++
++
++#ifdef CONFIG_RSBAC_LOG_REMOTE_TCP
++ sock_fd = sys_socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
++ if(sock_fd < 0)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbaclogd(): creating local log socket failed with error %s, exiting!\n",
++ get_error_name(tmp, sock_fd));
++ continue;
++ }
++ /* bind local address */
++ addr.sin_family = PF_INET;
++ addr.sin_port = htons(CONFIG_RSBAC_LOG_LOCAL_PORT);
++ err = rsbac_net_str_to_inet(CONFIG_RSBAC_LOG_LOCAL_ADDR,
++ &addr.sin_addr.s_addr);
++ if(err < 0)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbaclogd(): converting local socket address %s failed with error %s, exiting!\n",
++ CONFIG_RSBAC_LOG_LOCAL_ADDR,
++ get_error_name(tmp, err));
++ sys_close(sock_fd);
++ continue;
++ }
++ /* change data segment - sys_bind reads address from user space */
++ oldfs = get_fs();
++ set_fs(KERNEL_DS);
++ err = sys_bind(sock_fd, (struct sockaddr *)&addr, sizeof(addr));
++ set_fs(oldfs);
++ if(err < 0)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbaclogd(): binding local socket address %u.%u.%u.%u:%u failed with error %s, exiting!\n",
++ NIPQUAD(addr.sin_addr.s_addr),
++ CONFIG_RSBAC_LOG_LOCAL_PORT,
++ get_error_name(tmp, err));
++ sys_close(sock_fd);
++ continue;
++ }
++ /* Target address might have changed */
++ addr.sin_family = PF_INET;
++ addr.sin_port = rsbac_log_remote_port;
++ addr.sin_addr.s_addr = rsbac_log_remote_addr;
++ /* connect to remote socket */
++ oldfs = get_fs();
++ set_fs(KERNEL_DS);
++ err = sys_connect(sock_fd,
++ (struct sockaddr *)&addr,
++ sizeof(addr));
++ set_fs(oldfs);
++ if(err < 0)
++ {
++ printk(KERN_WARNING
++ "rsbaclogd(): connecting to remote TCP address %u.%u.%u.%u:%u failed with error %s, exiting!\n",
++ NIPQUAD(addr.sin_addr.s_addr),
++ ntohs(addr.sin_port),
++ get_error_name(tmp, err));
++ sys_close(sock_fd);
++ continue;
++ }
++#else
++ /* Target address might have changed */
++ addr.sin_family = PF_INET;
++ addr.sin_port = rsbac_log_remote_port;
++ addr.sin_addr.s_addr = rsbac_log_remote_addr;
++#endif
++ while(remote_log_list_head.head)
++ {
++ spin_lock(&rsbac_log_remote_lock);
++ log_item = remote_log_list_head.head;
++ remote_log_list_head.head = log_item->next;
++ if(!remote_log_list_head.head)
++ remote_log_list_head.tail = NULL;
++ remote_log_list_head.count--;
++ spin_unlock(&rsbac_log_remote_lock);
++
++#ifdef CONFIG_RSBAC_LOG_REMOTE_TCP
++ oldfs = get_fs();
++ set_fs(KERNEL_DS);
++ err = sys_send(sock_fd,
++ log_item->buffer,
++ log_item->size,
++ 0);
++ set_fs(oldfs);
++#else
++ /* change data segment - sys_sendto reads data and address from user space */
++ oldfs = get_fs();
++ set_fs(KERNEL_DS);
++ err = sys_sendto(sock_fd,
++ log_item->buffer,
++ log_item->size,
++ MSG_DONTWAIT,
++ (struct sockaddr *)&addr,
++ sizeof(addr));
++ set_fs(oldfs);
++#endif
++ if( (err < log_item->size)
++// && (err != -EPERM)
++ )
++ {
++ if((err < 0) && (err != -EAGAIN))
++ printk(KERN_WARNING
++ "rsbaclogd(): sending to remote socket address %u.%u.%u.%u:%u failed with error %i!\n",
++ NIPQUAD(addr.sin_addr.s_addr),
++ ntohs(addr.sin_port),
++ err);
++ /* Restore log item to beginning of the list */
++ spin_lock(&rsbac_log_remote_lock);
++ log_item->next = remote_log_list_head.head;
++ remote_log_list_head.head = log_item;
++ if(!remote_log_list_head.tail)
++ remote_log_list_head.tail = log_item;
++ remote_log_list_head.count++;
++ spin_unlock(&rsbac_log_remote_lock);
++ break;
++ }
++ else {
++ kfree(log_item);
++ }
++ }
++#ifdef CONFIG_RSBAC_LOG_REMOTE_TCP
++ sys_close(sock_fd);
++#endif
++ }
++ return 0;
++ }
++#endif
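
In the UDP configuration above (CONFIG_RSBAC_LOG_REMOTE without CONFIG_RSBAC_LOG_REMOTE_TCP), rsbaclogd() sends each queued log buffer as a single datagram to rsbac_log_remote_addr:rsbac_log_remote_port; with the TCP option it connects and uses sys_send() instead. A minimal sketch of a matching receiver for the log host follows: the port number is a placeholder that must be replaced by the configured CONFIG_RSBAC_LOG_REMOTE_PORT, and the payload is treated as opaque bytes.

    /* rsbac_log_receiver.c - hypothetical receiver, not part of the RSBAC patch.
     * Binds a UDP port and dumps incoming remote log datagrams to stdout. */
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            struct sockaddr_in addr;
            char buf[8192];
            ssize_t len;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0) {
                    perror("socket");
                    return 1;
            }
            memset(&addr, 0, sizeof(addr));
            addr.sin_family = AF_INET;
            addr.sin_addr.s_addr = htonl(INADDR_ANY);
            addr.sin_port = htons(1040);    /* assumption: replace with CONFIG_RSBAC_LOG_REMOTE_PORT */
            if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                    perror("bind");
                    return 1;
            }
            while ((len = recv(fd, buf, sizeof(buf), 0)) > 0)
                    fwrite(buf, 1, (size_t) len, stdout);
            close(fd);
            return 0;
    }
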
++
++static int ll_conv(
++ void * old_desc,
++ void * old_data,
++ void * new_desc,
++ void * new_data)
++ {
++ rsbac_log_entry_t * new_aci = new_data;
++ rsbac_old_log_entry_t * old_aci = old_data;
++ int i;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_adf_request_int_t));
++ for(i=0; i < T_NONE - 1; i++)
++ (*new_aci)[i] = (*old_aci)[i];
++ (*new_aci)[T_NONE - 1] = LL_denied;
++ (*new_aci)[T_NONE] = (*old_aci)[T_NONE - 1];
++ return 0;
++ }
++
++static int ll_old_conv(
++ void * old_desc,
++ void * old_data,
++ void * new_desc,
++ void * new_data)
++ {
++ rsbac_log_entry_t * new_aci = new_data;
++ rsbac_old_log_entry_t * old_aci = old_data;
++ int i;
++
++ memcpy(new_desc, old_desc, sizeof(rsbac_adf_request_int_t));
++ for(i=0; i < T_NONE - 2; i++)
++ (*new_aci)[i] = (*old_aci)[i];
++ (*new_aci)[T_NONE - 1] = LL_denied;
++ (*new_aci)[T_NONE - 2] = LL_denied;
++ (*new_aci)[T_NONE] = (*old_aci)[T_NONE - 1];
++ return 0;
++ }
++
++rsbac_list_conv_function_t * ll_get_conv(rsbac_version_t old_version)
++ {
++ switch(old_version)
++ {
++ case RSBAC_LOG_LEVEL_OLD_VERSION:
++ return ll_conv;
++ case RSBAC_LOG_LEVEL_OLD_OLD_VERSION:
++ return ll_old_conv;
++ default:
++ return NULL;
++ }
++ }
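
The two converters above upgrade persistently stored log level entries from older list versions: each newer rsbac_log_entry_t has grown by one or two target slots (ll_conv vs. ll_old_conv) inserted before the final NONE entry, so the unchanged slots are copied in place, the newly inserted slots default to LL_denied, and the old NONE value moves to the new NONE index. A small standalone sketch of the same copy/default/move pattern (array names and sizes here are illustrative, not RSBAC's):

    /* grow_demo.c - illustrative only; mirrors the pattern of ll_conv() when
     * one new slot is inserted before the terminal NONE entry of an
     * enum-indexed array. */
    #include <stdio.h>

    #define OLD_NONE 2              /* old array layout: [a, b, NONE] */
    #define NEW_NONE 3              /* new array layout: [a, b, new_slot, NONE] */
    #define DEFAULT_VALUE 1         /* stands in for LL_denied */

    int main(void)
    {
            int old_arr[OLD_NONE + 1] = { 7, 8, 9 };        /* 9 plays the old NONE slot */
            int new_arr[NEW_NONE + 1];
            int i;

            for (i = 0; i < NEW_NONE - 1; i++)      /* copy the unchanged slots */
                    new_arr[i] = old_arr[i];
            new_arr[NEW_NONE - 1] = DEFAULT_VALUE;  /* newly inserted slot gets the default */
            new_arr[NEW_NONE] = old_arr[OLD_NONE];  /* old NONE value keeps its meaning */

            for (i = 0; i <= NEW_NONE; i++)
                    printf("new_arr[%d] = %d\n", i, new_arr[i]);
            return 0;
    }
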
++
++
++/********************************/
++/* Init */
++/********************************/
++
++#ifdef CONFIG_RSBAC_INIT_DELAY
++inline void rsbac_init_debug(void)
++#else
++inline void __init rsbac_init_debug(void)
++#endif
++ {
++ int i;
++#if defined(CONFIG_RSBAC_LOG_REMOTE)
++ struct task_struct * rsbaclogd_thread;
++#endif
++
++ if (!debug_initialized)
++ {
++ struct rsbac_list_info_t * info_p;
++ int tmperr;
++ rsbac_enum_t * def_data_p;
++
++ rsbac_printk(KERN_INFO "rsbac_init_debug(): Initializing\n");
++ info_p = rsbac_kmalloc(sizeof(*info_p));
++ if(!info_p)
++ {
++ memset(rsbac_log_levels, LL_denied, sizeof(rsbac_log_levels));
++ return;
++ }
++ def_data_p = rsbac_kmalloc(sizeof(rsbac_log_entry_t));
++ if(!def_data_p)
++ {
++ memset(rsbac_log_levels, LL_denied, sizeof(rsbac_log_levels));
++ rsbac_kfree(info_p);
++ return;
++ }
++ /* register log_levels list */
++ for(i=0; i<=T_NONE; i++)
++ def_data_p[i] = LL_denied;
++ info_p->version = RSBAC_LOG_LEVEL_VERSION;
++ info_p->key = RSBAC_LOG_LEVEL_KEY;
++ info_p->desc_size = sizeof(rsbac_adf_request_int_t);
++ info_p->data_size = sizeof(rsbac_log_entry_t);
++ info_p->max_age = 0;
++ tmperr = rsbac_list_register(RSBAC_LIST_VERSION,
++ &log_levels_handle,
++ info_p,
++ RSBAC_LIST_PERSIST | RSBAC_LIST_DEF_DATA,
++ NULL,
++ ll_get_conv,
++ def_data_p,
++ RSBAC_LOG_LEVEL_LIST_NAME,
++ RSBAC_AUTO_DEV);
++ rsbac_kfree(info_p);
++ rsbac_kfree(def_data_p);
++ if(tmperr)
++ {
++ char * tmp;
++
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if(tmp)
++ {
++ rsbac_printk(KERN_WARNING
++ "rsbac_init_debug(): registering log levels list ll failed with error %s!\n",
++ get_error_name(tmp, tmperr));
++ rsbac_kfree(tmp);
++ }
++ memset(rsbac_log_levels, LL_denied, sizeof(rsbac_log_levels));
++ }
++ else
++ {
++ rsbac_adf_request_int_t req;
++
++ for(req = 0; req < R_NONE; req++)
++ rsbac_list_get_data(log_levels_handle, &req, rsbac_log_levels[req]);
++ }
++
++ #if defined(CONFIG_RSBAC_PROC)
++ log_levels = proc_create("log_levels", S_IFREG | S_IRUGO | S_IWUGO, proc_rsbac_root_p, &log_levels_proc_fops);
++
++ debug = proc_create("debug", S_IFREG | S_IRUGO | S_IWUGO, proc_rsbac_root_p, &debug_proc_fops);
++
++ #if defined(CONFIG_RSBAC_RMSG)
++ rmsg = proc_create("rmsg", S_IFREG | S_IRUGO, proc_rsbac_root_p, &rmsg_proc_fops);
++ #endif
++ #endif
++
++ #if defined(CONFIG_RSBAC_LOG_REMOTE)
++          /* Start rsbac remote logging thread */
++ if(!rsbac_log_remote_port)
++ rsbac_log_remote_port = htons(CONFIG_RSBAC_LOG_REMOTE_PORT);
++ tmperr = rsbac_net_str_to_inet(rsbac_log_remote_addr_string,
++ &rsbac_log_remote_addr);
++ if(tmperr < 0)
++ {
++ char * tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if(tmp)
++ {
++ get_error_name(tmp, tmperr);
++ rsbac_printk(KERN_WARNING
++ "rsbac_init_debug(): converting remote socket address %s failed with error %s, exiting!\n",
++ rsbac_log_remote_addr_string,
++ tmp);
++ rsbac_log_remote_addr = 0;
++ rsbac_kfree(tmp);
++ }
++ }
++ rsbaclogd_thread = kthread_create(rsbaclogd, NULL, "rsbaclogd");
++ wake_up_process(rsbaclogd_thread);
++ rsbac_printk(KERN_INFO "rsbac_init_debug(): Started rsbaclogd thread with pid %u\n",
++ rsbaclogd_pid);
++ #endif
++
++ #ifdef CONFIG_RSBAC_SYSLOG_RATE
++ init_timer(&rsbac_syslog_rate_timer);
++ rsbac_syslog_rate_timer.function = syslog_rate_reset;
++ rsbac_syslog_rate_timer.data = 0;
++ rsbac_syslog_rate_timer.expires = jiffies + HZ;
++ add_timer(&rsbac_syslog_rate_timer);
++ #endif
++
++ debug_initialized = TRUE;
++ }
++
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if(rsbac_softmode)
++ rsbac_printk(KERN_DEBUG "rsbac_softmode is set\n");
++ if(rsbac_softmode_prohibit)
++ rsbac_printk(KERN_DEBUG "rsbac_softmode_prohibit is set\n");
++ #endif
++ #ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ rsbac_printk(KERN_DEBUG "rsbac_freeze is set\n");
++ #endif
++ if(rsbac_list_recover)
++ rsbac_printk(KERN_DEBUG "rsbac_list_recover is set\n");
++ #if defined(CONFIG_RSBAC_UM_EXCL)
++ if(rsbac_um_no_excl)
++ rsbac_printk(KERN_DEBUG "rsbac_um_no_excl is set\n");
++ #endif
++ #if defined(CONFIG_RSBAC_DAZ_CACHE)
++ rsbac_printk(KERN_DEBUG "rsbac_daz_ttl is %u\n",
++ rsbac_daz_get_ttl());
++ #endif
++ #if defined(CONFIG_RSBAC_RC_LEARN)
++ if(rsbac_rc_learn)
++ rsbac_printk(KERN_DEBUG "rsbac_rc_learn is set\n");
++ #endif
++ #if defined(CONFIG_RSBAC_AUTH_LEARN)
++ if(rsbac_auth_learn)
++ rsbac_printk(KERN_DEBUG "rsbac_auth_learn is set\n");
++ #endif
++ #if defined(CONFIG_RSBAC_CAP_LEARN)
++ if(rsbac_cap_learn)
++ rsbac_printk(KERN_DEBUG "rsbac_cap_learn is set\n");
++ #endif
++ #if defined(CONFIG_RSBAC_ACL_LEARN)
++ if(rsbac_acl_learn_fd)
++ rsbac_printk(KERN_DEBUG "rsbac_acl_learn_fd is set\n");
++ #endif
++ #ifdef CONFIG_RSBAC_CAP_PROC_HIDE
++ if(rsbac_cap_process_hiding)
++ rsbac_printk(KERN_DEBUG "rsbac_cap_process_hiding is set\n");
++ #endif
++ #ifdef CONFIG_RSBAC_CAP_LOG_MISSING
++ if(rsbac_cap_log_missing)
++ rsbac_printk(KERN_DEBUG "rsbac_cap_log_missing is set\n");
++ #endif
++ #ifdef CONFIG_RSBAC_JAIL_LOG_MISSING
++ if(rsbac_jail_log_missing)
++ rsbac_printk(KERN_DEBUG "rsbac_jail_log_missing is set\n");
++ #endif
++ #ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_FULL
++ if(rsbac_dac_disable)
++ rsbac_printk(KERN_DEBUG "rsbac_dac_disable is set\n");
++ #endif
++ #ifdef CONFIG_RSBAC_RMSG_NOSYSLOG
++ if(rsbac_nosyslog)
++ rsbac_printk(KERN_DEBUG "rsbac_nosyslog is set\n");
++ #endif
++ #ifdef CONFIG_RSBAC_SYSLOG_RATE
++ if(rsbac_syslog_rate != CONFIG_RSBAC_SYSLOG_RATE_DEF)
++ rsbac_printk(KERN_DEBUG "rsbac_syslog_rate is %u\n",
++ rsbac_syslog_rate);
++ #endif
++#ifdef CONFIG_RSBAC_FD_CACHE
++ if(rsbac_fd_cache_disable) {
++ rsbac_printk(KERN_DEBUG "rsbac_fd_cache_disable is %u\n",
++ rsbac_fd_cache_disable);
++ } else {
++ if(rsbac_fd_cache_ttl != CONFIG_RSBAC_FD_CACHE_TTL)
++ rsbac_printk(KERN_DEBUG "rsbac_fd_cache_ttl is %u\n",
++ rsbac_fd_cache_ttl);
++ }
++#endif
++#if defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)
++ if(rsbac_list_check_interval != CONFIG_RSBAC_LIST_CHECK_INTERVAL)
++ rsbac_printk(KERN_DEBUG "rsbac_list_check_interval is %u\n",
++ rsbac_list_check_interval);
++#endif
++ #ifdef CONFIG_RSBAC_INIT_DELAY
++ if(rsbac_no_delay_init)
++ rsbac_printk(KERN_DEBUG "rsbac_no_delay_init is set\n");
++ if(rsbac_delayed_root_str[0])
++ rsbac_printk(KERN_DEBUG "rsbac_delayed_root is %s\n",
++ rsbac_delayed_root_str);
++ #endif
++ if(rsbac_no_defaults)
++ rsbac_printk(KERN_DEBUG "rsbac_no_defaults is set\n");
++
++#if defined(CONFIG_RSBAC_DEBUG)
++ if(rsbac_debug_ds)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_ds is set\n");
++ if(rsbac_debug_write)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_write is set\n");
++ if(rsbac_debug_no_write)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_no_write is set\n");
++ if(rsbac_debug_stack)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_stack is set\n");
++ if(rsbac_debug_lists)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_lists is set\n");
++ if(rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_aef is set\n");
++ if(rsbac_debug_adf_default != 1)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_adf_default is set to %i\n",
++ rsbac_debug_adf_default);
++
++ #if defined(CONFIG_RSBAC_REG)
++ if(rsbac_debug_reg)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_reg is set\n");
++ #endif
++
++ #if defined(CONFIG_RSBAC_NET)
++ if(rsbac_debug_ds_net)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_ds_net is set\n");
++ if(rsbac_debug_aef_net)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_aef_net is set\n");
++ if(rsbac_debug_adf_net)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_adf_net is set\n");
++ #endif
++
++ #if defined(CONFIG_RSBAC_MAC)
++ if(rsbac_debug_ds_mac)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_ds_mac is set\n");
++ if(rsbac_debug_aef_mac)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_aef_mac is set\n");
++ if(rsbac_debug_adf_mac)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_adf_mac is set\n");
++ #endif
++
++ #if defined(CONFIG_RSBAC_PM)
++ if(rsbac_debug_ds_pm)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_ds_pm is set\n");
++ if(rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_aef_pm is set\n");
++ if(rsbac_debug_adf_pm)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_adf_pm is set\n");
++ #endif
++
++ #if defined(CONFIG_RSBAC_DAZ)
++ if(rsbac_debug_adf_daz)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_adf_daz is set\n");
++ #endif
++
++ #if defined(CONFIG_RSBAC_RC)
++ if(rsbac_debug_ds_rc)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_ds_rc is set\n");
++ if(rsbac_debug_aef_rc)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_aef_rc is set\n");
++ if(rsbac_debug_adf_rc)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_adf_rc is set\n");
++ #endif
++
++ #if defined(CONFIG_RSBAC_AUTH)
++ if(rsbac_debug_ds_auth)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_ds_auth is set\n");
++ if(rsbac_debug_aef_auth)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_aef_auth is set\n");
++ if(rsbac_debug_adf_auth)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_adf_auth is set\n");
++ #endif
++
++ #if defined(CONFIG_RSBAC_ACL)
++ if(rsbac_debug_ds_acl)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_ds_acl is set\n");
++ if(rsbac_debug_aef_acl)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_aef_acl is set\n");
++ if(rsbac_debug_adf_acl)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_adf_acl is set\n");
++ #endif
++
++ #if defined(CONFIG_RSBAC_JAIL)
++ if(rsbac_debug_aef_jail)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_aef_jail is set\n");
++ if(rsbac_debug_adf_jail)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_adf_jail is set\n");
++ #endif
++
++ #if defined(CONFIG_RSBAC_PAX)
++ if(rsbac_debug_adf_pax)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_adf_pax is set\n");
++ #endif
++
++ #if defined(CONFIG_RSBAC_UM)
++ if(rsbac_debug_ds_um)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_ds_um is set\n");
++ if(rsbac_debug_aef_um)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_aef_um is set\n");
++ if(rsbac_debug_adf_um)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_adf_um is set\n");
++ #endif
++
++ #if defined(CONFIG_RSBAC_AUTO_WRITE) && (CONFIG_RSBAC_AUTO_WRITE > 0)
++ if(rsbac_debug_auto)
++ rsbac_printk(KERN_DEBUG "rsbac_debug_auto is set\n");
++ #endif
++#endif /* DEBUG */
++
++ }
++
+diff --git a/rsbac/help/getname.c b/rsbac/help/getname.c
+new file mode 100644
+index 0000000..9dbfd88
+--- /dev/null
++++ b/rsbac/help/getname.c
+@@ -0,0 +1,1834 @@
++/************************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2009: */
++/* Amon Ott <ao@rsbac.org> */
++/* Helper functions for all parts */
++/* Last modified: 05/Oct/2009 */
++/************************************* */
++
++#include <rsbac/types.h>
++#include <rsbac/getname.h>
++#include <rsbac/helpers.h>
++#include <rsbac/error.h>
++#include <rsbac/pax_getname.h>
++
++#ifdef __KERNEL__
++#include <linux/string.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/network.h>
++#include <rsbac/net_getname.h>
++#else
++#include <string.h>
++#include <stdio.h>
++#include <errno.h>
++#endif
++
++static char request_list[R_NONE + 1][24] = {
++ "ADD_TO_KERNEL",
++ "ALTER",
++ "APPEND_OPEN",
++ "CHANGE_GROUP",
++ "CHANGE_OWNER",
++ "CHDIR",
++ "CLONE",
++ "CLOSE",
++ "CREATE",
++ "DELETE",
++ "EXECUTE",
++ "GET_PERMISSIONS_DATA",
++ "GET_STATUS_DATA",
++ "LINK_HARD",
++ "MODIFY_ACCESS_DATA",
++ "MODIFY_ATTRIBUTE",
++ "MODIFY_PERMISSIONS_DATA",
++ "MODIFY_SYSTEM_DATA",
++ "MOUNT",
++ "READ",
++ "READ_ATTRIBUTE",
++ "READ_WRITE_OPEN",
++ "READ_OPEN",
++ "REMOVE_FROM_KERNEL",
++ "RENAME",
++ "SEARCH",
++ "SEND_SIGNAL",
++ "SHUTDOWN",
++ "SWITCH_LOG",
++ "SWITCH_MODULE",
++ "TERMINATE",
++ "TRACE",
++ "TRUNCATE",
++ "UMOUNT",
++ "WRITE",
++ "WRITE_OPEN",
++ "MAP_EXEC",
++ "BIND",
++ "LISTEN",
++ "ACCEPT",
++ "CONNECT",
++ "SEND",
++ "RECEIVE",
++ "NET_SHUTDOWN",
++ "CHANGE_DAC_EFF_OWNER",
++ "CHANGE_DAC_FS_OWNER",
++ "CHANGE_DAC_EFF_GROUP",
++ "CHANGE_DAC_FS_GROUP",
++ "IOCTL",
++ "LOCK",
++ "AUTHENTICATE",
++ "NONE"
++};
++
++static char result_list[UNDEFINED + 1][12] = {
++ "NOT_GRANTED",
++ "GRANTED",
++ "DO_NOT_CARE",
++ "UNDEFINED"
++};
++
++static rsbac_switch_target_int_t attr_mod_list[A_none + 1] = {
++ SW_GEN, /* pseudo */
++ SW_MAC, /* security_level */
++ SW_MAC, /* initial_security_level */
++ SW_MAC, /* local_sec_level */
++ SW_MAC, /* remote_sec_level */
++ SW_MAC, /* min_security_level */
++ SW_MAC, /* mac_categories */
++ SW_MAC, /* mac_initial_categories */
++ SW_MAC, /* local_mac_categories */
++ SW_MAC, /* remote_mac_categories */
++ SW_MAC, /* mac_min_categories */
++ SW_MAC, /* mac_user_flags */
++ SW_MAC, /* mac_process_flags */
++ SW_MAC, /* mac_file_flags */
++ SW_NONE, /* system_role */
++ SW_MAC, /* mac_role */
++ SW_DAZ, /* daz_role */
++ SW_FF, /* ff_role */
++ SW_AUTH, /* auth_role */
++ SW_CAP, /* cap_role */
++ SW_JAIL, /* jail_role */
++ SW_PAX, /* pax_role */
++ SW_MAC, /* current_sec_level */
++ SW_MAC, /* mac_curr_categories */
++ SW_MAC, /* min_write_open */
++ SW_MAC, /* min_write_categories */
++ SW_MAC, /* max_read_open */
++ SW_MAC, /* max_read_categories */
++ SW_MAC, /* mac_auto */
++ SW_MAC, /* mac_check */
++ SW_MAC, /* mac_prop_trusted */
++ SW_PM, /* pm_role */
++ SW_PM, /* pm_process_type */
++ SW_PM, /* pm_current_task */
++ SW_PM, /* pm_object_class */
++ SW_PM, /* local_pm_object_class */
++ SW_PM, /* remote_pm_object_class */
++ SW_PM, /* pm_ipc_purpose */
++ SW_PM, /* local_pm_ipc_purpose */
++ SW_PM, /* remote_pm_ipc_purpose */
++ SW_PM, /* pm_object_type */
++ SW_PM, /* local_pm_object_type */
++ SW_PM, /* remote_pm_object_type */
++ SW_PM, /* pm_program_type */
++ SW_PM, /* pm_tp */
++ SW_PM, /* pm_task_set */
++ SW_DAZ, /* daz_scanned */
++ SW_DAZ, /* daz_scanner */
++ SW_FF, /* ff_flags */
++ SW_RC, /* rc_type */
++ SW_RC, /* rc_select_type */
++ SW_RC, /* local_rc_type */
++ SW_RC, /* remote_rc_type */
++ SW_RC, /* rc_type_fd */
++ SW_RC, /* rc_type_nt */
++ SW_RC, /* rc_force_role */
++ SW_RC, /* rc_initial_role */
++ SW_RC, /* rc_role */
++ SW_RC, /* rc_def_role */
++ SW_AUTH, /* auth_may_setuid */
++ SW_AUTH, /* auth_may_set_cap */
++ SW_AUTH, /* auth_learn */
++ SW_CAP, /* min_caps */
++ SW_CAP, /* max_caps */
++ SW_CAP, /* max_caps_user */
++ SW_CAP, /* max_caps_program */
++ SW_JAIL, /* jail_id */
++ SW_JAIL, /* jail_parent */
++ SW_JAIL, /* jail_ip */
++ SW_JAIL, /* jail_flags */
++ SW_JAIL, /* jail_max_caps */
++ SW_JAIL, /* jail_scd_get */
++ SW_JAIL, /* jail_scd_modify */
++ SW_PAX, /* pax_flags */
++ SW_RES, /* res_role */
++ SW_RES, /* res_min */
++ SW_RES, /* res_max */
++ SW_GEN, /* log_array_low */
++ SW_GEN, /* local_log_array_low */
++ SW_GEN, /* remote_log_array_low */
++ SW_GEN, /* log_array_high */
++ SW_GEN, /* local_log_array_high */
++ SW_GEN, /* remote_log_array_high */
++ SW_GEN, /* log_program_based */
++ SW_GEN, /* log_user_based */
++ SW_GEN, /* symlink_add_remote_ip */
++ SW_GEN, /* symlink_add_uid */
++ SW_GEN, /* symlink_add_mac_level */
++ SW_GEN, /* symlink_add_rc_role */
++ SW_GEN, /* linux_dac_disable */
++ SW_CAP, /* cap_process_hiding */
++ SW_GEN, /* fake_root_uid */
++ SW_GEN, /* audit_uid */
++ SW_GEN, /* auid_exempt */
++ SW_AUTH, /* auth_last_auth */
++ SW_GEN, /* remote_ip */
++ SW_CAP, /* cap_ld_env */
++ SW_DAZ, /* daz_do_scan */
++ SW_GEN, /* vset */
++#ifdef __KERNEL__
++ /* adf-request helpers */
++ SW_NONE, /* group */
++ SW_NONE, /* signal */
++ SW_NONE, /* mode */
++ SW_NONE, /* nlink */
++ SW_NONE, /* switch_target */
++ SW_NONE, /* mod_name */
++ SW_NONE, /* request */
++ SW_NONE, /* trace_request */
++ SW_NONE, /* auth_add_f_cap */
++ SW_NONE, /* auth_remove_f_cap */
++ SW_NONE, /* auth_get_caplist */
++ SW_NONE, /* prot_bits */
++ SW_NONE, /* internal */
++ SW_NONE, /* create_data */
++ SW_NONE, /* new_object */
++ SW_NONE, /* rlimit */
++ SW_NONE, /* new_dir_dentry_p */
++ SW_NONE, /* auth_program_file */
++ SW_NONE, /* auth_start_uid */
++ SW_NONE, /* auth_start_euid */
++ SW_NONE, /* auth_start_gid */
++ SW_NONE, /* auth_start_egid */
++ SW_NONE, /* acl_learn */
++ SW_NONE, /* priority */
++ SW_NONE, /* pgid */
++ SW_NONE, /* kernel_thread */
++ SW_NONE, /* open_flag */
++ SW_NONE, /* reboot_cmd */
++ SW_NONE, /* setsockopt_level */
++ SW_NONE, /* ioctl_cmd */
++ SW_NONE, /* f_mode */
++ SW_NONE, /* process */
++ SW_NONE, /* sock_type */
++ SW_NONE, /* pagenr */
++#endif
++ SW_NONE /* none */
++};
++
++static char attribute_list[A_none + 1][23] = {
++ "pseudo",
++ "security_level",
++ "initial_security_level",
++ "local_sec_level",
++ "remote_sec_level",
++ "min_security_level",
++ "mac_categories",
++ "mac_initial_categories",
++ "local_mac_categories",
++ "remote_mac_categories",
++ "mac_min_categories",
++ "mac_user_flags",
++ "mac_process_flags",
++ "mac_file_flags",
++ "system_role",
++ "mac_role",
++ "daz_role",
++ "ff_role",
++ "auth_role",
++ "cap_role",
++ "jail_role",
++ "pax_role",
++ "current_sec_level",
++ "mac_curr_categories",
++ "min_write_open",
++ "min_write_categories",
++ "max_read_open",
++ "max_read_categories",
++ "mac_auto",
++ "mac_check",
++ "mac_prop_trusted",
++ "pm_role",
++ "pm_process_type",
++ "pm_current_task",
++ "pm_object_class",
++ "local_pm_object_class",
++ "remote_pm_object_class",
++ "pm_ipc_purpose",
++ "local_pm_ipc_purpose",
++ "remote_pm_ipc_purpose",
++ "pm_object_type",
++ "local_pm_object_type",
++ "remote_pm_object_type",
++ "pm_program_type",
++ "pm_tp",
++ "pm_task_set",
++ "daz_scanned",
++ "daz_scanner",
++ "ff_flags",
++ "rc_type",
++ "rc_select_type",
++ "local_rc_type",
++ "remote_rc_type",
++ "rc_type_fd",
++ "rc_type_nt",
++ "rc_force_role",
++ "rc_initial_role",
++ "rc_role",
++ "rc_def_role",
++ "auth_may_setuid",
++ "auth_may_set_cap",
++ "auth_learn",
++ "min_caps",
++ "max_caps",
++ "max_caps_user",
++ "max_caps_program",
++ "jail_id",
++ "jail_parent",
++ "jail_ip",
++ "jail_flags",
++ "jail_max_caps",
++ "jail_scd_get",
++ "jail_scd_modify",
++ "pax_flags",
++ "res_role",
++ "res_min",
++ "res_max",
++ "log_array_low",
++ "local_log_array_low",
++ "remote_log_array_low",
++ "log_array_high",
++ "local_log_array_high",
++ "remote_log_array_high",
++ "log_program_based",
++ "log_user_based",
++ "symlink_add_remote_ip",
++ "symlink_add_uid",
++ "symlink_add_mac_level",
++ "symlink_add_rc_role",
++ "linux_dac_disable",
++ "cap_process_hiding",
++ "fake_root_uid",
++ "audit_uid",
++ "auid_exempt",
++ "auth_last_auth",
++ "remote_ip",
++ "cap_ld_env",
++ "daz_do_scan",
++ "vset",
++#ifdef __KERNEL__
++ /* adf-request helpers */
++ "owner",
++ "group",
++ "signal",
++ "mode",
++ "nlink",
++ "switch_target",
++ "mod_name",
++ "request",
++ "trace_request",
++ "auth_add_f_cap",
++ "auth_remove_f_cap",
++ "auth_get_caplist",
++ "prot_bits",
++ "internal",
++ "create_data",
++ "new_object",
++ "rlimit",
++ "new_dir_dentry_p",
++ "program_file",
++ "auth_start_uid",
++ "auth_start_euid",
++ "auth_start_gid",
++ "auth_start_egid",
++ "acl_learn",
++ "priority",
++ "pgid",
++ "kernel_thread",
++ "open_flag",
++ "reboot_cmd",
++ "setsockopt_level",
++ "ioctl_cmd",
++ "f_mode",
++ "process",
++ "sock_type",
++ "pagenr",
++ "cap_learn",
++ "rc_learn",
++#endif
++ "none"
++};
++
++static char target_list[T_NONE + 1][11] = {
++ "FILE",
++ "DIR",
++ "FIFO",
++ "SYMLINK",
++ "DEV",
++ "IPC",
++ "SCD",
++ "USER",
++ "PROCESS",
++ "NETDEV",
++ "NETTEMP",
++ "NETOBJ",
++ "NETTEMP_NT",
++ "GROUP",
++ "FD",
++ "UNIXSOCK",
++ "NONE"
++};
++
++static char ipc_target_list[I_none + 1][9] = {
++ "sem",
++ "msg",
++ "shm",
++ "anonpipe",
++ "mqueue",
++ "anonunix",
++ "none"
++};
++
++static char switch_target_list[SW_NONE + 1][12] = {
++ "GEN",
++ "MAC",
++ "PM",
++ "DAZ",
++ "FF",
++ "RC",
++ "AUTH",
++ "REG",
++ "ACL",
++ "CAP",
++ "JAIL",
++ "RES",
++ "PAX",
++ "SOFTMODE",
++ "DAC_DISABLE",
++ "UM",
++ "FREEZE",
++ "NONE"
++};
++
++static char error_list[RSBAC_EMAX][26] = {
++ "RSBAC_EPERM",
++ "RSBAC_EACCESS",
++ "RSBAC_EREADFAILED",
++ "RSBAC_EWRITEFAILED",
++ "RSBAC_EINVALIDPOINTER",
++ "RSBAC_ENOROOTDIR",
++ "RSBAC_EPATHTOOLONG",
++ "RSBAC_ENOROOTDEV",
++ "RSBAC_ENOTFOUND",
++ "RSBAC_ENOTINITIALIZED",
++ "RSBAC_EREINIT",
++ "RSBAC_ECOULDNOTADDDEVICE",
++ "RSBAC_ECOULDNOTADDITEM",
++ "RSBAC_ECOULDNOTCREATEPATH",
++ "RSBAC_EINVALIDATTR",
++ "RSBAC_EINVALIDDEV",
++ "RSBAC_EINVALIDTARGET",
++ "RSBAC_EINVALIDVALUE",
++ "RSBAC_EEXISTS",
++ "RSBAC_EINTERNONLY",
++ "RSBAC_EINVALIDREQUEST",
++ "RSBAC_ENOTWRITABLE",
++ "RSBAC_EMALWAREDETECTED",
++ "RSBAC_ENOMEM",
++ "RSBAC_EDECISIONMISMATCH",
++ "RSBAC_EINVALIDVERSION",
++ "RSBAC_EINVALIDMODULE",
++ "RSBAC_EEXPIRED",
++ "RSBAC_EMUSTCHANGE",
++ "RSBAC_EBUSY",
++ "RSBAC_EINVALIDTRANSACTION",
++ "RSBAC_EWEAKPASSWORD",
++ "RSBAC_EINVALIDLIST",
++ "RSBAC_EFROMINTERRUPT"
++};
++
++static char scd_type_list[ST_none + 1][17] = {
++ "time_strucs",
++ "clock",
++ "host_id",
++ "net_id",
++ "ioports",
++ "rlimit",
++ "swap",
++ "syslog",
++ "rsbac",
++ "rsbac_log",
++ "other",
++ "kmem",
++ "network",
++ "firewall",
++ "priority",
++ "sysfs",
++ "rsbac_remote_log",
++ "quota",
++ "sysctl",
++ "nfsd",
++ "ksyms",
++ "mlock",
++ "capability",
++ "kexec",
++ "videomem",
++ "none"
++};
++
++/* Attribute types */
++
++#ifndef __KERNEL__
++static char attribute_param_list[A_none + 1][194] = {
++ "user-pseudo (positive long integer)", /* pseudo */
++ "0 = unclassified, 1 = confidential, 2 = secret,\n\t3 = top secret, 254 = inherit, max. level 252", /* security_level */
++ "0 = unclassified, 1 = confidential, 2 = secret,\n\t3 = top secret, 254 = inherit, max. level 252", /* initial_security_level */
++ "0 = unclassified, 1 = confidential, 2 = secret,\n\t3 = top secret, 254 = inherit, max. level 252", /* local_sec_level */
++ "0 = unclassified, 1 = confidential, 2 = secret,\n\t3 = top secret, 254 = inherit, max. level 252", /* remote_sec_level */
++ "0 = unclassified, 1 = confidential, 2 = secret,\n\t3 = top secret, 254 = inherit, max. level 252", /* min_security_level */
++ "Bit Set String of length 64 for all categories", /* mac_categories */
++ "Bit Set String of length 64 for all categories", /* mac_initial_categories */
++ "Bit Set String of length 64 for all categories", /* local_mac_categories */
++ "Bit Set String of length 64 for all categories", /* remote_mac_categories */
++ "Bit Set String of length 64 for all categories", /* mac_min_categories */
++ "1 = override, 4 = trusted, 8 = write_up, 16 = read_up,\n\t32 = write_down, 64 = allow_mac_auto", /* mac_user_flags */
++ "1 = override, 2 = auto, 4 = trusted, 8 = write_up,\n\t16 = read_up, 32 = write_down, 128 = prop_trusted", /* mac_process_flags */
++ "2 = auto, 4 = trusted, 8 = write_up, 16 = read_up,\n\t32 = write_down", /* mac_file_flags */
++ "0 = user, 1 = security officer, 2 = administrator,\n\t3 = auditor", /* system_role */
++ "0 = user, 1 = security officer, 2 = administrator,\n\t3 = auditor", /* mac_role */
++ "0 = user, 1 = security officer, 2 = administrator,\n\t3 = auditor", /* daz_role */
++ "0 = user, 1 = security officer, 2 = administrator,\n\t3 = auditor", /* ff_role */
++ "0 = user, 1 = security officer, 2 = administrator,\n\t3 = auditor", /* auth_role */
++ "0 = user, 1 = security officer, 2 = administrator,\n\t3 = auditor", /* cap_role */
++ "0 = user, 1 = security officer, 2 = administrator,\n\t3 = auditor", /* jail_role */
++ "0 = user, 1 = security officer, 2 = administrator,\n\t3 = auditor", /* pax_role */
++ "0 = unclassified, 1 = confidential, 2 = secret,\n\t3 = top secret, max. level 252", /* current_sec_level */
++ "Bit Set String of length 64 for all categories", /* mac_curr_categories */
++ "0 = unclassified, 1 = confidential, 2 = secret,\n\t3 = top secret, max. level 252", /* min_write_open */
++ "Bit Set String of length 64 for all categories", /* min_write_categories */
++ "0 = unclassified, 1 = confidential, 2 = secret,\n\t3 = top secret, max. level 252", /* max_read_open */
++ "Bit Set String of length 64 for all categories", /* max_read_categories */
++ "0 = no, 1 = yes, 2 = inherit (default value)", /* mac_auto */
++ "0 = false, 1 = true", /* mac_check */
++ "0 = false, 1 = true", /* mac_prop_trusted */
++ "0 = user, 1 = security officer, 2 = data protection officer,\n\t3 = TP-manager, 4 = system-admin", /* pm_role */
++ "0 = none, 1 = TP", /* pm_process_type */
++ "Task-ID (positive integer)", /* pm_current_task */
++ "Class-ID (positive integer)", /* pm_object_class */
++ "Class-ID (positive integer)", /* local_pm_object_class */
++ "Class-ID (positive integer)", /* remote_pm_object_class */
++ "Purpose-ID (positive integer)", /* pm_ipc_purpose */
++ "Purpose-ID (positive integer)", /* local_pm_ipc_purpose */
++ "Purpose-ID (positive integer)", /* remote_pm_ipc_purpose */
++ "0 = none, 1 = TP, 2 = personal data, 3 = non-personal data,\n\t4 = ipc, 5 = dir", /* pm_object_type */
++ "0 = none, 1 = TP, 2 = personal data, 3 = non-personal data,\n\t4 = ipc, 5 = dir", /* local_pm_object_type */
++ "0 = none, 1 = TP, 2 = personal data, 3 = non-personal data,\n\t4 = ipc, 5 = dir", /* remote_pm_object_type */
++ "0 = none, 1 = TP", /* pm_program_type */
++ "TP-ID (positive integer)", /* pm_tp */
++ "pm-task-list-ID (positive integer)", /* pm_task_set */
++ "0 = unscanned, 1 = infected, 2 = clean", /* daz_scanned */
++ "0 = FALSE, 1 = TRUE", /* daz_scanner */
++ "1 = read_only, 2 = execute_only, 4 = search_only, 8 = write_only,\n\t16 = secure_delete, 32 = no_execute, 64 = no_delete_or_rename,\n\t128 = add_inherited (or'd), 256 = append_only, 512 = no_mount", /* ff_flags */
++ "RC-type-id", /* rc_type */
++ "RC-type-id (-7 = use fd)", /* rc_select_type */
++ "RC-type-id", /* local_rc_type */
++ "RC-type-id", /* remote_rc_type */
++ "RC-type-id (-2 = inherit from parent)", /* rc_type_fd */
++ "RC-type-id", /* rc_type_nt */
++	"RC-role-id (-1 = inherit_user, -2 = inherit_process (keep),\n\t-3 = inherit_parent (def.),\n\t-4 = inherit_user_on_chown_only (root default))",	/* rc_force_role */
++	"RC-role-id (-3 = inherit_parent (default),\n\t-5 = use_force_role (root default))",	/* rc_initial_role */
++ "RC-role-id", /* rc_role */
++ "RC-role-id", /* rc_def_role */
++ "0 = off, 1 = full, 2 = last_auth_only, 3 = last_auth_and_gid", /* auth_may_setuid */
++ "0 = false, 1 = true", /* auth_may_set_cap */
++ "0 = false, 1 = true", /* auth_learn */
++ "Bit-Vector value or name list of desired caps", /* min_caps */
++ "Bit-Vector value or name list of desired caps", /* max_caps */
++ "Bit-Vector value or name list of desired caps", /* max_caps_user */
++ "Bit-Vector value or name list of desired caps", /* max_caps_program */
++ "JAIL ID (0 = off)", /* jail_id */
++ "JAIL ID (0 = no parent jail)", /* jail_parent */
++ "JAIL IP address a.b.c.d", /* jail_ip */
++ "JAIL flags (or'd, 1 = allow external IPC, 2 = allow all net families,\n\t4 = allow_rlimit, 8 = allow raw IP, 16 = auto adjust IP,\n\t32 = allow localhost, 64 = allow scd clock)", /* jail_flags */
++ "Bit-Vector value or name list of desired caps", /* jail_max_caps */
++ "List of SCD targets", /* jail_scd_get */
++ "List of SCD targets", /* jail_scd_modify */
++ "PAX flags with capital=on, non-capital=off, e.g. PeMRxS", /* pax_flags */
++ "0 = user, 1 = security officer, 2 = administrator", /* res_role */
++ "array of non-negative integer values, all 0 for unset", /* res_min */
++ "array of non-negative integer values, all 0 for unset", /* res_max */
++ "Bit-String for all Requests, low bit", /* log_array_low */
++ "Bit-String for all Requests, low bit", /* local_log_array_low */
++ "Bit-String for all Requests, low bit", /* remote_log_array_low */
++ "Bit-String for all Requests, high bit (l=0,h=0 = none, l=1,h=0 = denied,\n\tl=0,h=1 = full, l=1,h=1 = request based)", /* log_array_high */
++ "Bit-String for all Requests, high bit (l=0,h=0 = none, l=1,h=0 = denied,\n\tl=0,h=1 = full, l=1,h=1 = request based)", /* local_log_array_high */
++ "Bit-String for all Requests, high bit (l=0,h=0 = none, l=1,h=0 = denied,\n\tl=0,h=1 = full, l=1,h=1 = request based)", /* remote_log_array_high */
++ "Bit-String for all Requests", /* log_program_based */
++ "Bit-String for all Requests", /* log_user_based */
++ "Number of bytes to add, 0 to turn off", /* symlink_add_remote_ip */
++ "0 = false, 1 = true", /* symlink_add_uid */
++ "0 = false, 1 = true", /* symlink_add_mac_level */
++ "0 = false, 1 = true", /* symlink_add_rc_role */
++ "0 = false, 1 = true, 2 = inherit (default)", /* linux_dac_disable */
++ "0 = off (default), 1 = from other users, 2 = full", /* cap_process_hiding */
++ "0 = off (default), 1 = uid_only, 2 = euid_only, 3 = both", /* fake_root_uid */
++ "-3 = unset, uid otherwise", /* audit_uid */
++ "-3 = unset, uid otherwise", /* auid_exempt */
++ "-3 = unset, uid otherwise", /* auth_last_auth */
++ "32 Bit value in network byte order", /* remote_ip */
++ "0 = disallow executing of program file with LD_ variables set,\n\t1 = do not care (default)", /* cap_ld_env */
++ "0 = never, 1 = registered, 2 = always, 3 = inherit", /* daz_do_scan */
++ "non-negative virtual set number, 0 = default main set",
++ "INVALID!"
++};
++#endif
++
++static char log_level_list[LL_invalid + 1][9] = {
++ "none",
++ "denied",
++ "full",
++ "request",
++ "invalid!"
++};
++
++static char cap_list[RSBAC_CAP_MAX + 1][17] = {
++ "CHOWN",
++ "DAC_OVERRIDE",
++ "DAC_READ_SEARCH",
++ "FOWNER",
++ "FSETID",
++ "KILL",
++ "SETGID",
++ "SETUID",
++ "SETPCAP",
++ "LINUX_IMMUTABLE",
++ "NET_BIND_SERVICE",
++ "NET_BROADCAST",
++ "NET_ADMIN",
++ "NET_RAW",
++ "IPC_LOCK",
++ "IPC_OWNER",
++ "SYS_MODULE",
++ "SYS_RAWIO",
++ "SYS_CHROOT",
++ "SYS_PTRACE",
++ "SYS_PACCT",
++ "SYS_ADMIN",
++ "SYS_BOOT",
++ "SYS_NICE",
++ "SYS_RESOURCE",
++ "SYS_TIME",
++ "SYS_TTY_CONFIG",
++ "MKNOD",
++ "LEASE",
++ "AUDIT_WRITE",
++ "AUDIT_CONTROL",
++ "SETFCAP",
++ "MAC_OVERRIDE",
++ "MAC_ADMIN",
++ "NONE"
++};
++
++#ifdef CONFIG_RSBAC_XSTATS
++static char syscall_list[RSYS_none + 1][30] = {
++ "version",
++ "stats",
++ "check",
++ "get_attr",
++ "get_attr_n",
++ "set_attr",
++ "set_attr_n",
++ "remove_target",
++ "remove_target_n",
++ "net_list_all_netdev",
++ "net_template",
++ "net_list_all_template",
++ "switch",
++ "get_switch",
++ "adf_log_switch",
++ "get_adf_log",
++ "write",
++ "log",
++ "mac_set_curr_level",
++ "mac_get_curr_level",
++ "mac_get_max_level",
++ "mac_get_min_level",
++ "mac_add_p_tru",
++ "mac_remove_p_tru",
++ "mac_add_f_tru",
++ "mac_remove_f_tru",
++ "mac_get_f_trulist",
++ "mac_get_p_trulist",
++ "stats_pm",
++ "pm",
++ "pm_change_current_task",
++ "pm_create_file",
++ "daz_flush_cache",
++ "rc_copy_role",
++ "rc_copy_type",
++ "rc_get_item",
++ "rc_set_item",
++ "rc_change_role",
++ "rc_get_eff_rights_n",
++ "rc_get_list",
++ "auth_add_p_cap",
++ "auth_remove_p_cap",
++ "auth_add_f_cap",
++ "auth_remove_f_cap",
++ "auth_get_f_caplist",
++ "auth_get_p_caplist",
++ "acl",
++ "acl_n",
++ "acl_get_rights",
++ "acl_get_rights_n",
++ "acl_get_tlist",
++ "acl_get_tlist_n",
++ "acl_get_mask",
++ "acl_get_mask_n",
++ "acl_group",
++ "reg",
++ "jail",
++ "init",
++ "rc_get_current_role",
++ "um_auth_name",
++ "um_auth_uid",
++ "um_add_user",
++ "um_add_group",
++ "um_add_gm",
++ "um_mod_user",
++ "um_mod_group",
++ "um_get_user_item",
++ "um_get_group_item",
++ "um_remove_user",
++ "um_remove_group",
++ "um_remove_gm",
++ "um_user_exists",
++ "um_group_exists",
++ "um_get_next_user",
++ "um_get_user_list",
++ "um_get_gm_list",
++ "um_get_gm_user_list",
++ "um_get_group_list",
++ "um_get_uid",
++ "um_get_gid",
++ "um_set_pass",
++ "um_set_pass_name",
++ "um_set_group_pass",
++ "um_check_account",
++ "um_check_account_name",
++ "list_ta_begin",
++ "list_ta_refresh",
++ "list_ta_commit",
++ "list_ta_forget",
++ "list_all_dev",
++ "acl_list_all_dev",
++ "list_all_user",
++ "acl_list_all_user",
++ "list_all_group",
++ "acl_list_all_group",
++ "list_all_ipc",
++ "rc_select_fd_create_type",
++ "um_select_vset",
++ "um_add_onetime",
++ "um_add_onetime_name",
++ "um_remove_all_onetime",
++ "um_remove_all_onetime_name",
++ "um_count_onetime",
++ "um_count_onetime_name",
++ "list_ta_begin_name",
++ "um_get_max_history",
++ "um_get_max_history_name",
++ "um_set_max_history",
++ "um_set_max_history_name",
++ "none"
++};
++
++char *get_syscall_name(char *syscall_name,
++ enum rsbac_syscall_t syscall)
++{
++ if (!syscall_name)
++ return (NULL);
++ if (syscall >= RSYS_none)
++ strcpy(syscall_name, "ERROR!");
++ else
++ strcpy(syscall_name, syscall_list[syscall]);
++ return (syscall_name);
++}
++#endif
++
++/*****************************************/
++
++#ifdef __KERNEL__
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(get_request_name);
++#endif
++#endif
++
++char *get_request_name(char *request_name,
++ enum rsbac_adf_request_t request)
++{
++ if (!request_name)
++ return (NULL);
++ if (request >= R_NONE)
++ strcpy(request_name, "ERROR!");
++ else
++ strcpy(request_name, request_list[request]);
++ return (request_name);
++}
++
++enum rsbac_adf_request_t get_request_nr(const char *request_name)
++{
++ enum rsbac_adf_request_t i;
++
++ if (!request_name)
++ return (R_NONE);
++ for (i = 0; i < R_NONE; i++) {
++ if (!strcmp(request_name, request_list[i])) {
++ return (i);
++ }
++ }
++ return (R_NONE);
++}
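
For code using these helpers from the user-space build of getname.c (the non-__KERNEL__ branch of the includes above), a small sketch of the name/number round trip. The headers mirror the ones at the top of this file; the request name is taken from request_list, and the 32-byte buffer comfortably holds the longest entry there.

    /* request_lookup_demo.c - illustrative only; link against the user-space
     * build of getname.c. */
    #include <stdio.h>
    #include <rsbac/types.h>
    #include <rsbac/getname.h>

    int main(void)
    {
            char name[32];
            enum rsbac_adf_request_t req = get_request_nr("READ_OPEN");

            if (req == R_NONE) {
                    fprintf(stderr, "unknown request name\n");
                    return 1;
            }
            /* get_request_name() copies the table entry into the caller's buffer */
            printf("request %u is %s\n", (unsigned int) req, get_request_name(name, req));
            return 0;
    }
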
++
++
++char *get_result_name(char *res_name, enum rsbac_adf_req_ret_t res)
++{
++ if (!res_name)
++ return (NULL);
++ if (res > UNDEFINED)
++ strcpy(res_name, "ERROR!");
++ else
++ strcpy(res_name, result_list[res]);
++ return (res_name);
++}
++
++enum rsbac_adf_req_ret_t get_result_nr(const char *res_name)
++{
++ enum rsbac_adf_req_ret_t i;
++
++ if (!res_name)
++ return (UNDEFINED);
++ for (i = 0; i < UNDEFINED; i++) {
++ if (!strcmp(res_name, result_list[i])) {
++ return (i);
++ }
++ }
++ return (UNDEFINED);
++}
++
++
++enum rsbac_switch_target_t get_attr_module(enum rsbac_attribute_t attr)
++{
++ if (attr > A_none)
++ return SW_NONE;
++ else
++ return attr_mod_list[attr];
++}
++
++#ifdef __KERNEL__
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(get_attribute_name);
++#endif
++#endif
++
++char *get_attribute_name(char *attr_name, enum rsbac_attribute_t attr)
++{
++ if (!attr_name)
++ return (NULL);
++ if (attr > A_none)
++ strcpy(attr_name, "ERROR!");
++ else
++ strcpy(attr_name, attribute_list[attr]);
++ return (attr_name);
++}
++
++enum rsbac_attribute_t get_attribute_nr(const char *attr_name)
++{
++ enum rsbac_attribute_t i;
++
++ if (!attr_name)
++ return (A_none);
++ for (i = 0; i < A_none; i++) {
++ if (!strcmp(attr_name, attribute_list[i])) {
++ return (i);
++ }
++ }
++ return (A_none);
++}
++
++#ifdef __KERNEL__
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(get_attribute_value_name);
++#endif
++#endif
++
++char *get_attribute_value_name(char *attr_val_name,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t *attr_val_p)
++{
++ if (!attr_val_name)
++ return (NULL);
++ if (attr > A_none)
++ strcpy(attr_val_name, "ERROR!");
++ else
++ switch (attr) {
++ case A_none:
++ strcpy(attr_val_name, "none");
++ break;
++#ifdef __KERNEL__
++ case A_create_data:
++ {
++ char *tmp =
++ rsbac_kmalloc(RSBAC_MAXNAMELEN);
++
++ if (tmp) {
++ if (attr_val_p->create_data.
++ dentry_p)
++ snprintf(attr_val_name,
++ RSBAC_MAXNAMELEN -
++ 1,
++ "%s %s, mode %o",
++ get_target_name_only
++ (tmp,
++ attr_val_p->
++ create_data.
++ target),
++ attr_val_p->
++ create_data.
++ dentry_p->d_name.
++ name,
++ attr_val_p->
++ create_data.
++ mode & S_IALLUGO);
++ else
++ snprintf(attr_val_name,
++ RSBAC_MAXNAMELEN -
++ 1, "%s, mode %o",
++ get_target_name_only
++ (tmp,
++ attr_val_p->
++ create_data.
++ target),
++ attr_val_p->
++ create_data.
++ mode & S_IALLUGO);
++ rsbac_kfree(tmp);
++ }
++ }
++ break;
++ case A_mode:
++ sprintf(attr_val_name, "%o", attr_val_p->mode);
++ break;
++ case A_rlimit:
++ sprintf(attr_val_name, "%u:%lu:%lu",
++ attr_val_p->rlimit.resource,
++ attr_val_p->rlimit.limit.rlim_cur,
++ attr_val_p->rlimit.limit.rlim_max);
++ break;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ case A_owner:
++ if(RSBAC_UID_SET(attr_val_p->owner))
++ sprintf(attr_val_name, "%u/%u",
++ RSBAC_UID_SET(attr_val_p->owner),
++ RSBAC_UID_NUM(attr_val_p->owner));
++ else
++ sprintf(attr_val_name, "%u",
++ RSBAC_UID_NUM(attr_val_p->owner));
++ break;
++ case A_group:
++ if(RSBAC_GID_SET(attr_val_p->group))
++ sprintf(attr_val_name, "%u/%u",
++ RSBAC_GID_SET(attr_val_p->group),
++ RSBAC_GID_NUM(attr_val_p->group));
++ else
++ sprintf(attr_val_name, "%u",
++ RSBAC_GID_NUM(attr_val_p->group));
++ break;
++#endif
++ case A_priority:
++ sprintf(attr_val_name, "%i", attr_val_p->priority);
++ break;
++ case A_process:
++ case A_pgid:
++ {
++ struct task_struct *task_p;
++
++ read_lock(&tasklist_lock);
++ task_p = pid_task(attr_val_p->process, PIDTYPE_PID);
++ if (task_p) {
++ if(task_p->parent)
++ sprintf(attr_val_name, "%u(%s,parent=%u(%s))", task_p->pid, task_p->comm, task_p->parent->pid, task_p->parent->comm);
++ else
++ sprintf(attr_val_name, "%u(%s)", task_p->pid, task_p->comm);
++ }
++ else
++ sprintf(attr_val_name, "%u", pid_nr(attr_val_p->process));
++ read_unlock(&tasklist_lock);
++ }
++ break;
++ case A_mod_name:
++ if (attr_val_p->mod_name)
++ strncpy(attr_val_name,
++ attr_val_p->mod_name,
++ RSBAC_MAXNAMELEN - 1);
++ else
++ strcpy(attr_val_name, "unknown");
++ attr_val_name[RSBAC_MAXNAMELEN - 1] = 0;
++ break;
++ case A_auth_add_f_cap:
++ case A_auth_remove_f_cap:
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if( RSBAC_UID_SET(attr_val_p->auth_cap_range.first)
++ || RSBAC_UID_SET(attr_val_p->auth_cap_range.last)
++ )
++ sprintf(attr_val_name, "%u/%u:%u/%u",
++ RSBAC_UID_SET(attr_val_p->auth_cap_range.first),
++ RSBAC_UID_NUM(attr_val_p->auth_cap_range.first),
++ RSBAC_UID_SET(attr_val_p->auth_cap_range.last),
++ RSBAC_UID_NUM(attr_val_p->auth_cap_range.last));
++ else
++#endif
++ sprintf(attr_val_name, "%u:%u",
++ RSBAC_UID_NUM(attr_val_p->auth_cap_range.first),
++ RSBAC_UID_NUM(attr_val_p->auth_cap_range.last));
++ break;
++ case A_switch_target:
++ get_switch_target_name(attr_val_name,
++ attr_val_p->switch_target);
++ break;
++ case A_request:
++ get_request_name(attr_val_name,
++ attr_val_p->request);
++ break;
++ case A_sock_type:
++ rsbac_get_net_type_name(attr_val_name,
++ attr_val_p->sock_type);
++ break;
++#endif
++#if defined(CONFIG_RSBAC_PAX) || !defined(__KERNEL__)
++ case A_pax_flags:
++ pax_print_flags(attr_val_name,
++ attr_val_p->pax_flags);
++ break;
++#endif
++#if defined(CONFIG_RSBAC_AUTH) || !defined(__KERNEL__)
++ case A_auth_last_auth:
++#if defined(CONFIG_RSBAC_AUTH_LEARN) && defined(__KERNEL__)
++ case A_auth_start_uid:
++ case A_auth_start_euid:
++#endif
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(attr_val_p->auth_last_auth))
++ sprintf(attr_val_name, "%u/%u",
++ RSBAC_UID_SET(attr_val_p->auth_last_auth),
++ RSBAC_UID_NUM(attr_val_p->auth_last_auth));
++ else
++#endif
++ sprintf(attr_val_name, "%u",
++ RSBAC_UID_NUM(attr_val_p->auth_last_auth));
++ break;
++#endif
++#ifdef CONFIG_RSBAC_AUTH_GROUP
++ case A_auth_start_gid:
++#ifdef CONFIG_RSBAC_AUTH_DAC_GROUP
++ case A_auth_start_egid:
++#endif
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_GID_SET(attr_val_p->auth_last_auth))
++ sprintf(attr_val_name, "%u/%u",
++ RSBAC_GID_SET(attr_val_p->auth_last_auth),
++ RSBAC_GID_NUM(attr_val_p->auth_last_auth));
++ else
++#endif
++ sprintf(attr_val_name, "%u",
++ RSBAC_GID_NUM(attr_val_p->auth_start_gid));
++ break;
++#endif
++ default:
++ snprintf(attr_val_name, RSBAC_MAXNAMELEN - 1, "%u",
++ attr_val_p->u_dummy);
++ }
++ return (attr_val_name);
++}
++
++
++#ifdef __KERNEL__
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(get_scd_type_name);
++#endif
++#endif
++
++char *get_scd_type_name(char *res_name, enum rsbac_scd_type_t res)
++{
++ if (!res_name)
++ return (NULL);
++ if (res > ST_none)
++ strcpy(res_name, "ERROR!");
++ else
++ strcpy(res_name, scd_type_list[res]);
++ return (res_name);
++}
++
++enum rsbac_scd_type_t get_scd_type_nr(const char *res_name)
++{
++ enum rsbac_scd_type_t i;
++
++ if (!res_name)
++ return (ST_none);
++ for (i = 0; i < ST_none; i++) {
++ if (!strcmp(res_name, scd_type_list[i])) {
++ return (i);
++ }
++ }
++ return (ST_none);
++}
++
++
++#ifdef __KERNEL__
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(get_target_name);
++#endif
++#endif
++
++char *get_target_name(char *target_type_name,
++ enum rsbac_target_t target,
++ char *target_id_name, union rsbac_target_id_t tid)
++{
++#ifdef __KERNEL__
++ char *help_name;
++#else
++ char help_name[RSBAC_MAXNAMELEN + 4];
++#endif
++
++#ifdef __KERNEL__
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ help_name = rsbac_kmalloc(CONFIG_RSBAC_MAX_PATH_LEN + 4);
++#else
++ help_name = rsbac_kmalloc(RSBAC_MAXNAMELEN + 4);
++#endif
++ if (!help_name)
++ return NULL;
++#endif
++
++ switch (target) {
++#ifdef __KERNEL__
++ case T_FD:
++ if(target_type_name)
++ strcpy(target_type_name, "FD");
++ if (!target_id_name)
++ break;
++ sprintf(target_id_name, "Device %02u:%02u Inode %u",
++ RSBAC_MAJOR(tid.file.device),
++ RSBAC_MINOR(tid.file.device), tid.file.inode);
++ if (tid.file.dentry_p && tid.file.dentry_p->d_name.name
++ && tid.file.dentry_p->d_name.len) {
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ if (rsbac_get_full_path
++ (tid.file.dentry_p, help_name,
++ CONFIG_RSBAC_MAX_PATH_LEN) > 0) {
++ strcat(target_id_name, " Path ");
++ strcat(target_id_name, help_name);
++ }
++#else
++ int namelen =
++ rsbac_min(tid.file.dentry_p->d_name.len,
++ RSBAC_MAXNAMELEN);
++
++ strcat(target_id_name, " Name ");
++ strncpy(help_name, tid.file.dentry_p->d_name.name,
++ namelen);
++ help_name[namelen] = 0;
++ strcat(target_id_name, help_name);
++#endif
++ }
++ break;
++ case T_FILE:
++ if(target_type_name)
++ strcpy(target_type_name, "FILE");
++ if (!target_id_name)
++ break;
++ sprintf(target_id_name, "Device %02u:%02u Inode %u",
++ RSBAC_MAJOR(tid.file.device),
++ RSBAC_MINOR(tid.file.device), tid.file.inode);
++ if (tid.file.dentry_p && tid.file.dentry_p->d_name.name
++ && tid.file.dentry_p->d_name.len) {
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ if (rsbac_get_full_path
++ (tid.file.dentry_p, help_name,
++ CONFIG_RSBAC_MAX_PATH_LEN) > 0) {
++ strcat(target_id_name, " Path ");
++ strcat(target_id_name, help_name);
++ }
++#else
++ int namelen =
++ rsbac_min(tid.file.dentry_p->d_name.len,
++ RSBAC_MAXNAMELEN);
++
++ strcat(target_id_name, " Name ");
++ strncpy(help_name, tid.file.dentry_p->d_name.name,
++ namelen);
++ help_name[namelen] = 0;
++ strcat(target_id_name, help_name);
++#endif
++ }
++ break;
++ case T_DIR:
++ if(target_type_name)
++ strcpy(target_type_name, "DIR");
++ if (!target_id_name)
++ break;
++		sprintf(target_id_name, "Device %02u:%02u Inode %u",
++			RSBAC_MAJOR(tid.dir.device),
++			RSBAC_MINOR(tid.dir.device), tid.dir.inode);
++ if (tid.dir.dentry_p && tid.dir.dentry_p->d_name.name
++ && tid.dir.dentry_p->d_name.len) {
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ if (rsbac_get_full_path
++ (tid.dir.dentry_p, help_name,
++ CONFIG_RSBAC_MAX_PATH_LEN) > 0) {
++ strcat(target_id_name, " Path ");
++ strcat(target_id_name, help_name);
++ }
++#else
++ int namelen =
++ rsbac_min(tid.dir.dentry_p->d_name.len,
++ RSBAC_MAXNAMELEN);
++
++ strcat(target_id_name, " Name ");
++ strncpy(help_name, tid.dir.dentry_p->d_name.name,
++ namelen);
++ help_name[namelen] = 0;
++ strcat(target_id_name, help_name);
++#endif
++ }
++ break;
++ case T_FIFO:
++ if(target_type_name)
++ strcpy(target_type_name, "FIFO");
++ if (!target_id_name)
++ break;
++		sprintf(target_id_name, "Device %02u:%02u Inode %u",
++			RSBAC_MAJOR(tid.fifo.device),
++			RSBAC_MINOR(tid.fifo.device), tid.fifo.inode);
++ if (tid.fifo.dentry_p && tid.fifo.dentry_p->d_name.name
++ && tid.fifo.dentry_p->d_name.len) {
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ if (rsbac_get_full_path
++ (tid.fifo.dentry_p, help_name,
++ CONFIG_RSBAC_MAX_PATH_LEN) > 0) {
++ strcat(target_id_name, " Path ");
++ strcat(target_id_name, help_name);
++ }
++#else
++ int namelen =
++ rsbac_min(tid.fifo.dentry_p->d_name.len,
++ RSBAC_MAXNAMELEN);
++
++ strcat(target_id_name, " Name ");
++ strncpy(help_name, tid.fifo.dentry_p->d_name.name,
++ namelen);
++ help_name[namelen] = 0;
++ strcat(target_id_name, help_name);
++#endif
++ }
++ break;
++ case T_SYMLINK:
++ if(target_type_name)
++ strcpy(target_type_name, "SYMLINK");
++ if (!target_id_name)
++ break;
++ sprintf(target_id_name, "Device %02u:%02u Inode %u",
++ RSBAC_MAJOR(tid.symlink.device),
++ RSBAC_MINOR(tid.symlink.device), tid.symlink.inode);
++ if (tid.symlink.dentry_p
++ && tid.symlink.dentry_p->d_name.name
++ && tid.symlink.dentry_p->d_name.len) {
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ if (rsbac_get_full_path
++ (tid.symlink.dentry_p, help_name,
++ CONFIG_RSBAC_MAX_PATH_LEN) > 0) {
++ strcat(target_id_name, " Path ");
++ strcat(target_id_name, help_name);
++ }
++#else
++ int namelen =
++ rsbac_min(tid.symlink.dentry_p->d_name.len,
++ RSBAC_MAXNAMELEN);
++
++ strcat(target_id_name, " Name ");
++ strncpy(help_name,
++ tid.symlink.dentry_p->d_name.name,
++ namelen);
++ help_name[namelen] = 0;
++ strcat(target_id_name, help_name);
++#endif
++ }
++ break;
++ case T_UNIXSOCK:
++ if(target_type_name)
++ strcpy(target_type_name, "UNIXSOCK");
++ if (!target_id_name)
++ break;
++ sprintf(target_id_name, "Device %02u:%02u Inode %u",
++ RSBAC_MAJOR(tid.unixsock.device),
++ RSBAC_MINOR(tid.unixsock.device), tid.unixsock.inode);
++		if (tid.unixsock.dentry_p
++ && tid.unixsock.dentry_p->d_name.name
++ && tid.unixsock.dentry_p->d_name.len) {
++#ifdef CONFIG_RSBAC_LOG_FULL_PATH
++ if (rsbac_get_full_path
++ (tid.unixsock.dentry_p, help_name,
++ CONFIG_RSBAC_MAX_PATH_LEN) > 0) {
++ strcat(target_id_name, " Path ");
++ strcat(target_id_name, help_name);
++ }
++#else
++ int namelen =
++ rsbac_min(tid.unixsock.dentry_p->d_name.len,
++ RSBAC_MAXNAMELEN);
++
++ strcat(target_id_name, " Name ");
++ strncpy(help_name,
++ tid.unixsock.dentry_p->d_name.name,
++ namelen);
++ help_name[namelen] = 0;
++ strcat(target_id_name, help_name);
++#endif
++ }
++ break;
++ case T_DEV:
++ if(target_type_name)
++ strcpy(target_type_name, "DEV");
++ if (!target_id_name)
++ break;
++ switch (tid.dev.type) {
++ case D_block:
++ sprintf(target_id_name, "block %02u:%02u",
++ tid.dev.major, tid.dev.minor);
++ break;
++ case D_char:
++ sprintf(target_id_name, "char %02u:%02u",
++ tid.dev.major, tid.dev.minor);
++ break;
++ case D_block_major:
++ sprintf(target_id_name, "block major %02u",
++ tid.dev.major);
++ break;
++ case D_char_major:
++ sprintf(target_id_name, "char major %02u",
++ tid.dev.major);
++ break;
++ default:
++ sprintf(target_id_name, "*unknown* %02u:%02u",
++ tid.dev.major, tid.dev.minor);
++ }
++ break;
++ case T_NETOBJ:
++ if(target_type_name)
++ strcpy(target_type_name, "NETOBJ");
++ if (!target_id_name)
++ break;
++#ifdef CONFIG_NET
++ if (tid.netobj.sock_p
++ && tid.netobj.sock_p->ops && tid.netobj.sock_p->sk) {
++ char type_name[RSBAC_MAXNAMELEN];
++
++ switch (tid.netobj.sock_p->ops->family) {
++ case AF_INET:
++ {
++ __u32 saddr;
++ __u16 sport;
++ __u32 daddr;
++ __u16 dport;
++ struct net_device *dev;
++ char ldevname[RSBAC_IFNAMSIZ + 10];
++ char rdevname[RSBAC_IFNAMSIZ + 10];
++
++ if (tid.netobj.local_addr) {
++ struct sockaddr_in *addr =
++ tid.netobj.local_addr;
++
++ saddr =
++ addr->sin_addr.s_addr;
++ sport =
++ ntohs(addr->sin_port);
++ } else {
++ saddr =
++ inet_sk(tid.netobj.
++ sock_p->sk)->
++ inet_saddr;
++ sport =
++ inet_sk(tid.netobj.
++ sock_p->sk)->
++ inet_num;
++ }
++ if (tid.netobj.remote_addr) {
++ struct sockaddr_in *addr =
++ tid.netobj.remote_addr;
++
++ daddr =
++ addr->sin_addr.s_addr;
++ dport =
++ ntohs(addr->sin_port);
++ } else {
++ daddr =
++ inet_sk(tid.netobj.
++ sock_p->sk)->
++ inet_daddr;
++ dport =
++ ntohs(inet_sk
++ (tid.netobj.
++ sock_p->sk)->
++ inet_dport);
++ }
++ dev = ip_dev_find(&init_net, saddr);
++
++ if (dev) {
++ sprintf(ldevname, "%s:",
++ dev->name);
++ dev_put(dev);
++ } else
++ ldevname[0] = 0;
++ dev = ip_dev_find(&init_net, daddr);
++ if (dev) {
++ sprintf(rdevname, "%s:",
++ dev->name);
++ dev_put(dev);
++ } else
++ rdevname[0] = 0;
++ sprintf(target_id_name,
++ "%p INET %s proto %s local %s%u.%u.%u.%u:%u remote %s%u.%u.%u.%u:%u",
++ tid.netobj.sock_p,
++ rsbac_get_net_type_name
++ (type_name,
++ tid.netobj.sock_p->type),
++ rsbac_get_net_protocol_name
++ (help_name,
++ tid.netobj.sock_p->sk->
++ sk_protocol),
++ ldevname,
++ NIPQUAD(saddr),
++ sport,
++ rdevname,
++ NIPQUAD(daddr), dport);
++ }
++ break;
++ case AF_NETLINK:
++ if (tid.netobj.local_addr || tid.netobj.remote_addr) {
++ struct sockaddr_nl *addr;
++
++ if(tid.netobj.local_addr)
++ addr = tid.netobj.local_addr;
++ else
++ addr = tid.netobj.remote_addr;
++
++ sprintf(target_id_name,
++ "%p NETLINK %s %s %u",
++ tid.netobj.sock_p,
++ rsbac_get_net_type_name
++ (type_name,
++ tid.netobj.sock_p->type),
++ rsbac_get_net_netlink_family_name(
++ help_name,
++ tid.netobj.sock_p->sk->sk_protocol),
++ addr->nl_pid);
++ } else {
++ sprintf(target_id_name,
++ "%p NETLINK %s %s",
++ tid.netobj.sock_p,
++ rsbac_get_net_type_name
++ (type_name,
++ tid.netobj.sock_p->type),
++ rsbac_get_net_netlink_family_name(
++ help_name,
++ tid.netobj.sock_p->sk->sk_protocol));
++ }
++ break;
++ default:
++ sprintf(target_id_name, "%p %s %s",
++ tid.netobj.sock_p,
++ rsbac_get_net_family_name
++ (help_name,
++ tid.netobj.sock_p->ops->family),
++ rsbac_get_net_type_name(type_name,
++ tid.netobj.
++ sock_p->
++ type));
++ }
++ } else
++#endif /* CONFIG_NET */
++ {
++ sprintf(target_id_name, "%p", tid.netobj.sock_p);
++ }
++ break;
++#endif /* __KERNEL__ */
++ case T_IPC:
++ if(target_type_name)
++ strcpy(target_type_name, "IPC");
++ if (!target_id_name)
++ break;
++ switch (tid.ipc.type) {
++ case I_sem:
++ strcpy(target_id_name, "Sem-ID ");
++ break;
++ case I_msg:
++ strcpy(target_id_name, "Msg-ID ");
++ break;
++ case I_shm:
++ strcpy(target_id_name, "Shm-ID ");
++ break;
++ case I_anonpipe:
++ strcpy(target_id_name, "AnonPipe-ID ");
++ break;
++ case I_mqueue:
++ strcpy(target_id_name, "Mqueue-ID ");
++ break;
++ case I_anonunix:
++ strcpy(target_id_name, "AnonUnix-ID ");
++ break;
++ default:
++ strcpy(target_id_name, "ID ");
++ break;
++ };
++ sprintf(help_name, "%lu", tid.ipc.id.id_nr);
++ strcat(target_id_name, help_name);
++ break;
++ case T_SCD:
++ if(target_type_name)
++ strcpy(target_type_name, "SCD");
++ if (target_id_name)
++ get_scd_type_name(target_id_name, tid.scd);
++ break;
++ case T_USER:
++ if(target_type_name)
++ strcpy(target_type_name, "USER");
++ if (target_id_name) {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(tid.user))
++ sprintf(target_id_name, "%u/%u",
++ RSBAC_UID_SET(tid.user),
++ RSBAC_UID_NUM(tid.user));
++ else
++#endif
++ sprintf(target_id_name, "%u", RSBAC_UID_NUM(tid.user));
++ }
++ break;
++ case T_PROCESS:
++ if(target_type_name)
++ strcpy(target_type_name, "PROCESS");
++ if (target_id_name) {
++ struct task_struct *task_p;
++
++ read_lock(&tasklist_lock);
++ task_p = pid_task(tid.process, PIDTYPE_PID);
++ if (task_p) {
++ if(task_p->parent)
++ sprintf(target_id_name, "%u(%s,parent=%u(%s))", task_p->pid, task_p->comm, task_p->parent->pid, task_p->parent->comm);
++ else
++ sprintf(target_id_name, "%u(%s)", task_p->pid, task_p->comm);
++ }
++ else
++ sprintf(target_id_name, "%u", pid_nr(tid.process));
++ read_unlock(&tasklist_lock);
++ }
++ break;
++ case T_GROUP:
++ if(target_type_name)
++ strcpy(target_type_name, "GROUP");
++ if (target_id_name) {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_GID_SET(tid.group))
++ sprintf(target_id_name, "%u/%u",
++ RSBAC_GID_SET(tid.group),
++ RSBAC_GID_NUM(tid.group));
++ else
++#endif
++ sprintf(target_id_name, "%u", RSBAC_GID_NUM(tid.group));
++ }
++ break;
++ case T_NETDEV:
++ if(target_type_name)
++ strcpy(target_type_name, "NETDEV");
++ if (!target_id_name)
++ break;
++ strncpy(target_id_name, tid.netdev, RSBAC_IFNAMSIZ);
++ target_id_name[RSBAC_IFNAMSIZ] = 0;
++ break;
++ case T_NETTEMP:
++ if(target_type_name)
++ strcpy(target_type_name, "NETTEMP");
++ if (target_id_name)
++ sprintf(target_id_name, "%u", tid.nettemp);
++ break;
++ case T_NETTEMP_NT:
++ if(target_type_name)
++ strcpy(target_type_name, "NETTEMP_NT");
++ if (target_id_name)
++ sprintf(target_id_name, "%u", tid.nettemp);
++ break;
++ case T_NONE:
++ if(target_type_name)
++ strcpy(target_type_name, "NONE");
++ if (target_id_name)
++ strcpy(target_id_name, "NONE");
++ break;
++ default:
++ if(target_type_name)
++ strcpy(target_type_name, "ERROR!!!");
++ if (target_id_name)
++ sprintf(target_id_name, "%u", target);
++ }
++#ifdef __KERNEL__
++ rsbac_kfree(help_name);
++#endif
++ if(target_type_name)
++ return target_type_name;
++ else
++ return target_id_name;
++}
++
++char *get_target_name_only(char *target_type_name,
++ enum rsbac_target_t target)
++{
++ if (!target_type_name)
++ return (NULL);
++
++ switch (target) {
++ case T_FILE:
++ strcpy(target_type_name, "FILE");
++ break;
++ case T_DIR:
++ strcpy(target_type_name, "DIR");
++ break;
++ case T_FIFO:
++ strcpy(target_type_name, "FIFO");
++ break;
++ case T_SYMLINK:
++ strcpy(target_type_name, "SYMLINK");
++ break;
++ case T_UNIXSOCK:
++ strcpy(target_type_name, "UNIXSOCK");
++ break;
++ case T_FD:
++ strcpy(target_type_name, "FD");
++ break;
++ case T_DEV:
++ strcpy(target_type_name, "DEV");
++ break;
++ case T_NETOBJ:
++ strcpy(target_type_name, "NETOBJ");
++ break;
++ case T_IPC:
++ strcpy(target_type_name, "IPC");
++ break;
++ case T_SCD:
++ strcpy(target_type_name, "SCD");
++ break;
++ case T_USER:
++ strcpy(target_type_name, "USER");
++ break;
++ case T_PROCESS:
++ strcpy(target_type_name, "PROCESS");
++ break;
++ case T_GROUP:
++ strcpy(target_type_name, "GROUP");
++ break;
++ case T_NETDEV:
++ strcpy(target_type_name, "NETDEV");
++ break;
++ case T_NETTEMP:
++ strcpy(target_type_name, "NETTEMP");
++ break;
++ case T_NETTEMP_NT:
++ strcpy(target_type_name, "NETTEMP_NT");
++ break;
++ case T_NONE:
++ strcpy(target_type_name, "NONE");
++ break;
++ default:
++ strcpy(target_type_name, "ERROR!!!");
++ }
++ return (target_type_name);
++}
++
++enum rsbac_target_t get_target_nr(const char *target_name)
++{
++ enum rsbac_target_t i;
++
++ if (!target_name)
++ return (T_NONE);
++ for (i = 0; i < T_NONE; i++) {
++ if (!strcmp(target_name, target_list[i])) {
++ return (i);
++ }
++ }
++ return (T_NONE);
++}
++
++char *get_ipc_target_name(char *ipc_name, enum rsbac_ipc_type_t target)
++{
++ if (!ipc_name)
++ return (NULL);
++ if (target > I_none)
++ strcpy(ipc_name, "ERROR!");
++ else
++ strcpy(ipc_name, ipc_target_list[target]);
++ return (ipc_name);
++}
++
++enum rsbac_ipc_type_t get_ipc_target_nr(const char *ipc_name)
++{
++ enum rsbac_ipc_type_t i;
++
++ if (!ipc_name)
++ return (I_none);
++ for (i = 0; i < I_none; i++) {
++ if (!strcmp(ipc_name, ipc_target_list[i])) {
++ return (i);
++ }
++ }
++ return (I_none);
++}
++
++
++#ifdef __KERNEL__
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(get_switch_target_name);
++#endif
++#endif
++
++char *get_switch_target_name(char *switch_name,
++ enum rsbac_switch_target_t target)
++{
++ if (!switch_name)
++ return (NULL);
++ if (target > SW_NONE)
++ strcpy(switch_name, "ERROR!");
++ else
++ strcpy(switch_name, switch_target_list[target]);
++ return (switch_name);
++}
++
++enum rsbac_switch_target_t get_switch_target_nr(const char *switch_name)
++{
++ enum rsbac_switch_target_t i;
++
++ if (!switch_name)
++ return (SW_NONE);
++ for (i = 0; i < SW_NONE; i++) {
++#ifdef __KERNEL__
++ if (!strncmp
++ (switch_name, switch_target_list[i],
++ strlen(switch_target_list[i])))
++#else
++ if (!strcmp(switch_name, switch_target_list[i]))
++#endif
++ {
++ return (i);
++ }
++ }
++ return (SW_NONE);
++}
++
++
++#ifdef __KERNEL__
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(get_error_name);
++#endif
++#endif
++
++char *get_error_name(char *error_name, int error)
++{
++ if (!error_name)
++ return (NULL);
++#ifndef __KERNEL__
++ if((error == -1) && RSBAC_ERROR(-errno))
++ error = -errno;
++#endif
++ if (RSBAC_ERROR(error))
++ strcpy(error_name, error_list[(-error) - RSBAC_EPERM]);
++ else
++#ifdef __KERNEL__
++ inttostr(error_name, error);
++#else
++ strcpy(error_name, strerror(errno));
++#endif
++ return (error_name);
++}
++
++#ifndef __KERNEL__
++char *get_attribute_param(char *attr_name, enum rsbac_attribute_t attr)
++{
++ if (!attr_name)
++ return (NULL);
++ if (attr > A_none)
++ strcpy(attr_name, "ERROR!");
++ else
++ strcpy(attr_name, attribute_param_list[attr]);
++ return (attr_name);
++}
++#endif
++
++char *get_log_level_name(char *ll_name, enum rsbac_log_level_t target)
++{
++ if (!ll_name)
++ return (NULL);
++ if (target > LL_invalid)
++ strcpy(ll_name, "ERROR!");
++ else
++ strcpy(ll_name, log_level_list[target]);
++ return (ll_name);
++}
++
++enum rsbac_log_level_t get_log_level_nr(const char *ll_name)
++{
++ enum rsbac_log_level_t i;
++
++ if (!ll_name)
++ return (LL_invalid);
++ for (i = 0; i < LL_invalid; i++) {
++ if (!strcmp(ll_name, log_level_list[i])) {
++ return (i);
++ }
++ }
++ return (LL_invalid);
++}
++
++char *get_cap_name(char *name, u_int value)
++{
++ if (!name)
++ return (NULL);
++ if (value > CAP_NONE)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, cap_list[value]);
++ return (name);
++}
++
++int get_cap_nr(const char *name)
++{
++ int i;
++
++ if (!name)
++ return (CAP_NONE);
++ for (i = 0; i < CAP_NONE; i++) {
++ if (!strcmp(name, cap_list[i])) {
++ return (i);
++ }
++ }
++ return (CAP_NONE);
++}
+diff --git a/rsbac/help/helpers.c b/rsbac/help/helpers.c
+new file mode 100644
+index 0000000..4d67b6f
+--- /dev/null
++++ b/rsbac/help/helpers.c
+@@ -0,0 +1,1211 @@
++/************************************* */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2012: */
++/* Amon Ott <ao@rsbac.org> */
++/* Helper functions for all parts */
++/* Last modified: 23/May/2012 */
++/************************************* */
++
++#ifndef __KERNEL__
++#include <stdlib.h>
++#endif
++#include <rsbac/types.h>
++#include <rsbac/error.h>
++#include <rsbac/helpers.h>
++#include <rsbac/rc_types.h>
++#include <rsbac/getname.h>
++#include <rsbac/cap_getname.h>
++#include <rsbac/adf.h>
++
++#ifdef __KERNEL__
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/highmem.h>
++#include <linux/binfmts.h>
++#include <net/sock.h>
++#include <net/af_unix.h>
++#include <rsbac/aci.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/debug.h>
++#ifdef CONFIG_RSBAC_RC
++#include <rsbac/rc_getname.h>
++#endif
++#endif
++#ifndef __KERNEL__
++#include <stdio.h>
++#include <string.h>
++#include <errno.h>
++#include <pwd.h>
++#include <grp.h>
++#endif
++
++int rsbac_get_vset_num(char * sourcename, rsbac_um_set_t * vset_p)
++ {
++ if (!sourcename || !vset_p)
++ return -RSBAC_EINVALIDPOINTER;
++ if (!strcmp(sourcename,"all")) {
++ *vset_p = RSBAC_UM_VIRTUAL_ALL;
++ return 0;
++ }
++ if (!strcmp(sourcename,"auto") || !strcmp(sourcename,"keep")) {
++ *vset_p = RSBAC_UM_VIRTUAL_KEEP;
++ return 0;
++ }
++#ifdef __KERNEL__
++ *vset_p = simple_strtoul(sourcename, NULL, 0);
++#else
++ *vset_p = strtoul(sourcename, NULL, 0);
++#endif
++ if(!*vset_p && strcmp(sourcename,"0"))
++ return -RSBAC_EINVALIDVALUE;
++ if (*vset_p > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++ return 0;
++ }
++
++#ifndef __KERNEL__
++int rsbac_u32_compare(__u32 * a, __u32 * b)
++ {
++ if(*a < *b)
++ return -1;
++ if(*a > *b)
++ return 1;
++ return 0;
++ }
++
++int rsbac_user_compare(const void * a, const void * b)
++ {
++ return rsbac_u32_compare((__u32 *) a, (__u32 *) b);
++ }
++
++int rsbac_group_compare(const void * a, const void * b)
++ {
++ return rsbac_u32_compare((__u32 *) a, (__u32 *) b);
++ }
++
++int rsbac_nettemp_id_compare(const void * a, const void * b)
++ {
++ return rsbac_u32_compare((__u32 *) a, (__u32 *) b);
++ }
++
++int rsbac_dev_compare(const void *desc1, const void *desc2)
++{
++ int result;
++
++ result = memcmp(&((struct rsbac_dev_desc_t *)desc1)->type,
++ &((struct rsbac_dev_desc_t *)desc2)->type,
++ sizeof(((struct rsbac_dev_desc_t *)desc1)->type));
++ if (result)
++ return result;
++ result = memcmp(&((struct rsbac_dev_desc_t *)desc1)->major,
++ &((struct rsbac_dev_desc_t *)desc2)->major,
++ sizeof(((struct rsbac_dev_desc_t *)desc1)->major));
++ if (result)
++ return result;
++ return memcmp(&((struct rsbac_dev_desc_t *)desc1)->minor,
++ &((struct rsbac_dev_desc_t *)desc2)->minor,
++ sizeof(((struct rsbac_dev_desc_t *)desc1)->minor));
++}
++#endif
++
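++/* Small integer-to-decimal-string helper; note it only handles values of up to five digits (|i| <= 99999) plus an optional sign. */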
++char * inttostr(char * str, int i)
++ {
++ int j = 0;
++
++ if(!str)
++ return(NULL);
++
++ if (i<0)
++ {
++ str[j] = '-';
++ j++;
++ i = -i;
++ }
++ if (i>=10000)
++ {
++ str[j] = '0' + (i / 10000);
++ j++;
++ }
++ if (i>=1000)
++ {
++ str[j] = '0' + ((i % 10000) / 1000);
++ j++;
++ }
++ if (i>=100)
++ {
++ str[j] = '0' + ((i % 1000) / 100);
++ j++;
++ }
++ if (i>=10)
++ {
++ str[j] = '0' + ((i % 100) / 10);
++ j++;
++ }
++ str[j] = '0' + (i % 10);
++ j++;
++ str[j] = 0;
++ return (str);
++ };
++
++char * ulongtostr(char * str, u_long i)
++ {
++ int j = 0;
++ u_long k = 1000000000;
++
++ if(!str)
++ return(NULL);
++
++ if (i>=k)
++ {
++ str[j] = '0' + ((i / k) % 100);
++ j++;
++ }
++ k /= 10;
++
++ while (k>1)
++ {
++ if (i>=k)
++ {
++ str[j] = '0' + ((i % (k*10)) / k);
++ j++;
++ }
++ k /= 10;
++ };
++
++ str[j] = '0' + (i % 10);
++ j++;
++ str[j] = 0;
++ return (str);
++ };
++
++char * longtostr(char * str, long i)
++ {
++ int j = 0;
++ u_long k = 1000000000;
++
++ if(!str)
++ return(NULL);
++
++ if (i<0)
++ {
++ str[0] = '-';
++ j = 1;
++ i = -i;
++ }
++ if (i>=k)
++ {
++ str[j] = '0' + ((i / k) % 100);
++ j++;
++ }
++ k /= 10;
++
++ while (k>1)
++ {
++ if (i>=k)
++ {
++ str[j] = '0' + ((i % (k*10)) / k);
++ j++;
++ }
++ k /= 10;
++ };
++
++ str[j] = '0' + (i % 10);
++ j++;
++ str[j] = 0;
++ return (str);
++ };
++
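++/* The u64tostr*() helpers below render a bit mask as a '0'/'1' string with the highest bit number first; the matching strtou64*() functions parse such strings back. */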
++char * u64tostrmac(char * str, __u64 i)
++ {
++ int j = 0;
++ __u64 k;
++
++ if(!str)
++ return(NULL);
++
++ k = 1;
++ for(j = RSBAC_MAC_MAX_CAT;j >= 0;j--)
++ {
++ if (i & k)
++ str[j] = '1';
++ else
++ str[j] = '0';
++ k<<=1;
++ };
++
++ str[RSBAC_MAC_NR_CATS] = 0;
++ return (str);
++ };
++
++#ifndef __KERNEL__
++
++void error_exit(int error)
++ {
++ char tmp1[80];
++
++ if(error<0)
++ {
++ get_error_name(tmp1,error);
++ fprintf(stderr, "Error: %s\n", tmp1);
++ exit(1);
++ }
++ }
++
++void show_error(int error)
++ {
++ char tmp1[80];
++
++ if(error<0)
++ {
++ get_error_name(tmp1,error);
++ fprintf(stderr, "Error: %s\n", tmp1);
++ }
++ }
++
++int rsbac_get_uid_name(rsbac_uid_t * uid, char * name, char * sourcename)
++ {
++ struct passwd * user_info_p;
++ rsbac_uid_t uid_i;
++
++ if(!(user_info_p = getpwnam(sourcename)))
++ {
++ uid_i = strtoul(sourcename,0,10);
++ if( !uid_i
++ && strcmp("0", sourcename)
++ )
++ {
++ return -RSBAC_EINVALIDVALUE;
++ }
++ if(name)
++ {
++ if((user_info_p = getpwuid(uid_i)))
++ strcpy(name, user_info_p->pw_name);
++ else
++ sprintf(name, "%u", uid_i);
++ }
++ }
++ else
++ {
++ uid_i = user_info_p->pw_uid;
++ if(name)
++ strcpy(name, user_info_p->pw_name);
++ }
++ if(uid)
++ *uid = uid_i;
++ return 0;
++ }
++
++int rsbac_get_fullname(char * fullname, rsbac_uid_t uid)
++ {
++ struct passwd * user_info_p;
++ rsbac_uid_t uid_i;
++
++ if(!fullname)
++ return -RSBAC_EINVALIDPOINTER;
++ if(!(user_info_p = getpwuid(uid)))
++ {
++ sprintf(fullname, "%u", uid);
++ }
++ else
++ {
++ strcpy(fullname, user_info_p->pw_gecos);
++ }
++ return 0;
++ }
++
++char * get_user_name(rsbac_uid_t user, char * name)
++ {
++ struct passwd * user_info_p;
++
++ if((user_info_p = getpwuid(user)))
++ {
++ strcpy(name, user_info_p->pw_name);
++ }
++ else
++ {
++ sprintf(name, "%u", user);
++ }
++ return name;
++ }
++
++char * get_group_name(rsbac_gid_t group, char * name)
++ {
++ struct group * group_info_p;
++
++ if((group_info_p = getgrgid(group)))
++ {
++ strcpy(name, group_info_p->gr_name);
++ }
++ else
++ {
++ sprintf(name, "%u", group);
++ }
++ return name;
++ }
++
++int rsbac_get_gid_name(rsbac_gid_t * gid, char * name, char * sourcename)
++ {
++ struct group * group_info_p;
++ rsbac_gid_t gid_i;
++
++ if(!(group_info_p = getgrnam(sourcename)))
++ {
++ gid_i = strtoul(sourcename,0,10);
++ if( !gid_i
++ && strcmp("0", sourcename)
++ )
++ {
++ return -RSBAC_EINVALIDVALUE;
++ }
++ if(name)
++ {
++ if((group_info_p = getgrgid(gid_i)))
++ strcpy(name, group_info_p->gr_name);
++ else
++ sprintf(name, "%u", gid_i);
++ }
++ }
++ else
++ {
++ gid_i = group_info_p->gr_gid;
++ if(name)
++ strcpy(name, group_info_p->gr_name);
++ }
++ if(gid)
++ *gid = gid_i;
++ return 0;
++ }
++
++
++char * u64tostrlog(char * str, __u64 i)
++ {
++ int j = 0;
++ __u64 k;
++
++ if(!str)
++ return(NULL);
++
++ k = 1;
++ for(j = R_NONE - 1;j >= 0;j--)
++ {
++ if (i & k)
++ str[j] = '1';
++ else
++ str[j] = '0';
++ k<<=1;
++ };
++
++ str[R_NONE] = 0;
++ return (str);
++ };
++
++__u64 strtou64log(char * str, __u64 * i_p)
++ {
++ int j;
++ __u64 k = 1, res=0;
++
++ if(!str)
++ return(0);
++
++ if (strlen(str) < R_NONE)
++ return(-1);
++ for(j=R_NONE-1;j>=0;j--)
++ {
++ if(str[j] != '0')
++ {
++ res |= k;
++ }
++ k <<= 1;
++ }
++ for(j=R_NONE;j<64;j++)
++ {
++ res |= k;
++ k <<= 1;
++ }
++ *i_p = res;
++ return(res);
++ };
++
++char * u64tostrrc(char * str, __u64 i)
++ {
++ int j = 0;
++ __u64 k;
++
++ if(!str)
++ return(NULL);
++
++ k = 1;
++ for(j = 63;j >= 0;j--)
++ {
++ if (i & k)
++ str[j] = '1';
++ else
++ str[j] = '0';
++ k<<=1;
++ };
++
++ str[64] = 0;
++ return (str);
++ };
++
++__u64 strtou64rc(char * str, __u64 * i_p)
++ {
++ int j;
++ __u64 k = 1, res=0;
++
++ if(!str)
++ return(0);
++
++ if (strlen(str) < 64)
++ return(-1);
++ for(j=63;j>=0;j--)
++ {
++ if(str[j] != '0')
++ {
++ res |= k;
++ }
++ k <<= 1;
++ }
++ *i_p = res;
++ return(res);
++ };
++
++char * u64tostrrcr(char * str, __u64 i)
++ {
++ int j = 0;
++ __u64 k;
++
++ if(!str)
++ return(NULL);
++
++ k = 1;
++ for(j = RCR_NONE - 1;j >= 0;j--)
++ {
++ if (i & k)
++ str[j] = '1';
++ else
++ str[j] = '0';
++ k<<=1;
++ };
++
++ str[RCR_NONE] = 0;
++ return (str);
++ };
++
++__u64 strtou64rcr(char * str, __u64 * i_p)
++ {
++ int j;
++ __u64 k = 1, res=0;
++
++ if(!str)
++ return(0);
++
++ if (strlen(str) < RCR_NONE)
++ return(-1);
++ for(j=RCR_NONE-1;j>=0;j--)
++ {
++ if(str[j] != '0')
++ {
++ res |= k;
++ }
++ k <<= 1;
++ }
++ for(j=RCR_NONE;j<64;j++)
++ {
++ res |= k;
++ k <<= 1;
++ }
++ *i_p = res;
++ return(res);
++ };
++
++__u64 strtou64mac(char * str, __u64 * i_p)
++ {
++ int j;
++ __u64 k = 1, res=0;
++
++ if(!str)
++ return(0);
++
++ if (strlen(str) < RSBAC_MAC_NR_CATS)
++ return(-1);
++ for(j=RSBAC_MAC_MAX_CAT;j>=0;j--)
++ {
++ if(str[j] != '0')
++ {
++ res |= k;
++ }
++ k <<= 1;
++ }
++ for(j=RSBAC_MAC_NR_CATS;j<64;j++)
++ {
++ res |= k;
++ k <<= 1;
++ }
++ *i_p = res;
++ return(res);
++ };
++
++__u64 strtou64acl(char * str, __u64 * i_p)
++ {
++ int j;
++ __u64 k = 1, res=0;
++
++ if(!str)
++ return(0);
++
++ if (strlen(str) < (ACLR_NONE - 1))
++ return(-1);
++ for(j=ACLR_NONE-1;j>=0;j--)
++ {
++ if(str[j] != '0')
++ {
++ res |= k;
++ }
++ k <<= 1;
++ }
++ for(j=ACLR_NONE-1;j<64;j++)
++ {
++ res |= k;
++ k <<= 1;
++ }
++ *i_p = res;
++ return(res);
++ }
++
++int strtodevdesc(char * str, struct rsbac_dev_desc_t * dev_p)
++ {
++ char * p;
++ char * c;
++
++ if(!str)
++ return -RSBAC_EINVALIDVALUE;
++ if(!strcmp(str, ":DEFAULT:"))
++ {
++ *dev_p = RSBAC_ZERO_DEV_DESC;
++ return 0;
++ }
++ p = str;
++ c = strchr(p,':');
++ switch(*p)
++ {
++ case 'b':
++ case 'B':
++ if(c)
++ dev_p->type = D_block;
++ else
++ dev_p->type = D_block_major;
++ break;
++ case 'c':
++ case 'C':
++ if(c)
++ dev_p->type = D_char;
++ else
++ dev_p->type = D_char_major;
++ break;
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++ p++;
++ dev_p->major = strtoul(p,0,0);
++ if(c)
++ {
++ c++;
++ dev_p->minor = strtoul(c,0,0);
++ }
++ else
++ dev_p->minor = 0;
++ return 0;
++ }
++
++char * devdesctostr(char * str, struct rsbac_dev_desc_t dev)
++ {
++ if(RSBAC_IS_ZERO_DEV_DESC(dev))
++ {
++ sprintf(str, ":DEFAULT:");
++ return str;
++ }
++ switch(dev.type)
++ {
++ case D_block:
++ case D_char:
++ sprintf(str, "%c%u:%u", 'b' + dev.type, dev.major, dev.minor);
++ break;
++ case D_block_major:
++ case D_char_major:
++ sprintf(str, "%c%u",
++ 'b' + dev.type - (D_block_major - D_block),
++ dev.major);
++ break;
++ default:
++ sprintf(str, "invalid!");
++ }
++ return str;
++ }
++#endif /* ifndef __KERNEL__ */
++
++char * u64tostracl(char * str, __u64 i)
++ {
++ int j = 0;
++ __u64 k;
++
++ if(!str)
++ return(NULL);
++
++ k = 1;
++ for(j = ACLR_NONE - 1;j >= 0;j--)
++ {
++ if (i & k)
++ str[j] = '1';
++ else
++ str[j] = '0';
++ k<<=1;
++ };
++
++ str[ACLR_NONE] = 0;
++ return (str);
++ };
++
++char * u32tostrcap(char * str, __u32 i)
++ {
++ int j = 0;
++ __u32 k;
++
++ if(!str)
++ return(NULL);
++
++ k = 1;
++ for(j = CAP_NONE - 1;j >= 0;j--)
++ {
++ if (i & k)
++ str[j] = '1';
++ else
++ str[j] = '0';
++ k<<=1;
++ };
++
++ str[CAP_NONE] = 0;
++ return (str);
++ };
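++/* Like u32tostrcap(), but for the two-word rsbac_cap_vector_t: str[i] holds the bit for capability number CAP_NONE-1-i. */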
++int kcaptostrcap(char * str, rsbac_cap_vector_t i)
++ {
++ int j = 0;
++ int off;
++ __u32 k;
++
++ if(!str)
++ return(-1);
++
++ k = 1;
++ for(j = CAP_NONE - 1;j >= 32;j--)
++ {
++ if (i.cap[1] & k)
++ str[j-32] = '1';
++ else
++ str[j-32] = '0';
++ k<<=1;
++ };
++ k = 1;
++ off = CAP_NONE-32;
++ for(j = 31+off;j >= off;j--)
++ {
++ if (i.cap[0] & k)
++ str[j] = '1';
++ else
++ str[j] = '0';
++ k<<=1;
++ };
++
++ str[CAP_NONE] = 0;
++
++ return 0;
++ };
++
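++/* Reverse of kcaptostrcap(): ORs the '1' positions of str into the cap vector, so callers should clear *i beforehand. */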
++int strcaptokcap(char * str, rsbac_cap_vector_t * i)
++ {
++ int j;
++ int off;
++ __u32 k = 1;
++
++ if(!str)
++ return -1;
++ if (strlen(str) < CAP_NONE)
++ return -1;
++
++ for(j = CAP_NONE-1; j >= 32; j--)
++ {
++ if(str[j-32] != '0')
++ {
++ i->cap[1] |= k;
++ }
++ k <<= 1;
++ }
++ k = 1;
++ off = CAP_NONE-32;
++ for(j =31+off ;j >= off; j--) {
++ if(str[j] != '0') {
++ i->cap[0] |= k;
++ }
++ k <<= 1;
++ }
++/* for(j=CAP_NONE;j<32;j++)
++ {
++ res |= k;
++ k <<= 1;
++ }*/
++/* *i_p = res;*/
++
++ return 0;
++ }
++__u32 strtou32cap(char * str, __u32 * i_p)
++ {
++ int j;
++ __u32 k = 1, res=0;
++
++ if(!str)
++ return(0);
++
++ if (strlen(str) < CAP_NONE)
++ return(-1);
++ for(j=CAP_NONE-1;j>=0;j--)
++ {
++ if(str[j] != '0')
++ {
++ res |= k;
++ }
++ k <<= 1;
++ }
++ for(j=CAP_NONE;j<32;j++)
++ {
++ res |= k;
++ k <<= 1;
++ }
++ *i_p = res;
++ return(res);
++ };
++
++
++#ifdef __KERNEL__
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++rsbac_um_set_t rsbac_get_vset(void)
++ {
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val;
++
++ i_tid.process = task_pid(current);
++ if(rsbac_get_attr(SW_GEN,
++ T_PROCESS,
++ i_tid,
++ A_vset,
++ &i_attr_val,
++ TRUE))
++ return 0;
++ else
++ return i_attr_val.vset;
++ }
++#endif
++
++/* find the current owner of this process */
++int rsbac_get_owner(rsbac_uid_t * user_p)
++ {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ *user_p = RSBAC_GEN_UID(rsbac_get_vset(), current_uid());
++#else
++ *user_p = current_uid();
++#endif
++ return 0;
++ }
++
++void rsbac_ds_get_error(const char * function, enum rsbac_attribute_t attr)
++ {
++ if(!function)
++ return;
++ if(attr != A_none)
++ {
++ char tmp[80];
++
++ get_attribute_name(tmp, attr);
++ rsbac_printk(KERN_WARNING
++ "%s: rsbac_get_attr() for %s returned error!\n",
++ function, tmp);
++ }
++ else
++ {
++ rsbac_printk(KERN_WARNING
++ "%s: rsbac_get_attr() returned error!\n",
++ function);
++ }
++ }
++
++void rsbac_ds_get_error_num(const char * function, enum rsbac_attribute_t attr, int err)
++ {
++ char tmp2[80];
++
++ if(!function)
++ return;
++ if(attr != A_none)
++ {
++ char tmp[80];
++
++ get_attribute_name(tmp, attr);
++ get_error_name(tmp2, err);
++ rsbac_printk(KERN_WARNING
++ "%s: rsbac_get_attr() for %s returned error %s!\n",
++ function, tmp, tmp2);
++ }
++ else
++ {
++ get_error_name(tmp2, err);
++ rsbac_printk(KERN_WARNING
++ "%s: rsbac_get_attr() returned error %s!\n",
++ function, tmp2);
++ }
++ }
++
++void rsbac_ds_set_error(const char * function, enum rsbac_attribute_t attr)
++ {
++ if(!function)
++ return;
++ if(attr != A_none)
++ {
++ char tmp[80];
++
++ get_attribute_name(tmp, attr);
++ rsbac_printk(KERN_WARNING
++ "%s: rsbac_set_attr() for %s returned error!\n",
++ function, tmp);
++ }
++ else
++ {
++ rsbac_printk(KERN_WARNING
++ "%s: rsbac_set_attr() returned error!\n",
++ function);
++ }
++ }
++
++void rsbac_ds_set_error_num(const char * function, enum rsbac_attribute_t attr, int err)
++ {
++ char tmp2[80];
++
++ if(!function)
++ return;
++ if(attr != A_none)
++ {
++ char tmp[80];
++
++ get_attribute_name(tmp, attr);
++ get_error_name(tmp2, err);
++ rsbac_printk(KERN_WARNING
++ "%s: rsbac_set_attr() for %s returned error %s!\n",
++ function, tmp, tmp2);
++ }
++ else
++ {
++ get_error_name(tmp2, err);
++ rsbac_printk(KERN_WARNING
++ "%s: rsbac_set_attr() returned error %s!\n",
++ function, tmp2);
++ }
++ }
++
++#ifdef CONFIG_RSBAC_RC
++void rsbac_rc_ds_get_error(const char * function, enum rsbac_rc_item_t item)
++ {
++ if(!function)
++ return;
++ if(item != RI_none)
++ {
++ char tmp[80];
++
++ get_rc_item_name(tmp, item);
++ rsbac_printk(KERN_WARNING
++ "%s: rsbac_rc_get_item() for %s returned error!\n",
++ function, tmp);
++ }
++ else
++ {
++ rsbac_printk(KERN_WARNING
++ "%s: rsbac_rc_get_item() returned error!\n",
++ function);
++ }
++ }
++
++void rsbac_rc_ds_set_error(const char * function, enum rsbac_rc_item_t item)
++ {
++ if(!function)
++ return;
++ if(item != RI_none)
++ {
++ char tmp[80];
++
++ get_rc_item_name(tmp, item);
++ rsbac_printk(KERN_WARNING
++ "%s: rsbac_rc_set_item() for %s returned error!\n",
++ function, tmp);
++ }
++ else
++ {
++ rsbac_printk(KERN_WARNING
++ "%s: rsbac_rc_set_item() returned error!\n",
++ function);
++ }
++ }
++#endif
++
++int rsbac_handle_filldir(const struct file *file, const char *name, const unsigned int namlen, const ino_t ino)
++{
++ enum rsbac_target_t rsbac_target = T_NONE;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ struct dentry *obj_dentry = NULL;
++ int err = 1;
++
++ if(!rsbac_initialized) {
++ goto old_func;
++ }
++
++ if(!name || !file || !file->f_dentry || !file->f_dentry->d_sb
++ || !MAJOR(file->f_dentry->d_sb->s_dev))
++ goto old_func;
++
++ if (in_interrupt())
++ {
++ printk(KERN_WARNING "rsbac_handle_filldir(): called from interrupt: pid %u(%s)!\n",
++ current->pid, current->comm);
++ goto old_func;
++ }
++
++ obj_dentry = rsbac_lookup_one_len(name, file->f_dentry, namlen);
++ if (!obj_dentry || IS_ERR(obj_dentry)) {
++ goto old_func;
++ }
++ if (!obj_dentry->d_inode || IS_ERR(obj_dentry->d_inode)) {
++ goto out_dput;
++ }
++ if (!obj_dentry->d_inode->i_mode || !obj_dentry->d_inode->i_sb || !obj_dentry->d_inode->i_sb->s_dev || !ino) {
++ goto out_dput;
++ }
++ if (!obj_dentry->d_sb || !obj_dentry->d_sb->s_magic) {
++ goto out_dput;
++ }
++ rsbac_pr_debug(aef, "[readdir(), sys_getdents()]: calling ADF\n");
++
++ if (S_ISFIFO(obj_dentry->d_inode->i_mode)) {
++ if(obj_dentry->d_sb->s_magic != PIPEFS_MAGIC) {
++ rsbac_target = T_FIFO;
++ rsbac_target_id.fifo.device = obj_dentry->d_inode->i_sb->s_dev;
++ rsbac_target_id.fifo.inode = ino;
++ rsbac_target_id.fifo.dentry_p = obj_dentry;
++ }
++ } else
++ if (S_ISDIR(obj_dentry->d_inode->i_mode)) {
++ rsbac_target = T_DIR;
++ rsbac_target_id.dir.device = obj_dentry->d_inode->i_sb->s_dev;
++ rsbac_target_id.dir.inode = ino;
++ rsbac_target_id.dir.dentry_p = obj_dentry;
++ } else
++ if (S_ISLNK(obj_dentry->d_inode->i_mode)) {
++ rsbac_target = T_SYMLINK;
++ rsbac_target_id.file.device = obj_dentry->d_inode->i_sb->s_dev;
++ rsbac_target_id.file.inode = ino;
++ rsbac_target_id.file.dentry_p = obj_dentry;
++ } else
++ if (S_ISSOCK(obj_dentry->d_inode->i_mode)) {
++ if (obj_dentry->d_inode->i_sb->s_magic != SOCKFS_MAGIC) {
++ rsbac_target = T_UNIXSOCK;
++ rsbac_target_id.unixsock.device = obj_dentry->d_inode->i_sb->s_dev;
++ rsbac_target_id.unixsock.inode = ino;
++ rsbac_target_id.unixsock.dentry_p = obj_dentry;
++ }
++ } else {
++ rsbac_target = T_FILE;
++ rsbac_target_id.file.device = obj_dentry->d_inode->i_sb->s_dev;
++ rsbac_target_id.file.inode = ino;
++ rsbac_target_id.file.dentry_p = obj_dentry;
++ }
++ rsbac_attribute_value.dummy = 0;
++ if (rsbac_target != T_NONE)
++ if (!rsbac_adf_request(R_SEARCH,
++ task_pid(current),
++ rsbac_target,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = 0;
++ goto out_dput;
++ }
++
++out_dput:
++ if (obj_dentry)
++ dput(obj_dentry);
++old_func:
++ return err;
++}
++int rsbac_handle_rw_req(const struct file *file, struct rsbac_rw_req *rsbac_rw_req_obj)
++{
++ int err = 1;
++
++ if(!rsbac_initialized) {
++ goto out;
++ }
++
++/* if (rsbac_rw_req_obj->rsbac_target != T_NONE){printk("i'm here! going out because of target ! =T_NONE\n");
++ goto out;}
++*/
++ if(!file || !file->f_dentry || !file->f_dentry->d_sb
++ || !MAJOR(file->f_dentry->d_sb->s_dev)
++ || !file->f_dentry->d_sb->s_magic
++ || !file->f_dentry->d_inode
++ || IS_ERR(file->f_dentry->d_inode)
++ || !file->f_dentry->d_inode->i_mode
++ || !file->f_dentry->d_inode->i_ino)
++ goto out;
++
++ if (in_interrupt())
++ {
++ printk(KERN_WARNING "rsbac_handle_rw_req(): called from interrupt: pid %u(%s)!\n",
++ current->pid, current->comm);
++ goto out;
++ }
++
++ rsbac_pr_debug(aef, "rsbac_handle_rw_req(): calling ADF\n");
++
++ rsbac_rw_req_obj->rsbac_attribute = A_none;
++ rsbac_rw_req_obj->rsbac_attribute_value.dummy = 0;
++
++ if (S_ISFIFO(file->f_dentry->d_inode->i_mode)) {
++ if(file->f_dentry->d_sb->s_magic != PIPEFS_MAGIC) {
++ rsbac_rw_req_obj->rsbac_target = T_FIFO;
++ rsbac_rw_req_obj->rsbac_target_id.fifo.device = file->f_dentry->d_inode->i_sb->s_dev;
++ rsbac_rw_req_obj->rsbac_target_id.fifo.inode = file->f_dentry->d_inode->i_ino;
++ rsbac_rw_req_obj->rsbac_target_id.fifo.dentry_p = file->f_dentry;
++ }
++ } else
++ if (S_ISREG(file->f_dentry->d_inode->i_mode)) {
++ rsbac_rw_req_obj->rsbac_target = T_FILE;
++ rsbac_rw_req_obj->rsbac_target_id.file.device = file->f_dentry->d_inode->i_sb->s_dev;
++ rsbac_rw_req_obj->rsbac_target_id.file.inode = file->f_dentry->d_inode->i_ino;
++ rsbac_rw_req_obj->rsbac_target_id.file.dentry_p = file->f_dentry;
++ } else
++ if (S_ISSOCK(file->f_dentry->d_inode->i_mode)) {
++ struct socket * sock = SOCKET_I(file->f_dentry->d_inode);
++ if (sock->ops && (sock->ops->family == AF_UNIX)) {
++ if (sock->sk) {
++ if (unix_sk(unix_sk(sock->sk)->peer)) {
++ if (unix_sk(unix_sk(sock->sk)->peer)->path.dentry && unix_sk(unix_sk(sock->sk)->peer)->path.dentry->d_inode) {
++ rsbac_rw_req_obj->rsbac_target = T_UNIXSOCK;
++ rsbac_rw_req_obj->rsbac_target_id.unixsock.device = unix_sk(unix_sk(sock->sk)->peer)->path.dentry->d_sb->s_dev;
++ rsbac_rw_req_obj->rsbac_target_id.unixsock.inode = unix_sk(unix_sk(sock->sk)->peer)->path.dentry->d_inode->i_ino;
++ rsbac_rw_req_obj->rsbac_target_id.unixsock.dentry_p = unix_sk(unix_sk(sock->sk)->peer)->path.dentry;
++ } else {
++ rsbac_rw_req_obj->rsbac_target = T_IPC;
++ rsbac_rw_req_obj->rsbac_target_id.ipc.type = I_anonunix;
++ if (unix_sk(unix_sk(sock->sk)->peer)->path.dentry
++ && unix_sk(unix_sk(sock->sk)->peer)->path.dentry->d_inode
++ && SOCKET_I(unix_sk(unix_sk(sock->sk)->peer)->path.dentry->d_inode)->file
++ && SOCKET_I(unix_sk(unix_sk(sock->sk)->peer)->path.dentry->d_inode)->file->f_dentry
++ && SOCKET_I(unix_sk(unix_sk(sock->sk)->peer)->path.dentry->d_inode)->file->f_dentry->d_inode)
++ rsbac_rw_req_obj->rsbac_target_id.ipc.id.id_nr = SOCKET_I(unix_sk(unix_sk(sock->sk)->peer)->path.dentry->d_inode)->file->f_dentry->d_inode->i_ino;
++ else
++ if (sock->file && sock->file->f_dentry && sock->file->f_dentry->d_inode)
++ rsbac_rw_req_obj->rsbac_target_id.ipc.id.id_nr = sock->file->f_dentry->d_inode->i_ino;
++ else
++ rsbac_rw_req_obj->rsbac_target_id.ipc.id.id_nr = 0;
++ }
++ } else {
++ if (unix_sk(sock->sk)->path.dentry && unix_sk(sock->sk)->path.dentry->d_inode) {
++ rsbac_rw_req_obj->rsbac_target = T_UNIXSOCK;
++ rsbac_rw_req_obj->rsbac_target_id.unixsock.device = unix_sk(sock->sk)->path.dentry->d_sb->s_dev;
++ rsbac_rw_req_obj->rsbac_target_id.unixsock.inode = unix_sk(sock->sk)->path.dentry->d_inode->i_ino;
++ rsbac_rw_req_obj->rsbac_target_id.unixsock.dentry_p = unix_sk(sock->sk)->path.dentry;
++ } else {
++ rsbac_rw_req_obj->rsbac_target = T_IPC;
++ rsbac_rw_req_obj->rsbac_target_id.ipc.type = I_anonunix;
++ if (sock->file && sock->file->f_dentry && sock->file->f_dentry->d_inode)
++ rsbac_rw_req_obj->rsbac_target_id.ipc.id.id_nr = sock->file->f_dentry->d_inode->i_ino;
++ else
++ rsbac_rw_req_obj->rsbac_target_id.ipc.id.id_nr = 0;
++ }
++ }
++ if (sock->sk->sk_peer_pid) {
++ rsbac_rw_req_obj->rsbac_attribute = A_process;
++ rsbac_rw_req_obj->rsbac_attribute_value.process = sock->sk->sk_peer_pid;
++ }
++ else if (unix_sk(sock->sk)->peer && unix_sk(sock->sk)->peer->sk_peer_pid) {
++ rsbac_rw_req_obj->rsbac_attribute = A_process;
++ rsbac_rw_req_obj->rsbac_attribute_value.process = unix_sk(sock->sk)->peer->sk_peer_pid;
++ } else {
++ rsbac_rw_req_obj->rsbac_attribute = A_sock_type;
++ rsbac_rw_req_obj->rsbac_attribute_value.sock_type = sock->type;
++ }
++ }
++ }
++ } else
++ if (S_ISBLK(file->f_dentry->d_inode->i_mode)) {
++ rsbac_rw_req_obj->rsbac_target = T_DEV;
++ rsbac_rw_req_obj->rsbac_target_id.dev.type = D_block;
++ rsbac_rw_req_obj->rsbac_target_id.dev.major = RSBAC_MAJOR(file->f_dentry->d_inode->i_rdev);
++ rsbac_rw_req_obj->rsbac_target_id.dev.minor = RSBAC_MINOR(file->f_dentry->d_inode->i_rdev);
++ } else
++ if (S_ISCHR(file->f_dentry->d_inode->i_mode)) {
++ rsbac_rw_req_obj->rsbac_target = T_DEV;
++ rsbac_rw_req_obj->rsbac_target_id.dev.type = D_char;
++ rsbac_rw_req_obj->rsbac_target_id.dev.major = RSBAC_MAJOR(file->f_dentry->d_inode->i_rdev);
++ rsbac_rw_req_obj->rsbac_target_id.dev.minor = RSBAC_MINOR(file->f_dentry->d_inode->i_rdev);
++ }
++/*
++ printk("i_mode %i\n", file->f_dentry->d_inode->i_mode);
++ printk("req %i %i\n", rsbac_rw_req_obj->rsbac_request, rsbac_rw_req_obj->rsbac_target);
++ if (S_ISCHR(file->f_dentry->d_inode->i_mode))
++ printk("CHR");
++ if (S_ISBLK(file->f_dentry->d_inode->i_mode))
++ printk("BLK");
++ if (S_ISSOCK(file->f_dentry->d_inode->i_mode))
++ printk("SOCK");
++ if (S_ISREG(file->f_dentry->d_inode->i_mode))
++ printk("REG");
++ if (S_ISFIFO(file->f_dentry->d_inode->i_mode))
++ printk("FIFO");
++*/
++ if (rsbac_rw_req_obj->rsbac_target != T_NONE)
++ if (!rsbac_adf_request(rsbac_rw_req_obj->rsbac_request,
++ task_pid(current),
++ rsbac_rw_req_obj->rsbac_target,
++ rsbac_rw_req_obj->rsbac_target_id,
++ A_none,
++ rsbac_rw_req_obj->rsbac_attribute_value))
++ {
++ err = 0;
++ goto out;
++ }
++
++out:
++ return err;
++}
++
++int rsbac_handle_rw_up(struct rsbac_rw_req *rsbac_rw_req_obj)
++{
++ int err = 0;
++
++ if (rsbac_rw_req_obj->rsbac_target != T_NONE) {
++ rsbac_rw_req_obj->rsbac_new_target_id.dummy = 0;
++ err = rsbac_adf_set_attr(rsbac_rw_req_obj->rsbac_request,
++ task_pid(current),
++ rsbac_rw_req_obj->rsbac_target,
++ rsbac_rw_req_obj->rsbac_target_id,
++ T_NONE,
++ rsbac_rw_req_obj->rsbac_new_target_id,
++ rsbac_rw_req_obj->rsbac_attribute,
++ rsbac_rw_req_obj->rsbac_attribute_value);
++ if (err)
++ rsbac_printk(KERN_WARNING "rsbac_handle_rw_up(): rsbac_adf_set_attr() returned error\n");
++ }
++
++ return err;
++}
++#endif
++/* __KERNEL__ */
+diff --git a/rsbac/help/jail_getname.c b/rsbac/help/jail_getname.c
+new file mode 100644
+index 0000000..dde82bc
+--- /dev/null
++++ b/rsbac/help/jail_getname.c
+@@ -0,0 +1,61 @@
++/*********************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2011: */
++/* Amon Ott <ao@rsbac.org> */
++/* Getname functions for JAIL module */
++/* Last modified: 12/Jul/2011 */
++/*********************************** */
++
++#include <rsbac/getname.h>
++#include <rsbac/jail_getname.h>
++#include <rsbac/helpers.h>
++#include <rsbac/error.h>
++
++#ifdef __KERNEL__
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <rsbac/debug.h>
++#include <rsbac/aci.h>
++#include <rsbac/rkmem.h>
++#else
++#include <string.h>
++#endif
++
++#ifdef __KERNEL__
++#ifdef CONFIG_RSBAC_JAIL_LOG_MISSING
++void rsbac_jail_log_missing_cap(int cap)
++ {
++ char * tmp;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ i_tid.process = task_pid(current);
++ if (rsbac_get_attr(SW_JAIL,
++ T_PROCESS,
++ i_tid,
++ A_jail_max_caps,
++ &i_attr_val1,
++ FALSE))
++ {
++ rsbac_ds_get_error("rsbac_jail_log_missing_cap()", A_jail_max_caps);
++ }
++ else
++ {
++ if(!((i_attr_val1.jail_max_caps.cap[0] & (1 << cap)) || (i_attr_val1.jail_max_caps.cap[1] & (1 << cap))))
++ {
++ tmp = rsbac_kmalloc(RSBAC_MAXNAMELEN);
++ if(tmp)
++ {
++ get_cap_name(tmp, cap);
++ rsbac_printk(KERN_DEBUG
++ "capable(): pid %u(%.15s), uid %u: missing jail_max_cap %s!\n",
++ current->pid, current->comm,
++ current_uid(),
++ tmp);
++ rsbac_kfree(tmp);
++ }
++ }
++ }
++ }
++#endif
++#endif
+diff --git a/rsbac/help/net_getname.c b/rsbac/help/net_getname.c
+new file mode 100644
+index 0000000..4559e36
+--- /dev/null
++++ b/rsbac/help/net_getname.c
+@@ -0,0 +1,352 @@
++/*
++ * net_getname.c: Getname functions for the Network
++ *
++ * Author and Copyright (C) 1999-2009 Amon Ott <ao@rsbac.org>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2.
++ *
++ * Last modified 03/Feb/2009.
++ */
++
++#include <rsbac/types.h>
++#include <rsbac/getname.h>
++#include <rsbac/net_getname.h>
++#include <rsbac/helpers.h>
++#include <rsbac/error.h>
++
++#ifdef __KERNEL__
++#include <linux/string.h>
++#include <linux/module.h>
++#else
++#include <linux/socket.h>
++#include <string.h>
++#include <stdio.h>
++#endif
++
++static char net_temp_syscall_list[NTS_none + 1][19] = {
++ "new_template",
++ "copy_template",
++ "delete_template",
++ "check_id",
++ "get_address",
++ "get_address_family",
++ "get_type",
++ "get_protocol",
++ "get_netdev",
++ "get_ports",
++ "get_name",
++ "set_address",
++ "set_address_family",
++ "set_type",
++ "set_protocol",
++ "set_netdev",
++ "set_ports",
++ "set_name",
++ "none"
++};
++
++static char net_family_list[AF_MAX + 1][19] = {
++ "ANY", /* 0 */
++ "UNIX", /* 1 Unix domain sockets */
++ "INET", /* 2 Internet IP Protocol */
++ "AX25", /* 3 Amateur Radio AX.25 */
++ "IPX", /* 4 Novell IPX */
++ "APPLETALK", /* 5 AppleTalk DDP */
++ "NETROM", /* 6 Amateur Radio NET/ROM */
++ "BRIDGE", /* 7 Multiprotocol bridge */
++ "ATMPVC", /* 8 ATM PVCs */
++ "X25", /* 9 Reserved for X.25 project */
++ "INET6", /* 10 IP version 6 */
++ "ROSE", /* 11 Amateur Radio X.25 PLP */
++ "DECnet", /* 12 Reserved for DECnet project */
++ "NETBEUI", /* 13 Reserved for 802.2LLC project */
++ "SECURITY", /* 14 Security callback pseudo AF */
++ "KEY", /* 15 PF_KEY key management API */
++ "NETLINK", /* 16 */
++ "PACKET", /* 17 Packet family */
++ "ASH", /* 18 Ash */
++ "ECONET", /* 19 Acorn Econet */
++ "ATMSVC", /* 20 ATM SVCs */
++ "(undefined)", /* 21 */
++ "SNA", /* 22 Linux SNA Project (nutters!) */
++ "IRDA", /* 23 IRDA sockets */
++ "PPPOX", /* 24 PPPoX sockets */
++ "WANPIPE", /* 25 Wanpipe API Sockets */
++ "(undefined)", /* 26 */
++ "(undefined)", /* 27 */
++ "(undefined)", /* 28 */
++ "(undefined)", /* 29 */
++ "(undefined)", /* 30 */
++ "BLUETOOTH", /* 31 Bluetooth sockets */
++ "MAX"
++};
++
++#define NETLINK_FAM_MAX 19
++
++static char net_netlink_family_list[NETLINK_FAM_MAX + 1][15] = {
++ "ROUTE", /* 0 Routing/device hook */
++ "UNUSED", /* 1 Unused number */
++ "USERSOCK", /* 2 Reserved for user mode socket protocols */
++ "FIREWALL", /* 3 Firewalling hook */
++ "INET_DIAG", /* 4 INET socket monitoring */
++ "NFLOG", /* 5 netfilter/iptables ULOG */
++ "XFRM", /* 6 ipsec */
++ "SELINUX", /* 7 SELinux event notifications */
++ "ISCSI", /* 8 Open-iSCSI */
++ "AUDIT", /* 9 auditing */
++ "FIB_LOOKUP",
++ "CONNECTOR",
++ "NETFILTER", /* 12 netfilter subsystem */
++ "IP6_FW",
++ "DNRTMSG", /* 14 DECnet routing messages */
++ "KOBJECT_UEVENT", /* 15 Kernel messages to userspace */
++ "GENERIC",
++ "DM", /* 17 (DM Events) */
++ "SCSITRANSPORT", /* 18 SCSI Transports */
++ "ECRYPTFS"
++};
++
++struct proto_desc_t {
++ char name[19];
++ int nr;
++};
++#define NR_PROTO 18
++
++static struct proto_desc_t net_protocol_list[NR_PROTO] = {
++ {"ANY", 0}, /* 0 Dummy protocol for TCP */
++ {"ICMP", 1}, /* Internet Control Message Protocol */
++ {"IGMP", 2}, /* Internet Group Management Protocol */
++ {"IPIP", 4}, /* IPIP tunnels (older KA9Q tunnels use 94) */
++ {"TCP", 6}, /* Transmission Control Protocol */
++ {"EGP", 8}, /* Exterior Gateway Protocol */
++ {"PUP", 12}, /* PUP protocol */
++ {"UDP", 17}, /* User Datagram Protocol */
++ {"IDP", 22}, /* XNS IDP protocol */
++ {"RSVP", 46}, /* RSVP protocol */
++ {"GRE", 47}, /* Cisco GRE tunnels (rfc 1701,1702) */
++ {"IPV6", 41}, /* IPv6-in-IPv4 tunnelling */
++ {"PIM", 103}, /* Protocol Independent Multicast */
++ {"ESP", 50}, /* Encapsulation Security Payload protocol */
++ {"AH", 51}, /* Authentication Header protocol */
++ {"COMP", 108}, /* Compression Header protocol */
++ {"RAW", 255}, /* Raw IP packets */
++ {"MAX", RSBAC_NET_PROTO_MAX}
++};
++
++static char rsbac_net_type_list[RSBAC_NET_TYPE_MAX + 1][19] = {
++ "ANY",
++ "STREAM", /* 1 stream (connection) socket */
++ "DGRAM", /* 2 datagram (conn.less) socket */
++ "RAW", /* 3 raw socket */
++ "RDM", /* 4 reliably-delivered message */
++ "SEQPACKET", /* 5 sequential packet socket */
++ "(undefined)", /* 6 */
++ "(undefined)", /* 7 */
++ "(undefined)", /* 8 */
++ "(undefined)", /* 9 */
++ "PACKET", /* 10 linux specific way of */
++ /* getting packets at the dev */
++ /* level. For writing rarp and */
++ /* other similar things on the */
++ /* user level. */
++ "MAX"
++};
++
++/*****************************************/
++
++char *rsbac_get_net_temp_syscall_name(char *name,
++ enum rsbac_net_temp_syscall_t value)
++{
++ if (!name)
++ return NULL;
++ if (value > NTS_none)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, net_temp_syscall_list[value]);
++ return name;
++};
++
++#ifndef __KERNEL__
++enum rsbac_net_temp_syscall_t rsbac_get_net_temp_syscall_nr(const char
++ *name)
++{
++ enum rsbac_net_temp_syscall_t i;
++
++ if (!name)
++ return NTS_none;
++ for (i = 0; i < NTS_none; i++) {
++ if (!strcmp(name, net_temp_syscall_list[i])) {
++ return i;
++ }
++ }
++ return NTS_none;
++};
++#endif
++
++#ifdef __KERNEL__
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_get_net_family_name);
++#endif
++#endif
++char *rsbac_get_net_family_name(char *name, u_int value)
++{
++ if (!name)
++ return NULL;
++ if (value > AF_MAX)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, net_family_list[value]);
++ return name;
++};
++
++#ifdef __KERNEL__
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_get_net_netlink_family_name);
++#endif
++#endif
++char *rsbac_get_net_netlink_family_name(char *name, u_int value)
++{
++ if (!name)
++ return NULL;
++ if (value == RSBAC_NET_NETLINK_PROTO_ANY)
++ strcpy(name, "ANY");
++ else if (value > NETLINK_FAM_MAX)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, net_netlink_family_list[value]);
++ return name;
++};
++
++#ifndef __KERNEL__
++int rsbac_get_net_family_nr(const char *name)
++{
++ int i;
++
++ if (!name)
++ return AF_MAX;
++ if (!strcmp(name, "ANY"))
++ return RSBAC_NET_NETLINK_PROTO_ANY;
++ for (i = 0; i < AF_MAX; i++) {
++ if (!strcmp(name, net_family_list[i])) {
++ return i;
++ }
++ }
++ return AF_MAX;
++};
++#endif
++
++#ifdef __KERNEL__
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_get_net_protocol_name);
++#endif
++#endif
++char *rsbac_get_net_protocol_name(char *name, u_int value)
++{
++ int i;
++
++ if (!name)
++ return NULL;
++ if (value >= RSBAC_NET_PROTO_MAX)
++ strcpy(name, "ERROR!");
++ else {
++ for (i = 0; i < NR_PROTO; i++) {
++ if (net_protocol_list[i].nr == value) {
++ strcpy(name, net_protocol_list[i].name);
++ return name;
++ }
++ }
++ sprintf(name, "%u", value);
++ }
++ return name;
++};
++
++#ifndef __KERNEL__
++int rsbac_get_net_protocol_nr(const char *name)
++{
++ int i;
++
++ if (!name)
++ return RSBAC_NET_PROTO_MAX;
++ for (i = 0; i < NR_PROTO; i++) {
++ if (!strcmp(name, net_protocol_list[i].name)) {
++ return net_protocol_list[i].nr;
++ }
++ }
++ return RSBAC_NET_PROTO_MAX;
++};
++#endif
++
++#ifdef __KERNEL__
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_get_net_type_name);
++#endif
++#endif
++char *rsbac_get_net_type_name(char *name, u_int value)
++{
++ if (!name)
++ return NULL;
++ if (value > RSBAC_NET_TYPE_MAX)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, rsbac_net_type_list[value]);
++ return name;
++};
++
++#ifndef __KERNEL__
++int rsbac_get_net_type_nr(const char *name)
++{
++ int i;
++
++ if (!name)
++ return RSBAC_NET_TYPE_MAX;
++ for (i = 0; i < RSBAC_NET_TYPE_MAX; i++) {
++ if (!strcmp(name, rsbac_net_type_list[i])) {
++ return i;
++ }
++ }
++ return RSBAC_NET_TYPE_MAX;
++};
++#endif
++
++#ifdef __KERNEL__
++int rsbac_net_str_to_inet(char *str, __u32 * addr)
++{
++ char *end;
++ __u32 s0, s1, s2, s3;
++
++ if (!str || !addr)
++ return -RSBAC_EINVALIDPOINTER;
++ end = str;
++ while (*end) {
++ if ((*end != '.')
++ && (*end != '\n')
++ && (*end != ' ')
++ && ((*end < '0')
++ || (*end > '9')
++ )
++ )
++ return -RSBAC_EINVALIDVALUE;
++ end++;
++ }
++ s0 = simple_strtoul(str, &end, 10);
++ if (!*end || (s0 > 255))
++ return -RSBAC_EINVALIDVALUE;
++ end++;
++ s1 = simple_strtoul(end, &end, 10);
++ if (!*end || (s1 > 255))
++ return -RSBAC_EINVALIDVALUE;
++ end++;
++ s2 = simple_strtoul(end, &end, 10);
++ if (!*end || (s2 > 255))
++ return -RSBAC_EINVALIDVALUE;
++ end++;
++ s3 = simple_strtoul(end, &end, 10);
++ if (*end || (s3 > 255))
++ return -RSBAC_EINVALIDVALUE;
++ *addr = s3 | (s2 << 8) | (s1 << 16) | (s0 << 24);
++ *addr = htonl(*addr);
++ return 0;
++}
++#endif
+diff --git a/rsbac/help/net_helpers.c b/rsbac/help/net_helpers.c
+new file mode 100644
+index 0000000..fbbb308
+--- /dev/null
++++ b/rsbac/help/net_helpers.c
+@@ -0,0 +1,117 @@
++/*
++ * net_helpers.c: Helper functions for the Network.
++ *
++ * Author and Copyright (C) 1999-2009 Amon Ott <ao@rsbac.org>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2.
++ *
++ * Last modified 03/Feb/2009.
++ */
++
++#include <rsbac/types.h>
++#ifdef __KERNEL__
++#include <rsbac/network.h>
++#endif
++
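++/* IPv4 netmasks for 0-31 prefix bits; the values are byte-swapped so they can be ANDed directly with network-byte-order addresses on little-endian hosts. */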
++static __u32 ipv4_mask[32] = {
++ 0x00000000, 0x00000080, 0x000000C0, 0x000000E0,
++ 0x000000F0, 0x000000F8, 0x000000FC, 0x000000FE,
++ 0x000000FF, 0x000080FF, 0x0000C0FF, 0x0000E0FF,
++ 0x0000F0FF, 0x0000F8FF, 0x0000FCFF, 0x0000FEFF,
++ 0x0000FFFF, 0x0080FFFF, 0x00C0FFFF, 0x00E0FFFF,
++ 0x00F0FFFF, 0x00F8FFFF, 0x00FCFFFF, 0x00FEFFFF,
++ 0x00FFFFFF, 0x80FFFFFF, 0xC0FFFFFF, 0xE0FFFFFF,
++ 0xF0FFFFFF, 0xF8FFFFFF, 0xFCFFFFFF, 0xFEFFFFFF
++};
++
++static inline __u32 rsbac_net_make_mask_u32(__u8 bits)
++{
++ if (bits >= 32)
++ return (__u32)-1UL;
++ return ipv4_mask[bits];
++}
++
++#ifdef __KERNEL__
++/* The lookup data param is always second, so we use it as description here! */
++int rsbac_net_compare_data(void *data1, void *data2)
++{
++ struct rsbac_net_temp_data_t *temp = data1;
++ struct rsbac_net_description_t *desc = data2;
++
++ if (!temp || !desc)
++ return 1;
++ if ((temp->address_family != RSBAC_NET_ANY)
++ && (temp->address_family != desc->address_family)
++ )
++ return 1;
++ switch (desc->address_family) {
++ case AF_INET:
++ {
++ __u32 mask;
++ int i;
++
++ if(temp->address.inet.nr_addr == 0)
++ return 1;
++ if ((temp->type != RSBAC_NET_ANY)
++ && (desc->type != temp->type)
++ )
++ return 1;
++ if ((temp->protocol != RSBAC_NET_ANY)
++ && (desc->protocol != temp->protocol)
++ )
++ return 1;
++ if(temp->ports.nr_ports > 0) {
++ i=0;
++ while(i < temp->ports.nr_ports) {
++ if ((desc->port >= temp->ports.ports[i].min)
++ && (desc->port <= temp->ports.ports[i].max))
++ break;
++ i++;
++ }
++ if(i == temp->ports.nr_ports)
++ return 1;
++ }
++ if (temp->netdev[0]
++ && (!desc->netdev[0]
++ || strncmp(desc->netdev, temp->netdev,
++ RSBAC_IFNAMSIZ))
++ )
++ return 1;
++ if (!desc->address)
++ return 1;
++ i=0;
++ while(i < temp->address.inet.nr_addr) {
++ mask = rsbac_net_make_mask_u32(temp->address.inet.valid_bits[i]);
++ if ((((*(__u32 *) desc->address) & mask) ==
++ (temp->address.inet.addr[i] & mask))
++ )
++ return 0;
++ i++;
++ }
++ return 1;
++ }
++
++ case AF_NETLINK:
++ if ((temp->type != RSBAC_NET_ANY)
++ && (desc->type != temp->type)
++ )
++ return 1;
++ if ((temp->protocol != RSBAC_NET_NETLINK_PROTO_ANY)
++ && (desc->protocol != temp->protocol)
++ )
++ return 1;
++ return 0;
++
++ /* Other address families: only socket type checks for now */
++ default:
++ if ((temp->type != RSBAC_NET_ANY)
++ && (desc->type != temp->type)
++ )
++ return 1;
++ return 0;
++ }
++ return 1;
++}
++#endif
+diff --git a/rsbac/help/pax_getname.c b/rsbac/help/pax_getname.c
+new file mode 100644
+index 0000000..0822b6e
+--- /dev/null
++++ b/rsbac/help/pax_getname.c
+@@ -0,0 +1,95 @@
++/********************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2004: */
++/* Amon Ott <ao@rsbac.org> */
++/* Getname functions for PAX module */
++/* Last modified: 06/Jan/2004 */
++/********************************** */
++
++#include <rsbac/types.h>
++#include <rsbac/pax_getname.h>
++#include <rsbac/helpers.h>
++#include <rsbac/error.h>
++
++#ifdef __KERNEL__
++#include <linux/string.h>
++#else
++#include <stdio.h>
++#include <string.h>
++#endif
++
++char * pax_print_flags(char * string, rsbac_pax_flags_t flags)
++ {
++ sprintf(string, "%c%c%c%c%c%c",
++ flags & PF_PAX_PAGEEXEC ? 'P' : 'p',
++ flags & PF_PAX_EMUTRAMP ? 'E' : 'e',
++ flags & PF_PAX_MPROTECT ? 'M' : 'm',
++ flags & PF_PAX_RANDMMAP ? 'R' : 'r',
++ flags & PF_PAX_RANDEXEC ? 'X' : 'x',
++ flags & PF_PAX_SEGMEXEC ? 'S' : 's');
++ return string;
++ }
++
++#ifndef __KERNEL__
++rsbac_pax_flags_t pax_strtoflags(char * string, rsbac_pax_flags_t init_flags)
++ {
++ char * p = string;
++ rsbac_pax_flags_t add_flags = 0;
++ rsbac_pax_flags_t remove_flags = 0;
++
++ if(!p)
++ return init_flags;
++ while(*p)
++ {
++ switch(*p)
++ {
++ case 'P':
++ add_flags |= PF_PAX_PAGEEXEC;
++ break;
++ case 'p':
++ remove_flags |= PF_PAX_PAGEEXEC;
++ break;
++ case 'E':
++ add_flags |= PF_PAX_EMUTRAMP;
++ break;
++ case 'e':
++ remove_flags |= PF_PAX_EMUTRAMP;
++ break;
++ case 'M':
++ add_flags |= PF_PAX_MPROTECT;
++ break;
++ case 'm':
++ remove_flags |= PF_PAX_MPROTECT;
++ break;
++ case 'R':
++ add_flags |= PF_PAX_RANDMMAP;
++ break;
++ case 'r':
++ remove_flags |= PF_PAX_RANDMMAP;
++ break;
++ case 'X':
++ add_flags |= PF_PAX_RANDEXEC;
++ break;
++ case 'x':
++ remove_flags |= PF_PAX_RANDEXEC;
++ break;
++ case 'S':
++ add_flags |= PF_PAX_SEGMEXEC;
++ break;
++ case 's':
++ remove_flags |= PF_PAX_SEGMEXEC;
++ break;
++ case 'z':
++ remove_flags = RSBAC_PAX_ALL_FLAGS;
++ break;
++ case 'a':
++ add_flags = RSBAC_PAX_ALL_FLAGS;
++ break;
++ default:
++ break;
++ }
++ p++;
++ }
++ return (init_flags | add_flags) & ~remove_flags;
++ }
++#endif
+diff --git a/rsbac/help/pm_getname.c b/rsbac/help/pm_getname.c
+new file mode 100644
+index 0000000..fe79468
+--- /dev/null
++++ b/rsbac/help/pm_getname.c
+@@ -0,0 +1,554 @@
++/******************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2004: */
++/* Amon Ott <ao@rsbac.org> */
++/* PM getname functions */
++/* Last modified: 19/Nov/2004 */
++/******************************** */
++
++#include <rsbac/pm_getname.h>
++#include <rsbac/helpers.h>
++#include <rsbac/error.h>
++
++#ifdef __KERNEL__
++#include <linux/string.h>
++#else
++#include <string.h>
++#endif
++
++static char pm_list[PL_none][6] = {
++ "task",
++ "class",
++ "na",
++ "cs",
++ "tp",
++ "pp",
++ "tkt" };
++
++static char pm_all_list[PA_none][11] = {
++ "task",
++ "class",
++ "na",
++ "cs",
++ "tp",
++ "pp",
++ "tkt",
++ "task_set",
++ "tp_set",
++ "ru_set",
++ "pp_set",
++ "in_pp_set",
++ "out_pp_set" };
++
++static char pm_role[PR_none+1][24] = {
++ "user",
++ "security_officer",
++ "data_protection_officer",
++ "tp_manager",
++ "system_admin",
++ "none" };
++
++static char pm_process_type[PP_TP+1][5] = {
++ "none",
++ "tp" };
++
++static char pm_object_type[PO_dir+1][18] = {
++ "none",
++ "tp",
++ "personal_data",
++ "non_personal_data",
++ "ipc",
++ "dir" };
++
++#ifdef __KERNEL__
++static char pm_set[PS_NONE+1][5] = {
++ "TASK",
++ "TP",
++ "RU",
++ "PP",
++ "NONE" };
++
++static char pm_target[PMT_NONE+1][6] = {
++ "TASK",
++ "CLASS",
++ "NA",
++ "CS",
++ "TP",
++ "PP",
++ "TKT",
++ "NONE" };
++
++static char pm_data[PD_none+1][15] = {
++ "purpose",
++ "tp_set",
++ "ru_set",
++ "pp_set",
++ "task",
++ "class",
++ "tp",
++ "accesses",
++ "file",
++ "issuer",
++ "function_type",
++ "function_param",
++ "valid_until",
++ "def_class",
++ "none" };
++#endif
++
++static char pm_function_type[PF_none+1][24] = {
++ "add_na",
++ "delete_na",
++ "add_task",
++ "delete_task",
++ "add_object_class",
++ "delete_object_class",
++ "add_authorized_tp",
++ "delete_authorized_tp",
++ "add_consent",
++ "delete_consent",
++ "add_purpose",
++ "delete_purpose",
++ "add_responsible_user",
++ "delete_responsible_user",
++ "delete_user_aci",
++ "set_role",
++ "set_object_class",
++ "switch_pm",
++ "switch_auth",
++ "set_device_object_type",
++ "set_auth_may_setuid",
++ "set_auth_may_set_cap",
++ /* issued by user also */
++ "add_authorized_task",
++ "delete_authorized_task",
++ /* called by tp_manager */
++ "create_tp",
++ "delete_tp",
++ "set_tp",
++ "create_ticket",
++ "none"};
++
++#ifndef __KERNEL__
++static char pm_function_param[PF_none+1][123] = {
++ "\t\tticket task class tp accesses (class can be IPC, DEV or NIL)",
++ "\tticket task class tp accesses (class can be IPC, DEV or NIL)",
++ "\tticket id purpose",
++ "\tticket id",
++ "ticket id purpose1 purpose2 ...",
++ "ticket id",
++ "ticket task tp",
++ "ticket task tp",
++ "\tticket filename purpose",
++ "\tticket filename purpose",
++ "\tticket id default-class\n (class created, if necessary, and purpose added to pp-list of class)",
++ "\tticket id",
++ "ticket user task",
++ "ticket user task",
++ "ticket id",
++ "\tticket user role\n (roles: user|security_officer|data_protection_officer|tp_manager|system_admin)",
++ "ticket filename object_class\n (also sets object_type personal_data (cl!=0) or non_personal_data (cl=0)",
++ "\tticket value (0 or 1)",
++ "\tticket value (0 or 1)",
++ "ticket devicename object_type [object_class]\n (types: none, tp, personal_data, non_personal_data)\n (default class is DEV)",
++ "ticket filename value(0 or 1)",
++ "ticket filename value(0 or 1)",
++ /* issued by user also */
++ "ticket user task",
++ "ticket user task",
++ /* called by tp_manager */
++ "\tid",
++ "\tid",
++ "\t\tfilename id",
++ /* create_ticket */
++ "(call with create_ticket for params)",
++ "INVALID"};
++#endif
++
++static char pm_tkt_function_type[PTF_none+1][25] = {
++ "add_na",
++ "delete_na",
++ "add_task",
++ "delete_task",
++ "add_object_class",
++ "delete_object_class",
++ "add_authorized_tp",
++ "delete_authorized_tp",
++ "add_consent",
++ "delete_consent",
++ "add_purpose",
++ "delete_purpose",
++ "add_responsible_user",
++ "delete_responsible_user",
++ "delete_user_aci",
++ "set_role",
++ "set_object_class",
++ "switch_pm",
++ "switch_auth",
++ "set_device_object_type",
++ "set_auth_may_setuid",
++ "set_auth_may_set_cap",
++ /* issued by user also */
++ "add_authorized_task",
++ "delete_authorized_task",
++ "none"};
++
++#ifndef __KERNEL__
++static char pm_tkt_function_param[PTF_none+1][116] = {
++ "\t\ttask class tp accesses (class can be IPC, DEV or NIL)",
++ "\ttask class tp accesses (class can be IPC, DEV or NIL)",
++ "\tid purpose",
++ "\tid",
++ "id purpose1 purpose2 ...",
++ "id",
++ "task tp",
++ "task tp",
++ "\tfilename purpose",
++ "\tfilename purpose",
++ "\tid default-class (class must not be NIL, IPC or DEV)",
++ "\tid",
++ "user task",
++ "user task",
++ "user",
++ "\tuser role\n (roles: user|security_officer|data_protection_officer|tp_manager|system_admin)",
++ "filename object_class\n (sets object_type personal_data (cl!=0) or non_personal_data (cl=0)",
++ "\tvalue (0 or 1)",
++ "\tvalue (0 or 1)",
++ "devicename object_type [object_class]\n (types: none, tp, personal_data, non_personal_data)\n (default class is DEV)",
++ "filename value(0 or 1)",
++ "filename value(0 or 1)",
++ /* issued by user also */
++ "user task",
++ "user task",
++ "INVALID"};
++#endif
++
++/*****************************************/
++
++char * get_pm_list_name(char * name,
++ enum rsbac_pm_list_t value)
++ {
++ if(!name)
++ return(NULL);
++ if(value >= PL_none)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, pm_list[value]);
++ return(name);
++ };
++
++enum rsbac_pm_list_t get_pm_list_nr(const char * name)
++ {
++ enum rsbac_pm_list_t i;
++
++ if(!name)
++ return(PL_none);
++ for (i = 0; i < PL_none; i++)
++ {
++ if (!strcmp(name,pm_list[i]))
++ {
++ return(i);
++ }
++ }
++ return(PL_none);
++ };
++
++char * get_pm_all_list_name(char * name,
++ enum rsbac_pm_all_list_t value)
++ {
++ if(!name)
++ return(NULL);
++ if(value >= PA_none)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, pm_all_list[value]);
++ return(name);
++ };
++
++enum rsbac_pm_all_list_t get_pm_all_list_nr(const char * name)
++ {
++ enum rsbac_pm_all_list_t i;
++
++ if(!name)
++ return(PA_none);
++ for (i = 0; i < PA_none; i++)
++ {
++ if (!strcmp(name,pm_all_list[i]))
++ {
++ return(i);
++ }
++ }
++ return(PA_none);
++ };
++
++/****/
++
++char * get_pm_role_name(char * name,
++ enum rsbac_pm_role_t value)
++ {
++ if(!name)
++ return(NULL);
++ if(value > PR_none)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, pm_role[value]);
++ return(name);
++ };
++
++enum rsbac_pm_role_t get_pm_role_nr(const char * name)
++ {
++ enum rsbac_pm_role_t i;
++
++ if(!name)
++ return(PR_none);
++ for (i = 0; i < PR_none; i++)
++ {
++ if (!strcmp(name,pm_role[i]))
++ {
++ return(i);
++ }
++ }
++ return(PR_none);
++ };
++
++/****/
++
++char * get_pm_process_type_name(char * name,
++ enum rsbac_pm_process_type_t value)
++ {
++ if(!name)
++ return(NULL);
++ if(value > PP_TP)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, pm_process_type[value]);
++ return(name);
++ };
++
++enum rsbac_pm_process_type_t get_pm_process_type_nr(const char * name)
++ {
++ enum rsbac_pm_process_type_t i;
++
++ if(!name)
++ return(PP_none);
++ for (i = 0; i < PP_TP; i++)
++ {
++ if (!strcmp(name,pm_process_type[i]))
++ {
++ return(i);
++ }
++ }
++ return(PP_none);
++ };
++
++
++/****/
++
++char * get_pm_object_type_name(char * name,
++ enum rsbac_pm_object_type_t value)
++ {
++ if(!name)
++ return(NULL);
++ if(value > PO_dir)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, pm_object_type[value]);
++ return(name);
++ };
++
++enum rsbac_pm_object_type_t get_pm_object_type_nr(const char * name)
++ {
++ enum rsbac_pm_object_type_t i;
++
++ if(!name)
++ return(PO_none);
++ for (i = 0; i < PO_dir; i++)
++ {
++ if (!strcmp(name,pm_object_type[i]))
++ {
++ return(i);
++ }
++ }
++ return(PO_none);
++ };
++
++/****/
++
++#ifdef __KERNEL__
++char * get_pm_set_name(char * name,
++ enum rsbac_pm_set_t value)
++ {
++ if(!name)
++ return(NULL);
++ if(value > PS_NONE)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, pm_set[value]);
++ return(name);
++ };
++
++enum rsbac_pm_set_t get_pm_set_nr(const char * name)
++ {
++ enum rsbac_pm_set_t i;
++
++ if(!name)
++ return(PS_NONE);
++ for (i = 0; i < PS_NONE; i++)
++ {
++ if (!strcmp(name,pm_set[i]))
++ {
++ return(i);
++ }
++ }
++ return(PS_NONE);
++ };
++
++/****/
++
++char * get_pm_target_name(char * name,
++ enum rsbac_pm_target_t value)
++ {
++ if(!name)
++ return(NULL);
++ if(value > PMT_NONE)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, pm_target[value]);
++ return(name);
++ };
++
++enum rsbac_pm_target_t get_pm_target_nr(const char * name)
++ {
++ enum rsbac_pm_target_t i;
++
++ if(!name)
++ return(PMT_NONE);
++ for (i = 0; i < PMT_NONE; i++)
++ {
++ if (!strcmp(name,pm_target[i]))
++ {
++ return(i);
++ }
++ }
++ return(PMT_NONE);
++ };
++
++/****/
++
++char * get_pm_data_name(char * name,
++ enum rsbac_pm_data_t value)
++ {
++ if(!name)
++ return(NULL);
++ if(value > PD_none)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, pm_data[value]);
++ return(name);
++ };
++
++enum rsbac_pm_data_t get_pm_data_nr(const char * name)
++ {
++ enum rsbac_pm_data_t i;
++
++ if(!name)
++ return(PD_none);
++ for (i = 0; i < PD_none; i++)
++ {
++ if (!strcmp(name,pm_data[i]))
++ {
++ return(i);
++ }
++ }
++ return(PD_none);
++ };
++#endif /* def __KERNEL__ */
++
++/****/
++
++char * get_pm_function_type_name(char * name,
++ enum rsbac_pm_function_type_t value)
++ {
++ if(!name)
++ return(NULL);
++ if(value > PF_none)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, pm_function_type[value]);
++ return(name);
++ };
++
++enum rsbac_pm_function_type_t get_pm_function_type_nr(const char * name)
++ {
++ enum rsbac_pm_function_type_t i;
++
++ if(!name)
++ return(PF_none);
++ for (i = 0; i < PF_none; i++)
++ {
++ if (!strcmp(name,pm_function_type[i]))
++ {
++ return(i);
++ }
++ }
++ return(PF_none);
++ };
++
++#ifndef __KERNEL__
++char * get_pm_function_param(char * name,
++ enum rsbac_pm_function_type_t value)
++ {
++ if(!name)
++ return(NULL);
++ if(value > PF_none)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, pm_function_param[value]);
++ return(name);
++ };
++#endif
++
++/****/
++
++char * get_pm_tkt_function_type_name(char * name,
++ enum rsbac_pm_tkt_function_type_t value)
++ {
++ if(!name)
++ return(NULL);
++ if(value > PTF_none)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, pm_tkt_function_type[value]);
++ return(name);
++ };
++
++enum rsbac_pm_tkt_function_type_t
++ get_pm_tkt_function_type_nr(const char * name)
++ {
++ enum rsbac_pm_tkt_function_type_t i;
++
++ if(!name)
++ return(PTF_none);
++ for (i = 0; i < PTF_none; i++)
++ {
++ if (!strcmp(name,pm_tkt_function_type[i]))
++ {
++ return(i);
++ }
++ }
++ return(PTF_none);
++ };
++
++#ifndef __KERNEL__
++char * get_pm_tkt_function_param(char * name,
++ enum rsbac_pm_tkt_function_type_t value)
++ {
++ if(!name)
++ return(NULL);
++ if(value > PTF_none)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, pm_tkt_function_param[value]);
++ return(name);
++ };
++#endif
+diff --git a/rsbac/help/rc_getname.c b/rsbac/help/rc_getname.c
+new file mode 100644
+index 0000000..97b3eae
+--- /dev/null
++++ b/rsbac/help/rc_getname.c
+@@ -0,0 +1,305 @@
++/*
++ * rc_getname.c: Getname functions for the RC module.
++ *
++ * Author and Copyright (C) 1999-2005 Amon Ott (ao@rsbac.org)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2.
++ *
++ * Last modified 21/12/2004.
++ */
++
++#include <rsbac/getname.h>
++#include <rsbac/rc_getname.h>
++#include <rsbac/helpers.h>
++#include <rsbac/error.h>
++
++#ifdef __KERNEL__
++#include <linux/string.h>
++#else
++#include <string.h>
++#endif
++
++#ifndef NULL
++#define NULL ((void *) 0)
++#endif
++
++static char rc_target_list[RT_NONE + 1][13] = {
++ "ROLE",
++ "TYPE",
++ "NONE"
++};
++
++static char rc_admin_list[RC_none + 1][13] = {
++ "no_admin",
++ "role_admin",
++ "system_admin",
++ "none"
++};
++
++static char rc_scd_type_list[RST_none - RST_min + 1][20] = {
++ "auth_administration",
++ "none"
++};
++
++static char rc_item_list[RI_none + 1][30] = {
++ "role_comp",
++ "admin_roles",
++ "assign_roles",
++ "type_comp_fd",
++ "type_comp_dev",
++ "type_comp_user",
++ "type_comp_process",
++ "type_comp_ipc",
++ "type_comp_scd",
++ "type_comp_group",
++ "type_comp_netdev",
++ "type_comp_nettemp",
++ "type_comp_netobj",
++ "admin_type",
++ "name",
++ "def_fd_create_type",
++ "def_fd_ind_create_type",
++ "def_user_create_type",
++ "def_process_create_type",
++ "def_process_chown_type",
++ "def_process_execute_type",
++ "def_ipc_create_type",
++ "def_group_create_type",
++ "def_unixsock_create_type",
++ "boot_role",
++ "req_reauth",
++ "type_fd_name",
++ "type_dev_name",
++ "type_ipc_name",
++ "type_user_name",
++ "type_process_name",
++ "type_group_name",
++ "type_netdev_name",
++ "type_nettemp_name",
++ "type_netobj_name",
++ "type_fd_need_secdel",
++ "type_scd_name",
++ "remove_role",
++ "def_fd_ind_create_type_remove",
++ "type_fd_remove",
++ "type_dev_remove",
++ "type_ipc_remove",
++ "type_user_remove",
++ "type_process_remove",
++ "type_group_remove",
++ "type_netdev_remove",
++ "type_nettemp_remove",
++ "type_netobj_remove",
++#ifdef __KERNEL__
++#endif
++ "none"
++};
++
++#ifndef __KERNEL__
++static char rc_item_param_list[RI_none + 1][100] = {
++ "\t0 = FALSE, 1 = TRUE",
++ "\t0 = FALSE, 1 = TRUE",
++ "\t0 = FALSE, 1 = TRUE",
++ "\t0 = FALSE, 1 = TRUE",
++ "\t0 = FALSE, 1 = TRUE",
++ "\t0 = FALSE, 1 = TRUE",
++ "0 = FALSE, 1 = TRUE",
++ "\t0 = FALSE, 1 = TRUE",
++ "\t0 = FALSE, 1 = TRUE",
++ "\t0 = FALSE, 1 = TRUE",
++ "0 = FALSE, 1 = TRUE",
++ "0 = FALSE, 1 = TRUE",
++ "0 = FALSE, 1 = TRUE",
++ "\t0 = no_admin, 1 = role_admin, 2 = system_admin\n\t\t\t(for RC administration only)",
++ "\t\tString, max. 15 chars",
++ "number, -2 = inherit from parent, -3 = no_create",
++ "parent_type new_type, -2 = inherit from parent,\n\t\t\t-3 = no_create",
++ "number, -2 = inherit from parent, -3 = no_create",
++ "number, -1 = inherit from process,\n\t\t\t-3 = no_create",
++ "number, -2 = inherit from parent (keep),\n\t\t\t-3 = no_create",
++ "number, -2 = inherit from parent (keep),\n\t\t\t-5 = use def_create of new role, -6 = no_chown",
++ "number, -1 = inherit from process (keep),\n\t\t\t-4 = no_execute",
++ "number, -3 = no_create",
++ "number, -7 = use_template (do not set)",
++ "\t0 = FALSE, 1 = TRUE",
++ "\tString, max. 15 chars",
++ "\tString, max. 15 chars",
++ "\tString, max. 15 chars",
++ "\tString, max. 15 chars",
++ "String, max. 15 chars",
++ "\tString, max. 15 chars",
++ "String, max. 15 chars",
++ "String, max. 15 chars",
++ "String, max. 15 chars",
++ "0 = FALSE, 1 = TRUE",
++ "\tString, max. 15 chars (read-only)",
++ "\t\t(none)"
++};
++#endif
++
++static char rc_special_right_list[RCR_NONE - RSBAC_RC_SPECIAL_RIGHT_BASE +
++ 1][20] = {
++ "ADMIN",
++ "ASSIGN",
++ "ACCESS_CONTROL",
++ "SUPERVISOR",
++ "MODIFY_AUTH",
++ "CHANGE_AUTHED_OWNER",
++ "SELECT",
++ "NONE"
++};
++
++/*****************************************/
++
++char *get_rc_target_name(char *name, enum rsbac_rc_target_t value)
++{
++ if (!name)
++ return (NULL);
++ if (value > RT_NONE)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, rc_target_list[value]);
++ return (name);
++};
++
++enum rsbac_rc_target_t get_rc_target_nr(const char *name)
++{
++ enum rsbac_rc_target_t i;
++
++ if (!name)
++ return (RT_NONE);
++ for (i = 0; i < RT_NONE; i++) {
++ if (!strcmp(name, rc_target_list[i])) {
++ return (i);
++ }
++ }
++ return (RT_NONE);
++};
++
++char *get_rc_admin_name(char *name, enum rsbac_rc_admin_type_t value)
++{
++ if (!name)
++ return (NULL);
++ if (value > RC_none)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, rc_admin_list[value]);
++ return (name);
++};
++
++enum rsbac_rc_admin_type_t get_rc_admin_nr(const char *name)
++{
++ enum rsbac_rc_admin_type_t i;
++
++ if (!name)
++ return (RC_none);
++ for (i = 0; i < RC_none; i++) {
++ if (!strcmp(name, rc_admin_list[i])) {
++ return (i);
++ }
++ }
++ return (RC_none);
++};
++
++char *get_rc_scd_type_name(char *name, enum rsbac_rc_scd_type_t value)
++{
++ if (!name)
++ return (NULL);
++ if (value < RST_min) {
++ return (get_scd_type_name(name, value));
++ }
++ value -= RST_min;
++ if (value > RST_none) {
++ strcpy(name, "ERROR!");
++ return (name);
++ }
++ strcpy(name, rc_scd_type_list[value]);
++ return (name);
++};
++
++enum rsbac_rc_scd_type_t get_rc_scd_type_nr(const char *name)
++{
++ enum rsbac_rc_scd_type_t i;
++
++ if (!name)
++ return (RC_none);
++ for (i = 0; i < RC_none - RST_min; i++) {
++ if (!strcmp(name, rc_scd_type_list[i])) {
++ return (i + RST_min);
++ }
++ }
++ return (get_scd_type_nr(name));
++};
++
++char *get_rc_item_name(char *name, enum rsbac_rc_item_t value)
++{
++ if (!name)
++ return (NULL);
++ if (value > RI_none)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, rc_item_list[value]);
++ return (name);
++};
++
++enum rsbac_rc_item_t get_rc_item_nr(const char *name)
++{
++ enum rsbac_rc_item_t i;
++
++ if (!name)
++ return (RI_none);
++ for (i = 0; i < RI_none; i++) {
++ if (!strcmp(name, rc_item_list[i])) {
++ return (i);
++ }
++ }
++ return (RI_none);
++};
++
++#ifndef __KERNEL__
++char *get_rc_item_param(char *name, enum rsbac_rc_item_t value)
++{
++ if (!name)
++ return (NULL);
++ if (value > RI_none)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, rc_item_param_list[value]);
++ return (name);
++};
++#endif
++
++char *get_rc_special_right_name(char *name,
++ enum rsbac_rc_special_rights_t value)
++{
++ if (!name)
++ return (NULL);
++ if (value < RSBAC_RC_SPECIAL_RIGHT_BASE) {
++ return (get_request_name(name, value));
++ }
++ value -= RSBAC_RC_SPECIAL_RIGHT_BASE;
++ if (value > RCR_NONE) {
++ strcpy(name, "ERROR!");
++ return (name);
++ }
++ strcpy(name, rc_special_right_list[value]);
++ return (name);
++};
++
++#ifndef __KERNEL__
++enum rsbac_rc_special_rights_t get_rc_special_right_nr(const char *name)
++{
++ enum rsbac_rc_special_rights_t i;
++
++ if (!name)
++ return (RCR_NONE);
++ for (i = 0; i < (RCR_NONE - RSBAC_RC_SPECIAL_RIGHT_BASE); i++) {
++ if (!strcmp(name, rc_special_right_list[i])) {
++ return (i + RSBAC_RC_SPECIAL_RIGHT_BASE);
++ }
++ }
++ return (get_request_nr(name));
++}
++#endif
+diff --git a/rsbac/help/res_getname.c b/rsbac/help/res_getname.c
+new file mode 100644
+index 0000000..0641eb7
+--- /dev/null
++++ b/rsbac/help/res_getname.c
+@@ -0,0 +1,62 @@
++/********************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 2002: */
++/* Amon Ott <ao@rsbac.org> */
++/* Getname functions for RES module */
++/* Last modified: 22/Nov/2002 */
++/********************************** */
++
++#ifndef __KERNEL__
++
++#include <rsbac/getname.h>
++#include <rsbac/res_getname.h>
++#include <rsbac/helpers.h>
++#include <rsbac/error.h>
++
++#include <string.h>
++
++static char res_list[RSBAC_RES_MAX+2][8] = {
++ "cpu",
++ "fsize",
++ "data",
++ "stack",
++ "core",
++ "rss",
++ "nproc",
++ "nofile",
++ "memlock",
++ "as",
++ "locks",
++ "NONE" };
++
++/*****************************************/
++
++char * get_res_name(char * name,
++ u_int value)
++ {
++ if(!name)
++ return(NULL);
++ if(value > RSBAC_RES_MAX)
++ strcpy(name, "ERROR!");
++ else
++ strcpy(name, res_list[value]);
++ return(name);
++ };
++
++int get_res_nr(const char * name)
++ {
++ int i;
++
++ if(!name)
++ return(RSBAC_RES_NONE);
++ for (i = 0; i <= RSBAC_RES_MAX; i++)
++ {
++ if (!strcmp(name, res_list[i]))
++ {
++ return(i);
++ }
++ }
++ return(RSBAC_RES_NONE);
++ };
++
++#endif /* !__KERNEL__ */
+diff --git a/rsbac/help/rkmem.c b/rsbac/help/rkmem.c
+new file mode 100644
+index 0000000..9faf4f2
+--- /dev/null
++++ b/rsbac/help/rkmem.c
+@@ -0,0 +1,77 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Author and (c) 1999-2010: Amon Ott <ao@rsbac.org> */
++/* (a lot copied from mm/slab.c, with other */
++/* copyrights) */
++/* Memory allocation functions for all parts */
++/* Last modified: 24/Jun/2010 */
++/*************************************************** */
++
++#include <rsbac/types.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/debug.h>
++#include <rsbac/aci.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/timer.h>
++
++/**
++ * rsbac_kmalloc - allocate memory
++ * @size: how many bytes of memory are required.
++ *
++ * rsbac_kmalloc is the normal way to allocate memory for RSBAC inside
++ * the kernel. Allocations use GFP_KERNEL on 2.4 kernels and GFP_ATOMIC
++ * on 2.6 and later kernels.
++ *
++ * rsbac_kmalloc'd memory is freed by rsbac_kfree
++ */
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_kmalloc);
++#endif
++void * rsbac_kmalloc (size_t size)
++{
++ if(!size)
++ return NULL;
++
++ return kmalloc(size, GFP_ATOMIC);
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_kmalloc_unlocked);
++#endif
++void * rsbac_kmalloc_unlocked (size_t size)
++{
++ if(!size)
++ return NULL;
++
++ return kmalloc(size, GFP_KERNEL);
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_kmalloc_clear);
++#endif
++void * rsbac_kmalloc_clear (size_t size)
++{
++ if(!size)
++ return NULL;
++
++ return kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
++}
++
++#if defined(CONFIG_RSBAC_REG) || defined(CONFIG_RSBAC_REG_MAINT)
++EXPORT_SYMBOL(rsbac_kmalloc_clear_unlocked);
++#endif
++void * rsbac_kmalloc_clear_unlocked (size_t size)
++{
++ if(!size)
++ return NULL;
++
++ return kmalloc(size, GFP_KERNEL | __GFP_ZERO);
++}
++
++void rsbac_kfree (const void *objp)
++{
++ kfree(objp);
++}
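++
++/*
++ * Usage note (illustrative sketch, not part of the original sources):
++ * memory obtained from any of the rsbac_kmalloc* variants must be
++ * released with rsbac_kfree(). The *_unlocked variants allocate with
++ * GFP_KERNEL and may sleep, so they must not be used in atomic context.
++ *
++ *   struct rsbac_dev_desc_t *descs;
++ *
++ *   descs = rsbac_kmalloc_clear_unlocked(count * sizeof(*descs));
++ *   if (!descs)
++ *       return -ENOMEM;
++ *   ...
++ *   rsbac_kfree(descs);
++ *
++ * (count and descs are placeholder names.)
++ */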
+diff --git a/rsbac/help/syscalls.c b/rsbac/help/syscalls.c
+new file mode 100644
+index 0000000..2190490
+--- /dev/null
++++ b/rsbac/help/syscalls.c
+@@ -0,0 +1,8730 @@
++/*************************************************** */
++/* Rule Set Based Access Control */
++/* Implementation of RSBAC general system calls */
++/* Author and (C) 1999-2012: Amon Ott <ao@rsbac.org> */
++/* */
++/* Last modified: 07/May/2012 */
++/*************************************************** */
++
++#include <rsbac/types.h>
++#include <rsbac/aci.h>
++#include <rsbac/mac.h>
++#include <rsbac/pm.h>
++#include <rsbac/auth.h>
++#include <rsbac/acl.h>
++#include <rsbac/reg.h>
++#include <rsbac/error.h>
++#include <rsbac/debug.h>
++#include <rsbac/helpers.h>
++#include <rsbac/getname.h>
++#include <rsbac/network.h>
++#include <linux/semaphore.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <rsbac/rkmem.h>
++#include <rsbac/gen_lists.h>
++#include <asm/uaccess.h>
++#include <linux/delay.h>
++#include <linux/namei.h>
++
++#include <rsbac/adf.h>
++#include <rsbac/adf_main.h>
++#include <rsbac/adf_syshelpers.h>
++#include <rsbac/rc.h>
++#include <rsbac/um.h>
++#include <rsbac/um_types.h>
++#include <rsbac/syscalls.h>
++
++#ifdef CONFIG_RSBAC_NET_OBJ
++#include <rsbac/network.h>
++#endif
++#ifdef CONFIG_RSBAC_DAZ
++#include <rsbac/daz.h>
++#endif
++
++/************************************************************************** */
++/* Global Variables */
++/************************************************************************** */
++
++extern struct semaphore rsbac_write_sem;
++
++#ifdef CONFIG_RSBAC_XSTATS
++extern __u64 syscall_count[RSYS_none];
++#endif
++
++/************************************************* */
++/* Declarations */
++/************************************************* */
++
++/************************************************* */
++/* General functions */
++/************************************************* */
++
++/* All functions return 0 if no error occurred, and a negative error code */
++/* otherwise. The error codes are defined in rsbac/error.h. */
++
++int sys_rsbac_stats(void)
++ {
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_stats(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++
++ return rsbac_stats();
++ }
++
++long sys_sync(void);
++
++int sys_rsbac_check(int correct, int check_inode)
++ {
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ int result;
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_check(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++
++ rsbac_printk(KERN_INFO
++ "sys_rsbac_check(): triggering RSBAC consistency check, correct = %u, check_inode = %u!\n",
++ correct, check_inode);
++
++ result=rsbac_check_lists(correct);
++
++ /* call other checks */
++#if defined(CONFIG_RSBAC_ACL)
++ if(!result)
++ result=rsbac_check_acl(correct);
++#endif
++#if defined(CONFIG_RSBAC_REG)
++ if(!result)
++ result=rsbac_check_reg(correct, check_inode);
++#endif
++
++ return result;
++ }
++
++int sys_rsbac_write(void)
++ {
++#if defined(CONFIG_RSBAC_AUTO_WRITE)
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_write(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_WRITE,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++
++ return rsbac_write();
++#else
++ return 0;
++#endif /* CONFIG_RSBAC_AUTO_WRITE */
++ };
++
++/************************************************* */
++/* Attribute functions */
++/************************************************* */
++
++int sys_rsbac_get_attr(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t __user * tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t __user * value,
++ int inherit)
++ {
++ union rsbac_target_id_t k_tid;
++ union rsbac_attribute_value_t k_value;
++ int err = 0;
++ rsbac_boolean_t i_inherit;
++
++ if(module > SW_NONE)
++ return -RSBAC_EINVALIDMODULE;
++ if(!tid || (target >= T_NONE))
++ return -RSBAC_EINVALIDTARGET;
++ if(!value)
++ return -RSBAC_EINVALIDPOINTER;
++ if(attr >= A_none)
++ return -RSBAC_EINVALIDATTR;
++
++ if(module == SW_NONE)
++ {
++ module = get_attr_module(attr);
++ if(module == SW_NONE)
++ return -RSBAC_EINVALIDMODULE;
++ }
++
++ /* get values from user space */
++ rsbac_get_user(&k_tid, tid, sizeof(k_tid) );
++ rsbac_get_user(&k_value, value, sizeof(k_value) );
++
++ switch (target) {
++ case T_FD:
++ return -RSBAC_EINVALIDTARGET;
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ k_tid.file.dentry_p = NULL;
++ k_tid.dir.dentry_p = NULL;
++ break;
++ case T_USER:
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(k_tid.user) == RSBAC_UM_VIRTUAL_KEEP)
++ k_tid.user = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(k_tid.user));
++ else
++ if ( (RSBAC_UID_SET(k_tid.user) > RSBAC_UM_VIRTUAL_MAX)
++ && (RSBAC_UID_SET(k_tid.user) != RSBAC_UM_VIRTUAL_ALL)
++ )
++ return -RSBAC_EINVALIDTARGET;
++#else
++ k_tid.user = RSBAC_UID_NUM(k_tid.user);
++#endif
++ break;
++ case T_GROUP:
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_GID_SET(k_tid.group) == RSBAC_UM_VIRTUAL_KEEP)
++ k_tid.group = RSBAC_GEN_GID (rsbac_get_vset(), RSBAC_GID_NUM(k_tid.group));
++ else
++ if ( (RSBAC_GID_SET(k_tid.group) > RSBAC_UM_VIRTUAL_MAX)
++ && (RSBAC_GID_SET(k_tid.group) != RSBAC_UM_VIRTUAL_ALL)
++ )
++ return -RSBAC_EINVALIDTARGET;
++#else
++ k_tid.group = RSBAC_GID_NUM(k_tid.group);
++#endif
++ break;
++ case T_PROCESS:
++ k_tid.process = find_pid_ns(k_tid.uprocess, &init_pid_ns);
++ if(!k_tid.process)
++ return -RSBAC_EINVALIDTARGET;
++ break;
++ default:
++ break;
++ }
++
++ if(inherit)
++ i_inherit = TRUE;
++ else
++ i_inherit = FALSE;
++
++#ifdef CONFIG_RSBAC_NET_OBJ
++ /* sanity check before using pointer */
++ if( (target == T_NETOBJ)
++ && ( !k_tid.netobj.sock_p
++ || k_tid.netobj.remote_addr
++ || !k_tid.netobj.sock_p->file
++ || !k_tid.netobj.sock_p->file->f_dentry
++ || !k_tid.netobj.sock_p->file->f_dentry->d_inode
++ || (SOCKET_I(k_tid.netobj.sock_p->file->f_dentry->d_inode) != k_tid.netobj.sock_p)
++ )
++ )
++ return -RSBAC_EINVALIDTARGET;
++#endif
++
++ /* call ADF */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_get_attr(): calling ADF\n");
++ }
++#endif
++ if (!rsbac_adf_request(R_READ_ATTRIBUTE,
++ task_pid(current),
++ target,
++ k_tid,
++ attr,
++ k_value))
++ {
++ return -EPERM;
++ }
++
++ err = rsbac_ta_get_attr(ta_number, module, target, k_tid, attr, &k_value, i_inherit);
++ /* put result value to user space */
++ if(!err)
++ {
++ err = rsbac_put_user(&k_value, value, sizeof(k_value) );
++ }
++ return err;
++ } /* end of sys_rsbac_get_attr() */
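++
++/*
++ * Illustrative call sequence (sketch only; the userspace wrapper name
++ * rsbac_get_attr() is assumed to mirror the syscall signature and is not
++ * defined in this file): to read a GEN attribute of a process, the caller
++ * fills the uprocess field of the target id union and passes pointers to
++ * both unions, expecting 0 on success:
++ *
++ *   union rsbac_target_id_t tid;
++ *   union rsbac_attribute_value_t val;
++ *
++ *   tid.uprocess = pid;
++ *   if (!rsbac_get_attr(0, SW_GEN, T_PROCESS, &tid, A_pseudo, &val, 0))
++ *       handle(val);
++ *
++ * A_pseudo is only an example attribute; pid and handle() are placeholders.
++ */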
++
++
++int sys_rsbac_get_attr_n(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ char __user * t_name,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t __user * value,
++ int inherit)
++ {
++ union rsbac_attribute_value_t k_value;
++ struct dentry * t_dentry;
++ int err = 0;
++ union rsbac_target_id_t tid;
++/* struct passwd * user_description_p; */
++ rsbac_boolean_t i_inherit;
++ struct path path;
++
++ if(module > SW_NONE)
++ return -RSBAC_EINVALIDMODULE;
++ if(!t_name || (target >= T_NONE))
++ return -RSBAC_EINVALIDTARGET;
++ if(!value)
++ return -RSBAC_EINVALIDPOINTER;
++ if(attr >= A_none)
++ return -RSBAC_EINVALIDATTR;
++
++ if(module == SW_NONE)
++ {
++ module = get_attr_module(attr);
++ if(module == SW_NONE)
++ return -RSBAC_EINVALIDMODULE;
++ }
++
++ if(inherit)
++ i_inherit = TRUE;
++ else
++ i_inherit = FALSE;
++
++ /* get values from user space */
++ rsbac_get_user(&k_value, value, sizeof(k_value) );
++ switch (target) {
++ case T_FD:
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ tid.file.dentry_p = NULL;
++ tid.dir.dentry_p = NULL;
++ break;
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ /* lookup filename */
++ if ((err = user_lpath(t_name, &path)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_get_attr_n(): call to user_lpath() returned %i\n", err);
++ }
++#endif
++ goto out;
++ }
++ t_dentry = path.dentry;
++ if (!t_dentry->d_inode)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_get_attr_n(): file not found\n");
++ }
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++
++ switch (target)
++ {
++ /* is inode of right type? */
++ case T_FD:
++ if(S_ISREG(t_dentry->d_inode->i_mode))
++ {
++ target = T_FILE;
++ }
++ else
++ if(S_ISDIR(t_dentry->d_inode->i_mode))
++ {
++ target = T_DIR;
++ }
++ else
++ if(S_ISLNK(t_dentry->d_inode->i_mode))
++ {
++ target = T_SYMLINK;
++ }
++ else
++ if(S_ISFIFO(t_dentry->d_inode->i_mode))
++ {
++ target = T_FIFO;
++ }
++ else
++ if(S_ISBLK(t_dentry->d_inode->i_mode))
++ {
++ target = T_FILE;
++ }
++ else
++ if(S_ISCHR(t_dentry->d_inode->i_mode))
++ {
++ target = T_FILE;
++ }
++ else
++ if(S_ISSOCK(t_dentry->d_inode->i_mode))
++ {
++ target = T_UNIXSOCK;
++ }
++ else
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_get_attr_n(): no filesystem object\n");
++ }
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_FILE:
++ if ( !(S_ISREG(t_dentry->d_inode->i_mode))
++ && !(S_ISBLK(t_dentry->d_inode->i_mode))
++ && !(S_ISCHR(t_dentry->d_inode->i_mode)) )
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_get_attr_n(): no file\n");
++ }
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_DIR:
++ if ( !(S_ISDIR(t_dentry->d_inode->i_mode)) )
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_get_attr_n(): no dir\n");
++ }
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_FIFO:
++ /* is inode of type fifo? */
++ if ( !(S_ISFIFO(t_dentry->d_inode->i_mode)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_get_attr_n(): no fifo\n");
++ }
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_UNIXSOCK:
++ /* is inode of type socket? */
++ if ( !(S_ISSOCK(t_dentry->d_inode->i_mode)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_get_attr_n(): no socket\n");
++ }
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_SYMLINK:
++ if ( !(S_ISLNK(t_dentry->d_inode->i_mode)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_get_attr_n(): no symlink\n");
++ }
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_DEV:
++ if ( !(S_ISBLK(t_dentry->d_inode->i_mode))
++ && !(S_ISCHR(t_dentry->d_inode->i_mode)) )
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_get_attr_n(): no dev\n");
++ }
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++
++ if(target == T_DEV)
++ {
++ if(S_ISBLK(t_dentry->d_inode->i_mode))
++ tid.dev.type = D_block;
++ else
++ tid.dev.type = D_char;
++ tid.dev.major = RSBAC_MAJOR(t_dentry->d_inode->i_rdev);
++ tid.dev.minor = RSBAC_MINOR(t_dentry->d_inode->i_rdev);
++ }
++ else
++ {
++ /* fill target id and call internal function */
++ tid.file.device = t_dentry->d_sb->s_dev;
++ tid.file.inode = t_dentry->d_inode->i_ino;
++ tid.file.dentry_p = t_dentry;
++ }
++ /* call ADF */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_get_attr_n(): calling ADF\n");
++ }
++#endif
++ if (!rsbac_adf_request(R_READ_ATTRIBUTE,
++ task_pid(current),
++ target,
++ tid,
++ attr,
++ k_value))
++ {
++ err = -EPERM;
++ }
++ else
++ {
++ err = rsbac_ta_get_attr(ta_number, module, target, tid, attr, &k_value, i_inherit);
++ /* put result value to user space */
++ if(!err)
++ rsbac_put_user(&k_value, value, sizeof(k_value) );
++ }
++
++out_dput:
++ path_put(&path);
++
++out:
++ return err;
++ } /* end of sys_rsbac_get_attr_n() */
++
++/************************************************************************** */
++
++int sys_rsbac_set_attr(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t __user * tid,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t __user * value)
++ {
++ union rsbac_target_id_t k_tid;
++ union rsbac_attribute_value_t k_value;
++ int err = 0;
++
++ if(module > SW_NONE)
++ return -RSBAC_EINVALIDMODULE;
++ if(!tid || (target >= T_NONE))
++ return -RSBAC_EINVALIDTARGET;
++ if(!value)
++ return -RSBAC_EINVALIDPOINTER;
++ if(attr >= A_none)
++ return -RSBAC_EINVALIDATTR;
++
++ if(module == SW_NONE)
++ {
++ module = get_attr_module(attr);
++ if(module == SW_NONE)
++ return -RSBAC_EINVALIDMODULE;
++ }
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_set_attr(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++ /* get values from user space */
++ rsbac_get_user(&k_tid, tid, sizeof(k_tid) );
++ rsbac_get_user(&k_value, value, sizeof(k_value) );
++
++
++ switch(target)
++ {
++ case T_PROCESS:
++ k_tid.process = find_pid_ns(k_tid.uprocess, &init_pid_ns);
++ if(!k_tid.process)
++ return -RSBAC_EINVALIDTARGET;
++ break;
++
++#ifdef CONFIG_RSBAC_NET_OBJ
++ /* sanity check before using pointer */
++ case T_NETOBJ:
++ if( !k_tid.netobj.sock_p
++ || k_tid.netobj.remote_addr
++ || !k_tid.netobj.sock_p->file
++ || !k_tid.netobj.sock_p->file->f_dentry
++ || !k_tid.netobj.sock_p->file->f_dentry->d_inode
++ || (SOCKET_I(k_tid.netobj.sock_p->file->f_dentry->d_inode) != k_tid.netobj.sock_p)
++ )
++ return -RSBAC_EINVALIDTARGET;
++#endif
++ break;
++ case T_USER:
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(k_tid.user) == RSBAC_UM_VIRTUAL_KEEP)
++ k_tid.user = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(k_tid.user));
++ else
++ if ( (RSBAC_UID_SET(k_tid.user) > RSBAC_UM_VIRTUAL_MAX)
++ && (RSBAC_UID_SET(k_tid.user) != RSBAC_UM_VIRTUAL_ALL)
++ )
++ return -RSBAC_EINVALIDTARGET;
++#else
++ k_tid.user = RSBAC_UID_NUM(k_tid.user);
++#endif
++ break;
++ case T_GROUP:
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_GID_SET(k_tid.group) == RSBAC_UM_VIRTUAL_KEEP)
++ k_tid.group = RSBAC_GEN_GID (rsbac_get_vset(), RSBAC_GID_NUM(k_tid.group));
++ else
++ if ( (RSBAC_GID_SET(k_tid.group) > RSBAC_UM_VIRTUAL_MAX)
++ && (RSBAC_GID_SET(k_tid.group) != RSBAC_UM_VIRTUAL_ALL)
++ )
++ return -RSBAC_EINVALIDTARGET;
++#else
++ k_tid.group = RSBAC_GID_NUM(k_tid.group);
++#endif
++ break;
++
++ case T_FD:
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_SYMLINK:
++ case T_UNIXSOCK:
++ return -RSBAC_EINVALIDTARGET;
++
++ default:
++ break;
++ }
++
++ /* call ADF */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG
++ "sys_rsbac_set_attr(): calling ADF\n");
++#endif
++ if (!rsbac_adf_request(R_MODIFY_ATTRIBUTE,
++ task_pid(current),
++ target,
++ k_tid,
++ attr,
++ k_value))
++ {
++ return -EPERM;
++ }
++ err = rsbac_ta_set_attr(ta_number, module, target, k_tid, attr, k_value);
++ return err;
++ } /* end of sys_rsbac_set_attr() */
++
++int sys_rsbac_set_attr_n(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_switch_target_t module,
++ enum rsbac_target_t target,
++ char __user * t_name,
++ enum rsbac_attribute_t attr,
++ union rsbac_attribute_value_t __user * value)
++ {
++ struct dentry * t_dentry;
++ int err = 0;
++ union rsbac_attribute_value_t k_value;
++ union rsbac_target_id_t tid;
++ struct path path;
++
++ if(module > SW_NONE)
++ return -RSBAC_EINVALIDMODULE;
++ if(!t_name || (target >= T_NONE))
++ return -RSBAC_EINVALIDTARGET;
++ if(!value)
++ return -RSBAC_EINVALIDPOINTER;
++ if(attr >= A_none)
++ return -RSBAC_EINVALIDATTR;
++
++ if(module == SW_NONE)
++ {
++ module = get_attr_module(attr);
++ if(module == SW_NONE)
++ return -RSBAC_EINVALIDMODULE;
++ }
++
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_set_attr_n(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++ /* get values from user space */
++ rsbac_get_user(&k_value, value, sizeof(k_value) );
++
++ /* lookup filename */
++ if ((err = user_lpath(t_name, &path)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_set_attr_n(): call to user_lpath() returned %i\n", err);
++#endif
++ goto out;
++ }
++ t_dentry = path.dentry;
++ if (!t_dentry->d_inode)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_set_attr_n(): file not found\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++
++ switch (target)
++ {
++ /* is inode of right type? */
++ case T_FD:
++ if(S_ISREG(t_dentry->d_inode->i_mode))
++ {
++ target = T_FILE;
++ }
++ else
++ if(S_ISDIR(t_dentry->d_inode->i_mode))
++ {
++ target = T_DIR;
++ }
++ else
++ if(S_ISLNK(t_dentry->d_inode->i_mode))
++ {
++ target = T_SYMLINK;
++ }
++ else
++ if(S_ISFIFO(t_dentry->d_inode->i_mode))
++ {
++ target = T_FIFO;
++ }
++ else
++ if(S_ISSOCK(t_dentry->d_inode->i_mode))
++ {
++ target = T_UNIXSOCK;
++ }
++ else
++ if(S_ISBLK(t_dentry->d_inode->i_mode))
++ {
++ target = T_FILE;
++ }
++ else
++ if(S_ISCHR(t_dentry->d_inode->i_mode))
++ {
++ target = T_FILE;
++ }
++ else
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_set_attr_n(): no filesystem object\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_FILE:
++ if ( !(S_ISREG(t_dentry->d_inode->i_mode))
++ && !(S_ISBLK(t_dentry->d_inode->i_mode))
++ && !(S_ISCHR(t_dentry->d_inode->i_mode)) )
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_set_attr_n(): no file\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_DIR:
++ if ( !(S_ISDIR(t_dentry->d_inode->i_mode)) )
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++          if (rsbac_debug_aef) rsbac_printk(KERN_DEBUG "sys_rsbac_set_attr_n(): no dir\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_FIFO:
++ /* is inode of type fifo? */
++ if ( !(S_ISFIFO(t_dentry->d_inode->i_mode)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_set_attr_n(): no fifo\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_UNIXSOCK:
++          /* is inode of type socket? */
++ if ( !(S_ISSOCK(t_dentry->d_inode->i_mode)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_set_attr_n(): no socket\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_SYMLINK:
++ if ( !(S_ISLNK(t_dentry->d_inode->i_mode)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_set_attr_n(): no symlink\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_DEV:
++ if ( !(S_ISBLK(t_dentry->d_inode->i_mode))
++ && !(S_ISCHR(t_dentry->d_inode->i_mode)) )
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef) rsbac_printk(KERN_DEBUG "sys_rsbac_set_attr_n(): no dev\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++
++ if(target == T_DEV)
++ {
++ if(S_ISBLK(t_dentry->d_inode->i_mode))
++ tid.dev.type = D_block;
++ else
++ tid.dev.type = D_char;
++ tid.dev.major = RSBAC_MAJOR(t_dentry->d_inode->i_rdev);
++ tid.dev.minor = RSBAC_MINOR(t_dentry->d_inode->i_rdev);
++ }
++ else
++ {
++ /* fill target id and call internal function */
++ tid.file.device = t_dentry->d_sb->s_dev;
++ tid.file.inode = t_dentry->d_inode->i_ino;
++ tid.file.dentry_p = t_dentry;
++ }
++ /* call ADF */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef) rsbac_printk(KERN_DEBUG "sys_rsbac_set_attr_n(): calling ADF\n");
++#endif
++ if (!rsbac_adf_request(R_MODIFY_ATTRIBUTE,
++ task_pid(current),
++ target,
++ tid,
++ attr,
++ k_value))
++ {
++ err = -EPERM;
++ }
++ else
++ {
++ err = rsbac_ta_set_attr(ta_number, module, target, tid, attr, k_value);
++ }
++
++out_dput:
++ path_put(&path);
++
++out:
++ return err;
++ } /* end of sys_rsbac_set_attr_n() */
++
++/************************************************************************** */
++
++int sys_rsbac_remove_target(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t __user * tid)
++ {
++ union rsbac_target_id_t k_tid;
++ int err = 0;
++
++ /* for adf_request */
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if(!tid || (target >= T_NONE))
++ return -RSBAC_EINVALIDTARGET;
++
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_remove_target(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++ /* get values from user space */
++ rsbac_get_user(&k_tid, tid, sizeof(k_tid) );
++
++ switch (target) {
++ case T_USER:
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(k_tid.user) == RSBAC_UM_VIRTUAL_KEEP)
++ k_tid.user = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(k_tid.user));
++#else
++ k_tid.user = RSBAC_UID_NUM(k_tid.user);
++#endif
++ break;
++ case T_GROUP:
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_GID_SET(k_tid.group) == RSBAC_UM_VIRTUAL_KEEP)
++ k_tid.group = RSBAC_GEN_GID (rsbac_get_vset(), RSBAC_GID_NUM(k_tid.group));
++#else
++ k_tid.group = RSBAC_GID_NUM(k_tid.group);
++#endif
++ break;
++ case T_PROCESS:
++ k_tid.process = find_pid_ns(k_tid.uprocess, &init_pid_ns);
++ if(!k_tid.process)
++ return -RSBAC_EINVALIDTARGET;
++ break;
++
++ default:
++ break;
++ }
++
++ /* call ADF */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef) rsbac_printk(KERN_DEBUG "sys_rsbac_remove_target(): calling ADF\n");
++#endif
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_ATTRIBUTE,
++ task_pid(current),
++ target,
++ k_tid,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++ err = rsbac_ta_remove_target(ta_number, target, k_tid);
++ return err;
++ } /* end of sys_rsbac_remove_target() */
++
++int sys_rsbac_remove_target_n(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ char __user * t_name)
++ {
++ struct dentry * t_dentry;
++ int err = 0;
++ union rsbac_target_id_t tid;
++
++ /* for adf_request */
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++/* struct passwd * user_description_p; */
++
++ struct path path;
++
++ if(!t_name || (target >= T_NONE))
++ return -RSBAC_EINVALIDTARGET;
++
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_remove_target_n(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++ /* lookup filename */
++ if ((err = user_lpath(t_name, &path)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_remove_target_n(): call to user_lpath() returned %i\n", err);
++#endif
++ goto out;
++ }
++ t_dentry = path.dentry;
++ if (!t_dentry->d_inode)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_remove_target_n(): file not found\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++
++ switch (target)
++ {
++ /* is inode of right type? */
++ case T_FD:
++ if(S_ISREG(t_dentry->d_inode->i_mode))
++ {
++ target = T_FILE;
++ }
++ else
++ if(S_ISDIR(t_dentry->d_inode->i_mode))
++ {
++ target = T_DIR;
++ }
++ else
++ if(S_ISLNK(t_dentry->d_inode->i_mode))
++ {
++ target = T_SYMLINK;
++ }
++ else
++ if(S_ISFIFO(t_dentry->d_inode->i_mode))
++ {
++ target = T_FIFO;
++ }
++ else
++ if(S_ISSOCK(t_dentry->d_inode->i_mode))
++ {
++ target = T_UNIXSOCK;
++ }
++ else
++ if(S_ISBLK(t_dentry->d_inode->i_mode))
++ {
++ target = T_FILE;
++ }
++ else
++ if(S_ISCHR(t_dentry->d_inode->i_mode))
++ {
++ target = T_FILE;
++ }
++ else
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_remove_target_n(): no filesystem object\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_FILE:
++ if ( !(S_ISREG(t_dentry->d_inode->i_mode))
++ && !(S_ISBLK(t_dentry->d_inode->i_mode))
++ && !(S_ISCHR(t_dentry->d_inode->i_mode)) )
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_remove_target_n(): no file\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_DIR:
++ if ( !(S_ISDIR(t_dentry->d_inode->i_mode)) )
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++          if (rsbac_debug_aef) rsbac_printk(KERN_DEBUG "sys_rsbac_remove_target_n(): no dir\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_FIFO:
++ /* is inode of type fifo? */
++ if ( !(S_ISFIFO(t_dentry->d_inode->i_mode)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_remove_target_n(): no fifo\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_UNIXSOCK:
++          /* is inode of type socket? */
++ if ( !(S_ISSOCK(t_dentry->d_inode->i_mode)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_remove_target_n(): no socket\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_SYMLINK:
++ if ( !(S_ISLNK(t_dentry->d_inode->i_mode)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_remove_target_n(): no symlink\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_DEV:
++ if ( !(S_ISBLK(t_dentry->d_inode->i_mode))
++ && !(S_ISCHR(t_dentry->d_inode->i_mode)) )
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef) rsbac_printk(KERN_DEBUG "sys_rsbac_remove_target_n(): no dev\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++
++ if(target == T_DEV)
++ {
++ if(S_ISBLK(t_dentry->d_inode->i_mode))
++ tid.dev.type = D_block;
++ else
++ tid.dev.type = D_char;
++ tid.dev.major = RSBAC_MAJOR(t_dentry->d_inode->i_rdev);
++ tid.dev.minor = RSBAC_MINOR(t_dentry->d_inode->i_rdev);
++ }
++ else
++ {
++ /* fill target id and call internal function */
++ tid.file.device = t_dentry->d_sb->s_dev;
++ tid.file.inode = t_dentry->d_inode->i_ino;
++ tid.file.dentry_p = t_dentry;
++ }
++ /* call ADF */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef) rsbac_printk(KERN_DEBUG "sys_rsbac_remove_target_n(): calling ADF\n");
++#endif
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_ATTRIBUTE,
++ task_pid(current),
++ target,
++ tid,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ }
++ else
++ {
++ err = rsbac_ta_remove_target(ta_number, target, tid);
++ }
++
++out_dput:
++ path_put(&path);
++
++out:
++ return err;
++ } /* end of sys_rsbac_remove_target_n() */
++
++int sys_rsbac_list_all_dev(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_dev_desc_t __user * id_p,
++ u_long maxnum)
++ {
++ int err = 0;
++ long count;
++
++ if(id_p && maxnum)
++ {
++ struct rsbac_dev_desc_t * k_id_p = NULL;
++
++ count = rsbac_ta_list_all_dev(ta_number, &k_id_p);
++ if(count <= 0)
++ return count;
++ if(count > maxnum)
++ count = maxnum;
++
++ err = rsbac_put_user(k_id_p, id_p, count * sizeof(*k_id_p) );
++
++ rsbac_kfree(k_id_p);
++
++ if(err)
++ return err;
++ else
++ return count;
++ }
++ else
++ return rsbac_ta_list_all_dev(ta_number, NULL);
++ }
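++
++/*
++ * sys_rsbac_list_all_dev() above and the other list_all_* calls that
++ * follow share one convention: when id_p is NULL or maxnum is 0, only the
++ * number of entries is returned, so a caller can size its buffer first and
++ * then fetch the data. Sketch of the userspace side (wrapper name and
++ * error handling are illustrative only):
++ *
++ *   long n = rsbac_list_all_dev(0, NULL, 0);
++ *
++ *   if (n > 0) {
++ *       struct rsbac_dev_desc_t *buf = malloc(n * sizeof(*buf));
++ *
++ *       if (buf) {
++ *           n = rsbac_list_all_dev(0, buf, n);
++ *           ...
++ *           free(buf);
++ *       }
++ *   }
++ *
++ * The count may change between the two calls, so the second result can
++ * legitimately differ from the first.
++ */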
++
++int sys_rsbac_list_all_user(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t __user * id_p,
++ u_long maxnum)
++ {
++ int err = 0;
++ long count;
++
++ if(id_p && maxnum)
++ {
++ rsbac_uid_t * k_id_p = NULL;
++
++ count = rsbac_ta_list_all_user(ta_number, &k_id_p);
++ if(count <= 0)
++ return count;
++ if(count > maxnum)
++ count = maxnum;
++
++ err = rsbac_put_user(k_id_p, id_p, count * sizeof(*k_id_p) );
++
++ rsbac_kfree(k_id_p);
++
++ if(err)
++ return err;
++ else
++ return count;
++ }
++ else
++ return rsbac_ta_list_all_user(ta_number, NULL);
++ }
++
++int sys_rsbac_list_all_group(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t __user * id_p,
++ u_long maxnum)
++ {
++ int err = 0;
++ long count;
++
++ if(id_p && maxnum)
++ {
++ rsbac_gid_t * k_id_p = NULL;
++
++ count = rsbac_ta_list_all_group(ta_number, &k_id_p);
++ if(count <= 0)
++ return count;
++ if(count > maxnum)
++ count = maxnum;
++
++ err = rsbac_put_user(k_id_p, id_p, count * sizeof(*k_id_p) );
++
++ rsbac_kfree(k_id_p);
++
++ if(err)
++ return err;
++ else
++ return count;
++ }
++ else
++ return rsbac_ta_list_all_group(ta_number, NULL);
++ }
++
++int sys_rsbac_list_all_ipc(rsbac_list_ta_number_t ta_number,
++ struct rsbac_ipc_t __user * id_p, u_long maxnum)
++{
++ int err = 0;
++ long count;
++
++ if (id_p && maxnum) {
++ struct rsbac_ipc_t *k_id_p = NULL;
++
++ count = rsbac_ta_list_all_ipc(ta_number, &k_id_p);
++ if (count <= 0)
++ return count;
++ if (count > maxnum)
++ count = maxnum;
++
++ err =
++ rsbac_put_user(k_id_p, id_p,
++ count * sizeof(*k_id_p));
++
++ rsbac_kfree(k_id_p);
++
++ if (err)
++ return err;
++ else
++ return count;
++ } else
++ return rsbac_ta_list_all_ipc(ta_number, NULL);
++}
++
++int sys_rsbac_net_list_all_netdev(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_netdev_id_t __user * id_p,
++ u_long maxnum)
++ {
++#ifdef CONFIG_RSBAC_NET_DEV
++ int err = 0;
++ long count;
++
++ if(id_p && maxnum)
++ {
++ rsbac_netdev_id_t * k_id_p = NULL;
++
++ count = rsbac_ta_net_list_all_netdev(ta_number, &k_id_p);
++ if(count <= 0)
++ return count;
++ if(count > maxnum)
++ count = maxnum;
++
++ err = rsbac_put_user(k_id_p, id_p, count * sizeof(*k_id_p) );
++
++ rsbac_kfree(k_id_p);
++
++ if(err)
++ return err;
++ else
++ return count;
++ }
++ else
++ return rsbac_ta_net_list_all_netdev(ta_number, NULL);
++
++#else
++ return -RSBAC_EINVALIDREQUEST;
++#endif /* CONFIG_RSBAC_NET_DEV */
++ }
++
++int sys_rsbac_net_template(rsbac_list_ta_number_t ta_number,
++ enum rsbac_net_temp_syscall_t call,
++ rsbac_net_temp_id_t id,
++ union rsbac_net_temp_syscall_data_t __user * data_p)
++{
++#ifdef CONFIG_RSBAC_NET_OBJ
++ union rsbac_net_temp_syscall_data_t k_data;
++ int err = 0;
++ /* for adf_request */
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val;
++#endif
++
++ if (!id)
++ return -RSBAC_EINVALIDVALUE;
++ if (!data_p)
++ return -RSBAC_EINVALIDPOINTER;
++
++ /* get data values from user space */
++ switch (call) {
++ case NTS_set_address:
++ case NTS_set_address_family:
++ case NTS_set_type:
++ case NTS_set_protocol:
++ case NTS_set_netdev:
++ case NTS_set_ports:
++ case NTS_set_name:
++ case NTS_new_template:
++ case NTS_copy_template:
++ case NTS_delete_template:
++#ifdef CONFIG_RSBAC_FREEZE
++ if (rsbac_freeze) {
++ rsbac_printk(KERN_WARNING "sys_rsbac_net_template(): RSBAC configuration frozen, no administration allowed\n");
++ return -EPERM;
++ }
++#endif
++ if (call != NTS_delete_template) {
++ err =
++ rsbac_get_user(&k_data, data_p, sizeof(k_data));
++ if (err)
++ return err;
++ }
++ break;
++ case NTS_check_id:
++ case NTS_get_address:
++ case NTS_get_address_family:
++ case NTS_get_type:
++ case NTS_get_protocol:
++ case NTS_get_netdev:
++ case NTS_get_ports:
++ case NTS_get_name:
++ break;
++
++ default:
++ return -RSBAC_EINVALIDREQUEST;
++ }
++
++ if ( (call != NTS_new_template)
++ && (call != NTS_copy_template)
++ && !rsbac_ta_net_template_exists(ta_number, id)
++ )
++ return -RSBAC_EINVALIDTARGET;
++
++#ifndef CONFIG_RSBAC_MAINT
++ rsbac_pr_debug(aef, "calling ADF\n");
++ i_tid.nettemp = id;
++ i_attr_val.dummy = 0;
++ switch (call) {
++ case NTS_new_template:
++ if (!rsbac_adf_request(R_CREATE,
++ task_pid(current),
++ T_NETTEMP,
++ i_tid, A_none, i_attr_val))
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if (!rsbac_softmode)
++#endif
++ return -EPERM;
++ break;
++
++ case NTS_copy_template:
++ if (!rsbac_ta_net_template_exist(ta_number, id)) {
++ if (!rsbac_adf_request(R_CREATE,
++ task_pid(current),
++ T_NETTEMP,
++ i_tid, A_none, i_attr_val))
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if (!rsbac_softmode)
++#endif
++ return -EPERM;
++ } else {
++ if (!rsbac_adf_request(R_WRITE,
++ task_pid(current),
++ T_NETTEMP,
++ i_tid, A_none, i_attr_val))
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if (!rsbac_softmode)
++#endif
++ return -EPERM;
++ }
++ i_tid.nettemp = k_data.id;
++ if (!rsbac_adf_request(R_READ,
++ task_pid(current),
++ T_NETTEMP,
++ i_tid, A_none, i_attr_val))
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if (!rsbac_softmode)
++#endif
++ return -EPERM;
++ break;
++
++ case NTS_delete_template:
++ if (!rsbac_adf_request(R_DELETE,
++ task_pid(current),
++ T_NETTEMP,
++ i_tid, A_none, i_attr_val))
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if (!rsbac_softmode)
++#endif
++ return -EPERM;
++ break;
++
++ case NTS_get_address:
++ case NTS_get_address_family:
++ case NTS_get_type:
++ case NTS_get_protocol:
++ case NTS_get_netdev:
++ case NTS_get_ports:
++ if (!rsbac_adf_request(R_READ,
++ task_pid(current),
++ T_NETTEMP,
++ i_tid, A_none, i_attr_val))
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if (!rsbac_softmode)
++#endif
++ return -EPERM;
++ break;
++
++ case NTS_set_address:
++ case NTS_set_address_family:
++ case NTS_set_type:
++ case NTS_set_protocol:
++ case NTS_set_netdev:
++ case NTS_set_ports:
++ case NTS_set_name:
++ if (!rsbac_adf_request(R_WRITE,
++ task_pid(current),
++ T_NETTEMP,
++ i_tid, A_none, i_attr_val))
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if (!rsbac_softmode)
++#endif
++ return -EPERM;
++ break;
++
++ default:
++ break;
++ }
++#endif /* !MAINT */
++
++ err = rsbac_ta_net_template(ta_number, call, id, &k_data);
++ if (!err) {
++ /* put data values to user space */
++ switch (call) {
++ case NTS_check_id:
++ case NTS_get_address:
++ case NTS_get_address_family:
++ case NTS_get_type:
++ case NTS_get_protocol:
++ case NTS_get_netdev:
++ case NTS_get_ports:
++ case NTS_get_name:
++ err = rsbac_put_user(&k_data,
++ data_p,
++ sizeof(k_data));
++ break;
++ default:
++ break;
++ }
++ }
++ return err;
++
++#else
++ return -RSBAC_EINVALIDREQUEST;
++#endif /* NET_OBJ */
++}
++
++int sys_rsbac_net_list_all_template(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_net_temp_id_t __user * id_p,
++ u_long maxnum)
++ {
++#ifdef CONFIG_RSBAC_NET_OBJ
++ int err = 0;
++ int count;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val;
++
++ i_tid.nettemp = 0;
++ i_attr_val.dummy = 0;
++ if (!rsbac_adf_request(R_READ,
++ task_pid(current),
++ T_NETTEMP,
++ i_tid,
++ A_none,
++ i_attr_val))
++ return -EPERM;
++ if(id_p && maxnum)
++ {
++ rsbac_net_temp_id_t * k_id_p = NULL;
++
++ count = rsbac_ta_net_list_all_template(ta_number, &k_id_p);
++ if(count <= 0)
++ return count;
++ if(count > maxnum)
++ count = maxnum;
++
++ err = rsbac_put_user(k_id_p, id_p, count * sizeof(*k_id_p) );
++
++ rsbac_kfree(k_id_p);
++
++ if(err)
++ return err;
++ else
++ return count;
++ }
++ else
++ return rsbac_ta_net_list_all_template(ta_number, NULL);
++
++#else
++ return -RSBAC_EINVALIDREQUEST;
++#endif /* CONFIG_RSBAC_NET_OBJ */
++ }
++
++
++/************************************************* */
++/* ADF functions */
++/************************************************* */
++
++int sys_rsbac_switch(enum rsbac_switch_target_t module, int value)
++ {
++#if defined(CONFIG_RSBAC_SWITCH) || defined(CONFIG_RSBAC_SOFTMODE)
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ char * switch_name;
++
++ /* call ADF */
++ if(module >= SW_NONE)
++ return -RSBAC_EINVALIDTARGET;
++ if ( (value < 0)
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ || (value > 3)
++#else
++ || (value > 1)
++#endif
++ )
++ return -RSBAC_EINVALIDVALUE;
++
++#ifdef CONFIG_RSBAC_SOFTMODE
++ if( rsbac_softmode_prohibit
++ && ( ( (value == 1)
++ && (module == SW_SOFTMODE)
++ )
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ || (value == 3)
++#endif
++ )
++ )
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_switch(): setting of softmode prohibited!\n");
++ return -EPERM;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_switch(): calling ADF\n");
++#endif
++ rsbac_target_id.dummy = 0;
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE
++ if(module == SW_DAC_DISABLE)
++ {
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(value > 1)
++ return -RSBAC_EINVALIDVALUE;
++#endif
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_PERMISSIONS_DATA,
++ task_pid(current),
++ T_NONE,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++ }
++ else
++#endif
++ {
++ rsbac_attribute_value.switch_target = module;
++ if (!rsbac_adf_request(R_SWITCH_MODULE,
++ task_pid(current),
++ T_NONE,
++ rsbac_target_id,
++ A_switch_target,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++ }
++
++ switch(value)
++ {
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ case 2:
++ case 3:
++ rsbac_ind_softmode[module] = value - 2;
++ break;
++#endif
++
++ default:
++ switch (module)
++ {
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_FULL
++ case SW_DAC_DISABLE: rsbac_dac_disable = value;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_SOFTMODE
++ case SW_SOFTMODE: rsbac_softmode = value;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_FREEZE
++ case SW_FREEZE:
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_switch(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++ rsbac_freeze = value;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_MAC
++ case SW_MAC:
++#ifndef CONFIG_RSBAC_SWITCH_ON
++ if(value)
++ return -RSBAC_EINVALIDMODULE;
++#endif
++ rsbac_switch_mac = value;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_PM
++ case SW_PM:
++#ifndef CONFIG_RSBAC_SWITCH_ON
++ if(value)
++ return -RSBAC_EINVALIDMODULE;
++#endif
++ rsbac_switch_pm = value;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_DAZ
++ case SW_DAZ:
++ rsbac_switch_daz = value;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_FF
++ case SW_FF:
++ rsbac_switch_ff = value;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_RC
++ case SW_RC:
++#ifndef CONFIG_RSBAC_SWITCH_ON
++ if(value)
++ return -RSBAC_EINVALIDMODULE;
++#endif
++ rsbac_switch_rc = value;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_AUTH
++ case SW_AUTH:
++ rsbac_switch_auth = value;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++ case SW_ACL:
++ rsbac_switch_acl = value;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_CAP
++ case SW_CAP:
++ rsbac_switch_cap = value;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_JAIL
++ case SW_JAIL:
++ rsbac_switch_jail = value;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_RES
++ case SW_RES:
++ rsbac_switch_res = value;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_SWITCH_PAX
++ case SW_PAX:
++ rsbac_switch_pax = value;
++ break;
++#endif
++ default:
++ return -RSBAC_EINVALIDMODULE;
++ }
++ }
++
++ switch_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(switch_name)
++ {
++ int show_value = value;
++
++ get_switch_target_name(switch_name, module);
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ switch(value)
++ {
++ case 2:
++ case 3:
++ strcat(switch_name, " softmode");
++ show_value -= 2;
++ break;
++ default:
++ break;
++ }
++#endif
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_switch(): user %u switched RSBAC module %s to %i!\n",
++ current_uid(), switch_name, show_value);
++ rsbac_kfree(switch_name);
++ }
++ return 0;
++#else
++ return -RSBAC_EINVALIDREQUEST;
++#endif /* SWITCH || SOFTMODE*/
++ }
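++
++/*
++ * Value encoding for sys_rsbac_switch() (sketch, derived from the code
++ * above): 0 switches the module off and 1 switches it on; with
++ * CONFIG_RSBAC_SOFTMODE_IND, 2 and 3 clear or set the individual softmode
++ * of that module instead. For example, to put just the RC module into
++ * individual softmode:
++ *
++ *   sys_rsbac_switch(SW_RC, 3);
++ *
++ * (A userspace tool would go through the corresponding syscall wrapper
++ * rather than calling the kernel function directly.)
++ */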
++
++/**
++ * sys_rsbac_get_switch - get the module status
++ * (is switchable ? is softmodable ?)
++ *
++ * @module: the target module
++ * @value_p: 0: module is enabled
++ * 1: module is softmodded
++ * @switchable_p: 0: module can be turned on
++ * 1: module can be turned off
++ * 2: softmode can be turned on, but not off
++ * 3: softmode can be turned on or off
++ *
++ * Returns 0 on success
++ */
++int sys_rsbac_get_switch(enum rsbac_switch_target_t module,
++ int __user * value_p,
++ int __user * switchable_p)
++{
++	int value = 1;	/* default if the module exists and RSBAC_SWITCH is not compiled */
++ int switchable = 0;
++ int allow_softmode = 0;
++ int err = 0;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if(module >= SW_NONE)
++ return -RSBAC_EINVALIDTARGET;
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if(rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_get_switch(): calling ADF\n");
++#endif
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if(!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++ switch(module)
++ {
++ case SW_GEN:
++ allow_softmode = 0;
++ switchable = 0;
++ break;
++#ifdef CONFIG_RSBAC_UM
++ case SW_UM:
++ allow_softmode = 0;
++ switchable = 0;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_REG
++ case SW_REG:
++ allow_softmode = 1;
++ switchable = 0;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_ALLOW_DAC_DISABLE_FULL
++ case SW_DAC_DISABLE:
++ allow_softmode = 0;
++ value = rsbac_dac_disable;
++ switchable = 3;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_SOFTMODE
++ case SW_SOFTMODE:
++ allow_softmode = 0;
++ value = rsbac_softmode;
++ switchable = (rsbac_softmode_prohibit?2:3);
++ break;
++#endif
++#ifdef CONFIG_RSBAC_FREEZE
++ case SW_FREEZE:
++ allow_softmode = 0;
++ value = rsbac_freeze;
++ switchable = 1;
++ break;
++#endif
++#ifdef CONFIG_RSBAC_MAC
++ case SW_MAC:
++ allow_softmode = 1;
++#ifdef CONFIG_RSBAC_SWITCH_MAC
++ value = rsbac_switch_mac;
++#ifdef CONFIG_RSBAC_SWITCH_ON
++ switchable = 3;
++#else
++ switchable = 2;
++#endif
++#else
++ switchable = 0;
++#endif
++ break;
++#endif
++#ifdef CONFIG_RSBAC_PM
++ case SW_PM:
++ allow_softmode = 1;
++#ifdef CONFIG_RSBAC_SWITCH_PM
++ value = rsbac_switch_pm;
++#ifdef CONFIG_RSBAC_SWITCH_ON
++ switchable = 3;
++#else
++ switchable = 2;
++#endif
++#else
++ switchable = 0;
++#endif
++ break;
++#endif
++#ifdef CONFIG_RSBAC_DAZ
++ case SW_DAZ:
++ allow_softmode = 1;
++#ifdef CONFIG_RSBAC_SWITCH_DAZ
++ value = rsbac_switch_daz;
++ switchable = 3;
++#else
++ switchable = 0;
++#endif
++ break;
++#endif
++#ifdef CONFIG_RSBAC_FF
++ case SW_FF:
++ allow_softmode = 1;
++#ifdef CONFIG_RSBAC_SWITCH_FF
++ value = rsbac_switch_ff;
++ switchable = 3;
++#else
++ switchable = 0;
++#endif
++ break;
++#endif
++#ifdef CONFIG_RSBAC_RC
++ case SW_RC:
++ allow_softmode = 1;
++#ifdef CONFIG_RSBAC_SWITCH_RC
++ value = rsbac_switch_rc;
++#ifdef CONFIG_RSBAC_SWITCH_ON
++ switchable = 3;
++#else
++ switchable = 2;
++#endif
++#else
++ switchable = 0;
++#endif
++ break;
++#endif
++#ifdef CONFIG_RSBAC_AUTH
++ case SW_AUTH:
++ allow_softmode = 1;
++#ifdef CONFIG_RSBAC_SWITCH_AUTH
++ value = rsbac_switch_auth;
++ switchable = 3;
++#else
++ switchable = 0;
++#endif
++ break;
++#endif
++#ifdef CONFIG_RSBAC_ACL
++ case SW_ACL:
++ allow_softmode = 1;
++#ifdef CONFIG_RSBAC_SWITCH_ACL
++ value = rsbac_switch_acl;
++ switchable = 3;
++#else
++ switchable = 0;
++#endif
++ break;
++#endif
++#ifdef CONFIG_RSBAC_CAP
++ case SW_CAP:
++ allow_softmode = 1;
++#ifdef CONFIG_RSBAC_SWITCH_CAP
++ value = rsbac_switch_cap;
++ switchable = 3;
++#else
++ switchable = 0;
++#endif
++ break;
++#endif
++#ifdef CONFIG_RSBAC_JAIL
++ case SW_JAIL:
++ allow_softmode = 1;
++#ifdef CONFIG_RSBAC_SWITCH_JAIL
++ value = rsbac_switch_jail;
++ switchable = 3;
++#else
++ switchable = 0;
++#endif
++ break;
++#endif
++#ifdef CONFIG_RSBAC_RES
++ case SW_RES:
++ allow_softmode = 1;
++#ifdef CONFIG_RSBAC_SWITCH_RES
++ value = rsbac_switch_res;
++ switchable = 3;
++#else
++ switchable = 0;
++#endif
++ break;
++#endif
++#ifdef CONFIG_RSBAC_PAX
++ case SW_PAX:
++ allow_softmode = 1;
++#ifdef CONFIG_RSBAC_SWITCH_PAX
++ value = rsbac_switch_pax;
++ switchable = 3;
++#else
++ switchable = 0;
++#endif
++ break;
++#endif
++ default:
++ return -RSBAC_EINVALIDMODULE;
++ }
++
++#ifdef CONFIG_RSBAC_SOFTMODE_IND
++ if(allow_softmode) {
++ value |= rsbac_ind_softmode[module] << 1;
++ switchable |= (rsbac_softmode_prohibit?2:3) << 2;
++ }
++#endif
++ if(value_p)
++ err = rsbac_put_user(&value, value_p, sizeof(int));
++ if(!err && switchable_p)
++ err = rsbac_put_user(&switchable, switchable_p, sizeof(int));
++ return err;
++}
++
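++/* Minimal decoding sketch for the value / switchable bit masks returned by
++ * sys_rsbac_get_switch() above. The struct and helper names are illustrative
++ * assumptions; they only spell out the bit layout built in that function.
++ */
++struct rsbac_switch_status_sketch
++ {
++ unsigned int enabled:1;          /* value bit 0 */
++ unsigned int ind_softmode:1;     /* value bit 1 */
++ unsigned int can_enable:1;       /* switchable bit 0 */
++ unsigned int can_disable:1;      /* switchable bit 1 */
++ unsigned int softmode_on_ok:1;   /* switchable bit 2 */
++ unsigned int softmode_off_ok:1;  /* switchable bit 3 */
++ };
++
++static inline void rsbac_decode_switch_status_sketch(int value, int switchable,
++ struct rsbac_switch_status_sketch * status_p)
++ {
++ status_p->enabled = value & 1;
++ status_p->ind_softmode = (value >> 1) & 1;
++ status_p->can_enable = switchable & 1;
++ status_p->can_disable = (switchable >> 1) & 1;
++ status_p->softmode_on_ok = (switchable >> 2) & 1;
++ status_p->softmode_off_ok = (switchable >> 3) & 1;
++ }
++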
++/************** MAC ***************/
++
++#ifdef CONFIG_RSBAC_MAC
++int sys_rsbac_mac_set_curr_level(rsbac_security_level_t level,
++ rsbac_mac_category_vector_t __user * categories_p)
++ {
++ rsbac_mac_category_vector_t k_categories;
++ int err;
++
++ if(!categories_p)
++ return -RSBAC_EINVALIDPOINTER;
++ err = rsbac_get_user(&k_categories, categories_p, sizeof(k_categories));
++ if(err)
++ return err;
++ return rsbac_mac_set_curr_level(level, k_categories);
++ }
++
++int sys_rsbac_mac_get_curr_level(rsbac_security_level_t __user * level_p,
++ rsbac_mac_category_vector_t __user * categories_p)
++ {
++ int err = 0;
++ rsbac_security_level_t k_level;
++ rsbac_mac_category_vector_t k_categories;
++
++ err = rsbac_mac_get_curr_level(&k_level, &k_categories);
++ if(err)
++ return err;
++ if(level_p)
++ {
++ err = rsbac_put_user(&k_level, level_p, sizeof(k_level));
++ if(err)
++ return err;
++ }
++ if(categories_p)
++ {
++ err = rsbac_put_user(&k_categories, categories_p, sizeof(k_categories));
++ }
++ return err;
++ }
++
++int sys_rsbac_mac_get_max_level(rsbac_security_level_t __user * level_p,
++ rsbac_mac_category_vector_t __user * categories_p)
++ {
++ int err = 0;
++ rsbac_security_level_t k_level;
++ rsbac_mac_category_vector_t k_categories;
++
++ err = rsbac_mac_get_max_level(&k_level, &k_categories);
++ if(err)
++ return err;
++ if(level_p)
++ {
++ err = rsbac_put_user(&k_level, level_p, sizeof(k_level));
++ if(err)
++ return err;
++ }
++ if(categories_p)
++ {
++ err = rsbac_put_user(&k_categories, categories_p, sizeof(k_categories));
++ }
++ return err;
++ }
++
++int sys_rsbac_mac_get_min_level(rsbac_security_level_t __user * level_p,
++ rsbac_mac_category_vector_t __user * categories_p)
++ {
++ int err = 0;
++ rsbac_security_level_t k_level;
++ rsbac_mac_category_vector_t k_categories;
++
++ err = rsbac_mac_get_min_level(&k_level, &k_categories);
++ if(err)
++ return err;
++ if(level_p)
++ {
++ err = rsbac_put_user(&k_level, level_p, sizeof(k_level));
++ if(err)
++ return err;
++ }
++ if(categories_p)
++ {
++ err = rsbac_put_user(&k_categories, categories_p, sizeof(k_categories));
++ }
++ return err;
++ }
++
++/* Provide means for adding and removing of capabilities */
++int sys_rsbac_mac_add_p_tru(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_upid_t upid,
++ rsbac_uid_t uid,
++ rsbac_time_t ttl)
++ {
++ rsbac_pid_t pid;
++
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_mac_add_p_tru(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(uid) == RSBAC_UM_VIRTUAL_KEEP)
++ uid = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(uid));
++ else
++ if (RSBAC_UID_SET(uid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ uid = RSBAC_UID_NUM(uid);
++#endif
++
++ pid = find_pid_ns(upid, &init_pid_ns);
++ if(!pid)
++ return -RSBAC_EINVALIDTARGET;
++
++ return rsbac_mac_add_p_tru(ta_number, pid, uid, ttl);
++ }
++
++int sys_rsbac_mac_remove_p_tru(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_upid_t upid,
++ rsbac_uid_t uid)
++ {
++ rsbac_pid_t pid;
++
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_mac_remove_p_tru(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(uid) == RSBAC_UM_VIRTUAL_KEEP)
++ uid = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(uid));
++ else
++ if (RSBAC_UID_SET(uid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ uid = RSBAC_UID_NUM(uid);
++#endif
++
++ pid = find_pid_ns(upid, &init_pid_ns);
++ if(!pid)
++ return -RSBAC_EINVALIDTARGET;
++ return rsbac_mac_remove_p_tru(ta_number, pid, uid);
++ }
++
++int sys_rsbac_mac_add_f_tru(
++ rsbac_list_ta_number_t ta_number,
++ char __user * filename,
++ rsbac_uid_t uid,
++ rsbac_time_t ttl)
++ {
++ struct dentry * t_dentry;
++ int err = 0;
++ enum rsbac_target_t target;
++ union rsbac_target_id_t tid;
++
++ struct path path;
++
++ if(!filename)
++ return -RSBAC_EINVALIDTARGET;
++
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_mac_add_f_tru(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(uid) == RSBAC_UM_VIRTUAL_KEEP)
++ uid = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(uid));
++ else
++ if (RSBAC_UID_SET(uid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ uid = RSBAC_UID_NUM(uid);
++#endif
++
++ if ((err = user_lpath(filename, &path)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_mac)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_mac_add_f_tru(): call to user_lpath() returned %i\n", err);
++#endif
++ goto out;
++ }
++ t_dentry = path.dentry;
++ if (!t_dentry->d_inode)
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ /* is inode of type file or dir? */
++ if(S_ISREG(t_dentry->d_inode->i_mode))
++ target = T_FILE;
++ else
++ if(S_ISDIR(t_dentry->d_inode->i_mode))
++ target = T_DIR;
++ else
++ { /* This is no file or dir */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ tid.file.device = t_dentry->d_sb->s_dev;
++ tid.file.inode = t_dentry->d_inode->i_ino;
++ tid.file.dentry_p = t_dentry;
++
++ err = rsbac_mac_add_f_tru(ta_number, tid.file, uid, ttl);
++
++out_dput:
++ path_put(&path);
++
++out:
++ return err;
++ }
++
++int sys_rsbac_mac_remove_f_tru(
++ rsbac_list_ta_number_t ta_number,
++ char __user * filename,
++ rsbac_uid_t uid)
++ {
++ struct dentry * t_dentry;
++ int err = 0;
++ enum rsbac_target_t target;
++ union rsbac_target_id_t tid;
++
++ struct path path;
++
++ if(!filename)
++ return -RSBAC_EINVALIDTARGET;
++
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_mac_remove_f_tru(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(uid) == RSBAC_UM_VIRTUAL_KEEP)
++ uid = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(uid));
++ else
++ if (RSBAC_UID_SET(uid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ uid = RSBAC_UID_NUM(uid);
++#endif
++
++ if ((err = user_lpath(filename, &path)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_mac)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_mac_remove_f_tru(): call to user_lpath() returned %i\n", err);
++#endif
++ goto out;
++ }
++ t_dentry = path.dentry;
++ if (!t_dentry->d_inode)
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ /* is inode of type file or dir? */
++ if(S_ISREG(t_dentry->d_inode->i_mode))
++ target = T_FILE;
++ else
++ if(S_ISDIR(t_dentry->d_inode->i_mode))
++ target = T_DIR;
++ else
++ { /* This is no file or dir */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ tid.file.device = t_dentry->d_sb->s_dev;
++ tid.file.inode = t_dentry->d_inode->i_ino;
++ tid.file.dentry_p = t_dentry;
++
++ err = rsbac_mac_remove_f_tru(ta_number, tid.file, uid);
++
++out_dput:
++ path_put(&path);
++out:
++ return err;
++ }
++
++/* trulist must have space for maxnum rsbac_uid_t entries! */
++int sys_rsbac_mac_get_f_trulist(
++ rsbac_list_ta_number_t ta_number,
++ char __user * filename,
++ rsbac_uid_t __user trulist[],
++ rsbac_time_t __user ttllist[],
++ u_int maxnum)
++ {
++ struct dentry * t_dentry;
++ int err = 0, tmperr = 0;
++ enum rsbac_target_t target;
++ union rsbac_target_id_t tid;
++ rsbac_uid_t * k_trulist;
++ rsbac_time_t * k_ttllist;
++
++ struct path path;
++
++ if(!filename)
++ return -RSBAC_EINVALIDTARGET;
++ if(!trulist)
++ return -RSBAC_EINVALIDPOINTER;
++ if(maxnum <= 0)
++ return -RSBAC_EINVALIDVALUE;
++ if(maxnum > RSBAC_MAC_MAX_MAXNUM)
++ maxnum = RSBAC_MAC_MAX_MAXNUM;
++
++ if ((err = user_lpath(filename, &path)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_mac)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_mac_get_f_trulist(): call to user_lpath() returned %i\n", err);
++#endif
++ goto out;
++ }
++ t_dentry = path.dentry;
++ if (!t_dentry->d_inode)
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ /* is inode of type file or dir? */
++ if(S_ISREG(t_dentry->d_inode->i_mode))
++ target = T_FILE;
++ else
++ if(S_ISDIR(t_dentry->d_inode->i_mode))
++ target = T_DIR;
++ else
++ { /* This is no file or dir */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ tid.file.device = t_dentry->d_sb->s_dev;
++ tid.file.inode = t_dentry->d_inode->i_ino;
++ tid.file.dentry_p = t_dentry;
++
++ err = rsbac_mac_get_f_trulist(ta_number, tid.file, &k_trulist, &k_ttllist);
++ if(err>0)
++ {
++ if(err > maxnum)
++ err = maxnum;
++ tmperr = rsbac_put_user(k_trulist, trulist,
++ sizeof(rsbac_uid_t) * err);
++ if(tmperr < 0)
++ err = tmperr;
++ else
++ {
++ if(ttllist)
++ {
++ tmperr = rsbac_put_user(k_ttllist, ttllist,
++ sizeof(rsbac_time_t) * err);
++ if(tmperr < 0)
++ err = tmperr;
++ }
++ }
++ rsbac_kfree(k_trulist);
++ rsbac_kfree(k_ttllist);
++ }
++
++out_dput:
++ path_put(&path);
++out:
++ return err;
++ }
++
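++/* Caller-side sizing sketch for the trulist interface above: both arrays
++ * must provide room for maxnum entries, and a positive return value is the
++ * number of entries actually copied. The userspace wrapper name
++ * rsbac_mac_get_f_trulist(), the file name and the 64-entry buffers are
++ * assumptions.
++ *
++ *   rsbac_uid_t trus[64];
++ *   rsbac_time_t ttls[64];
++ *   int i;
++ *   int n = rsbac_mac_get_f_trulist(0, "/secret/file", trus, ttls, 64);
++ *   for (i = 0; i < n; i++)
++ *     printf("trusted uid %llu, ttl %u\n",
++ *            (unsigned long long) trus[i], (unsigned int) ttls[i]);
++ */
++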
++int sys_rsbac_mac_get_p_trulist(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_upid_t upid,
++ rsbac_uid_t __user trulist[],
++ rsbac_time_t __user ttllist[],
++ u_int maxnum)
++ {
++ int err = 0, tmperr = 0;
++ union rsbac_target_id_t tid;
++ rsbac_uid_t * k_trulist;
++ rsbac_time_t * k_ttllist;
++
++ if(!upid)
++ return -RSBAC_EINVALIDTARGET;
++ if(!trulist)
++ return -RSBAC_EINVALIDPOINTER;
++ if(maxnum <= 0)
++ return -RSBAC_EINVALIDVALUE;
++ if(maxnum > RSBAC_MAC_MAX_MAXNUM)
++ maxnum = RSBAC_MAC_MAX_MAXNUM;
++
++ tid.process = find_pid_ns(upid, &init_pid_ns);
++ if(!tid.process)
++ return -RSBAC_EINVALIDTARGET;
++
++ err = rsbac_mac_get_p_trulist(ta_number, tid.process, &k_trulist, &k_ttllist);
++ if(err>0)
++ {
++ if(err > maxnum)
++ err = maxnum;
++ tmperr = rsbac_put_user(k_trulist, trulist,
++ sizeof(rsbac_uid_t) * err);
++ if(tmperr < 0)
++ err = tmperr;
++ else
++ {
++ if(ttllist)
++ {
++ tmperr = rsbac_put_user(k_ttllist, ttllist,
++ sizeof(rsbac_time_t) * err);
++ if(tmperr < 0)
++ err = tmperr;
++ }
++ }
++ rsbac_kfree(k_trulist);
++ rsbac_kfree(k_ttllist);
++ }
++
++ return err;
++ }
++#endif
++
++/************** PM ***************/
++
++#ifdef CONFIG_RSBAC_PM
++int sys_rsbac_stats_pm(void)
++ {
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef) rsbac_printk(KERN_DEBUG "sys_rsbac_stats_pm(): calling ADF\n");
++#endif
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_stats_pm(): getting RSBAC status!\n");
++#endif
++ return rsbac_stats_pm();
++ }
++
++int sys_rsbac_pm(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_pm_function_type_t function,
++ union rsbac_pm_function_param_t __user * param_p,
++ rsbac_pm_tkt_id_t ticket)
++ {
++ union rsbac_pm_function_param_t k_param;
++ int result;
++
++ if(function >= PF_none)
++ return -RSBAC_EINVALIDREQUEST;
++ if(!param_p)
++ return -RSBAC_EINVALIDPOINTER;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_pm(): called for function %i!\n",
++ function);
++#endif
++
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_pm(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++ /* get parameters from user space */
++ rsbac_get_user(&k_param, param_p, sizeof(k_param) );
++ /* call pm function and return its result */
++ result = rsbac_pm(ta_number, function, k_param, ticket);
++ return result;
++ }
++
++int sys_rsbac_pm_change_current_task(rsbac_pm_task_id_t task)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "sys_rsbac_pm_change_current_task(): called for task %i!\n",
++ task);
++#endif
++ /* call pm function and return its result */
++ return rsbac_pm_change_current_task(task);
++ }
++
++int sys_rsbac_pm_create_file(const char __user * filename,
++ int mode,
++ rsbac_pm_object_class_id_t class)
++ {
++ if(!filename)
++ return -RSBAC_EINVALIDPOINTER;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_pm)
++ rsbac_printk(KERN_DEBUG
++ "sys_rsbac_pm_create_file(): called with class %i!\n",
++ class);
++#endif
++ /* call pm function and return its result */
++ return rsbac_pm_create_file(filename, mode, class);
++ }
++#endif
++
++/************** DAZ ***************/
++
++#ifdef CONFIG_RSBAC_DAZ
++int sys_rsbac_daz_flush_cache(void)
++ {
++#ifndef CONFIG_RSBAC_DAZ_CACHE
++ return 0;
++#else
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val1;
++
++ /* Security Officer or admin? */
++ i_tid.user = current_uid();
++ if (rsbac_get_attr(SW_DAZ,
++ T_USER,
++ i_tid,
++ A_daz_role,
++ &i_attr_val1,
++ TRUE))
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_daz_flush_cache(): rsbac_get_attr() returned error!\n");
++ return -EPERM;
++ }
++ /* if not sec_officer or admin, deny */
++ if ( (i_attr_val1.system_role != SR_security_officer)
++ && (i_attr_val1.system_role != SR_administrator)
++ )
++ #ifdef CONFIG_RSBAC_SOFTMODE
++ if( !rsbac_softmode
++ #ifdef CONFIG_RSBAC_SOFTMODE_IND
++ && !rsbac_ind_softmode[SW_DAZ]
++ #endif
++ )
++ #endif
++ return -EPERM;
++#endif
++
++ rsbac_printk(KERN_INFO
++ "sys_rsbac_daz_flush_cache(): flushing DAZuko result cache!\n");
++
++ return rsbac_daz_flush_cache();
++#endif
++ }
++#endif
++
++/************** RC ***************/
++
++#ifdef CONFIG_RSBAC_RC
++int sys_rsbac_rc_copy_role(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_rc_role_id_t from_role,
++ rsbac_rc_role_id_t to_role)
++ {
++ if( (from_role > RC_role_max_value)
++ || (to_role > RC_role_max_value))
++ return -RSBAC_EINVALIDVALUE;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_rc)
++ rsbac_printk(KERN_DEBUG
++ "sys_rsbac_rc_copy_role(): from %i, to %i!\n",
++ from_role, to_role);
++#endif
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_rc_copy_role(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++ /* call rc function and return its result */
++ return rsbac_rc_sys_copy_role(ta_number, from_role, to_role);
++ }
++
++int sys_rsbac_rc_copy_type(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ rsbac_rc_type_id_t from_type,
++ rsbac_rc_type_id_t to_type)
++ {
++ if( (from_type > RC_type_max_value)
++ || (to_type > RC_type_max_value))
++ return -RSBAC_EINVALIDVALUE;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_rc)
++ rsbac_printk(KERN_DEBUG
++ "sys_rsbac_rc_copy_type(): from %i, to %i!\n",
++ from_type, to_type);
++#endif
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_rc_copy_type(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++ /* call rc function and return its result */
++ return rsbac_rc_sys_copy_type(ta_number, target, from_type, to_type);
++ }
++
++/* Getting values */
++int sys_rsbac_rc_get_item (
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ union rsbac_rc_target_id_t __user * tid_p,
++ union rsbac_rc_target_id_t __user * subtid_p,
++ enum rsbac_rc_item_t item,
++ union rsbac_rc_item_value_t __user * value_p,
++ rsbac_time_t __user * ttl_p)
++ {
++ union rsbac_rc_target_id_t k_tid;
++ union rsbac_rc_target_id_t k_subtid;
++ union rsbac_rc_item_value_t k_value;
++ rsbac_time_t k_ttl;
++ int err = 0;
++
++ if( (target >= RT_NONE)
++ || (item >= RI_none))
++ return -RSBAC_EINVALIDVALUE;
++ /* get values from user space */
++ rsbac_get_user(&k_tid, tid_p, sizeof(k_tid) );
++ rsbac_get_user(&k_subtid, subtid_p, sizeof(k_subtid) );
++ rsbac_get_user(&k_value, value_p, sizeof(k_value) );
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_rc)
++ rsbac_printk(KERN_DEBUG
++ "sys_rsbac_rc_get_item(): target %i, item %i!\n",
++ target, item);
++#endif
++ /* call rc function and return its result */
++ err = rsbac_rc_sys_get_item(ta_number, target, k_tid, k_subtid,
++ item, &k_value, &k_ttl);
++ /* put result value to user space */
++ if(!err)
++ {
++ err = rsbac_put_user(&k_value, value_p, sizeof(k_value) );
++ if(!err && ttl_p)
++ err = rsbac_put_user(&k_ttl, ttl_p, sizeof(k_ttl) );
++ }
++ return err;
++ }
++
++/* Setting values */
++int sys_rsbac_rc_set_item(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ union rsbac_rc_target_id_t __user * tid_p,
++ union rsbac_rc_target_id_t __user * subtid_p,
++ enum rsbac_rc_item_t item,
++ union rsbac_rc_item_value_t __user * value_p,
++ rsbac_time_t ttl)
++ {
++ union rsbac_rc_target_id_t k_tid;
++ union rsbac_rc_target_id_t k_subtid;
++ union rsbac_rc_item_value_t k_value;
++
++ if( (target >= RT_NONE)
++ || (item >= RI_none))
++ return -RSBAC_EINVALIDVALUE;
++
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_rc_set_item(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++ /* get values from user space */
++ rsbac_get_user(&k_tid, tid_p, sizeof(k_tid) );
++ rsbac_get_user(&k_subtid, subtid_p, sizeof(k_subtid) );
++ rsbac_get_user(&k_value, value_p, sizeof(k_value) );
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_rc)
++ rsbac_printk(KERN_DEBUG
++ "sys_rsbac_rc_set_item(): target %i, item %i!\n",
++ target, item);
++#endif
++ /* call rc function and return its result */
++ return rsbac_rc_sys_set_item(ta_number, target, k_tid, k_subtid, item, k_value, ttl);
++ }
++
++int sys_rsbac_rc_get_list(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_rc_target_t target,
++ union rsbac_rc_target_id_t __user * tid_p,
++ enum rsbac_rc_item_t item,
++ u_int maxnum,
++ __u32 __user * array_p,
++ rsbac_time_t __user * ttl_array_p)
++ {
++ union rsbac_rc_target_id_t k_tid;
++ int err;
++
++ rsbac_get_user(&k_tid, tid_p, sizeof(k_tid));
++ if(array_p)
++ {
++ __u32 * k_array_p;
++ rsbac_time_t * k_ttl_array_p;
++
++ if(!maxnum)
++ return -RSBAC_EINVALIDVALUE;
++ /* call rc function and return its result */
++ err = rsbac_rc_get_list(ta_number, target, k_tid, item,
++ &k_array_p, &k_ttl_array_p);
++ /* put result value to user space */
++ if(err > 0)
++ {
++ int tmperr;
++
++ if(err > maxnum)
++ err = maxnum;
++ tmperr = rsbac_put_user(k_array_p, array_p, err * sizeof(*k_array_p) );
++ if(tmperr)
++ err = tmperr;
++ rsbac_kfree(k_array_p);
++ if(k_ttl_array_p && ttl_array_p)
++ {
++ tmperr = rsbac_put_user(k_ttl_array_p, ttl_array_p, err * sizeof(*k_ttl_array_p) );
++ if(tmperr)
++ err = tmperr;
++ }
++ rsbac_kfree(k_ttl_array_p);
++ }
++ return err;
++ }
++ else
++ return rsbac_rc_get_list(ta_number, target, k_tid, item, NULL, NULL);
++ }
++
++/* Set own role */
++int sys_rsbac_rc_change_role (rsbac_rc_role_id_t role, char __user * pass)
++ {
++ if(role > RC_role_max_value)
++ return -RSBAC_EINVALIDVALUE;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_rc)
++ rsbac_printk(KERN_DEBUG
++ "sys_rsbac_rc_change_role(): role %i!\n",
++ role);
++#endif
++ /* call rc function and return its result */
++ return rsbac_rc_sys_change_role(role, pass);
++ }
++
++/* Getting own effective rights */
++int sys_rsbac_rc_get_eff_rights_n(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ char __user * t_name,
++ rsbac_rc_request_vector_t __user * request_vector_p,
++ rsbac_time_t __user * ttl_p)
++ {
++ struct dentry * t_dentry;
++ int err = 0;
++ rsbac_rc_request_vector_t k_req_vec;
++ rsbac_time_t k_ttl;
++ union rsbac_target_id_t tid;
++
++ struct path path;
++
++ if(!t_name || (target >= T_NONE))
++ return -RSBAC_EINVALIDTARGET;
++
++ if ((err = user_lpath(t_name, &path)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_rc)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_rc_get_eff_rights_n(): call to user_lpath() returned %i\n", err);
++#endif
++ goto out;
++ }
++ t_dentry = path.dentry;
++ if (!t_dentry->d_inode)
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++
++ switch (target)
++ {
++ case T_FD:
++ if(S_ISREG(t_dentry->d_inode->i_mode))
++ {
++ target = T_FILE;
++ }
++ else
++ if(S_ISDIR(t_dentry->d_inode->i_mode))
++ {
++ target = T_DIR;
++ }
++ else
++ if(S_ISLNK(t_dentry->d_inode->i_mode))
++ {
++ target = T_SYMLINK;
++ }
++ else
++ if(S_ISFIFO(t_dentry->d_inode->i_mode))
++ {
++ target = T_FIFO;
++ }
++ else
++ if(S_ISSOCK(t_dentry->d_inode->i_mode))
++ {
++ target = T_UNIXSOCK;
++ }
++ else
++ if(S_ISBLK(t_dentry->d_inode->i_mode))
++ {
++ target = T_FILE;
++ }
++ else
++ if(S_ISCHR(t_dentry->d_inode->i_mode))
++ {
++ target = T_FILE;
++ }
++ else
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_rc_get_eff_rights_n(): no filesystem object\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_FILE:
++ /* is inode of type file or block/char device? */
++ if ( !(S_ISREG(t_dentry->d_inode->i_mode))
++ && !(S_ISBLK(t_dentry->d_inode->i_mode))
++ && !(S_ISCHR(t_dentry->d_inode->i_mode)) )
++ { /* This is no file or device */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_DIR:
++ if ( !(S_ISDIR(t_dentry->d_inode->i_mode)) )
++ { /* This is no dir */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_FIFO:
++ /* is inode of type fifo? */
++ if ( !(S_ISFIFO(t_dentry->d_inode->i_mode)))
++ { /* This is no fifo */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_UNIXSOCK:
++ /* is inode of type socket? */
++ if ( !(S_ISSOCK(t_dentry->d_inode->i_mode)))
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_SYMLINK:
++ /* is inode of type symlink? */
++ if ( !(S_ISLNK(t_dentry->d_inode->i_mode)))
++ { /* This is no symlink */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ case T_DEV:
++ /* is inode of type block/char device? */
++ if ( !(S_ISBLK(t_dentry->d_inode->i_mode))
++ && !(S_ISCHR(t_dentry->d_inode->i_mode)) )
++ { /* This is no dev */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++
++ if(target == T_DEV)
++ {
++ if(S_ISBLK(t_dentry->d_inode->i_mode))
++ tid.dev.type = D_block;
++ else
++ tid.dev.type = D_char;
++ tid.dev.major = RSBAC_MAJOR(t_dentry->d_inode->i_rdev);
++ tid.dev.minor = RSBAC_MINOR(t_dentry->d_inode->i_rdev);
++ }
++ else
++ {
++ /* fill target id and call internal function */
++ tid.file.device = t_dentry->d_sb->s_dev;
++ tid.file.inode = t_dentry->d_inode->i_ino;
++ tid.file.dentry_p = t_dentry;
++ }
++ err = rsbac_rc_sys_get_eff_rights(ta_number, target, tid, &k_req_vec, &k_ttl);
++ /* put result value to user space */
++ if(!err)
++ {
++ err = rsbac_put_user(&k_req_vec, request_vector_p, sizeof(k_req_vec) );
++ if(!err && ttl_p)
++ err = rsbac_put_user(&k_ttl, ttl_p, sizeof(k_ttl) );
++ }
++
++ out_dput:
++ path_put(&path);
++
++ out:
++ return err;
++ }
++
++/* Get current process role */
++int sys_rsbac_rc_get_current_role (rsbac_rc_role_id_t __user * role_p)
++ {
++ rsbac_rc_role_id_t k_role;
++ int err;
++
++ if(!role_p)
++ return -RSBAC_EINVALIDPOINTER;
++ /* call rc function and return its result */
++ err = rsbac_rc_sys_get_current_role(&k_role);
++ if(!err)
++ {
++ err = rsbac_put_user(&k_role, role_p, sizeof(k_role) );
++ }
++ return err;
++ }
++
++int sys_rsbac_rc_select_fd_create_type(rsbac_rc_type_id_t type)
++{
++ int err;
++
++ err = rsbac_rc_select_fd_create_type(type);
++
++ return err;
++}
++#endif
++
++/************** AUTH ***************/
++
++#ifdef CONFIG_RSBAC_AUTH
++/* Provide means for adding and removing of capabilities */
++int sys_rsbac_auth_add_p_cap(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_upid_t upid,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range,
++ rsbac_time_t ttl)
++ {
++ rsbac_pid_t pid;
++
++ if(cap_type >= ACT_none)
++ return -RSBAC_EINVALIDTARGET;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_range.first) == RSBAC_UM_VIRTUAL_KEEP)
++ cap_range.first = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(cap_range.first));
++ else
++ if ( (RSBAC_UID_SET(cap_range.first) > RSBAC_UM_VIRTUAL_MAX)
++ && (RSBAC_UID_SET(cap_range.first) != RSBAC_UM_VIRTUAL_ALL)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ if (RSBAC_UID_SET(cap_range.last) == RSBAC_UM_VIRTUAL_KEEP)
++ cap_range.last = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(cap_range.last));
++ else
++ if ( (RSBAC_UID_SET(cap_range.last) > RSBAC_UM_VIRTUAL_MAX)
++ && (RSBAC_UID_SET(cap_range.last) != RSBAC_UM_VIRTUAL_ALL)
++ )
++ return -RSBAC_EINVALIDVALUE;
++#else
++ cap_range.first = RSBAC_UID_NUM(cap_range.first);
++ cap_range.last = RSBAC_UID_NUM(cap_range.last);
++#endif
++
++ if(cap_range.first > cap_range.last)
++ return -RSBAC_EINVALIDVALUE;
++ if( (RSBAC_UID_NUM(cap_range.first) > RSBAC_AUTH_MAX_RANGE_UID)
++ || (RSBAC_UID_NUM(cap_range.last) > RSBAC_AUTH_MAX_RANGE_UID)
++ )
++ return -RSBAC_EINVALIDVALUE;
++
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_auth_add_p_cap(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++ pid = find_pid_ns(upid, &init_pid_ns);
++ if(!pid)
++ return -RSBAC_EINVALIDTARGET;
++
++ /* call auth function and return its result */
++ /* permission checking is done there */
++ return rsbac_auth_add_p_cap(ta_number, pid, cap_type, cap_range, ttl);
++ }
++
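++/* Cap range construction sketch for the AUTH calls above (illustrative only;
++ * the ACT_real cap type, the upid variable and the ttl value are
++ * assumptions): a range covers all uids from .first to .last inclusive, and
++ * .first must not exceed .last.
++ *
++ *   struct rsbac_auth_cap_range_t range;
++ *   range.first = 1000;
++ *   range.last = 1005;
++ *   err = sys_rsbac_auth_add_p_cap(0, upid, ACT_real, range, 0);
++ */
++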
++int sys_rsbac_auth_remove_p_cap(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_upid_t upid,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range)
++ {
++ rsbac_pid_t pid;
++
++ if(cap_type >= ACT_none)
++ return -RSBAC_EINVALIDTARGET;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_range.first) == RSBAC_UM_VIRTUAL_KEEP)
++ cap_range.first = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(cap_range.first));
++ else
++ if ( (RSBAC_UID_SET(cap_range.first) > RSBAC_UM_VIRTUAL_MAX)
++ && (RSBAC_UID_SET(cap_range.first) != RSBAC_UM_VIRTUAL_ALL)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ if (RSBAC_UID_SET(cap_range.last) == RSBAC_UM_VIRTUAL_KEEP)
++ cap_range.last = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(cap_range.last));
++ else
++ if ( (RSBAC_UID_SET(cap_range.last) > RSBAC_UM_VIRTUAL_MAX)
++ && (RSBAC_UID_SET(cap_range.last) != RSBAC_UM_VIRTUAL_ALL)
++ )
++ return -RSBAC_EINVALIDVALUE;
++#else
++ cap_range.first = RSBAC_UID_NUM(cap_range.first);
++ cap_range.last = RSBAC_UID_NUM(cap_range.last);
++#endif
++ if(cap_range.first > cap_range.last)
++ return -RSBAC_EINVALIDVALUE;
++
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_auth_remove_p_cap(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++ pid = find_pid_ns(upid, &init_pid_ns);
++ if(!pid)
++ return -RSBAC_EINVALIDTARGET;
++ /* call auth function and return its result */
++ /* permission checking is done there */
++ return rsbac_auth_remove_p_cap(ta_number, pid, cap_type, cap_range);
++ }
++
++int sys_rsbac_auth_add_f_cap(
++ rsbac_list_ta_number_t ta_number,
++ char __user * filename,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range,
++ rsbac_time_t ttl)
++ {
++ struct dentry * t_dentry;
++ int err = 0;
++ enum rsbac_target_t target;
++ union rsbac_target_id_t tid;
++#if defined(CONFIG_RSBAC_AUTH) && !defined(CONFIG_RSBAC_MAINT)
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++ struct path path;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_range.first) == RSBAC_UM_VIRTUAL_KEEP)
++ cap_range.first = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(cap_range.first));
++ else
++ if ( (RSBAC_UID_SET(cap_range.first) > RSBAC_UM_VIRTUAL_MAX)
++ && (RSBAC_UID_SET(cap_range.first) != RSBAC_UM_VIRTUAL_ALL)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ if (RSBAC_UID_SET(cap_range.last) == RSBAC_UM_VIRTUAL_KEEP)
++ cap_range.last = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(cap_range.last));
++ else
++ if ( (RSBAC_UID_SET(cap_range.last) > RSBAC_UM_VIRTUAL_MAX)
++ && (RSBAC_UID_SET(cap_range.last) != RSBAC_UM_VIRTUAL_ALL)
++ )
++ return -RSBAC_EINVALIDVALUE;
++#else
++ cap_range.first = RSBAC_UID_NUM(cap_range.first);
++ cap_range.last = RSBAC_UID_NUM(cap_range.last);
++#endif
++ if(cap_range.first > cap_range.last)
++ return -RSBAC_EINVALIDVALUE;
++
++ if(!filename)
++ return -RSBAC_EINVALIDTARGET;
++ if(cap_type >= ACT_none)
++ return -RSBAC_EINVALIDTARGET;
++
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_auth_add_f_cap(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++ if ((err = user_lpath(filename, &path)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_auth)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_auth_add_f_cap(): call to user_lpath() returned %i\n", err);
++#endif
++ goto out;
++ }
++ t_dentry = path.dentry;
++ if (!t_dentry->d_inode)
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ /* is inode of type file or dir? */
++ if(S_ISREG(t_dentry->d_inode->i_mode))
++ target = T_FILE;
++ else
++ if(S_ISDIR(t_dentry->d_inode->i_mode))
++ target = T_DIR;
++ else
++ { /* This is no file or dir */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ tid.file.device = t_dentry->d_sb->s_dev;
++ tid.file.inode = t_dentry->d_inode->i_ino;
++ tid.file.dentry_p = t_dentry;
++#if defined(CONFIG_RSBAC_AUTH) && !defined(CONFIG_RSBAC_MAINT)
++ /* call ADF */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_auth_add_f_cap(): calling ADF\n");
++#endif
++ rsbac_attribute_value.auth_cap_range = cap_range;
++ if (!rsbac_adf_request(R_MODIFY_ATTRIBUTE,
++ task_pid(current),
++ target,
++ tid,
++ A_auth_add_f_cap,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ }
++ else
++#endif
++ err = rsbac_auth_add_f_cap(ta_number, tid.file, cap_type, cap_range, ttl);
++
++out_dput:
++ path_put(&path);
++out:
++ return err;
++ }
++
++int sys_rsbac_auth_remove_f_cap(
++ rsbac_list_ta_number_t ta_number,
++ char __user * filename,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t cap_range)
++ {
++ struct dentry * t_dentry;
++ int err = 0;
++ enum rsbac_target_t target;
++ union rsbac_target_id_t tid;
++
++ /* for adf_request */
++#if defined(CONFIG_RSBAC_AUTH) && !defined(CONFIG_RSBAC_MAINT)
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++ struct path path;
++
++ if(!filename)
++ return -RSBAC_EINVALIDTARGET;
++ if(cap_type >= ACT_none)
++ return -RSBAC_EINVALIDTARGET;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(cap_range.first) == RSBAC_UM_VIRTUAL_KEEP)
++ cap_range.first = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(cap_range.first));
++ else
++ if ( (RSBAC_UID_SET(cap_range.first) > RSBAC_UM_VIRTUAL_MAX)
++ && (RSBAC_UID_SET(cap_range.first) != RSBAC_UM_VIRTUAL_ALL)
++ )
++ return -RSBAC_EINVALIDVALUE;
++ if (RSBAC_UID_SET(cap_range.last) == RSBAC_UM_VIRTUAL_KEEP)
++ cap_range.last = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(cap_range.last));
++ else
++ if ( (RSBAC_UID_SET(cap_range.last) > RSBAC_UM_VIRTUAL_MAX)
++ && (RSBAC_UID_SET(cap_range.last) != RSBAC_UM_VIRTUAL_ALL)
++ )
++ return -RSBAC_EINVALIDVALUE;
++#else
++ cap_range.first = RSBAC_UID_NUM(cap_range.first);
++ cap_range.last = RSBAC_UID_NUM(cap_range.last);
++#endif
++ if(cap_range.first > cap_range.last)
++ return -RSBAC_EINVALIDVALUE;
++
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_auth_remove_f_cap(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++ if ((err = user_lpath(filename, &path)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_auth)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_auth_remove_f_cap(): call to user_lpath() returned %i\n", err);
++#endif
++ goto out;
++ }
++ t_dentry = path.dentry;
++ if (!t_dentry->d_inode)
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ /* is inode of type file or dir? */
++ if(S_ISREG(t_dentry->d_inode->i_mode))
++ target = T_FILE;
++ else
++ if(S_ISDIR(t_dentry->d_inode->i_mode))
++ target = T_DIR;
++ else
++ { /* This is no file or dir */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ tid.file.device = t_dentry->d_sb->s_dev;
++ tid.file.inode = t_dentry->d_inode->i_ino;
++ tid.file.dentry_p = t_dentry;
++#if defined(CONFIG_RSBAC_AUTH) && !defined(CONFIG_RSBAC_MAINT)
++ /* call ADF */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef) rsbac_printk(KERN_DEBUG "sys_rsbac_auth_remove_f_cap(): calling ADF\n");
++#endif
++ rsbac_attribute_value.auth_cap_range = cap_range;
++ if (!rsbac_adf_request(R_MODIFY_ATTRIBUTE,
++ task_pid(current),
++ target,
++ tid,
++ A_auth_remove_f_cap,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ }
++ else
++#endif
++ err = rsbac_auth_remove_f_cap(ta_number, tid.file, cap_type, cap_range);
++
++out_dput:
++ path_put(&path);
++out:
++ return err;
++ }
++
++/* caplist must have space for maxnum rsbac_auth_cap_range_t entries (each holding first and last)! */
++int sys_rsbac_auth_get_f_caplist(
++ rsbac_list_ta_number_t ta_number,
++ char __user * filename,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t __user caplist[],
++ rsbac_time_t __user ttllist[],
++ u_int maxnum)
++ {
++ struct dentry * t_dentry;
++ int err = 0, tmperr = 0;
++ enum rsbac_target_t target;
++ union rsbac_target_id_t tid;
++ struct rsbac_auth_cap_range_t * k_caplist;
++ rsbac_time_t * k_ttllist;
++
++ /* for adf_request */
++#if defined(CONFIG_RSBAC_AUTH) && !defined(CONFIG_RSBAC_MAINT)
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++ struct path path;
++
++ if(!filename)
++ return -RSBAC_EINVALIDTARGET;
++ if(cap_type >= ACT_none)
++ return -RSBAC_EINVALIDTARGET;
++ if(!caplist)
++ return -RSBAC_EINVALIDPOINTER;
++ if(maxnum <= 0)
++ return -RSBAC_EINVALIDVALUE;
++ if(maxnum > RSBAC_AUTH_MAX_MAXNUM)
++ maxnum = RSBAC_AUTH_MAX_MAXNUM;
++
++ if ((err = user_lpath(filename, &path)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_auth)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_auth_get_f_caplist(): call to user_lpath() returned %i\n", err);
++#endif
++ goto out;
++ }
++ t_dentry = path.dentry;
++ if (!t_dentry->d_inode)
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ /* is inode of type file or dir? */
++ if(S_ISREG(t_dentry->d_inode->i_mode))
++ target = T_FILE;
++ else
++ if(S_ISDIR(t_dentry->d_inode->i_mode))
++ target = T_DIR;
++ else
++ { /* This is no file or dir */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ tid.file.device = t_dentry->d_sb->s_dev;
++ tid.file.inode = t_dentry->d_inode->i_ino;
++ tid.file.dentry_p = t_dentry;
++#if defined(CONFIG_RSBAC_AUTH) && !defined(CONFIG_RSBAC_MAINT)
++ /* call ADF */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef) rsbac_printk(KERN_DEBUG "sys_rsbac_auth_get_f_caplist(): calling ADF\n");
++#endif
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_READ_ATTRIBUTE,
++ task_pid(current),
++ target,
++ tid,
++ A_auth_get_caplist,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ goto out_dput;
++ }
++#endif
++ err = rsbac_auth_get_f_caplist(ta_number, tid.file, cap_type, &k_caplist, &k_ttllist);
++ if(err>0)
++ {
++ if(err > maxnum)
++ err = maxnum;
++ tmperr = rsbac_put_user(k_caplist, caplist,
++ sizeof(struct rsbac_auth_cap_range_t) * err);
++ if(tmperr < 0)
++ err = tmperr;
++ else
++ {
++ if(ttllist)
++ {
++ tmperr = rsbac_put_user(k_ttllist, ttllist,
++ sizeof(rsbac_time_t) * err);
++ if(tmperr < 0)
++ err = tmperr;
++ }
++ }
++ rsbac_kfree(k_caplist);
++ rsbac_kfree(k_ttllist);
++ }
++
++out_dput:
++ path_put(&path);
++out:
++ return err;
++ }
++
++int sys_rsbac_auth_get_p_caplist(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_upid_t upid,
++ enum rsbac_auth_cap_type_t cap_type,
++ struct rsbac_auth_cap_range_t __user caplist[],
++ rsbac_time_t __user ttllist[],
++ u_int maxnum)
++ {
++ int err = 0, tmperr = 0;
++ union rsbac_target_id_t tid;
++ struct rsbac_auth_cap_range_t * k_caplist;
++ rsbac_time_t * k_ttllist;
++
++ /* for adf_request */
++#if defined(CONFIG_RSBAC_AUTH) && !defined(CONFIG_RSBAC_MAINT)
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++ if(!upid)
++ return -RSBAC_EINVALIDTARGET;
++ if(cap_type >= ACT_none)
++ return -RSBAC_EINVALIDVALUE;
++ if(!caplist)
++ return -RSBAC_EINVALIDPOINTER;
++ if(maxnum <= 0)
++ return -RSBAC_EINVALIDVALUE;
++ if(maxnum > RSBAC_AUTH_MAX_MAXNUM)
++ maxnum = RSBAC_AUTH_MAX_MAXNUM;
++
++ tid.process = find_pid_ns(upid, &init_pid_ns);
++ if (!tid.process)
++ return -RSBAC_EINVALIDTARGET;
++#if defined(CONFIG_RSBAC_AUTH) && !defined(CONFIG_RSBAC_MAINT)
++ /* call ADF */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef) rsbac_printk(KERN_DEBUG "sys_rsbac_auth_get_p_caplist(): calling ADF\n");
++#endif
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_READ_ATTRIBUTE,
++ task_pid(current),
++ T_PROCESS,
++ tid,
++ A_auth_get_caplist,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif
++ err = rsbac_auth_get_p_caplist(ta_number, tid.process, cap_type,
++ &k_caplist, &k_ttllist);
++ if(err>0)
++ {
++ if(err > maxnum)
++ err = maxnum;
++ tmperr = rsbac_put_user(k_caplist, caplist,
++ sizeof(struct rsbac_auth_cap_range_t) * err);
++ if(tmperr < 0)
++ err = tmperr;
++ else
++ {
++ if(ttllist)
++ {
++ tmperr = rsbac_put_user(k_ttllist, ttllist,
++ sizeof(rsbac_time_t) * err);
++ if(tmperr < 0)
++ err = tmperr;
++ }
++ }
++ rsbac_kfree(k_caplist);
++ rsbac_kfree(k_ttllist);
++ }
++
++ return err;
++ }
++#endif
++
++/**********************************/
++/************** REG ***************/
++
++#ifdef CONFIG_RSBAC_REG
++int sys_rsbac_reg(rsbac_reg_handle_t handle,
++ void __user * arg)
++ {
++ return rsbac_reg_syscall(handle, arg);
++ }
++#endif
++
++/**********************************/
++/************** ACL ***************/
++
++#ifdef CONFIG_RSBAC_ACL
++int sys_rsbac_acl(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_acl_syscall_type_t call,
++ struct rsbac_acl_syscall_arg_t __user * arg)
++ {
++ struct rsbac_acl_syscall_arg_t k_arg;
++ int err = 0;
++
++ if(call >= ACLC_none)
++ return -RSBAC_EINVALIDREQUEST;
++ if(!arg)
++ return -RSBAC_EINVALIDPOINTER;
++
++ /* get values from user space */
++ err = rsbac_get_user(&k_arg, arg, sizeof(k_arg));
++ if(err < 0)
++ return err;
++
++ if(k_arg.target >= T_NONE)
++ return -RSBAC_EINVALIDTARGET;
++/* rsbac_printk(KERN_DEBUG "sys_rsbac_acl(): target = %u, call = %u, subj_type = %u, subj_id = %u!\n",
++ k_arg.target, call, k_arg.subj_type, k_arg.subj_id); */
++
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_acl(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++ if(call != ACLC_set_mask)
++ {
++ switch(k_arg.subj_type)
++ {
++ case ACLS_USER:
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(k_arg.subj_id) == RSBAC_UM_VIRTUAL_KEEP)
++ k_arg.subj_id = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(k_arg.subj_id));
++ else
++ if (RSBAC_UID_SET(k_arg.subj_id) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ k_arg.subj_id = RSBAC_UID_NUM(k_arg.subj_id);
++#endif
++ break;
++ case ACLS_GROUP:
++ if(k_arg.subj_id != RSBAC_ACL_GROUP_EVERYONE)
++ {
++ struct rsbac_acl_group_entry_t entry;
++ rsbac_uid_t caller;
++
++ if( rsbac_acl_get_group_entry(ta_number, k_arg.subj_id, &entry)
++ || rsbac_get_owner(&caller)
++ || ( (entry.owner != caller)
++ && (entry.type != ACLG_GLOBAL)
++ )
++ )
++ return -RSBAC_EINVALIDVALUE;
++ }
++ break;
++ #if defined(CONFIG_RSBAC_RC)
++ case ACLS_ROLE:
++ if(k_arg.subj_id > RC_role_max_value)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_acl(): Invalid role %u!\n", k_arg.subj_id);
++ return -RSBAC_EINVALIDVALUE;
++ }
++ break;
++ #endif
++ default:
++ rsbac_printk(KERN_DEBUG "sys_rsbac_acl(): Invalid subject type %u!\n", k_arg.subj_type);
++ return -RSBAC_EINVALIDVALUE;
++ }
++ if( (call == ACLC_remove_user)
++ && (k_arg.target != T_USER)
++ )
++ return -RSBAC_EINVALIDTARGET;
++
++ }
++
++ /* call acl function */
++ switch(call)
++ {
++ case ACLC_set_acl_entry:
++ err = rsbac_acl_sys_set_acl_entry(ta_number,
++ k_arg.target,
++ k_arg.tid,
++ k_arg.subj_type,
++ k_arg.subj_id,
++ k_arg.rights,
++ k_arg.ttl);
++ break;
++ case ACLC_remove_acl_entry:
++ err = rsbac_acl_sys_remove_acl_entry(ta_number,
++ k_arg.target,
++ k_arg.tid,
++ k_arg.subj_type,
++ k_arg.subj_id);
++ break;
++ case ACLC_remove_acl:
++ err = rsbac_acl_sys_remove_acl(ta_number,
++ k_arg.target,
++ k_arg.tid);
++ break;
++ case ACLC_add_to_acl_entry:
++ err = rsbac_acl_sys_add_to_acl_entry(ta_number,
++ k_arg.target,
++ k_arg.tid,
++ k_arg.subj_type,
++ k_arg.subj_id,
++ k_arg.rights,
++ k_arg.ttl);
++ break;
++ case ACLC_remove_from_acl_entry:
++ err = rsbac_acl_sys_remove_from_acl_entry(ta_number,
++ k_arg.target,
++ k_arg.tid,
++ k_arg.subj_type,
++ k_arg.subj_id,
++ k_arg.rights);
++ break;
++ case ACLC_set_mask:
++ err = rsbac_acl_sys_set_mask(ta_number,
++ k_arg.target,
++ k_arg.tid,
++ k_arg.rights);
++ break;
++ case ACLC_remove_user:
++ err = rsbac_acl_sys_remove_user(ta_number,
++ k_arg.tid.user);
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDREQUEST;
++ }
++ return err;
++ } /* end of sys_rsbac_acl() */
++
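++/* Argument construction sketch for sys_rsbac_acl() above. Illustrative only:
++ * the chosen target, subject id and the rights variable are assumptions.
++ *
++ *   struct rsbac_acl_syscall_arg_t arg;
++ *   rsbac_acl_rights_vector_t rights = 0;
++ *   memset(&arg, 0, sizeof(arg));
++ *   arg.target = T_SCD;
++ *   arg.tid.scd = ST_rsbac;
++ *   arg.subj_type = ACLS_USER;
++ *   arg.subj_id = 1000;
++ *   arg.rights = rights;
++ *   arg.ttl = 0;
++ *   err = sys_rsbac_acl(0, ACLC_set_acl_entry, &arg);
++ */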
++
++int sys_rsbac_acl_n(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_acl_syscall_type_t call,
++ struct rsbac_acl_syscall_n_arg_t __user * arg)
++ {
++ struct dentry * t_dentry = NULL;
++ int err = 0;
++ union rsbac_target_id_t tid;
++ struct rsbac_acl_syscall_n_arg_t k_arg;
++
++ struct path path;
++
++ if(call >= ACLC_none)
++ return -RSBAC_EINVALIDREQUEST;
++ if(!arg)
++ return -RSBAC_EINVALIDPOINTER;
++
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_acl_n(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++ /* get values from user space */
++ err = rsbac_get_user(&k_arg, arg, sizeof(k_arg) );
++ if(err < 0)
++ return err;
++
++ if(k_arg.target >= T_NONE)
++ return -RSBAC_EINVALIDTARGET;
++ if(call != ACLC_set_mask)
++ {
++ switch(k_arg.subj_type)
++ {
++ case ACLS_USER:
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(k_arg.subj_id) == RSBAC_UM_VIRTUAL_KEEP)
++ k_arg.subj_id = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(k_arg.subj_id));
++ else
++ if (RSBAC_UID_SET(k_arg.subj_id) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ k_arg.subj_id = RSBAC_UID_NUM(k_arg.subj_id);
++#endif
++ break;
++ case ACLS_GROUP:
++ if(k_arg.subj_id != RSBAC_ACL_GROUP_EVERYONE)
++ {
++ struct rsbac_acl_group_entry_t entry;
++ rsbac_uid_t caller;
++
++ if( rsbac_acl_get_group_entry(ta_number, k_arg.subj_id, &entry)
++ || rsbac_get_owner(&caller)
++ || ( (entry.owner != caller)
++ && (entry.type != ACLG_GLOBAL)
++ )
++ )
++ return -RSBAC_EINVALIDVALUE;
++ }
++ break;
++ #if defined(CONFIG_RSBAC_RC)
++ case ACLS_ROLE:
++ if(k_arg.subj_id > RC_role_max_value)
++ return -RSBAC_EINVALIDVALUE;
++ break;
++ #endif
++ default:
++ return -RSBAC_EINVALIDVALUE;
++ }
++ }
++
++ if(k_arg.name)
++ {
++ /* lookup filename */
++ if ((err = user_lpath(k_arg.name, &path)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_acl_n(): call to user_lpath() returned %i\n", err);
++#endif
++ goto out;
++ }
++ t_dentry = path.dentry;
++ if (!t_dentry->d_inode)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_acl_n(): file not found\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ tid.file.device = t_dentry->d_sb->s_dev;
++ tid.file.inode = t_dentry->d_inode->i_ino;
++ tid.file.dentry_p = t_dentry;
++ }
++ else
++ {
++ tid.file.device = RSBAC_ZERO_DEV;
++ tid.file.inode = 0;
++ tid.file.dentry_p = NULL;
++ }
++
++ switch (k_arg.target)
++ {
++ case T_FD:
++ if(k_arg.name)
++ {
++ if(S_ISREG(t_dentry->d_inode->i_mode))
++ {
++ k_arg.target = T_FILE;
++ }
++ else
++ if(S_ISDIR(t_dentry->d_inode->i_mode))
++ {
++ k_arg.target = T_DIR;
++ }
++ else
++ if(S_ISLNK(t_dentry->d_inode->i_mode))
++ {
++ k_arg.target = T_SYMLINK;
++ }
++ else
++ if(S_ISFIFO(t_dentry->d_inode->i_mode))
++ {
++ k_arg.target = T_FIFO;
++ }
++ else
++ if(S_ISSOCK(t_dentry->d_inode->i_mode))
++ {
++ k_arg.target = T_UNIXSOCK;
++ }
++ else
++ if(S_ISBLK(t_dentry->d_inode->i_mode))
++ {
++ k_arg.target = T_FILE;
++ }
++ else
++ if(S_ISCHR(t_dentry->d_inode->i_mode))
++ {
++ k_arg.target = T_FILE;
++ }
++ else
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_acl_n(): no filesystem object\n");
++#endif
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ }
++ else
++ k_arg.target = T_FILE;
++ break;
++
++ case T_FILE:
++ if(k_arg.name)
++ {
++ /* is inode of type file or block/char device? */
++ if ( !(S_ISREG(t_dentry->d_inode->i_mode))
++ && !(S_ISBLK(t_dentry->d_inode->i_mode))
++ && !(S_ISCHR(t_dentry->d_inode->i_mode)) )
++ { /* This is no file or device */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ }
++ break;
++
++ case T_DIR:
++ if(k_arg.name)
++ {
++ if ( !(S_ISDIR(t_dentry->d_inode->i_mode)) )
++ { /* This is no dir */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ }
++ break;
++
++ case T_FIFO:
++ if(k_arg.name)
++ {
++ /* is inode of type fifo? */
++ if ( !(S_ISFIFO(t_dentry->d_inode->i_mode)))
++ { /* This is no fifo */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ }
++ break;
++
++ case T_UNIXSOCK:
++ if(k_arg.name)
++ {
++ /* is inode of type socket? */
++ if ( !(S_ISSOCK(t_dentry->d_inode->i_mode)))
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ }
++ break;
++
++ case T_SYMLINK:
++ if(k_arg.name)
++ {
++ /* is inode of type symlink? */
++ if ( !(S_ISLNK(t_dentry->d_inode->i_mode)))
++ { /* This is no symlink */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ }
++ break;
++
++ case T_DEV:
++ if(k_arg.name)
++ {
++ /* is inode of type block/char device? */
++ if ( !(S_ISBLK(t_dentry->d_inode->i_mode))
++ && !(S_ISCHR(t_dentry->d_inode->i_mode)) )
++ { /* This is no device */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ /* fill target id and call internal function */
++ if(S_ISBLK(t_dentry->d_inode->i_mode))
++ tid.dev.type = D_block;
++ else
++ tid.dev.type = D_char;
++ tid.dev.major = RSBAC_MAJOR(t_dentry->d_inode->i_rdev);
++ tid.dev.minor = RSBAC_MINOR(t_dentry->d_inode->i_rdev);
++ }
++ else
++ {
++ tid.dev = RSBAC_ZERO_DEV_DESC;
++ }
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ /* call acl function */
++ switch(call)
++ {
++ case ACLC_set_acl_entry:
++ err = rsbac_acl_sys_set_acl_entry(ta_number,
++ k_arg.target,
++ tid,
++ k_arg.subj_type,
++ k_arg.subj_id,
++ k_arg.rights,
++ k_arg.ttl);
++ break;
++ case ACLC_remove_acl_entry:
++ err = rsbac_acl_sys_remove_acl_entry(ta_number,
++ k_arg.target,
++ tid,
++ k_arg.subj_type,
++ k_arg.subj_id);
++ break;
++ case ACLC_remove_acl:
++ err = rsbac_acl_sys_remove_acl(ta_number,
++ k_arg.target,
++ tid);
++ break;
++ case ACLC_add_to_acl_entry:
++ err = rsbac_acl_sys_add_to_acl_entry(ta_number,
++ k_arg.target,
++ tid,
++ k_arg.subj_type,
++ k_arg.subj_id,
++ k_arg.rights,
++ k_arg.ttl);
++ break;
++ case ACLC_remove_from_acl_entry:
++ err = rsbac_acl_sys_remove_from_acl_entry(ta_number,
++ k_arg.target,
++ tid,
++ k_arg.subj_type,
++ k_arg.subj_id,
++ k_arg.rights);
++ break;
++ case ACLC_set_mask:
++ err = rsbac_acl_sys_set_mask(ta_number,
++ k_arg.target,
++ tid,
++ k_arg.rights);
++ break;
++
++ default:
++ err = -RSBAC_EINVALIDREQUEST;
++ }
++
++out_dput:
++ if(k_arg.name)
++ {
++ path_put(&path);
++ }
++
++out:
++ return err;
++ } /* end of sys_rsbac_acl_n() */
++
++/************************************************************************** */
++
++int sys_rsbac_acl_get_rights(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_acl_syscall_arg_t __user * arg,
++ rsbac_acl_rights_vector_t __user * rights_p,
++ u_int effective)
++ {
++ struct rsbac_acl_syscall_arg_t k_arg;
++ rsbac_acl_rights_vector_t k_rights = 0;
++ int err = 0;
++
++ if(!arg || !rights_p)
++ return -RSBAC_EINVALIDPOINTER;
++ /* get values from user space */
++ rsbac_get_user(&k_arg, arg, sizeof(k_arg) );
++
++ if(k_arg.target >= T_NONE)
++ return -RSBAC_EINVALIDTARGET;
++/* printk(KERN_DEBUG "sys_rsbac_acl_get_rights(): target = %u, subj_type = %u, subj_id = %u!\n",
++ k_arg.target, k_arg.subj_type, k_arg.subj_id); */
++ switch(k_arg.subj_type)
++ {
++ case ACLS_USER:
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(k_arg.subj_id) == RSBAC_UM_VIRTUAL_KEEP)
++ k_arg.subj_id = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(k_arg.subj_id));
++ else
++ if (RSBAC_UID_SET(k_arg.subj_id) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ k_arg.subj_id = RSBAC_UID_NUM(k_arg.subj_id);
++#endif
++ break;
++ case ACLS_GROUP:
++ if(k_arg.subj_id != RSBAC_ACL_GROUP_EVERYONE)
++ {
++ struct rsbac_acl_group_entry_t entry;
++ rsbac_uid_t caller;
++
++ if( rsbac_acl_get_group_entry(ta_number, k_arg.subj_id, &entry)
++ || rsbac_get_owner(&caller)
++ || ( (entry.owner != caller)
++ && (entry.type != ACLG_GLOBAL)
++ )
++ )
++ return -RSBAC_EINVALIDVALUE;
++ }
++ break;
++ case ACLS_ROLE:
++ #if defined(CONFIG_RSBAC_RC)
++ if(k_arg.subj_id > RC_role_max_value)
++ return -RSBAC_EINVALIDVALUE;
++ #endif
++ break;
++ default:
++ rsbac_printk(KERN_DEBUG "sys_rsbac_acl_get_rights(): Invalid subject type %u!\n", k_arg.subj_type);
++ return -RSBAC_EINVALIDVALUE;
++ }
++
++ /* call acl function */
++ err = rsbac_acl_sys_get_rights(ta_number,
++ k_arg.target,
++ k_arg.tid,
++ k_arg.subj_type,
++ k_arg.subj_id,
++ &k_rights,
++ effective);
++ if(!err)
++ {
++ err = rsbac_put_user(&k_rights, rights_p, sizeof(k_rights) );
++ }
++ return err;
++ } /* end of sys_rsbac_acl_get_rights() */
++
++
++int sys_rsbac_acl_get_rights_n(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_acl_syscall_n_arg_t __user * arg,
++ rsbac_acl_rights_vector_t __user * rights_p,
++ u_int effective)
++ {
++ struct dentry * t_dentry = NULL;
++ rsbac_boolean_t need_put = FALSE;
++ int err = 0;
++ union rsbac_target_id_t tid;
++ struct rsbac_acl_syscall_n_arg_t k_arg;
++ rsbac_acl_rights_vector_t k_rights = 0;
++
++ struct path path;
++
++ if(!arg || !rights_p)
++ return -RSBAC_EINVALIDPOINTER;
++ /* get values from user space */
++ rsbac_get_user(&k_arg, arg, sizeof(k_arg) );
++
++ if(k_arg.target >= T_NONE)
++ return -RSBAC_EINVALIDTARGET;
++ switch(k_arg.subj_type)
++ {
++ case ACLS_USER:
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(k_arg.subj_id) == RSBAC_UM_VIRTUAL_KEEP)
++ k_arg.subj_id = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(k_arg.subj_id));
++ else
++ if (RSBAC_UID_SET(k_arg.subj_id) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ k_arg.subj_id = RSBAC_UID_NUM(k_arg.subj_id);
++#endif
++ break;
++ case ACLS_GROUP:
++ if(k_arg.subj_id != RSBAC_ACL_GROUP_EVERYONE)
++ {
++ struct rsbac_acl_group_entry_t entry;
++ rsbac_uid_t caller;
++
++ if( rsbac_acl_get_group_entry(ta_number, k_arg.subj_id, &entry)
++ || rsbac_get_owner(&caller)
++ || ( (entry.owner != caller)
++ && (entry.type != ACLG_GLOBAL)
++ )
++ )
++ return -RSBAC_EINVALIDVALUE;
++ }
++ break;
++ case ACLS_ROLE:
++ #if defined(CONFIG_RSBAC_RC)
++ if(k_arg.subj_id > RC_role_max_value)
++ return -RSBAC_EINVALIDVALUE;
++ #endif
++ break;
++ default:
++ return -RSBAC_EINVALIDVALUE;
++ }
++
++ switch (k_arg.target)
++ {
++ case T_FD:
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_SYMLINK:
++ if(k_arg.name)
++ {
++ if ((err = user_lpath(k_arg.name, &path)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_acl)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_acl_get_rights_n(): call to user_lpath() returned %i\n", err);
++#endif
++ goto out;
++ }
++ t_dentry = path.dentry;
++ need_put = TRUE;
++ if (!t_dentry->d_inode)
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ /* check the inode type against the requested target */
++ switch(k_arg.target)
++ {
++ case T_FD:
++ if(S_ISREG(t_dentry->d_inode->i_mode))
++ {
++ k_arg.target = T_FILE;
++ }
++ else
++ if(S_ISDIR(t_dentry->d_inode->i_mode))
++ {
++ k_arg.target = T_DIR;
++ }
++ else
++ if(S_ISLNK(t_dentry->d_inode->i_mode))
++ {
++ k_arg.target = T_SYMLINK;
++ }
++ else
++ if(S_ISFIFO(t_dentry->d_inode->i_mode))
++ {
++ k_arg.target = T_FIFO;
++ }
++ else
++ if(S_ISSOCK(t_dentry->d_inode->i_mode))
++ {
++ k_arg.target = T_UNIXSOCK;
++ }
++ else
++ if(S_ISBLK(t_dentry->d_inode->i_mode))
++ {
++ k_arg.target = T_FILE;
++ }
++ else
++ if(S_ISCHR(t_dentry->d_inode->i_mode))
++ {
++ k_arg.target = T_FILE;
++ }
++ else
++ { /* This is no file or device */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++ case T_FILE:
++ if ( !(S_ISREG(t_dentry->d_inode->i_mode))
++ && !(S_ISBLK(t_dentry->d_inode->i_mode))
++ && !(S_ISCHR(t_dentry->d_inode->i_mode)) )
++ { /* This is no file or device */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++ case T_DIR:
++ if ( !(S_ISDIR(t_dentry->d_inode->i_mode)) )
++ { /* This is no dir */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++ case T_FIFO:
++ /* is inode of type fifo? */
++ if ( !(S_ISFIFO(t_dentry->d_inode->i_mode)))
++ { /* This is no fifo */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++ case T_UNIXSOCK:
++ if ( !(S_ISSOCK(t_dentry->d_inode->i_mode)))
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++ case T_SYMLINK:
++ /* is inode of type symlink? */
++ if ( !(S_ISLNK(t_dentry->d_inode->i_mode)))
++ { /* This is no symlink */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++ default:
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ tid.file.device = t_dentry->d_sb->s_dev;
++ tid.file.inode = t_dentry->d_inode->i_ino;
++ tid.file.dentry_p = t_dentry;
++ }
++ else
++ {
++ if(k_arg.target == T_FD)
++ k_arg.target = T_FILE;
++ tid.file.device = RSBAC_ZERO_DEV;
++ tid.file.inode = 0;
++ tid.file.dentry_p = NULL;
++ }
++ break;
++
++ case T_DEV:
++ if(k_arg.name)
++ {
++ if ((err = user_lpath(k_arg.name, &path)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_acl)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_acl_get_rights_n(): call to user_lpath() returned %i\n", err);
++#endif
++ goto out;
++ }
++ t_dentry = path.dentry;
++ need_put = TRUE;
++ if (!t_dentry->d_inode)
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++            /* is inode of type block/char device? */
++ if ( !(S_ISBLK(t_dentry->d_inode->i_mode))
++ && !(S_ISCHR(t_dentry->d_inode->i_mode)) )
++ { /* This is no file or device */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ /* fill target id and call internal function */
++ if(S_ISBLK(t_dentry->d_inode->i_mode))
++ tid.dev.type = D_block;
++ else
++ tid.dev.type = D_char;
++ tid.dev.major = RSBAC_MAJOR(t_dentry->d_inode->i_rdev);
++ tid.dev.minor = RSBAC_MINOR(t_dentry->d_inode->i_rdev);
++ }
++ else
++ {
++ tid.dev = RSBAC_ZERO_DEV_DESC;
++ }
++ break;
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++
++ /* call acl function */
++ err = rsbac_acl_sys_get_rights(ta_number,
++ k_arg.target,
++ tid,
++ k_arg.subj_type,
++ k_arg.subj_id,
++ &k_rights,
++ effective);
++
++out_dput:
++ if(need_put)
++ path_put(&path);
++out:
++ if(!err)
++ {
++ rsbac_put_user(&k_rights, rights_p, sizeof(k_rights) );
++ }
++ return err;
++ } /* end of sys_rsbac_acl_get_rights_n() */
++
++/************************************************************************** */
++
++int sys_rsbac_acl_get_tlist (
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t __user * tid,
++ struct rsbac_acl_entry_t __user entry_array[],
++ rsbac_time_t __user ttl_array[],
++ u_int maxnum)
++ {
++ union rsbac_target_id_t k_tid;
++ struct rsbac_acl_entry_t * k_entry_p;
++ rsbac_time_t * k_ttl_p;
++ int err = 0;
++
++ if(!tid || (target >= T_NONE))
++ return -RSBAC_EINVALIDTARGET;
++ if(!entry_array)
++ return -RSBAC_EINVALIDPOINTER;
++ if(!maxnum)
++ return -RSBAC_EINVALIDVALUE;
++ if(maxnum > RSBAC_ACL_MAX_MAXNUM)
++ maxnum = RSBAC_ACL_MAX_MAXNUM;
++
++ /* get values from user space */
++ err = rsbac_get_user(&k_tid, tid, sizeof(k_tid) );
++ if(err)
++ return err;
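++    /* Normalize the target id: dentry pointers from user space are never
++     * trusted and pids are resolved in the initial pid namespace. */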
++ switch (target) {
++ case T_FD:
++ return -RSBAC_EINVALIDTARGET;
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_SYMLINK:
++ k_tid.file.dentry_p = NULL;
++ k_tid.dir.dentry_p = NULL;
++ break;
++ case T_PROCESS:
++ k_tid.process = find_pid_ns(k_tid.uprocess, &init_pid_ns);
++ if(!k_tid.process)
++ return -RSBAC_EINVALIDTARGET;
++ break;
++ default:
++ break;
++ }
++
++ /* call acl function */
++ err = rsbac_acl_sys_get_tlist(ta_number, target, k_tid, &k_entry_p, &k_ttl_p);
++ if(err>0)
++ {
++ if(err > maxnum)
++ err = maxnum;
++ rsbac_put_user(k_entry_p,
++ entry_array,
++ err * sizeof(*k_entry_p) );
++ if(ttl_array)
++ {
++ rsbac_put_user(k_ttl_p,
++ ttl_array,
++ err * sizeof(*k_ttl_p) );
++ }
++ rsbac_kfree(k_entry_p);
++ rsbac_kfree(k_ttl_p);
++ }
++ return err;
++ } /* end of sys_rsbac_acl_get_tlist() */
++
++int sys_rsbac_acl_get_tlist_n(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ char __user * t_name,
++ struct rsbac_acl_entry_t __user entry_array[],
++ rsbac_time_t __user ttl_array[],
++ u_int maxnum)
++ {
++ struct dentry * t_dentry = NULL;
++ struct rsbac_acl_entry_t * k_entry_p;
++ rsbac_time_t * k_ttl_p;
++ rsbac_boolean_t need_put = FALSE;
++ int err = 0;
++ union rsbac_target_id_t tid;
++
++ struct path path;
++
++ if(target >= T_NONE)
++ return -RSBAC_EINVALIDTARGET;
++ if(!entry_array)
++ return -RSBAC_EINVALIDPOINTER;
++
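++    /* Look up the object by name and derive its RSBAC target type from the
++     * inode mode, as in sys_rsbac_acl_get_rights_n() above. */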
++ switch (target)
++ {
++ case T_FD:
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_SYMLINK:
++ if(t_name)
++ {
++ if ((err = user_lpath(t_name, &path)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_acl)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_acl_get_tlist_n(): call to user_lpath() returned %i\n", err);
++#endif
++ goto out;
++ }
++ t_dentry = path.dentry;
++ need_put = TRUE;
++ if (!t_dentry->d_inode)
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++            /* determine and check the concrete object type from the inode mode */
++ switch(target)
++ {
++ case T_FD:
++ if(S_ISREG(t_dentry->d_inode->i_mode))
++ {
++ target = T_FILE;
++ }
++ else
++ if(S_ISDIR(t_dentry->d_inode->i_mode))
++ {
++ target = T_DIR;
++ }
++ else
++ if(S_ISLNK(t_dentry->d_inode->i_mode))
++ {
++ target = T_SYMLINK;
++ }
++ else
++ if(S_ISFIFO(t_dentry->d_inode->i_mode))
++ {
++ target = T_FIFO;
++ }
++ else
++ if(S_ISSOCK(t_dentry->d_inode->i_mode))
++ {
++ target = T_UNIXSOCK;
++ }
++ else
++ if(S_ISBLK(t_dentry->d_inode->i_mode))
++ {
++ target = T_FILE;
++ }
++ else
++ if(S_ISCHR(t_dentry->d_inode->i_mode))
++ {
++ target = T_FILE;
++ }
++ else
++ { /* This is no file or device */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++ case T_FILE:
++ if ( !(S_ISREG(t_dentry->d_inode->i_mode))
++ && !(S_ISBLK(t_dentry->d_inode->i_mode))
++ && !(S_ISCHR(t_dentry->d_inode->i_mode)) )
++ { /* This is no file or device */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++ case T_DIR:
++ if ( !(S_ISDIR(t_dentry->d_inode->i_mode)) )
++ { /* This is no dir */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++ case T_FIFO:
++ /* is inode of type fifo? */
++ if ( !(S_ISFIFO(t_dentry->d_inode->i_mode)))
++ { /* This is no fifo */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++ case T_UNIXSOCK:
++ if ( !(S_ISSOCK(t_dentry->d_inode->i_mode)))
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++ case T_SYMLINK:
++ /* is inode of type symlink? */
++ if ( !(S_ISLNK(t_dentry->d_inode->i_mode)))
++ { /* This is no symlink */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++ default:
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ tid.file.device = t_dentry->d_sb->s_dev;
++ tid.file.inode = t_dentry->d_inode->i_ino;
++ tid.file.dentry_p = t_dentry;
++ }
++ else
++ {
++ if(target == T_FD)
++ target = T_FILE;
++ tid.file.device = RSBAC_ZERO_DEV;
++ tid.file.inode = 0;
++ tid.file.dentry_p = NULL;
++ }
++ break;
++
++ case T_DEV:
++ if(t_name)
++ {
++ if ((err = user_lpath(t_name, &path)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_acl)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_acl_get_tlist_n(): call to user_lpath() returned %i\n", err);
++#endif
++ goto out;
++ }
++ t_dentry = path.dentry;
++ need_put = TRUE;
++ if (!t_dentry->d_inode)
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++            /* is inode of type block/char device? */
++ if ( !(S_ISBLK(t_dentry->d_inode->i_mode))
++ && !(S_ISCHR(t_dentry->d_inode->i_mode)) )
++ { /* This is no file or device */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ /* fill target id and call internal function */
++ if(S_ISBLK(t_dentry->d_inode->i_mode))
++ tid.dev.type = D_block;
++ else
++ tid.dev.type = D_char;
++ tid.dev.major = RSBAC_MAJOR(t_dentry->d_inode->i_rdev);
++ tid.dev.minor = RSBAC_MINOR(t_dentry->d_inode->i_rdev);
++ }
++ else
++ {
++ tid.dev = RSBAC_ZERO_DEV_DESC;
++ }
++ break;
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++ /* call ACL function */
++ err = rsbac_acl_sys_get_tlist(ta_number, target, tid,
++ &k_entry_p, &k_ttl_p);
++
++out_dput:
++ if(need_put)
++ path_put(&path);
++out:
++ if(err>0)
++ {
++ if(err > maxnum)
++ err = maxnum;
++ rsbac_put_user(k_entry_p,
++ entry_array,
++ err * sizeof(*k_entry_p) );
++ if(ttl_array)
++ {
++ rsbac_put_user(k_ttl_p,
++ ttl_array,
++ err * sizeof(*k_ttl_p) );
++ }
++ rsbac_kfree(k_entry_p);
++ rsbac_kfree(k_ttl_p);
++ }
++ return err;
++ } /* end of sys_rsbac_acl_get_tlist_n() */
++
++/************************************************************************** */
++
++int sys_rsbac_acl_get_mask (
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ union rsbac_target_id_t __user * tid,
++ rsbac_acl_rights_vector_t __user * mask_p)
++ {
++ union rsbac_target_id_t k_tid;
++ rsbac_acl_rights_vector_t k_mask;
++ int err = 0;
++
++ if(!tid || (target >= T_NONE))
++ return -RSBAC_EINVALIDTARGET;
++ if(!mask_p)
++ return -RSBAC_EINVALIDPOINTER;
++
++ /* get values from user space */
++      err = rsbac_get_user(&k_tid, tid, sizeof(k_tid) );
++      if(err)
++        return err;
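++      /* Clear untrusted dentry pointers and resolve the pid before querying
++       * the mask. */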
++ switch (target) {
++ case T_FD:
++ return -RSBAC_EINVALIDTARGET;
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_SYMLINK:
++ k_tid.file.dentry_p = NULL;
++ k_tid.dir.dentry_p = NULL;
++ break;
++ case T_PROCESS:
++ k_tid.process = find_pid_ns(k_tid.uprocess, &init_pid_ns);
++ if(!k_tid.process)
++ return -RSBAC_EINVALIDTARGET;
++ break;
++ default:
++ break;
++ }
++ /* call acl function */
++ err = rsbac_acl_sys_get_mask(ta_number, target, k_tid, &k_mask);
++ if(!err)
++ {
++ rsbac_put_user(&k_mask,
++ mask_p,
++ sizeof(k_mask) );
++ }
++ return err;
++ } /* end of sys_rsbac_acl_get_mask() */
++
++int sys_rsbac_acl_get_mask_n(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_target_t target,
++ char __user * t_name,
++ rsbac_acl_rights_vector_t __user * mask_p)
++ {
++ struct dentry * t_dentry = NULL;
++ rsbac_acl_rights_vector_t k_mask;
++ rsbac_boolean_t need_put = FALSE;
++ int err = 0;
++ union rsbac_target_id_t tid;
++
++ struct path path;
++
++ if(target >= T_NONE)
++ return -RSBAC_EINVALIDTARGET;
++ if(!mask_p)
++ return -RSBAC_EINVALIDPOINTER;
++
++ switch (target)
++ {
++ case T_FD:
++ case T_FILE:
++ case T_DIR:
++ case T_FIFO:
++ case T_UNIXSOCK:
++ case T_SYMLINK:
++ if(t_name)
++ {
++ if ((err = user_lpath(t_name, &path)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_acl)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_acl_get_mask_n(): call to user_lpath() returned %i\n", err);
++#endif
++ goto out;
++ }
++ t_dentry = path.dentry;
++ need_put = TRUE;
++ if (!t_dentry->d_inode)
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++            /* determine and check the concrete object type from the inode mode */
++ switch(target)
++ {
++ case T_FD:
++ if(S_ISREG(t_dentry->d_inode->i_mode))
++ {
++ target = T_FILE;
++ }
++ else
++ if(S_ISDIR(t_dentry->d_inode->i_mode))
++ {
++ target = T_DIR;
++ }
++ else
++ if(S_ISLNK(t_dentry->d_inode->i_mode))
++ {
++ target = T_SYMLINK;
++ }
++ else
++ if(S_ISFIFO(t_dentry->d_inode->i_mode))
++ {
++ target = T_FIFO;
++ }
++ else
++ if(S_ISSOCK(t_dentry->d_inode->i_mode))
++ {
++ target = T_UNIXSOCK;
++ }
++ else
++ if(S_ISBLK(t_dentry->d_inode->i_mode))
++ {
++ target = T_FILE;
++ }
++ else
++ if(S_ISCHR(t_dentry->d_inode->i_mode))
++ {
++ target = T_FILE;
++ }
++ else
++ { /* This is no file or device */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++ case T_FILE:
++ if ( !(S_ISREG(t_dentry->d_inode->i_mode))
++ && !(S_ISBLK(t_dentry->d_inode->i_mode))
++ && !(S_ISCHR(t_dentry->d_inode->i_mode)) )
++ { /* This is no file or device */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++ case T_DIR:
++ if ( !(S_ISDIR(t_dentry->d_inode->i_mode)) )
++ { /* This is no dir */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++ case T_FIFO:
++ if ( !(S_ISFIFO(t_dentry->d_inode->i_mode)))
++ { /* This is no fifo */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++ case T_UNIXSOCK:
++ if ( !(S_ISSOCK(t_dentry->d_inode->i_mode)))
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++ case T_SYMLINK:
++ if ( !(S_ISLNK(t_dentry->d_inode->i_mode)))
++ { /* This is no symlink */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ break;
++ default:
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ tid.file.device = t_dentry->d_sb->s_dev;
++ tid.file.inode = t_dentry->d_inode->i_ino;
++ tid.file.dentry_p = t_dentry;
++ }
++ else
++ {
++ if(target == T_FD)
++ target = T_FILE;
++ tid.file.device = RSBAC_ZERO_DEV;
++ tid.file.inode = 0;
++ tid.file.dentry_p = NULL;
++ }
++ break;
++
++ case T_DEV:
++ if(t_name)
++ {
++ if ((err = user_lpath(t_name, &path)))
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_acl)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_acl_get_mask_n(): call to user_lpath() returned %i\n", err);
++#endif
++ goto out;
++ }
++ t_dentry = path.dentry;
++ need_put = TRUE;
++ if (!t_dentry->d_inode)
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ /* is inode of type block/char device? */
++ if ( !(S_ISBLK(t_dentry->d_inode->i_mode))
++ && !(S_ISCHR(t_dentry->d_inode->i_mode)) )
++ { /* This is no file or device */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ /* fill target id and call internal function */
++ if(S_ISBLK(t_dentry->d_inode->i_mode))
++ tid.dev.type = D_block;
++ else
++ tid.dev.type = D_char;
++ tid.dev.major = RSBAC_MAJOR(t_dentry->d_inode->i_rdev);
++ tid.dev.minor = RSBAC_MINOR(t_dentry->d_inode->i_rdev);
++ }
++ else
++ {
++ tid.dev = RSBAC_ZERO_DEV_DESC;
++ }
++ break;
++
++ default:
++ return -RSBAC_EINVALIDTARGET;
++ }
++ /* call ACL function */
++ err = rsbac_acl_sys_get_mask(ta_number, target, tid, &k_mask);
++
++out_dput:
++ if(need_put)
++ path_put(&path);
++out:
++ if(!err)
++ {
++ rsbac_put_user(&k_mask,
++ mask_p,
++ sizeof(k_mask) );
++ }
++ return err;
++ } /* end of sys_rsbac_acl_get_mask_n() */
++
++/******** ACL groups *********/
++
++int sys_rsbac_acl_group(
++ rsbac_list_ta_number_t ta_number,
++ enum rsbac_acl_group_syscall_type_t call,
++ union rsbac_acl_group_syscall_arg_t __user * arg_p)
++ {
++ union rsbac_acl_group_syscall_arg_t k_arg;
++ int err = 0;
++
++ if(call >= ACLGS_none)
++ return -RSBAC_EINVALIDREQUEST;
++ if(!arg_p)
++ return -RSBAC_EINVALIDPOINTER;
++
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ switch(call)
++ {
++ case ACLGS_add_group:
++ case ACLGS_change_group:
++ case ACLGS_remove_group:
++ case ACLGS_add_member:
++ case ACLGS_remove_member:
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_acl_group(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++
++ default:
++ break;
++ }
++ }
++#endif
++
++ /* get values from user space */
++ err = rsbac_get_user(&k_arg, arg_p, sizeof(k_arg) );
++
++ /* call acl function */
++ if(err >= 0)
++ err = rsbac_acl_sys_group(ta_number, call, k_arg);
++ return err;
++  } /* end of sys_rsbac_acl_group() */
++
++int sys_rsbac_acl_list_all_dev(
++ rsbac_list_ta_number_t ta_number,
++ struct rsbac_dev_desc_t __user * id_p,
++ u_long maxnum)
++ {
++ int err = 0;
++ long count;
++ long count2;
++
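++    /* With a buffer supplied, copy up to maxnum entries: first the ACL
++     * entries for whole major devices, then those for single devices.
++     * Without a buffer only the total count is returned. */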
++ if(id_p && maxnum)
++ {
++ struct rsbac_dev_desc_t * k_id_p = NULL;
++
++ count = rsbac_acl_list_all_major_dev(ta_number, &k_id_p);
++ if(count < 0)
++ return count;
++ if(count > maxnum)
++ count = maxnum;
++
++ if(count)
++ {
++ err = rsbac_put_user(k_id_p, id_p, count * sizeof(*k_id_p) );
++ rsbac_kfree(k_id_p);
++ if(err)
++ return err;
++ id_p += count;
++ maxnum -= count;
++ if(!maxnum)
++ return count;
++ }
++
++ count2 = rsbac_acl_list_all_dev(ta_number, &k_id_p);
++ if(count2 < 0)
++ return count2;
++ if(count2 > maxnum)
++ count2 = maxnum;
++
++ if(count2)
++ {
++ err = rsbac_put_user(k_id_p, id_p, count2 * sizeof(*k_id_p) );
++ rsbac_kfree(k_id_p);
++ if(err)
++ return err;
++ count += count2;
++ }
++ return count;
++ }
++ else
++ {
++ count = rsbac_acl_list_all_major_dev(ta_number, NULL);
++ if(count < 0)
++ return count;
++ count2 = rsbac_acl_list_all_dev(ta_number, NULL);
++ if(count2 < 0)
++ return count2;
++ else
++ return count + count2;
++ }
++ }
++
++int sys_rsbac_acl_list_all_user(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t __user * id_p,
++ u_long maxnum)
++ {
++ int err = 0;
++ long count;
++
++ if(id_p && maxnum)
++ {
++ rsbac_uid_t * k_id_p = NULL;
++
++ count = rsbac_acl_list_all_user(ta_number, &k_id_p);
++ if(count < 0)
++ return count;
++ if(count > maxnum)
++ count = maxnum;
++
++ if(count)
++ {
++ err = rsbac_put_user(k_id_p, id_p, count * sizeof(*k_id_p) );
++ rsbac_kfree(k_id_p);
++ if(err)
++ return err;
++ }
++ return count;
++ }
++ else
++ {
++ return rsbac_acl_list_all_user(ta_number, NULL);
++ }
++ }
++
++int sys_rsbac_acl_list_all_group(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t __user * id_p,
++ u_long maxnum)
++ {
++#ifdef CONFIG_RSBAC_ACL_UM_PROT
++ int err = 0;
++ long count;
++
++ if(id_p && maxnum)
++ {
++ rsbac_gid_t * k_id_p = NULL;
++
++ count = rsbac_acl_list_all_group(ta_number, &k_id_p);
++ if(count < 0)
++ return count;
++ if(count > maxnum)
++ count = maxnum;
++
++ if(count)
++ {
++ err = rsbac_put_user(k_id_p, id_p, count * sizeof(*k_id_p) );
++ rsbac_kfree(k_id_p);
++ if(err)
++ return err;
++ }
++ return count;
++ }
++ else
++ {
++ return rsbac_acl_list_all_group(ta_number, NULL);
++ }
++#else
++ return -RSBAC_EINVALIDMODULE;
++#endif
++ }
++#endif
++
++/******** JAIL *********/
++
++#ifdef CONFIG_RSBAC_JAIL
++int sys_rsbac_jail(rsbac_version_t version,
++ char __user * path,
++ rsbac_jail_ip_t ip,
++ rsbac_jail_flags_t flags,
++ rsbac_cap_vector_t max_caps,
++ rsbac_jail_scd_vector_t scd_get,
++ rsbac_jail_scd_vector_t scd_modify)
++ {
++ return rsbac_jail_sys_jail(version, path, ip, flags,
++ max_caps, scd_get, scd_modify);
++ }
++#endif
++
++/******** UM *********/
++
++#ifdef CONFIG_RSBAC_UM
++int sys_rsbac_um_auth_name(
++ char __user * name,
++ char __user * pass)
++ {
++ rsbac_uid_t uid = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, RSBAC_NO_USER);
++ int err;
++ char * k_name;
++ char * k_pass;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val;
++
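++    /* Authentication flow: resolve the user name to a uid, ask the ADF for
++     * AUTHENTICATE on the USER target, verify the password, then record
++     * auth_last_auth and the virtual set on the calling process. Failures
++     * sleep for one second to slow down brute force attempts. */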
++ if(!name || !pass)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_auth_name(): NULL name or pass\n");
++ }
++#endif
++ return -RSBAC_EINVALIDPOINTER;
++ }
++ k_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(!k_name)
++ return -RSBAC_ENOMEM;
++ k_pass = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(!k_pass)
++ {
++ rsbac_kfree(k_name);
++ return -RSBAC_ENOMEM;
++ }
++ err = rsbac_get_user(k_name, name, RSBAC_MAXNAMELEN);
++ if(err)
++ goto out_free;
++ k_name[RSBAC_MAXNAMELEN-1] = 0;
++ err = rsbac_get_user(k_pass, pass, RSBAC_MAXNAMELEN);
++ if(err)
++ goto out_free;
++ k_pass[RSBAC_MAXNAMELEN-1] = 0;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_auth_name(): authenticating user %s\n",
++ k_name);
++ }
++#endif
++ err = rsbac_um_get_uid(0, k_name, &uid);
++ if(err) {
++ if(err == -RSBAC_ENOTFOUND) {
++ err = -EPERM;
++ ssleep(1);
++ }
++ goto out_free;
++ }
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_auth_name(): calling ADF\n");
++ }
++#endif
++ i_tid.user = uid;
++ i_attr_val.dummy = 0;
++ if (!rsbac_adf_request(R_AUTHENTICATE,
++ task_pid(current),
++ T_USER,
++ i_tid,
++ A_none,
++ i_attr_val))
++ {
++ err = -EPERM;
++ ssleep(1);
++ goto out_free;
++ }
++#endif /* MAINT */
++
++ err = rsbac_um_check_pass(uid, k_pass);
++ if(err) {
++ if(err == -RSBAC_ENOTFOUND) {
++ err = -EPERM;
++ }
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(uid))
++            rsbac_printk(KERN_DEBUG "sys_rsbac_um_auth_name(): authenticating user %u/%u failed\n",
++ RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ else
++#endif
++            rsbac_printk(KERN_DEBUG "sys_rsbac_um_auth_name(): authenticating user %u failed\n",
++ RSBAC_UID_NUM(uid));
++ }
++#endif
++ ssleep(1);
++ goto out_free;
++ }
++
++#ifdef CONFIG_RSBAC_AUTH
++ /* set auth_last_auth for this process */
++ i_tid.process = task_pid(current);
++ i_attr_val.auth_last_auth = uid;
++ if (rsbac_set_attr(SW_AUTH,
++ T_PROCESS,
++ i_tid,
++ A_auth_last_auth,
++ i_attr_val))
++ {
++ rsbac_ds_set_error("sys_rsbac_um_auth_name()", A_auth_last_auth);
++ }
++#endif /* AUTH */
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_auth_name(): setting process %u vset to %u\n",
++ current->pid, RSBAC_UID_SET(uid));
++ }
++#endif
++ /* set vset for this process */
++ i_tid.process = task_pid(current);
++ i_attr_val.vset = RSBAC_UID_SET(uid);
++ if (rsbac_set_attr(SW_GEN,
++ T_PROCESS,
++ i_tid,
++ A_vset,
++ i_attr_val))
++ {
++ rsbac_ds_set_error("sys_rsbac_um_auth_name()", A_vset);
++ }
++#endif
++
++out_free:
++ rsbac_kfree(k_name);
++ memset(k_pass, 0, RSBAC_MAXNAMELEN);
++ rsbac_kfree(k_pass);
++ return err;
++ }
++
++int sys_rsbac_um_auth_uid(rsbac_uid_t uid,
++ char __user * pass)
++ {
++ int err;
++ char * k_pass;
++ union rsbac_target_id_t i_tid;
++ union rsbac_attribute_value_t i_attr_val;
++
++ if(!pass)
++ return -RSBAC_EINVALIDPOINTER;
++ k_pass = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(!k_pass)
++ return -RSBAC_ENOMEM;
++ err = rsbac_get_user(k_pass, pass, RSBAC_MAXNAMELEN);
++ if(err)
++ goto out_free;
++ k_pass[RSBAC_MAXNAMELEN-1] = 0;
++
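++    /* Map the uid into the caller's virtual user set unless an explicit,
++     * valid set was given. */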
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++    if (RSBAC_UID_SET(uid) == RSBAC_UM_VIRTUAL_KEEP)
++      uid = RSBAC_GEN_UID (rsbac_get_vset(), uid);
++    else
++      if (RSBAC_UID_SET(uid) > RSBAC_UM_VIRTUAL_MAX)
++        {
++          /* k_pass is already allocated here, free it instead of leaking it */
++          err = -RSBAC_EINVALIDVALUE;
++          goto out_free;
++        }
++#else
++    uid = RSBAC_UID_NUM(uid);
++#endif
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(uid))
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_auth_uid(): authenticating user %u/%u\n",
++ RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ else
++#endif
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_auth_uid(): authenticating user %u\n",
++ RSBAC_UID_NUM(uid));
++ }
++#endif
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_auth_uid(): calling ADF\n");
++ }
++#endif
++ i_tid.user = uid;
++ i_attr_val.dummy = 0;
++ if (!rsbac_adf_request(R_AUTHENTICATE,
++ task_pid(current),
++ T_USER,
++ i_tid,
++ A_none,
++ i_attr_val))
++ {
++ err = -EPERM;
++ ssleep(1);
++ goto out_free;
++ }
++#endif /* MAINT */
++
++ err = rsbac_um_check_pass(uid, k_pass);
++ if(err) {
++ if(err == -RSBAC_ENOTFOUND) {
++ err = -EPERM;
++ }
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(uid))
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_auth_uid(): authenticating user %u/%u failed\n",
++ RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ else
++#endif
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_auth_uid(): authenticating user %u failed\n",
++ RSBAC_UID_NUM(uid));
++ }
++#endif
++ goto out_free;
++ }
++
++#ifdef CONFIG_RSBAC_AUTH
++ /* set auth_last_auth for this process */
++ i_tid.process = task_pid(current);
++ i_attr_val.auth_last_auth = uid;
++ if (rsbac_set_attr(SW_AUTH,
++ T_PROCESS,
++ i_tid,
++ A_auth_last_auth,
++ i_attr_val))
++ {
++ rsbac_ds_set_error("sys_rsbac_um_auth_uid()", A_auth_last_auth);
++ }
++#endif /* AUTH */
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ /* set vset for this process */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++      rsbac_printk(KERN_DEBUG "sys_rsbac_um_auth_uid(): setting process %u vset to %u\n",
++ current->pid, RSBAC_UID_SET(uid));
++ }
++#endif
++ i_tid.process = task_pid(current);
++ i_attr_val.vset = RSBAC_UID_SET(uid);
++ if (rsbac_set_attr(SW_GEN,
++ T_PROCESS,
++ i_tid,
++ A_vset,
++ i_attr_val))
++ {
++ rsbac_ds_set_error("sys_rsbac_um_auth_uid()", A_vset);
++ }
++#endif
++
++out_free:
++ memset(k_pass, 0, RSBAC_MAXNAMELEN);
++ rsbac_kfree(k_pass);
++ return err;
++ }
++
++int sys_rsbac_um_add_user(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t uid,
++ struct rsbac_um_user_entry_t __user * entry_p,
++ char __user * pass,
++ rsbac_time_t ttl)
++ {
++ int err;
++ struct rsbac_um_user_entry_t * k_entry_p;
++ char * k_pass;
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++ if(!entry_p)
++ return -RSBAC_EINVALIDPOINTER;
++
++#ifdef CONFIG_RSBAC_FREEZE_UM
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_um_add_user(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(uid) == RSBAC_UM_VIRTUAL_KEEP)
++ uid = RSBAC_GEN_UID (rsbac_get_vset(), uid);
++ else
++ if (RSBAC_UID_SET(uid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ uid = RSBAC_UID_NUM(uid);
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_add_user(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = uid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_CREATE,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
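++    /* Copy the new entry, reject duplicate user names, then create the user
++     * and notify the ADF of the new USER target. */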
++ k_entry_p = rsbac_kmalloc_unlocked(sizeof(*k_entry_p));
++ if(!k_entry_p)
++ return -RSBAC_ENOMEM;
++ if(pass)
++ {
++ k_pass = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(!k_pass)
++ {
++ rsbac_kfree(k_entry_p);
++ return -RSBAC_ENOMEM;
++ }
++ }
++ else
++ k_pass = NULL;
++ err = rsbac_get_user(k_entry_p, entry_p, sizeof(*k_entry_p));
++ if(err)
++ goto out_free;
++ if(!k_entry_p->name[0])
++ {
++ err = -RSBAC_EINVALIDVALUE;
++ goto out_free;
++ }
++ err = rsbac_um_get_uid(0, k_entry_p->name, &uid);
++ if(!err) {
++ err = -RSBAC_EEXISTS;
++ goto out_free;
++ }
++ if(pass)
++ {
++ err = rsbac_get_user(k_pass, pass, RSBAC_MAXNAMELEN);
++ if(err)
++ goto out_free;
++ k_pass[RSBAC_MAXNAMELEN-1] = 0;
++ }
++ err = rsbac_um_add_user(ta_number, &uid, k_entry_p, k_pass, ttl);
++
++#ifndef CONFIG_RSBAC_MAINT
++ /* RSBAC: notify ADF of new user */
++ if(!err)
++ {
++ rsbac_target_id.user = uid;
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_CREATE,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_um_add_user(): rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++
++out_free:
++ rsbac_kfree(k_entry_p);
++ if(k_pass)
++ {
++ memset(k_pass, 0, RSBAC_MAXNAMELEN);
++ rsbac_kfree(k_pass);
++ }
++ return err;
++ }
++
++int sys_rsbac_um_add_group(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t gid,
++ struct rsbac_um_group_entry_t __user * entry_p,
++ char __user * pass,
++ rsbac_time_t ttl)
++ {
++ int err;
++ struct rsbac_um_group_entry_t * k_entry_p;
++ char * k_pass;
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++ if(!entry_p)
++ return -RSBAC_EINVALIDPOINTER;
++
++#ifdef CONFIG_RSBAC_FREEZE_UM
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_um_add_group(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_GID_SET(gid) == RSBAC_UM_VIRTUAL_KEEP)
++ gid = RSBAC_GEN_GID (rsbac_get_vset(), RSBAC_GID_NUM(gid));
++ else
++ if (RSBAC_GID_SET(gid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ gid = RSBAC_GID_NUM(gid);
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_add_group(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.group = gid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_CREATE,
++ task_pid(current),
++ T_GROUP,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ k_entry_p = rsbac_kmalloc_unlocked(sizeof(*k_entry_p));
++ if(!k_entry_p)
++ return -RSBAC_ENOMEM;
++ if(pass)
++ {
++ k_pass = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(!k_pass)
++ {
++ rsbac_kfree(k_entry_p);
++ return -RSBAC_ENOMEM;
++ }
++ }
++ else
++ k_pass = NULL;
++ err = rsbac_get_user(k_entry_p, entry_p, sizeof(*k_entry_p));
++ if(err)
++ goto out_free;
++ if(!k_entry_p->name[0])
++ {
++ err = -RSBAC_EINVALIDVALUE;
++ goto out_free;
++ }
++ err = rsbac_um_get_gid(0, k_entry_p->name, &gid);
++ if(!err) {
++ err = -RSBAC_EEXISTS;
++ goto out_free;
++ }
++ if(pass)
++ {
++ err = rsbac_get_user(k_pass, pass, RSBAC_MAXNAMELEN);
++ if(err)
++ goto out_free;
++ k_pass[RSBAC_MAXNAMELEN-1] = 0;
++ }
++ err = rsbac_um_add_group(ta_number, &gid, k_entry_p, k_pass, ttl);
++
++#ifndef CONFIG_RSBAC_MAINT
++ /* RSBAC: notify ADF of new group */
++ if(!err)
++ {
++ rsbac_target_id.group = gid;
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_CREATE,
++ task_pid(current),
++ T_GROUP,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_um_add_group(): rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++
++out_free:
++ rsbac_kfree(k_entry_p);
++ if(k_pass)
++ {
++ memset(k_pass, 0, RSBAC_MAXNAMELEN);
++ rsbac_kfree(k_pass);
++ }
++ return err;
++ }
++
++int sys_rsbac_um_add_gm(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user,
++ rsbac_gid_num_t group,
++ rsbac_time_t ttl)
++ {
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC_FREEZE_UM
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_um_add_gm(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(user) == RSBAC_UM_VIRTUAL_KEEP)
++ user = RSBAC_GEN_UID (rsbac_get_vset(), user);
++ else
++ if (RSBAC_UID_SET(user) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ user = RSBAC_UID_NUM(user);
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_add_gm(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = user;
++ rsbac_attribute_value.group = group;
++ if (!rsbac_adf_request(R_CHANGE_GROUP,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_group,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ return rsbac_um_add_gm(ta_number, user, group, ttl);
++ }
++
++int sys_rsbac_um_mod_user(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t uid,
++ enum rsbac_um_mod_t mod,
++ union rsbac_um_mod_data_t __user * data_p)
++ {
++ int err;
++ union rsbac_um_mod_data_t * k_data_p;
++#ifndef CONFIG_RSBAC_MAINT
++ enum rsbac_adf_request_t rsbac_request;
++ union rsbac_target_id_t rsbac_target_id;
++ enum rsbac_attribute_t rsbac_attribute = A_none;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++ if(mod >= UM_none)
++ return -RSBAC_EINVALIDREQUEST;
++ if( !data_p
++ && (mod != UM_pass)
++ )
++ return -RSBAC_EINVALIDPOINTER;
++
++#ifdef CONFIG_RSBAC_FREEZE_UM
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_um_mod_user(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(uid) == RSBAC_UM_VIRTUAL_KEEP)
++ uid = RSBAC_GEN_UID (rsbac_get_vset(), uid);
++ else
++ if (RSBAC_UID_SET(uid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ uid = RSBAC_UID_NUM(uid);
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_mod_user(): calling ADF\n");
++ }
++#endif
++ rsbac_attribute_value.dummy = 0;
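++    /* Map the modification type to the ADF request that guards it. */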
++ switch(mod)
++ {
++ case UM_name:
++ rsbac_request = R_RENAME;
++ break;
++
++ case UM_pass:
++ case UM_cryptpass:
++ rsbac_request = R_MODIFY_PERMISSIONS_DATA;
++ break;
++
++ case UM_fullname:
++ rsbac_request = R_WRITE;
++ break;
++
++ case UM_homedir:
++ rsbac_request = R_WRITE;
++ break;
++
++ case UM_shell:
++ rsbac_request = R_WRITE;
++ break;
++
++ case UM_group:
++ rsbac_request = R_CHANGE_GROUP;
++ rsbac_attribute = A_group;
++ rsbac_attribute_value.group = data_p->group;
++ break;
++
++ case UM_lastchange:
++ rsbac_request = R_WRITE;
++ break;
++
++ case UM_minchange:
++ rsbac_request = R_WRITE;
++ break;
++
++ case UM_maxchange:
++ rsbac_request = R_WRITE;
++ break;
++
++ case UM_warnchange:
++ rsbac_request = R_WRITE;
++ break;
++
++ case UM_inactive:
++ rsbac_request = R_WRITE;
++ break;
++
++ case UM_expire:
++ rsbac_request = R_WRITE;
++ break;
++
++ case UM_ttl:
++ rsbac_request = R_DELETE;
++ break;
++
++ default:
++ return -RSBAC_EINVALIDREQUEST;
++ }
++ rsbac_target_id.user = uid;
++ if (!rsbac_adf_request(rsbac_request,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ rsbac_attribute,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++
++ if(data_p)
++ {
++ k_data_p = rsbac_kmalloc_unlocked(sizeof(*k_data_p));
++ if(!k_data_p)
++ return -RSBAC_ENOMEM;
++ err = rsbac_get_user(k_data_p, data_p, sizeof(*k_data_p));
++ if(err)
++ {
++ rsbac_kfree(k_data_p);
++ return err;
++ }
++ k_data_p->string[RSBAC_MAXNAMELEN-1] = 0;
++ }
++ else
++ k_data_p = NULL;
++
++ err = rsbac_um_mod_user(ta_number, uid, mod, k_data_p);
++
++ if(k_data_p)
++ rsbac_kfree(k_data_p);
++ return err;
++ }
++
++int sys_rsbac_um_mod_group(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t gid,
++ enum rsbac_um_mod_t mod,
++ union rsbac_um_mod_data_t __user * data_p)
++ {
++ int err;
++ union rsbac_um_mod_data_t * k_data_p;
++#ifndef CONFIG_RSBAC_MAINT
++ enum rsbac_adf_request_t rsbac_request;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++ if(mod >= UM_none)
++ return -RSBAC_EINVALIDREQUEST;
++ if( !data_p
++ && (mod != UM_pass)
++ )
++ return -RSBAC_EINVALIDPOINTER;
++
++#ifdef CONFIG_RSBAC_FREEZE_UM
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_um_mod_group(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_GID_SET(gid) == RSBAC_UM_VIRTUAL_KEEP)
++ gid = RSBAC_GEN_GID (rsbac_get_vset(), RSBAC_GID_NUM(gid));
++ else
++ if (RSBAC_GID_SET(gid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ gid = RSBAC_GID_NUM(gid);
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_mod_group(): calling ADF\n");
++ }
++#endif
++ switch(mod)
++ {
++ case UM_name:
++ rsbac_request = R_RENAME;
++ break;
++
++ case UM_pass:
++ case UM_cryptpass:
++ rsbac_request = R_MODIFY_PERMISSIONS_DATA;
++ break;
++
++ case UM_ttl:
++ rsbac_request = R_DELETE;
++ break;
++
++ default:
++ return -RSBAC_EINVALIDREQUEST;
++ }
++ rsbac_target_id.group = gid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(rsbac_request,
++ task_pid(current),
++ T_GROUP,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ if(data_p)
++ {
++ k_data_p = rsbac_kmalloc_unlocked(sizeof(*k_data_p));
++ if(!k_data_p)
++ return -RSBAC_ENOMEM;
++ err = rsbac_get_user(k_data_p, data_p, sizeof(*k_data_p));
++ if(err)
++ {
++ rsbac_kfree(k_data_p);
++ return err;
++ }
++ k_data_p->string[RSBAC_MAXNAMELEN-1] = 0;
++ }
++ else
++ k_data_p = NULL;
++
++ err = rsbac_um_mod_group(ta_number, gid, mod, k_data_p);
++
++ if(k_data_p)
++ rsbac_kfree(k_data_p);
++ return err;
++ }
++
++int sys_rsbac_um_get_user_item(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t uid,
++ enum rsbac_um_mod_t mod,
++ union rsbac_um_mod_data_t __user * data_p)
++ {
++ int err;
++ union rsbac_um_mod_data_t * k_data_p;
++#ifndef CONFIG_RSBAC_MAINT
++ enum rsbac_adf_request_t rsbac_request;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++ if(mod >= UM_none)
++ return -RSBAC_EINVALIDREQUEST;
++ if(!data_p)
++ return -RSBAC_EINVALIDPOINTER;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(uid) == RSBAC_UM_VIRTUAL_KEEP)
++ uid = RSBAC_GEN_UID (rsbac_get_vset(), uid);
++ else
++ if (RSBAC_UID_SET(uid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ uid = RSBAC_UID_NUM(uid);
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_get_user_item(): calling ADF\n");
++ }
++#endif
++ rsbac_attribute_value.dummy = 0;
++ switch(mod)
++ {
++ case UM_name:
++ rsbac_request = R_SEARCH;
++ break;
++
++ case UM_group:
++ case UM_fullname:
++ case UM_homedir:
++ case UM_shell:
++ rsbac_request = R_GET_STATUS_DATA;
++ break;
++
++ case UM_pass:
++ rsbac_request = R_GET_PERMISSIONS_DATA;
++ break;
++
++ case UM_lastchange:
++ case UM_minchange:
++ case UM_maxchange:
++ case UM_warnchange:
++ case UM_inactive:
++ case UM_expire:
++ case UM_ttl:
++ rsbac_request = R_READ;
++ break;
++
++ default:
++ return -RSBAC_EINVALIDREQUEST;
++ }
++ rsbac_target_id.user = uid;
++ if (!rsbac_adf_request(rsbac_request,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ k_data_p = rsbac_kmalloc_unlocked(sizeof(*k_data_p));
++ if(!k_data_p)
++ return -RSBAC_ENOMEM;
++ memset(k_data_p, 0, sizeof(*k_data_p));
++
++ err = rsbac_um_get_user_item(ta_number, uid, mod, k_data_p);
++ if(!err)
++ err = rsbac_put_user(k_data_p, data_p, sizeof(*k_data_p) );
++ rsbac_kfree(k_data_p);
++ return err;
++ }
++
++int sys_rsbac_um_get_group_item(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t gid,
++ enum rsbac_um_mod_t mod,
++ union rsbac_um_mod_data_t __user * data_p)
++ {
++ int err;
++ union rsbac_um_mod_data_t * k_data_p;
++#ifndef CONFIG_RSBAC_MAINT
++ enum rsbac_adf_request_t rsbac_request;
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++ if(mod >= UM_none)
++ return -RSBAC_EINVALIDREQUEST;
++ if(!data_p)
++ return -RSBAC_EINVALIDPOINTER;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_GID_SET(gid) == RSBAC_UM_VIRTUAL_KEEP)
++ gid = RSBAC_GEN_GID (rsbac_get_vset(), gid);
++ else
++ if (RSBAC_GID_SET(gid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ gid = RSBAC_GID_NUM(gid);
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_get_group_item(): calling ADF\n");
++ }
++#endif
++ rsbac_attribute_value.dummy = 0;
++ switch(mod)
++ {
++ case UM_name:
++ rsbac_request = R_SEARCH;
++ break;
++
++ case UM_pass:
++ rsbac_request = R_GET_PERMISSIONS_DATA;
++ break;
++
++ case UM_ttl:
++ rsbac_request = R_GET_STATUS_DATA;
++ break;
++
++ default:
++ return -RSBAC_EINVALIDREQUEST;
++ }
++ rsbac_target_id.group = gid;
++ if (!rsbac_adf_request(rsbac_request,
++ task_pid(current),
++ T_GROUP,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ k_data_p = rsbac_kmalloc_unlocked(sizeof(*k_data_p));
++ if(!k_data_p)
++ return -RSBAC_ENOMEM;
++ memset(k_data_p, 0, sizeof(*k_data_p));
++
++ err = rsbac_um_get_group_item(ta_number, gid, mod, k_data_p);
++ if(!err)
++ err = rsbac_put_user(k_data_p, data_p, sizeof(*k_data_p) );
++ rsbac_kfree(k_data_p);
++ return err;
++ }
++
++int sys_rsbac_um_remove_user(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t uid)
++ {
++ int err;
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC_FREEZE_UM
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_um_remove_user(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(uid) == RSBAC_UM_VIRTUAL_KEEP)
++ uid = RSBAC_GEN_UID (rsbac_get_vset(), uid);
++ else
++ if (RSBAC_UID_SET(uid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ uid = RSBAC_UID_NUM(uid);
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_remove_user(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = uid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_DELETE,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ err = rsbac_um_remove_user(ta_number, uid);
++
++#ifndef CONFIG_RSBAC_MAINT
++ if(!err)
++ {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_DELETE,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ rsbac_printk(KERN_WARNING
++                       "sys_rsbac_um_remove_user(): rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++ return err;
++ }
++
++int sys_rsbac_um_remove_group(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t gid)
++ {
++ int err;
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC_FREEZE_UM
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_um_remove_group(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_GID_SET(gid) == RSBAC_UM_VIRTUAL_KEEP)
++ gid = RSBAC_GEN_GID (rsbac_get_vset(), gid);
++ else
++ if (RSBAC_GID_SET(gid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ gid = RSBAC_GID_NUM(gid);
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_remove_group(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.group = gid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_DELETE,
++ task_pid(current),
++ T_GROUP,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ err = rsbac_um_remove_group(ta_number, gid);
++
++#ifndef CONFIG_RSBAC_MAINT
++ if(!err)
++ {
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_DELETE,
++ task_pid(current),
++ T_GROUP,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ rsbac_printk(KERN_WARNING
++                       "sys_rsbac_um_remove_group(): rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++ return err;
++ }
++
++int sys_rsbac_um_remove_gm(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user,
++ rsbac_gid_num_t group)
++ {
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC_FREEZE_UM
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_um_remove_gm(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(user) == RSBAC_UM_VIRTUAL_KEEP)
++ user = RSBAC_GEN_UID (rsbac_get_vset(), user);
++ else
++ if (RSBAC_UID_SET(user) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ user = RSBAC_UID_NUM(user);
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_remove_gm(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = user;
++ rsbac_attribute_value.group = group;
++ if (!rsbac_adf_request(R_CHANGE_GROUP,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_group,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ return rsbac_um_remove_gm(ta_number, user, group);
++ }
++
++int sys_rsbac_um_user_exists(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t uid)
++ {
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(uid) == RSBAC_UM_VIRTUAL_KEEP)
++ uid = RSBAC_GEN_UID (rsbac_get_vset(), uid);
++ else
++ if (RSBAC_UID_SET(uid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ uid = RSBAC_UID_NUM(uid);
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_user_exists(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = uid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_SEARCH,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ return rsbac_um_user_exists(ta_number, uid);
++ }
++
++int sys_rsbac_um_group_exists(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t gid)
++ {
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_GID_SET(gid) == RSBAC_UM_VIRTUAL_KEEP)
++ gid = RSBAC_GEN_GID (rsbac_get_vset(), gid);
++ else
++ if (RSBAC_GID_SET(gid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ gid = RSBAC_GID_NUM(gid);
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_group_exists(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.group = gid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_SEARCH,
++ task_pid(current),
++ T_GROUP,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ return rsbac_um_group_exists(ta_number, gid);
++ }
++
++int sys_rsbac_um_get_next_user(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t old_user,
++ rsbac_uid_t __user * next_user_p)
++ {
++ rsbac_uid_t k_next_user;
++ int err;
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++ if(!next_user_p)
++ return -RSBAC_EINVALIDPOINTER;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(old_user) == RSBAC_UM_VIRTUAL_KEEP)
++ old_user = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(old_user));
++ else
++ if (RSBAC_UID_SET(old_user) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ old_user = RSBAC_UID_NUM(old_user);
++#endif
++
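++  /* Skip over users the caller is not allowed to SEARCH and return the
++   * first visible one. */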
++ while (!(err = rsbac_um_get_next_user(ta_number, old_user, &k_next_user)))
++ {
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_get_next_user(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = k_next_user;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_SEARCH,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ old_user = k_next_user;
++ continue;
++ }
++#endif /* MAINT */
++ err = rsbac_put_user(&k_next_user, next_user_p, sizeof(k_next_user));
++ break;
++ }
++ return err;
++ }
++
++int sys_rsbac_um_get_user_list(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_um_set_t vset,
++ rsbac_uid_t __user user_array[],
++ u_int maxnum)
++ {
++ long count;
++ rsbac_uid_t * k_user_array;
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++ if(maxnum > RSBAC_UM_MAX_MAXNUM)
++ maxnum = RSBAC_UM_MAX_MAXNUM;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (vset == RSBAC_UM_VIRTUAL_KEEP)
++ vset = rsbac_get_vset();
++ else
++ if ( (vset > RSBAC_UM_VIRTUAL_MAX)
++ && (vset != RSBAC_UM_VIRTUAL_ALL)
++ )
++ return -RSBAC_EINVALIDVALUE;
++#else
++ vset = 0;
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_get_user_list(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = RSBAC_GEN_UID(vset, RSBAC_ALL_USERS);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_SEARCH,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ /* count only */
++ if(!user_array || !maxnum)
++ return rsbac_um_get_user_list(ta_number, vset, NULL);
++
++ count = rsbac_um_get_user_list(ta_number, vset, &k_user_array);
++ if(count>0)
++ {
++ if(count > maxnum)
++ count = maxnum;
++ rsbac_put_user(k_user_array,
++ user_array,
++ count * sizeof(*k_user_array) );
++ rsbac_kfree(k_user_array);
++ }
++ return count;
++ } /* end of sys_rsbac_um_get_user_list() */
++
++int sys_rsbac_um_get_gm_list(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_uid_t user,
++ rsbac_gid_num_t __user group_array[],
++ u_int maxnum)
++ {
++ long count;
++ rsbac_gid_num_t * k_group_array;
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++
++ if(maxnum > RSBAC_UM_MAX_MAXNUM)
++ maxnum = RSBAC_UM_MAX_MAXNUM;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(user) == RSBAC_UM_VIRTUAL_KEEP)
++ user = RSBAC_GEN_UID (rsbac_get_vset(), user);
++ else
++ if (RSBAC_UID_SET(user) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ user = RSBAC_UID_NUM(user);
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_get_gm_list(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = user;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ /* count only */
++ if(!group_array || !maxnum)
++ return rsbac_um_get_gm_list(ta_number, user, NULL);
++
++ count = rsbac_um_get_gm_list(ta_number, user, &k_group_array);
++ if(count>0)
++ {
++ if(count > maxnum)
++ count = maxnum;
++ rsbac_put_user(k_group_array,
++ group_array,
++ count * sizeof(*k_group_array) );
++ rsbac_kfree(k_group_array);
++ }
++ return count;
++ } /* end of sys_rsbac_um_get_gm_list() */
++
++int sys_rsbac_um_get_gm_user_list(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_gid_t group,
++ rsbac_uid_num_t __user user_array[],
++ u_int maxnum)
++ {
++ long count;
++ rsbac_uid_num_t * k_user_array;
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++ if(maxnum > RSBAC_UM_MAX_MAXNUM)
++ maxnum = RSBAC_UM_MAX_MAXNUM;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_GID_SET(group) == RSBAC_UM_VIRTUAL_KEEP)
++ group = RSBAC_GEN_GID (rsbac_get_vset(), group);
++ else
++ if (RSBAC_GID_SET(group) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ group = RSBAC_GID_NUM(group);
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_get_gm_user_list(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.group = group;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_READ,
++ task_pid(current),
++ T_GROUP,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++ /* count number of all users */
++ if(!user_array || !maxnum)
++ return rsbac_um_get_gm_user_list(ta_number, group, NULL);
++
++ count = rsbac_um_get_gm_user_list(ta_number, group, &k_user_array);
++ if(count>0)
++ {
++ if(count > maxnum)
++ count = maxnum;
++ rsbac_put_user(k_user_array,
++ user_array,
++ count * sizeof(*k_user_array) );
++ rsbac_kfree(k_user_array);
++ }
++ return count;
++ } /* end of sys_rsbac_um_get_gm_user_list() */
++
++int sys_rsbac_um_get_group_list(
++ rsbac_list_ta_number_t ta_number,
++ rsbac_um_set_t vset,
++ rsbac_gid_t __user group_array[],
++ u_int maxnum)
++ {
++ long count;
++ rsbac_gid_t * k_group_array;
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++ if(maxnum > RSBAC_UM_MAX_MAXNUM)
++ maxnum = RSBAC_UM_MAX_MAXNUM;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (vset == RSBAC_UM_VIRTUAL_KEEP)
++ vset = rsbac_get_vset();
++ else
++ if ( (vset > RSBAC_UM_VIRTUAL_MAX)
++ && (vset != RSBAC_UM_VIRTUAL_ALL)
++ )
++ return -RSBAC_EINVALIDVALUE;
++#else
++ vset = 0;
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_get_group_list(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.group = RSBAC_GEN_GID(vset, RSBAC_ALL_USERS);
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_SEARCH,
++ task_pid(current),
++ T_GROUP,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ /* count only */
++ if(!group_array || !maxnum)
++ return rsbac_um_get_group_list(ta_number, vset, NULL);
++
++ count = rsbac_um_get_group_list(ta_number, vset, &k_group_array);
++ if(count>0)
++ {
++ if(count > maxnum)
++ count = maxnum;
++ rsbac_put_user(k_group_array,
++ group_array,
++ count * sizeof(*k_group_array) );
++ rsbac_kfree(k_group_array);
++ }
++ return count;
++ } /* end of sys_rsbac_um_get_group_list() */
++
++int sys_rsbac_um_get_uid(
++ rsbac_list_ta_number_t ta_number,
++ char __user * name,
++ rsbac_uid_t __user * uid_p)
++ {
++ rsbac_uid_t k_uid;
++ int err;
++ char k_name[RSBAC_MAXNAMELEN];
++
++ if(!name || !uid_p)
++ return -RSBAC_EINVALIDPOINTER;
++
++ err = rsbac_get_user(k_name, name, RSBAC_MAXNAMELEN);
++ if(err)
++ return err;
++ k_name[RSBAC_MAXNAMELEN-1] = 0;
++ err = rsbac_get_user(&k_uid, uid_p, sizeof(k_uid));
++ if(err)
++ return err;
++ /* vset checks are in rsbac_um_get_uid() */
++ err = rsbac_um_get_uid(ta_number, k_name, &k_uid);
++ if(!err)
++ {
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_get_uid(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = k_uid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_SEARCH,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ }
++ else
++#endif /* MAINT */
++
++ err = rsbac_put_user(&k_uid, uid_p, sizeof(k_uid));
++ }
++ return err;
++ }
++
++int sys_rsbac_um_get_gid(
++ rsbac_list_ta_number_t ta_number,
++ char __user * name,
++ rsbac_gid_t __user * gid_p)
++ {
++ rsbac_gid_t k_gid;
++ int err;
++ char k_name[RSBAC_MAXNAMELEN];
++
++ if(!name || !gid_p)
++ return -RSBAC_EINVALIDPOINTER;
++ err = rsbac_get_user(k_name, name, RSBAC_MAXNAMELEN);
++ if(err)
++ return err;
++ k_name[RSBAC_MAXNAMELEN-1] = 0;
++ err = rsbac_get_user(&k_gid, gid_p, sizeof(k_gid));
++ if(err)
++ return err;
++ /* vset checks are in rsbac_um_get_gid() */
++ err = rsbac_um_get_gid(ta_number, k_name, &k_gid);
++ if(!err)
++ {
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_get_gid(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.group = k_gid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_SEARCH,
++ task_pid(current),
++ T_GROUP,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ err = -EPERM;
++ }
++ else
++#endif /* MAINT */
++
++ err = rsbac_put_user(&k_gid, gid_p, sizeof(k_gid));
++ }
++ return err;
++ }
++
++int sys_rsbac_um_set_pass(rsbac_uid_t uid,
++ char __user * old_pass,
++ char __user * new_pass)
++ {
++ int err;
++    char * k_new_pass;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ rsbac_um_set_t vset;
++#endif
++
++ if(!new_pass)
++ return -RSBAC_EINVALIDPOINTER;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ vset = rsbac_get_vset();
++ if (RSBAC_UID_SET(uid) == RSBAC_UM_VIRTUAL_KEEP)
++ uid = RSBAC_GEN_UID (vset, uid);
++ else
++ if (RSBAC_UID_SET(uid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ uid = RSBAC_UID_NUM(uid);
++#endif
++
++ k_new_pass = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(!k_new_pass)
++ return -RSBAC_ENOMEM;
++ err = rsbac_get_user(k_new_pass, new_pass, RSBAC_MAXNAMELEN);
++ if(err)
++ {
++ rsbac_kfree(k_new_pass);
++ return err;
++ }
++ k_new_pass[RSBAC_MAXNAMELEN-1] = 0;
++
++ if( old_pass
++ && (RSBAC_UID_NUM(uid) == current_uid())
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ && (RSBAC_UID_SET(uid) == vset)
++#endif
++ )
++ {
++ char * k_old_pass;
++
++ k_old_pass = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(!k_old_pass)
++ {
++ rsbac_kfree(k_new_pass);
++ return -RSBAC_ENOMEM;
++ }
++ err = rsbac_get_user(k_old_pass, old_pass, RSBAC_MAXNAMELEN);
++ if(err)
++ {
++ rsbac_kfree(k_old_pass);
++ rsbac_kfree(k_new_pass);
++ return err;
++ }
++ k_old_pass[RSBAC_MAXNAMELEN-1] = 0;
++ err = rsbac_um_check_pass(uid, k_old_pass);
++ rsbac_kfree(k_old_pass);
++ if(err)
++ {
++ rsbac_kfree(k_new_pass);
++ rsbac_printk(KERN_INFO "sys_rsbac_um_set_pass(): old password check failed\n");
++ return err;
++ }
++ err = rsbac_um_good_pass(uid, k_new_pass);
++ if(err)
++ {
++ rsbac_kfree(k_new_pass);
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(uid))
++ rsbac_printk(KERN_DEBUG
++ "sys_rsbac_um_set_pass(): new password goodness check failed for user %u/%u\n",
++ RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ else
++#endif
++ rsbac_printk(KERN_DEBUG
++ "sys_rsbac_um_set_pass(): new password goodness check failed for user %u\n",
++ RSBAC_UID_NUM(uid));
++ }
++#endif
++ return err;
++ }
++ }
++ else
++ {
++#ifndef CONFIG_RSBAC_MAINT
++ /* check admin rights here */
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++#ifdef CONFIG_RSBAC_FREEZE_UM
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_um_set_pass(): RSBAC configuration frozen, no administration allowed!\n");
++ rsbac_kfree(k_new_pass);
++ return -EPERM;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_set_pass(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = uid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_PERMISSIONS_DATA,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ rsbac_kfree(k_new_pass);
++ return -EPERM;
++ }
++#endif /* MAINT */
++ }
++
++ err = rsbac_um_set_pass(uid, k_new_pass);
++ rsbac_kfree(k_new_pass);
++ return err;
++ }
++
++int sys_rsbac_um_set_pass_name(char __user * name,
++ char __user * old_pass,
++ char __user * new_pass)
++ {
++ int err;
++ rsbac_uid_t uid = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, RSBAC_NO_USER);
++ char * k_name;
++
++ if(!name || !new_pass)
++ return -RSBAC_EINVALIDPOINTER;
++ k_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(!k_name)
++ return -RSBAC_ENOMEM;
++ err = rsbac_get_user(k_name, name, RSBAC_MAXNAMELEN);
++ if(err)
++ {
++ rsbac_kfree(k_name);
++ return err;
++ }
++ k_name[RSBAC_MAXNAMELEN-1] = 0;
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_set_pass_name(): user %s\n",
++ k_name);
++ }
++#endif
++ err = rsbac_um_get_uid(0, k_name, &uid);
++    if(err)
++      {
++#ifdef CONFIG_RSBAC_DEBUG
++        if (rsbac_debug_aef_um)
++          {
++            rsbac_printk(KERN_DEBUG "sys_rsbac_um_set_pass_name(): lookup of user %s failed\n",
++                         k_name);
++          }
++#endif
++      }
++    else
++      err = sys_rsbac_um_set_pass(uid, old_pass, new_pass);
++    /* k_name is still used by the debug message above, so free it only here */
++    rsbac_kfree(k_name);
++
++ return err;
++ }
++
++int sys_rsbac_um_add_onetime(rsbac_uid_t uid,
++ char __user * old_pass,
++ char __user * new_pass,
++ rsbac_time_t ttl)
++ {
++#if defined(CONFIG_RSBAC_UM_ONETIME)
++ int err;
++ char * k_new_pass;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ rsbac_um_set_t vset;
++#endif
++
++ if(!new_pass)
++ return -RSBAC_EINVALIDPOINTER;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ vset = rsbac_get_vset();
++ if (RSBAC_UID_SET(uid) == RSBAC_UM_VIRTUAL_KEEP)
++ uid = RSBAC_GEN_UID (vset, uid);
++ else
++ if (RSBAC_UID_SET(uid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ uid = RSBAC_UID_NUM(uid);
++#endif
++
++ k_new_pass = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(!k_new_pass)
++ return -RSBAC_ENOMEM;
++ err = rsbac_get_user(k_new_pass, new_pass, RSBAC_MAXNAMELEN);
++ if(err)
++ {
++ rsbac_kfree(k_new_pass);
++ return err;
++ }
++ k_new_pass[RSBAC_MAXNAMELEN-1] = 0;
++
++ if( old_pass
++ && (RSBAC_UID_NUM(uid) == current_uid())
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ && (RSBAC_UID_SET(uid) == vset)
++#endif
++ )
++ {
++ char * k_old_pass;
++
++ k_old_pass = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(!k_old_pass)
++ {
++ rsbac_kfree(k_new_pass);
++ return -RSBAC_ENOMEM;
++ }
++ err = rsbac_get_user(k_old_pass, old_pass, RSBAC_MAXNAMELEN);
++ if(err)
++ {
++ rsbac_kfree(k_old_pass);
++ rsbac_kfree(k_new_pass);
++ return err;
++ }
++ k_old_pass[RSBAC_MAXNAMELEN-1] = 0;
++ err = rsbac_um_check_pass(uid, k_old_pass);
++ rsbac_kfree(k_old_pass);
++ if(err)
++ {
++ rsbac_kfree(k_new_pass);
++ rsbac_printk(KERN_INFO "sys_rsbac_um_add_onetime(): old password check failed\n");
++ return err;
++ }
++ err = rsbac_um_good_pass(uid, k_new_pass);
++ if(err)
++ {
++ rsbac_kfree(k_new_pass);
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if(RSBAC_UID_SET(uid))
++ rsbac_printk(KERN_DEBUG
++ "sys_rsbac_um_add_onetime(): new password goodness check failed for user %u/%u\n",
++ RSBAC_UID_SET(uid), RSBAC_UID_NUM(uid));
++ else
++#endif
++ rsbac_printk(KERN_DEBUG
++ "sys_rsbac_um_add_onetime(): new password goodness check failed for user %u\n",
++ RSBAC_UID_NUM(uid));
++ }
++#endif
++ return err;
++ }
++ }
++ else
++ {
++#ifndef CONFIG_RSBAC_MAINT
++ /* check admin rights here */
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++#ifdef CONFIG_RSBAC_FREEZE_UM
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_um_add_onetime(): RSBAC configuration frozen, no administration allowed!\n");
++ rsbac_kfree(k_new_pass);
++ return -EPERM;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_add_onetime(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = uid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_PERMISSIONS_DATA,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ rsbac_kfree(k_new_pass);
++ return -EPERM;
++ }
++#endif /* MAINT */
++ }
++
++ err = rsbac_um_add_onetime(uid, k_new_pass, ttl);
++ rsbac_kfree(k_new_pass);
++ return err;
++#else
++ return -RSBAC_EINVALIDMODULE;
++#endif
++ }
++
++int sys_rsbac_um_add_onetime_name(char __user * name,
++ char __user * old_pass,
++ char __user * new_pass,
++ rsbac_time_t ttl)
++ {
++#if defined(CONFIG_RSBAC_UM_ONETIME)
++ int err;
++ rsbac_uid_t uid = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, RSBAC_NO_USER);
++ char * k_name;
++
++ if(!name || !new_pass)
++ return -RSBAC_EINVALIDPOINTER;
++ k_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(!k_name)
++ return -RSBAC_ENOMEM;
++ err = rsbac_get_user(k_name, name, RSBAC_MAXNAMELEN);
++ if(err)
++ {
++ rsbac_kfree(k_name);
++ return err;
++ }
++ k_name[RSBAC_MAXNAMELEN-1] = 0;
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_add_onetime_name(): user %s\n",
++ k_name);
++ }
++#endif
++ err = rsbac_um_get_uid(0, k_name, &uid);
++    if(err)
++      {
++#ifdef CONFIG_RSBAC_DEBUG
++        if (rsbac_debug_aef_um)
++          {
++            rsbac_printk(KERN_DEBUG "sys_rsbac_um_add_onetime_name(): lookup of user %s failed\n",
++                         k_name);
++          }
++#endif
++      }
++    else
++      err = sys_rsbac_um_add_onetime(uid, old_pass, new_pass, ttl);
++    /* k_name is still used by the debug message above, so free it only here */
++    rsbac_kfree(k_name);
++
++ return err;
++#else
++ return -RSBAC_EINVALIDMODULE;
++#endif
++ }
++
++int sys_rsbac_um_remove_all_onetime(rsbac_uid_t uid,
++ char __user * old_pass)
++ {
++#if defined(CONFIG_RSBAC_UM_ONETIME)
++ int err;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ rsbac_um_set_t vset;
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ vset = rsbac_get_vset();
++ if (RSBAC_UID_SET(uid) == RSBAC_UM_VIRTUAL_KEEP)
++ uid = RSBAC_GEN_UID (vset, uid);
++ else
++ if (RSBAC_UID_SET(uid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ uid = RSBAC_UID_NUM(uid);
++#endif
++
++ if( old_pass
++ && (RSBAC_UID_NUM(uid) == current_uid())
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ && (RSBAC_UID_SET(uid) == vset)
++#endif
++ )
++ {
++ char * k_old_pass;
++
++ k_old_pass = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(!k_old_pass)
++ {
++ return -RSBAC_ENOMEM;
++ }
++ err = rsbac_get_user(k_old_pass, old_pass, RSBAC_MAXNAMELEN);
++ if(err)
++ {
++ rsbac_kfree(k_old_pass);
++ return err;
++ }
++ k_old_pass[RSBAC_MAXNAMELEN-1] = 0;
++ err = rsbac_um_check_pass(uid, k_old_pass);
++ rsbac_kfree(k_old_pass);
++ if(err)
++ {
++ rsbac_printk(KERN_INFO "sys_rsbac_um_remove_all_onetime(): old password check failed\n");
++ return err;
++ }
++ }
++ else
++ {
++#ifndef CONFIG_RSBAC_MAINT
++ /* check admin rights here */
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++#ifdef CONFIG_RSBAC_FREEZE_UM
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_um_remove_all_onetime(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_remove_all_onetime(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = uid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_PERMISSIONS_DATA,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++ }
++
++ err = rsbac_um_remove_all_onetime(uid);
++ return err;
++#else
++ return -RSBAC_EINVALIDMODULE;
++#endif
++ }
++
++int sys_rsbac_um_remove_all_onetime_name(char __user * name,
++ char __user * old_pass)
++ {
++#if defined(CONFIG_RSBAC_UM_ONETIME)
++ int err;
++ rsbac_uid_t uid = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, RSBAC_NO_USER);
++ char * k_name;
++
++ if(!name)
++ return -RSBAC_EINVALIDPOINTER;
++ k_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(!k_name)
++ return -RSBAC_ENOMEM;
++ err = rsbac_get_user(k_name, name, RSBAC_MAXNAMELEN);
++ if(err)
++ {
++ rsbac_kfree(k_name);
++ return err;
++ }
++ k_name[RSBAC_MAXNAMELEN-1] = 0;
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_remove_all_onetime_name(): user %s\n",
++ k_name);
++ }
++#endif
++ err = rsbac_um_get_uid(0, k_name, &uid);
++    if(err)
++      {
++#ifdef CONFIG_RSBAC_DEBUG
++        if (rsbac_debug_aef_um)
++          {
++            rsbac_printk(KERN_DEBUG "sys_rsbac_um_remove_all_onetime_name(): lookup of user %s failed\n",
++                         k_name);
++          }
++#endif
++      }
++    else
++      err = sys_rsbac_um_remove_all_onetime(uid, old_pass);
++    /* k_name is still used by the debug message above, so free it only here */
++    rsbac_kfree(k_name);
++
++ return err;
++#else
++ return -RSBAC_EINVALIDMODULE;
++#endif
++ }
++
++int sys_rsbac_um_count_onetime(rsbac_uid_t uid,
++ char __user * old_pass)
++ {
++#if defined(CONFIG_RSBAC_UM_ONETIME)
++ int err;
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ rsbac_um_set_t vset;
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ vset = rsbac_get_vset();
++ if (RSBAC_UID_SET(uid) == RSBAC_UM_VIRTUAL_KEEP)
++ uid = RSBAC_GEN_UID (vset, uid);
++ else
++ if (RSBAC_UID_SET(uid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ uid = RSBAC_UID_NUM(uid);
++#endif
++
++ if( old_pass
++ && (RSBAC_UID_NUM(uid) == current_uid())
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ && (RSBAC_UID_SET(uid) == vset)
++#endif
++ )
++ {
++ char * k_old_pass;
++
++ k_old_pass = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(!k_old_pass)
++ {
++ return -RSBAC_ENOMEM;
++ }
++ err = rsbac_get_user(k_old_pass, old_pass, RSBAC_MAXNAMELEN);
++ if(err)
++ {
++ rsbac_kfree(k_old_pass);
++ return err;
++ }
++ k_old_pass[RSBAC_MAXNAMELEN-1] = 0;
++ err = rsbac_um_check_pass(uid, k_old_pass);
++ rsbac_kfree(k_old_pass);
++ if(err)
++ {
++ rsbac_printk(KERN_INFO "sys_rsbac_um_count_onetime(): old password check failed\n");
++ return err;
++ }
++ }
++ else
++ {
++#ifndef CONFIG_RSBAC_MAINT
++ /* check admin rights here */
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++#ifdef CONFIG_RSBAC_FREEZE_UM
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_um_count_onetime(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_count_onetime(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = uid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_READ,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++ }
++
++ return rsbac_um_count_onetime(uid);
++#else
++ return -RSBAC_EINVALIDMODULE;
++#endif
++ }
++
++int sys_rsbac_um_count_onetime_name(char __user * name,
++ char __user * old_pass)
++ {
++#if defined(CONFIG_RSBAC_UM_ONETIME)
++ int err;
++ rsbac_uid_t uid = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, RSBAC_NO_USER);
++ char * k_name;
++
++ if(!name)
++ return -RSBAC_EINVALIDPOINTER;
++ k_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(!k_name)
++ return -RSBAC_ENOMEM;
++ err = rsbac_get_user(k_name, name, RSBAC_MAXNAMELEN);
++ if(err)
++ {
++ rsbac_kfree(k_name);
++ return err;
++ }
++ k_name[RSBAC_MAXNAMELEN-1] = 0;
++
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_count_onetime_name(): user %s\n",
++ k_name);
++ }
++#endif
++ err = rsbac_um_get_uid(0, k_name, &uid);
++    if(err)
++      {
++#ifdef CONFIG_RSBAC_DEBUG
++        if (rsbac_debug_aef_um)
++          {
++            rsbac_printk(KERN_DEBUG "sys_rsbac_um_count_onetime_name(): lookup of user %s failed\n",
++                         k_name);
++          }
++#endif
++      }
++    else
++      err = sys_rsbac_um_count_onetime(uid, old_pass);
++    /* k_name is still used by the debug message above, so free it only here */
++    rsbac_kfree(k_name);
++
++ return err;
++#else
++ return -RSBAC_EINVALIDMODULE;
++#endif
++ }
++
++int sys_rsbac_um_set_group_pass(rsbac_gid_t gid,
++ char __user * new_pass)
++ {
++ int err;
++ char * k_new_pass;
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC_FREEZE_UM
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_um_set_group_pass(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_GID_SET(gid) == RSBAC_UM_VIRTUAL_KEEP)
++ gid = RSBAC_GEN_GID (rsbac_get_vset(), RSBAC_GID_NUM(gid));
++ else
++ if (RSBAC_GID_SET(gid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ gid = RSBAC_GID_NUM(gid);
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++ /* check admin rights here */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_set_group_pass(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.group = gid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_MODIFY_PERMISSIONS_DATA,
++ task_pid(current),
++ T_GROUP,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ if(new_pass)
++ {
++ k_new_pass = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(!k_new_pass)
++ return -RSBAC_ENOMEM;
++ err = rsbac_get_user(k_new_pass, new_pass, RSBAC_MAXNAMELEN);
++ if(!err)
++ {
++ k_new_pass[RSBAC_MAXNAMELEN-1] = 0;
++ err = rsbac_um_set_group_pass(gid, k_new_pass);
++ }
++ rsbac_kfree(k_new_pass);
++ }
++ else
++ {
++ err = rsbac_um_set_group_pass(gid, NULL);
++ }
++ return err;
++ }
++
++int sys_rsbac_um_check_account(rsbac_uid_t uid)
++ {
++ int err;
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(uid) == RSBAC_UM_VIRTUAL_KEEP)
++ uid = RSBAC_GEN_UID (rsbac_get_vset(), uid);
++ else
++ if (RSBAC_UID_SET(uid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ uid = RSBAC_UID_NUM(uid);
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_check_account(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = uid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ err = rsbac_um_check_account(uid);
++ if(err == -RSBAC_ENOTFOUND)
++ err = -EPERM;
++ return err;
++ }
++
++int sys_rsbac_um_check_account_name(char __user * name)
++ {
++ int err;
++ rsbac_uid_t uid = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, RSBAC_NO_USER);
++ char k_name[RSBAC_MAXNAMELEN];
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++ if(!name)
++ return -RSBAC_EINVALIDPOINTER;
++ err = rsbac_get_user(k_name, name, RSBAC_MAXNAMELEN);
++ if(err)
++ return err;
++ k_name[RSBAC_MAXNAMELEN-1] = 0;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_check_account_name(): checking user %s\n",
++ k_name);
++ }
++#endif
++ err = rsbac_um_get_uid(0, k_name, &uid);
++ if(err)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_check_account_name(): lookup of user %s failed\n",
++ k_name);
++ }
++#endif
++ if(err == -RSBAC_ENOTFOUND)
++ err = -EPERM;
++ return err;
++ }
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_check_account_name(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = uid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ err = rsbac_um_check_account(uid);
++ if(err == -RSBAC_ENOTFOUND)
++ err = -EPERM;
++ return err;
++ }
++
++int sys_rsbac_um_get_max_history(rsbac_list_ta_number_t ta_number, rsbac_uid_t uid)
++ {
++#ifdef CONFIG_RSBAC_UM_PWHISTORY
++ int err;
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(uid) == RSBAC_UM_VIRTUAL_KEEP)
++ uid = RSBAC_GEN_UID (rsbac_get_vset(), uid);
++ else
++ if (RSBAC_UID_SET(uid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ uid = RSBAC_UID_NUM(uid);
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_get_max_history(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = uid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_READ,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ err = rsbac_um_get_max_history(ta_number, uid);
++ if(err == -RSBAC_ENOTFOUND)
++ err = -EPERM;
++ return err;
++#else
++ return -RSBAC_EINVALIDMODULE;
++#endif
++ }
++
++int sys_rsbac_um_get_max_history_name(rsbac_list_ta_number_t ta_number, char __user * name)
++ {
++#ifdef CONFIG_RSBAC_UM_PWHISTORY
++ int err;
++ rsbac_uid_t uid = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, RSBAC_NO_USER);
++ char k_name[RSBAC_MAXNAMELEN];
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++ if(!name)
++ return -RSBAC_EINVALIDPOINTER;
++ err = rsbac_get_user(k_name, name, RSBAC_MAXNAMELEN);
++ if(err)
++ return err;
++ k_name[RSBAC_MAXNAMELEN-1] = 0;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_get_max_history_name(): getting max_history of user %s\n",
++ k_name);
++ }
++#endif
++ err = rsbac_um_get_uid(0, k_name, &uid);
++ if(err)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_get_max_history_name(): lookup of user %s failed\n",
++ k_name);
++ }
++#endif
++ if(err == -RSBAC_ENOTFOUND)
++ err = -EPERM;
++ return err;
++ }
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_get_max_history_name(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = uid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_READ,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ err = rsbac_um_get_max_history(ta_number, uid);
++ if(err == -RSBAC_ENOTFOUND)
++ err = -EPERM;
++ return err;
++#else
++ return -RSBAC_EINVALIDMODULE;
++#endif
++ }
++
++int sys_rsbac_um_set_max_history(rsbac_list_ta_number_t ta_number, rsbac_uid_t uid, __u8 max_history)
++ {
++#ifdef CONFIG_RSBAC_UM_PWHISTORY
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(uid) == RSBAC_UM_VIRTUAL_KEEP)
++ uid = RSBAC_GEN_UID (rsbac_get_vset(), uid);
++ else
++ if (RSBAC_UID_SET(uid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ uid = RSBAC_UID_NUM(uid);
++#endif
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_set_max_history(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = uid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_WRITE,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ return rsbac_um_set_max_history(ta_number, uid, max_history);
++#else
++ return -RSBAC_EINVALIDMODULE;
++#endif
++ }
++
++int sys_rsbac_um_set_max_history_name(rsbac_list_ta_number_t ta_number, char __user * name, __u8 max_history)
++ {
++#ifdef CONFIG_RSBAC_UM_PWHISTORY
++ int err;
++ rsbac_uid_t uid = RSBAC_GEN_UID(RSBAC_UM_VIRTUAL_KEEP, RSBAC_NO_USER);
++ char k_name[RSBAC_MAXNAMELEN];
++#ifndef CONFIG_RSBAC_MAINT
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++#endif
++
++ if(!name)
++ return -RSBAC_EINVALIDPOINTER;
++ err = rsbac_get_user(k_name, name, RSBAC_MAXNAMELEN);
++ if(err)
++ return err;
++ k_name[RSBAC_MAXNAMELEN-1] = 0;
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_set_max_history_name(): setting max_history of user %s\n",
++ k_name);
++ }
++#endif
++ err = rsbac_um_get_uid(0, k_name, &uid);
++ if(err)
++ {
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef_um)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_set_max_history_name(): lookup of user %s failed\n",
++ k_name);
++ }
++#endif
++ if(err == -RSBAC_ENOTFOUND)
++ err = -EPERM;
++ return err;
++ }
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_set_max_history_name(): calling ADF\n");
++ }
++#endif
++ rsbac_target_id.user = uid;
++ rsbac_attribute_value.dummy = 0;
++ if (!rsbac_adf_request(R_WRITE,
++ task_pid(current),
++ T_USER,
++ rsbac_target_id,
++ A_none,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ return rsbac_um_set_max_history(ta_number, uid, max_history);
++#else
++ return -RSBAC_EINVALIDMODULE;
++#endif
++ }
++
++int sys_rsbac_um_select_vset(rsbac_um_set_t vset)
++ {
++#if defined(CONFIG_RSBAC_UM_VIRTUAL)
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_target_id_t rsbac_new_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if (vset > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++
++#ifndef CONFIG_RSBAC_MAINT
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_um_select_vset(): calling ADF\n");
++#endif
++ rsbac_target_id.process = task_pid(current);
++ rsbac_attribute_value.owner = RSBAC_GEN_UID(vset, current_uid());
++ if (!rsbac_adf_request(R_CHANGE_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ A_owner,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#endif /* MAINT */
++
++ rsbac_pr_debug(aef_um, "Switching process %u to vset %u\n",
++ current->pid, vset);
++ rsbac_target_id.process = task_pid(current);
++ rsbac_attribute_value.vset = vset;
++ if (rsbac_set_attr(SW_GEN,
++ T_PROCESS,
++ rsbac_target_id,
++ A_vset,
++ rsbac_attribute_value))
++ {
++ rsbac_ds_set_error("sys_rsbac_um_select_vset()", A_vset);
++ }
++#ifndef CONFIG_RSBAC_MAINT
++ else
++ {
++ rsbac_target_id.process = task_pid(current);
++ rsbac_attribute_value.owner = RSBAC_GEN_UID(vset, current_uid());
++ rsbac_new_target_id.dummy = 0;
++ if (rsbac_adf_set_attr(R_CHANGE_OWNER,
++ task_pid(current),
++ T_PROCESS,
++ rsbac_target_id,
++ T_NONE,
++ rsbac_new_target_id,
++ A_owner,
++ rsbac_attribute_value))
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_um_select_vset(): rsbac_adf_set_attr() returned error\n");
++ }
++ }
++#endif
++ return 0;
++#else
++ return -RSBAC_EINVALIDMODULE;
++#endif
++ }
++#endif
++
++
++/************************************************* */
++/* DEBUG/LOG functions */
++/************************************************* */
++
++int sys_rsbac_adf_log_switch(enum rsbac_adf_request_t request,
++ enum rsbac_target_t target,
++ u_int value)
++ {
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++
++ if ((value != LL_none) && (value != LL_denied) && (value != LL_full))
++ return -RSBAC_EINVALIDVALUE;
++ if(request >= R_NONE)
++ return -RSBAC_EINVALIDREQUEST;
++ if( (target == T_FD)
++ || (target > T_NONE)
++ )
++ return -RSBAC_EINVALIDTARGET;
++
++#ifdef CONFIG_RSBAC_FREEZE
++ if(rsbac_freeze)
++ {
++ rsbac_printk(KERN_WARNING
++ "sys_rsbac_adf_log_switch(): RSBAC configuration frozen, no administration allowed!\n");
++ return -EPERM;
++ }
++#endif
++
++ /* call ADF */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_adf_log_switch(): calling ADF\n");
++#endif
++ rsbac_target_id.dummy = 0;
++    rsbac_attribute_value.request = request;
++ if (!rsbac_adf_request(R_SWITCH_LOG,
++ task_pid(current),
++ T_NONE,
++ rsbac_target_id,
++ A_request,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ char * request_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(request_name)
++ {
++            get_request_name(request_name,request);
++            rsbac_printk(KERN_INFO "sys_rsbac_adf_log_switch(): switching RSBAC module logging for request %s (No. %i) to %i!\n",
++              request_name, request, value);
++ rsbac_kfree(request_name);
++ }
++ }
++#endif
++ rsbac_adf_log_switch(request,target,value);
++ return 0;
++ }
++
++int sys_rsbac_get_adf_log(enum rsbac_adf_request_t request,
++ enum rsbac_target_t target,
++ u_int __user * value_p)
++ {
++ union rsbac_target_id_t rsbac_target_id;
++ union rsbac_attribute_value_t rsbac_attribute_value;
++ u_int k_value;
++ int err;
++
++ if(request >= R_NONE)
++ return -RSBAC_EINVALIDREQUEST;
++ if( (target == T_FD)
++ || (target > T_NONE)
++ )
++ return -RSBAC_EINVALIDTARGET;
++ if(!value_p)
++ return -RSBAC_EINVALIDPOINTER;
++ /* call ADF */
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ rsbac_printk(KERN_DEBUG "sys_rsbac_get_adf_log(): calling ADF\n");
++#endif
++ rsbac_target_id.scd = ST_rsbac;
++ rsbac_attribute_value.request = request;
++ if (!rsbac_adf_request(R_GET_STATUS_DATA,
++ task_pid(current),
++ T_SCD,
++ rsbac_target_id,
++ A_request,
++ rsbac_attribute_value))
++ {
++ return -EPERM;
++ }
++#ifdef CONFIG_RSBAC_DEBUG
++ if (rsbac_debug_aef)
++ {
++ char * request_name = rsbac_kmalloc_unlocked(RSBAC_MAXNAMELEN);
++ if(request_name)
++ {
++            get_request_name(request_name,request);
++            rsbac_printk(KERN_DEBUG "sys_rsbac_get_adf_log(): getting RSBAC module logging for request %s (No. %i)!\n",
++              request_name, request);
++ rsbac_kfree(request_name);
++ }
++ }
++#endif
++ err = rsbac_get_adf_log(request, target, &k_value);
++ if(!err)
++ {
++ rsbac_put_user(&k_value,
++ value_p,
++ sizeof(k_value) );
++ }
++ return err;
++ }
++
++/*
++ * Commands to sys_rsbac_log:
++ *
++ * 0 -- Close the log. Currently a NOP.
++ * 1 -- Open the log. Currently a NOP.
++ * 2 -- Read from the log.
++ * 3 -- Read up to the last 4k of messages in the ring buffer.
++ * 4 -- Read and clear last 4k of messages in the ring buffer
++ * 5 -- Clear ring buffer.
++ */
++int sys_rsbac_log(int type,
++ char __user * buf,
++ int len)
++ {
++#if defined(CONFIG_RSBAC_RMSG)
++ return rsbac_log(type,buf,len);
++#else
++ return 0;
++#endif /* RMSG */
++ }
++
++#if defined(CONFIG_RSBAC_INIT_DELAY)
++int sys_rsbac_init(char __user * path)
++ {
++ struct dentry * t_dentry = NULL;
++ rsbac_boolean_t need_put = FALSE;
++ int err = 0;
++
++ struct path ppath;
++
++ if(!path)
++ return rsbac_init(ROOT_DEV);
++
++ if ((err = user_lpath(path, &ppath)))
++ {
++ goto out;
++ }
++ t_dentry = ppath.dentry;
++ need_put = TRUE;
++ if (!t_dentry->d_inode)
++ {
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++    /* is the inode a block device? */
++    if(!S_ISBLK(t_dentry->d_inode->i_mode))
++      { /* This is no block device */
++ err = -RSBAC_EINVALIDTARGET;
++ goto out_dput;
++ }
++ err = rsbac_init(t_dentry->d_sb->s_dev);
++
++out_dput:
++ if(need_put)
++ path_put(&ppath);
++out:
++ return err;
++ }
++#endif
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++int sys_rsbac_list_ta_begin_name(
++ rsbac_time_t ttl,
++ rsbac_list_ta_number_t __user * ta_number_p,
++ rsbac_uid_t commit_uid,
++ char __user * name,
++ char __user * password)
++ {
++ int err;
++ rsbac_list_ta_number_t k_ta_number = 0;
++ char * k_name = NULL;
++ char * k_password = NULL;
++
++#ifdef CONFIG_RSBAC_UM_VIRTUAL
++ if (RSBAC_UID_SET(commit_uid) == RSBAC_UM_VIRTUAL_KEEP)
++ commit_uid = RSBAC_GEN_UID (rsbac_get_vset(), RSBAC_UID_NUM(commit_uid));
++ else
++ if (RSBAC_UID_SET(commit_uid) > RSBAC_UM_VIRTUAL_MAX)
++ return -RSBAC_EINVALIDVALUE;
++#else
++ commit_uid = RSBAC_UID_NUM(commit_uid);
++#endif
++ if(name)
++ {
++ k_name = rsbac_kmalloc_unlocked(RSBAC_LIST_TA_MAX_NAMELEN);
++ if(!k_name)
++ return -ENOMEM;
++ err = rsbac_get_user(k_name, name, RSBAC_LIST_TA_MAX_NAMELEN - 1);
++ if(err)
++ {
++ rsbac_kfree(k_name);
++ return err;
++ }
++ k_name[RSBAC_LIST_TA_MAX_NAMELEN - 1] = 0;
++ }
++ if(password)
++ {
++ k_password = rsbac_kmalloc_unlocked(RSBAC_LIST_TA_MAX_PASSLEN);
++ if(!k_password)
++ return -ENOMEM;
++ err = rsbac_get_user(k_password, password, RSBAC_LIST_TA_MAX_PASSLEN - 1);
++ if(err)
++ {
++ rsbac_kfree(k_password);
++ return err;
++ }
++ k_password[RSBAC_LIST_TA_MAX_PASSLEN - 1] = 0;
++ }
++ err = rsbac_list_ta_begin(ttl, &k_ta_number, commit_uid, k_name, k_password);
++ if(!err)
++ err = rsbac_put_user(&k_ta_number,
++ ta_number_p,
++ sizeof(k_ta_number) );
++ if(k_name)
++ rsbac_kfree(k_name);
++ if(k_password)
++ rsbac_kfree(k_password);
++ return err;
++ }
++
++int sys_rsbac_list_ta_begin(
++ rsbac_time_t ttl,
++ rsbac_list_ta_number_t __user * ta_number_p,
++ rsbac_uid_t commit_uid,
++ char __user * password)
++ {
++ return sys_rsbac_list_ta_begin_name(ttl, ta_number_p, commit_uid, NULL, password);
++ }
++
++int sys_rsbac_list_ta_refresh(
++ rsbac_time_t ttl,
++ rsbac_list_ta_number_t ta_number,
++ char __user * password)
++ {
++ int err;
++ char * k_password;
++
++ if(password)
++ {
++ k_password = rsbac_kmalloc_unlocked(RSBAC_LIST_TA_MAX_PASSLEN);
++ if(!k_password)
++ return -ENOMEM;
++ err = rsbac_get_user(k_password, password, RSBAC_LIST_TA_MAX_PASSLEN - 1);
++ if(err)
++ {
++ rsbac_kfree(k_password);
++ return err;
++ }
++ k_password[RSBAC_LIST_TA_MAX_PASSLEN - 1] = 0;
++ }
++ else
++ k_password = NULL;
++ err = rsbac_list_ta_refresh(ttl, ta_number, k_password);
++ if(k_password)
++ rsbac_kfree(k_password);
++ return err;
++ }
++
++int sys_rsbac_list_ta_commit(
++ rsbac_list_ta_number_t ta_number,
++ char __user * password)
++ {
++ int err;
++ char * k_password;
++
++ if(password)
++ {
++ k_password = rsbac_kmalloc_unlocked(RSBAC_LIST_TA_MAX_PASSLEN);
++ if(!k_password)
++ return -ENOMEM;
++ err = rsbac_get_user(k_password, password, RSBAC_LIST_TA_MAX_PASSLEN - 1);
++ if(err)
++ {
++ rsbac_kfree(k_password);
++ return err;
++ }
++ k_password[RSBAC_LIST_TA_MAX_PASSLEN - 1] = 0;
++ }
++ else
++ k_password = NULL;
++ err = rsbac_list_ta_commit(ta_number, k_password);
++ if(k_password)
++ rsbac_kfree(k_password);
++ return err;
++ }
++
++int sys_rsbac_list_ta_forget(
++ rsbac_list_ta_number_t ta_number,
++ char __user * password)
++ {
++ int err;
++ char * k_password;
++
++ if(password)
++ {
++ k_password = rsbac_kmalloc_unlocked(RSBAC_LIST_TA_MAX_PASSLEN);
++ if(!k_password)
++ return -ENOMEM;
++ err = rsbac_get_user(k_password, password, RSBAC_LIST_TA_MAX_PASSLEN - 1);
++ if(err)
++ {
++ rsbac_kfree(k_password);
++ return err;
++ }
++ k_password[RSBAC_LIST_TA_MAX_PASSLEN - 1] = 0;
++ }
++ else
++ k_password = NULL;
++ err = rsbac_list_ta_forget(ta_number, k_password);
++ if(k_password)
++ rsbac_kfree(k_password);
++ return err;
++ }
++#endif
++
++/* Big dispatcher for all syscalls */
++#ifdef rsbac_syscall4
++asmlinkage int sys_rsbac(int dummy,
++ rsbac_version_t version,
++ enum rsbac_syscall_t call,
++ union rsbac_syscall_arg_t __user * arg_p)
++#else
++asmlinkage int sys_rsbac(rsbac_version_t version,
++ enum rsbac_syscall_t call,
++ union rsbac_syscall_arg_t __user * arg_p)
++#endif
++ {
++ union rsbac_syscall_arg_t k_arg;
++ int err;
++
++ if( (!rsbac_initialized) && (call != RSYS_init) ) {
++ rsbac_printk(KERN_WARNING "sys_rsbac(): RSBAC not initialized\n");
++ return -RSBAC_ENOTINITIALIZED;
++ }
++
++ if ( ( (version < RSBAC_VERSION_MAKE_NR(RSBAC_VERSION_MAJOR,RSBAC_VERSION_MID,0))
++ || (version >= RSBAC_VERSION_MAKE_NR(RSBAC_VERSION_MAJOR, (RSBAC_VERSION_MID + 1),0))
++ )
++ && (call != RSYS_version))
++ return -RSBAC_EINVALIDVERSION;
++
++ if(call >= RSYS_none)
++ return -RSBAC_EINVALIDREQUEST;
++
++#ifdef CONFIG_RSBAC_XSTATS
++ syscall_count[call]++;
++#endif
++
++ /* get values from user space */
++ if(arg_p)
++ {
++ err = rsbac_get_user(&k_arg, arg_p, sizeof(k_arg) );
++ if(err)
++ return err;
++ }
++ else
++ {
++ memset(&k_arg, 0, sizeof(k_arg));
++ }
++
++ switch(call)
++ {
++#ifdef CONFIG_RSBAC_UM
++ case RSYS_um_get_user_item:
++ return sys_rsbac_um_get_user_item(k_arg.um_get_user_item.ta_number,
++ k_arg.um_get_user_item.uid,
++ k_arg.um_get_user_item.mod,
++ k_arg.um_get_user_item.data_p);
++ case RSYS_um_get_uid:
++ return sys_rsbac_um_get_uid(k_arg.um_get_uid.ta_number,
++ k_arg.um_get_uid.name,
++ k_arg.um_get_uid.uid_p);
++ case RSYS_um_get_group_item:
++ return sys_rsbac_um_get_group_item(k_arg.um_get_group_item.ta_number,
++ k_arg.um_get_group_item.gid,
++ k_arg.um_get_group_item.mod,
++ k_arg.um_get_group_item.data_p);
++ case RSYS_um_get_gm_list:
++ return sys_rsbac_um_get_gm_list(k_arg.um_get_gm_list.ta_number,
++ k_arg.um_get_gm_list.user,
++ k_arg.um_get_gm_list.group_array,
++ k_arg.um_get_gm_list.maxnum);
++ case RSYS_um_get_gm_user_list:
++ return sys_rsbac_um_get_gm_user_list(k_arg.um_get_gm_user_list.ta_number,
++ k_arg.um_get_gm_user_list.group,
++ k_arg.um_get_gm_user_list.user_array,
++ k_arg.um_get_gm_user_list.maxnum);
++ case RSYS_um_get_gid:
++ return sys_rsbac_um_get_gid(k_arg.um_get_gid.ta_number,
++ k_arg.um_get_gid.name,
++ k_arg.um_get_gid.gid_p);
++ case RSYS_um_get_user_list:
++ return sys_rsbac_um_get_user_list(k_arg.um_get_user_list.ta_number,
++ k_arg.um_get_user_list.vset,
++ k_arg.um_get_user_list.user_array,
++ k_arg.um_get_user_list.maxnum);
++ case RSYS_um_check_account_name:
++ return sys_rsbac_um_check_account_name(k_arg.um_check_account_name.name);
++#endif
++ case RSYS_get_attr:
++ return sys_rsbac_get_attr(k_arg.get_attr.ta_number,
++ k_arg.get_attr.module,
++ k_arg.get_attr.target,
++ k_arg.get_attr.tid,
++ k_arg.get_attr.attr,
++ k_arg.get_attr.value,
++ k_arg.get_attr.inherit);
++ case RSYS_get_attr_n:
++ return sys_rsbac_get_attr_n(k_arg.get_attr_n.ta_number,
++ k_arg.get_attr_n.module,
++ k_arg.get_attr_n.target,
++ k_arg.get_attr_n.t_name,
++ k_arg.get_attr_n.attr,
++ k_arg.get_attr_n.value,
++ k_arg.get_attr_n.inherit);
++ case RSYS_set_attr:
++ return sys_rsbac_set_attr(k_arg.set_attr.ta_number,
++ k_arg.set_attr.module,
++ k_arg.set_attr.target,
++ k_arg.set_attr.tid,
++ k_arg.set_attr.attr,
++ k_arg.set_attr.value);
++ case RSYS_set_attr_n:
++ return sys_rsbac_set_attr_n(k_arg.set_attr_n.ta_number,
++ k_arg.set_attr_n.module,
++ k_arg.set_attr_n.target,
++ k_arg.set_attr_n.t_name,
++ k_arg.set_attr_n.attr,
++ k_arg.set_attr_n.value);
++#ifdef CONFIG_RSBAC_RC
++ case RSYS_rc_get_current_role:
++ return sys_rsbac_rc_get_current_role(k_arg.rc_get_current_role.role_p);
++ case RSYS_rc_get_item:
++ return sys_rsbac_rc_get_item(k_arg.rc_get_item.ta_number,
++ k_arg.rc_get_item.target,
++ k_arg.rc_get_item.tid_p,
++ k_arg.rc_get_item.subtid_p,
++ k_arg.rc_get_item.item,
++ k_arg.rc_get_item.value_p,
++ k_arg.rc_get_item.ttl_p);
++ case RSYS_rc_change_role:
++ return sys_rsbac_rc_change_role(k_arg.rc_change_role.role, k_arg.rc_change_role.pass);
++#endif
++#ifdef CONFIG_RSBAC_JAIL
++ case RSYS_jail:
++ return rsbac_jail_sys_jail(k_arg.jail.version,
++ k_arg.jail.path,
++ k_arg.jail.ip,
++ k_arg.jail.flags,
++ k_arg.jail.max_caps,
++ k_arg.jail.scd_get,
++ k_arg.jail.scd_modify);
++#endif
++
++ case RSYS_remove_target:
++ return sys_rsbac_remove_target(k_arg.remove_target.ta_number,
++ k_arg.remove_target.target,
++ k_arg.remove_target.tid);
++ case RSYS_remove_target_n:
++ return sys_rsbac_remove_target_n(k_arg.remove_target_n.ta_number,
++ k_arg.remove_target_n.target,
++ k_arg.remove_target_n.t_name);
++ case RSYS_net_list_all_netdev:
++ return sys_rsbac_net_list_all_netdev(k_arg.net_list_all_netdev.ta_number,
++ k_arg.net_list_all_netdev.id_p,
++ k_arg.net_list_all_netdev.maxnum);
++ case RSYS_net_template:
++ return sys_rsbac_net_template(k_arg.net_template.ta_number,
++ k_arg.net_template.call,
++ k_arg.net_template.id,
++ k_arg.net_template.data_p);
++ case RSYS_net_list_all_template:
++ return sys_rsbac_net_list_all_template(k_arg.net_list_all_template.ta_number,
++ k_arg.net_list_all_template.id_p,
++ k_arg.net_list_all_template.maxnum);
++ case RSYS_switch:
++ return sys_rsbac_switch(k_arg.switch_module.module,
++ k_arg.switch_module.value);
++ case RSYS_get_switch:
++ return sys_rsbac_get_switch(k_arg.get_switch_module.module,
++ k_arg.get_switch_module.value_p,
++ k_arg.get_switch_module.switchable_p);
++ case RSYS_adf_log_switch:
++ return sys_rsbac_adf_log_switch(k_arg.adf_log_switch.request,
++ k_arg.adf_log_switch.target,
++ k_arg.adf_log_switch.value);
++ case RSYS_get_adf_log:
++ return sys_rsbac_get_adf_log(k_arg.get_adf_log.request,
++ k_arg.get_adf_log.target,
++ k_arg.get_adf_log.value_p);
++ case RSYS_write:
++ return sys_rsbac_write();
++ case RSYS_log:
++ return sys_rsbac_log(k_arg.log.type,
++ k_arg.log.buf,
++ k_arg.log.len);
++#ifdef CONFIG_RSBAC_MAC
++ case RSYS_mac_set_curr_level:
++ return sys_rsbac_mac_set_curr_level(k_arg.mac_set_curr_level.level,
++ k_arg.mac_set_curr_level.categories_p);
++ case RSYS_mac_get_curr_level:
++ return sys_rsbac_mac_get_curr_level(k_arg.mac_get_curr_level.level_p,
++ k_arg.mac_get_curr_level.categories_p);
++ case RSYS_mac_get_max_level:
++ return sys_rsbac_mac_get_max_level(k_arg.mac_get_max_level.level_p,
++ k_arg.mac_get_max_level.categories_p);
++ case RSYS_mac_get_min_level:
++ return sys_rsbac_mac_get_min_level(k_arg.mac_get_min_level.level_p,
++ k_arg.mac_get_min_level.categories_p);
++ case RSYS_mac_add_p_tru:
++ return sys_rsbac_mac_add_p_tru(k_arg.mac_add_p_tru.ta_number,
++ k_arg.mac_add_p_tru.pid,
++ k_arg.mac_add_p_tru.uid,
++ k_arg.mac_add_p_tru.ttl);
++ case RSYS_mac_remove_p_tru:
++ return sys_rsbac_mac_remove_p_tru(k_arg.mac_remove_p_tru.ta_number,
++ k_arg.mac_remove_p_tru.pid,
++                                        k_arg.mac_remove_p_tru.uid);
++ case RSYS_mac_add_f_tru:
++ return sys_rsbac_mac_add_f_tru(k_arg.mac_add_f_tru.ta_number,
++ k_arg.mac_add_f_tru.filename,
++                                     k_arg.mac_add_f_tru.uid,
++ k_arg.mac_add_f_tru.ttl);
++ case RSYS_mac_remove_f_tru:
++ return sys_rsbac_mac_remove_f_tru(k_arg.mac_remove_f_tru.ta_number,
++ k_arg.mac_remove_f_tru.filename,
++ k_arg.mac_remove_f_tru.uid);
++ case RSYS_mac_get_f_trulist:
++ return sys_rsbac_mac_get_f_trulist(k_arg.mac_get_f_trulist.ta_number,
++ k_arg.mac_get_f_trulist.filename,
++ k_arg.mac_get_f_trulist.trulist,
++ k_arg.mac_get_f_trulist.ttllist,
++ k_arg.mac_get_f_trulist.maxnum);
++ case RSYS_mac_get_p_trulist:
++ return sys_rsbac_mac_get_p_trulist(k_arg.mac_get_p_trulist.ta_number,
++ k_arg.mac_get_p_trulist.pid,
++ k_arg.mac_get_p_trulist.trulist,
++ k_arg.mac_get_p_trulist.ttllist,
++ k_arg.mac_get_p_trulist.maxnum);
++#endif
++#ifdef CONFIG_RSBAC_PM
++ case RSYS_stats_pm:
++ return sys_rsbac_stats_pm();
++ case RSYS_pm:
++ return sys_rsbac_pm(k_arg.pm.ta_number,
++ k_arg.pm.function,
++ k_arg.pm.param_p,
++ k_arg.pm.ticket);
++ case RSYS_pm_change_current_task:
++ return sys_rsbac_pm_change_current_task(k_arg.pm_change_current_task.task);
++ case RSYS_pm_create_file:
++ return sys_rsbac_pm_create_file(k_arg.pm_create_file.filename,
++ k_arg.pm_create_file.mode,
++ k_arg.pm_create_file.object_class);
++#endif
++#ifdef CONFIG_RSBAC_DAZ
++ case RSYS_daz_flush_cache:
++ return sys_rsbac_daz_flush_cache();
++#endif
++#ifdef CONFIG_RSBAC_RC
++ case RSYS_rc_copy_role:
++ return sys_rsbac_rc_copy_role(k_arg.rc_copy_role.ta_number,
++ k_arg.rc_copy_role.from_role,
++ k_arg.rc_copy_role.to_role);
++ case RSYS_rc_copy_type:
++ return sys_rsbac_rc_copy_type(k_arg.rc_copy_type.ta_number,
++ k_arg.rc_copy_type.target,
++ k_arg.rc_copy_type.from_type,
++ k_arg.rc_copy_type.to_type);
++ case RSYS_rc_set_item:
++ return sys_rsbac_rc_set_item(k_arg.rc_set_item.ta_number,
++ k_arg.rc_set_item.target,
++ k_arg.rc_set_item.tid_p,
++ k_arg.rc_set_item.subtid_p,
++ k_arg.rc_set_item.item,
++ k_arg.rc_set_item.value_p,
++ k_arg.rc_set_item.ttl);
++ case RSYS_rc_get_eff_rights_n:
++ return sys_rsbac_rc_get_eff_rights_n(k_arg.rc_get_eff_rights_n.ta_number,
++ k_arg.rc_get_eff_rights_n.target,
++ k_arg.rc_get_eff_rights_n.t_name,
++ k_arg.rc_get_eff_rights_n.request_vector_p,
++ k_arg.rc_get_eff_rights_n.ttl_p);
++ case RSYS_rc_get_list:
++ return sys_rsbac_rc_get_list(k_arg.rc_get_list.ta_number,
++ k_arg.rc_get_list.target,
++ k_arg.rc_get_list.tid_p,
++ k_arg.rc_get_list.item,
++ k_arg.rc_get_list.maxnum,
++ k_arg.rc_get_list.array_p,
++ k_arg.rc_get_list.ttl_array_p);
++ case RSYS_rc_select_fd_create_type:
++ return sys_rsbac_rc_select_fd_create_type(k_arg.rc_select_fd_create_type.type);
++#endif
++#ifdef CONFIG_RSBAC_AUTH
++ case RSYS_auth_add_p_cap:
++ return sys_rsbac_auth_add_p_cap(k_arg.auth_add_p_cap.ta_number,
++ k_arg.auth_add_p_cap.pid,
++ k_arg.auth_add_p_cap.cap_type,
++ k_arg.auth_add_p_cap.cap_range,
++ k_arg.auth_add_p_cap.ttl);
++ case RSYS_auth_remove_p_cap:
++ return sys_rsbac_auth_remove_p_cap(k_arg.auth_remove_p_cap.ta_number,
++ k_arg.auth_remove_p_cap.pid,
++ k_arg.auth_remove_p_cap.cap_type,
++ k_arg.auth_remove_p_cap.cap_range);
++ case RSYS_auth_add_f_cap:
++ return sys_rsbac_auth_add_f_cap(k_arg.auth_add_f_cap.ta_number,
++ k_arg.auth_add_f_cap.filename,
++ k_arg.auth_add_f_cap.cap_type,
++ k_arg.auth_add_f_cap.cap_range,
++ k_arg.auth_add_f_cap.ttl);
++ case RSYS_auth_remove_f_cap:
++ return sys_rsbac_auth_remove_f_cap(k_arg.auth_remove_f_cap.ta_number,
++ k_arg.auth_remove_f_cap.filename,
++ k_arg.auth_remove_f_cap.cap_type,
++ k_arg.auth_remove_f_cap.cap_range);
++ case RSYS_auth_get_f_caplist:
++ return sys_rsbac_auth_get_f_caplist(k_arg.auth_get_f_caplist.ta_number,
++ k_arg.auth_get_f_caplist.filename,
++ k_arg.auth_get_f_caplist.cap_type,
++ k_arg.auth_get_f_caplist.caplist,
++ k_arg.auth_get_f_caplist.ttllist,
++ k_arg.auth_get_f_caplist.maxnum);
++ case RSYS_auth_get_p_caplist:
++ return sys_rsbac_auth_get_p_caplist(k_arg.auth_get_p_caplist.ta_number,
++ k_arg.auth_get_p_caplist.pid,
++ k_arg.auth_get_p_caplist.cap_type,
++ k_arg.auth_get_p_caplist.caplist,
++ k_arg.auth_get_p_caplist.ttllist,
++ k_arg.auth_get_p_caplist.maxnum);
++#endif
++#ifdef CONFIG_RSBAC_ACL
++ case RSYS_acl:
++ return sys_rsbac_acl(k_arg.acl.ta_number,
++ k_arg.acl.call,
++ k_arg.acl.arg);
++ case RSYS_acl_n:
++ return sys_rsbac_acl_n(k_arg.acl_n.ta_number,
++ k_arg.acl_n.call,
++ k_arg.acl_n.arg);
++ case RSYS_acl_get_rights:
++ return sys_rsbac_acl_get_rights(k_arg.acl_get_rights.ta_number,
++ k_arg.acl_get_rights.arg,
++ k_arg.acl_get_rights.rights_p,
++ k_arg.acl_get_rights.effective);
++ case RSYS_acl_get_rights_n:
++ return sys_rsbac_acl_get_rights_n(k_arg.acl_get_rights_n.ta_number,
++ k_arg.acl_get_rights_n.arg,
++ k_arg.acl_get_rights_n.rights_p,
++ k_arg.acl_get_rights_n.effective);
++ case RSYS_acl_get_tlist:
++ return sys_rsbac_acl_get_tlist(k_arg.acl_get_tlist.ta_number,
++ k_arg.acl_get_tlist.target,
++ k_arg.acl_get_tlist.tid,
++ k_arg.acl_get_tlist.entry_array,
++ k_arg.acl_get_tlist.ttl_array,
++ k_arg.acl_get_tlist.maxnum);
++ case RSYS_acl_get_tlist_n:
++ return sys_rsbac_acl_get_tlist_n(k_arg.acl_get_tlist_n.ta_number,
++ k_arg.acl_get_tlist_n.target,
++ k_arg.acl_get_tlist_n.t_name,
++ k_arg.acl_get_tlist_n.entry_array,
++ k_arg.acl_get_tlist_n.ttl_array,
++ k_arg.acl_get_tlist_n.maxnum);
++ case RSYS_acl_get_mask:
++ return sys_rsbac_acl_get_mask(k_arg.acl_get_mask.ta_number,
++ k_arg.acl_get_mask.target,
++ k_arg.acl_get_mask.tid,
++ k_arg.acl_get_mask.mask_p);
++ case RSYS_acl_get_mask_n:
++ return sys_rsbac_acl_get_mask_n(k_arg.acl_get_mask_n.ta_number,
++ k_arg.acl_get_mask_n.target,
++ k_arg.acl_get_mask_n.t_name,
++ k_arg.acl_get_mask_n.mask_p);
++ case RSYS_acl_group:
++ return sys_rsbac_acl_group(k_arg.acl_group.ta_number,
++ k_arg.acl_group.call,
++ k_arg.acl_group.arg_p);
++ case RSYS_acl_list_all_dev:
++ return sys_rsbac_acl_list_all_dev(k_arg.acl_list_all_dev.ta_number,
++ k_arg.acl_list_all_dev.id_p,
++ k_arg.acl_list_all_dev.maxnum);
++ case RSYS_acl_list_all_user:
++ return sys_rsbac_acl_list_all_user(k_arg.acl_list_all_user.ta_number,
++ k_arg.acl_list_all_user.id_p,
++ k_arg.acl_list_all_user.maxnum);
++ case RSYS_acl_list_all_group:
++ return sys_rsbac_acl_list_all_group(k_arg.acl_list_all_group.ta_number,
++ k_arg.acl_list_all_group.id_p,
++ k_arg.acl_list_all_group.maxnum);
++#endif
++#ifdef CONFIG_RSBAC_REG
++ case RSYS_reg:
++ return sys_rsbac_reg(k_arg.reg.handle,
++ k_arg.reg.arg);
++#endif
++#ifdef CONFIG_RSBAC_UM
++ case RSYS_um_auth_name:
++ return sys_rsbac_um_auth_name(k_arg.um_auth_name.name,
++ k_arg.um_auth_name.pass);
++ case RSYS_um_auth_uid:
++ return sys_rsbac_um_auth_uid(k_arg.um_auth_uid.uid,
++ k_arg.um_auth_uid.pass);
++ case RSYS_um_add_user:
++ return sys_rsbac_um_add_user(k_arg.um_add_user.ta_number,
++ k_arg.um_add_user.uid,
++ k_arg.um_add_user.entry_p,
++ k_arg.um_add_user.pass,
++ k_arg.um_add_user.ttl);
++ case RSYS_um_add_group:
++ return sys_rsbac_um_add_group(k_arg.um_add_group.ta_number,
++ k_arg.um_add_group.gid,
++ k_arg.um_add_group.entry_p,
++ k_arg.um_add_group.pass,
++ k_arg.um_add_group.ttl);
++ case RSYS_um_add_gm:
++ return sys_rsbac_um_add_gm(k_arg.um_add_gm.ta_number,
++ k_arg.um_add_gm.uid,
++ k_arg.um_add_gm.gid,
++ k_arg.um_add_gm.ttl);
++ case RSYS_um_mod_user:
++ return sys_rsbac_um_mod_user(k_arg.um_mod_user.ta_number,
++ k_arg.um_mod_user.uid,
++ k_arg.um_mod_user.mod,
++ k_arg.um_mod_user.data_p);
++ case RSYS_um_mod_group:
++ return sys_rsbac_um_mod_group(k_arg.um_mod_group.ta_number,
++ k_arg.um_mod_group.gid,
++ k_arg.um_mod_group.mod,
++ k_arg.um_mod_group.data_p);
++ case RSYS_um_remove_user:
++ return sys_rsbac_um_remove_user(k_arg.um_remove_user.ta_number,
++ k_arg.um_remove_user.uid);
++ case RSYS_um_remove_group:
++ return sys_rsbac_um_remove_group(k_arg.um_remove_group.ta_number,
++ k_arg.um_remove_group.gid);
++ case RSYS_um_remove_gm:
++ return sys_rsbac_um_remove_gm(k_arg.um_remove_gm.ta_number,
++ k_arg.um_remove_gm.uid,
++ k_arg.um_remove_gm.gid);
++ case RSYS_um_user_exists:
++ return sys_rsbac_um_user_exists(k_arg.um_user_exists.ta_number,
++ k_arg.um_user_exists.uid);
++ case RSYS_um_get_next_user:
++ return sys_rsbac_um_get_next_user(k_arg.um_get_next_user.ta_number,
++ k_arg.um_get_next_user.old_user,
++ k_arg.um_get_next_user.next_user_p);
++ case RSYS_um_group_exists:
++ return sys_rsbac_um_group_exists(k_arg.um_group_exists.ta_number,
++ k_arg.um_group_exists.gid);
++ case RSYS_um_get_group_list:
++ return sys_rsbac_um_get_group_list(k_arg.um_get_group_list.ta_number,
++ k_arg.um_get_group_list.vset,
++ k_arg.um_get_group_list.group_array,
++ k_arg.um_get_group_list.maxnum);
++ case RSYS_um_set_pass:
++ return sys_rsbac_um_set_pass(k_arg.um_set_pass.uid,
++ k_arg.um_set_pass.old_pass,
++ k_arg.um_set_pass.new_pass);
++ case RSYS_um_set_pass_name:
++ return sys_rsbac_um_set_pass_name(k_arg.um_set_pass_name.name,
++ k_arg.um_set_pass_name.old_pass,
++ k_arg.um_set_pass_name.new_pass);
++ case RSYS_um_add_onetime:
++ return sys_rsbac_um_add_onetime(k_arg.um_add_onetime.uid,
++ k_arg.um_add_onetime.old_pass,
++ k_arg.um_add_onetime.new_pass,
++ k_arg.um_add_onetime.ttl);
++ case RSYS_um_add_onetime_name:
++ return sys_rsbac_um_add_onetime_name(k_arg.um_add_onetime_name.name,
++ k_arg.um_add_onetime_name.old_pass,
++ k_arg.um_add_onetime_name.new_pass,
++ k_arg.um_add_onetime_name.ttl);
++ case RSYS_um_remove_all_onetime:
++ return sys_rsbac_um_remove_all_onetime(k_arg.um_remove_all_onetime.uid,
++ k_arg.um_remove_all_onetime.old_pass);
++ case RSYS_um_remove_all_onetime_name:
++ return sys_rsbac_um_remove_all_onetime_name(k_arg.um_remove_all_onetime_name.name,
++ k_arg.um_remove_all_onetime_name.old_pass);
++ case RSYS_um_count_onetime:
++ return sys_rsbac_um_count_onetime(k_arg.um_count_onetime.uid,
++ k_arg.um_count_onetime.old_pass);
++ case RSYS_um_count_onetime_name:
++ return sys_rsbac_um_count_onetime_name(k_arg.um_count_onetime_name.name,
++ k_arg.um_count_onetime_name.old_pass);
++ case RSYS_um_set_group_pass:
++ return sys_rsbac_um_set_group_pass(k_arg.um_set_group_pass.gid,
++ k_arg.um_set_group_pass.new_pass);
++ case RSYS_um_check_account:
++ return sys_rsbac_um_check_account(k_arg.um_check_account.uid);
++ case RSYS_um_get_max_history:
++ return sys_rsbac_um_get_max_history(k_arg.um_get_max_history.ta_number,
++ k_arg.um_get_max_history.uid);
++ case RSYS_um_get_max_history_name:
++ return sys_rsbac_um_get_max_history_name(k_arg.um_get_max_history_name.ta_number,
++ k_arg.um_get_max_history_name.name);
++ case RSYS_um_set_max_history:
++ return sys_rsbac_um_set_max_history(k_arg.um_set_max_history.ta_number,
++ k_arg.um_set_max_history.uid,
++ k_arg.um_set_max_history.max_history);
++ case RSYS_um_set_max_history_name:
++ return sys_rsbac_um_set_max_history_name(k_arg.um_set_max_history_name.ta_number,
++ k_arg.um_set_max_history_name.name,
++ k_arg.um_set_max_history_name.max_history);
++ case RSYS_um_select_vset:
++ return sys_rsbac_um_select_vset(k_arg.um_select_vset.vset);
++#endif
++
++#ifdef CONFIG_RSBAC_LIST_TRANS
++ case RSYS_list_ta_begin_name:
++        return sys_rsbac_list_ta_begin_name(k_arg.list_ta_begin_name.ttl,
++ k_arg.list_ta_begin_name.ta_number_p,
++ k_arg.list_ta_begin_name.commit_uid,
++ k_arg.list_ta_begin_name.name,
++ k_arg.list_ta_begin_name.password);
++ case RSYS_list_ta_begin:
++ return sys_rsbac_list_ta_begin(k_arg.list_ta_begin.ttl,
++ k_arg.list_ta_begin.ta_number_p,
++ k_arg.list_ta_begin.commit_uid,
++ k_arg.list_ta_begin.password);
++ case RSYS_list_ta_refresh:
++ return sys_rsbac_list_ta_refresh(k_arg.list_ta_refresh.ttl,
++ k_arg.list_ta_refresh.ta_number,
++ k_arg.list_ta_refresh.password);
++ case RSYS_list_ta_commit:
++ return sys_rsbac_list_ta_commit(k_arg.list_ta_commit.ta_number,
++ k_arg.list_ta_commit.password);
++ case RSYS_list_ta_forget:
++ return sys_rsbac_list_ta_forget(k_arg.list_ta_forget.ta_number,
++ k_arg.list_ta_forget.password);
++#endif
++
++ case RSYS_list_all_dev:
++ return sys_rsbac_list_all_dev(k_arg.list_all_dev.ta_number,
++ k_arg.list_all_dev.id_p,
++ k_arg.list_all_dev.maxnum);
++ case RSYS_list_all_user:
++ return sys_rsbac_list_all_user(k_arg.list_all_user.ta_number,
++ k_arg.list_all_user.id_p,
++ k_arg.list_all_user.maxnum);
++ case RSYS_list_all_group:
++ return sys_rsbac_list_all_group(k_arg.list_all_group.ta_number,
++ k_arg.list_all_group.id_p,
++ k_arg.list_all_group.maxnum);
++ case RSYS_list_all_ipc:
++        return sys_rsbac_list_all_ipc(k_arg.list_all_ipc.ta_number,
++                                      k_arg.list_all_ipc.id_p,
++                                      k_arg.list_all_ipc.maxnum);
++
++ case RSYS_version:
++ return RSBAC_VERSION_NR;
++ case RSYS_stats:
++ return sys_rsbac_stats();
++ case RSYS_check:
++ return sys_rsbac_check(k_arg.check.correct, k_arg.check.check_inode);
++#if defined(CONFIG_RSBAC_INIT_DELAY)
++ case RSYS_init:
++ return sys_rsbac_init(k_arg.init.root_dev);
++#endif
++
++ default:
++ return -RSBAC_EINVALIDREQUEST;
++ }
++ }
++
++/* end of syscalls.c */
+diff --git a/security/Kconfig b/security/Kconfig
+index ccc61f8..1d35a89 100644
+--- a/security/Kconfig
++++ b/security/Kconfig
+@@ -4,6 +4,8 @@
+
+ menu "Security options"
+
++source "rsbac/Kconfig"
++
+ config KEYS
+ bool "Enable access key retention support"
+ help
diff --git a/3.4.1/4520_pax-linux-3.4-test7.patch b/3.4.1/4520_pax-linux-3.4-test7.patch
new file mode 100644
index 0000000..d1bd927
--- /dev/null
+++ b/3.4.1/4520_pax-linux-3.4-test7.patch
@@ -0,0 +1,72723 @@
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/alpha/include/asm/atomic.h linux-3.4-pax/arch/alpha/include/asm/atomic.h
+--- linux-3.4/arch/alpha/include/asm/atomic.h 2012-05-21 11:32:30.591926200 +0200
++++ linux-3.4-pax/arch/alpha/include/asm/atomic.h 2012-05-21 12:10:08.400048827 +0200
+@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unles
+ #define atomic_dec(v) atomic_sub(1,(v))
+ #define atomic64_dec(v) atomic64_sub(1,(v))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #define smp_mb__before_atomic_dec() smp_mb()
+ #define smp_mb__after_atomic_dec() smp_mb()
+ #define smp_mb__before_atomic_inc() smp_mb()
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/alpha/include/asm/elf.h linux-3.4-pax/arch/alpha/include/asm/elf.h
+--- linux-3.4/arch/alpha/include/asm/elf.h 2012-05-21 11:32:30.603926201 +0200
++++ linux-3.4-pax/arch/alpha/include/asm/elf.h 2012-05-21 12:10:08.404048827 +0200
+@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
++#endif
++
+ /* $0 is set by ld.so to a pointer to a function which might be
+ registered using atexit. This provides a mean for the dynamic
+ linker to call DT_FINI functions for shared libraries that have
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/alpha/include/asm/pgalloc.h linux-3.4-pax/arch/alpha/include/asm/pgalloc.h
+--- linux-3.4/arch/alpha/include/asm/pgalloc.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/alpha/include/asm/pgalloc.h 2012-05-21 12:10:08.408048828 +0200
+@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t
+ pgd_set(pgd, pmd);
+ }
+
++static inline void
++pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
++{
++ pgd_populate(mm, pgd, pmd);
++}
++
+ extern pgd_t *pgd_alloc(struct mm_struct *mm);
+
+ static inline void
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/alpha/include/asm/pgtable.h linux-3.4-pax/arch/alpha/include/asm/pgtable.h
+--- linux-3.4/arch/alpha/include/asm/pgtable.h 2012-05-21 11:32:30.627926202 +0200
++++ linux-3.4-pax/arch/alpha/include/asm/pgtable.h 2012-05-21 12:10:08.408048828 +0200
+@@ -102,6 +102,17 @@ struct vm_area_struct;
+ #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
+ #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+ #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+
+ #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/alpha/kernel/module.c linux-3.4-pax/arch/alpha/kernel/module.c
+--- linux-3.4/arch/alpha/kernel/module.c 2011-10-24 12:48:18.623092181 +0200
++++ linux-3.4-pax/arch/alpha/kernel/module.c 2012-05-21 12:10:08.412048828 +0200
+@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
+
+ /* The small sections were sorted to the end of the segment.
+ The following should definitely cover them. */
+- gp = (u64)me->module_core + me->core_size - 0x8000;
++ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
+ got = sechdrs[me->arch.gotsecindex].sh_addr;
+
+ for (i = 0; i < n; i++) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/alpha/kernel/osf_sys.c linux-3.4-pax/arch/alpha/kernel/osf_sys.c
+--- linux-3.4/arch/alpha/kernel/osf_sys.c 2012-05-21 11:32:30.703926206 +0200
++++ linux-3.4-pax/arch/alpha/kernel/osf_sys.c 2012-05-21 12:10:08.412048828 +0200
+@@ -1146,7 +1146,7 @@ arch_get_unmapped_area_1(unsigned long a
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (limit - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = vma->vm_end;
+ vma = vma->vm_next;
+@@ -1182,6 +1182,10 @@ arch_get_unmapped_area(struct file *filp
+ merely specific addresses, but regions of memory -- perhaps
+ this feature should be incorporated into all ports? */
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ if (addr != (unsigned long) -ENOMEM)
+@@ -1189,8 +1193,8 @@ arch_get_unmapped_area(struct file *filp
+ }
+
+ /* Next, try allocating at TASK_UNMAPPED_BASE. */
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+- len, limit);
++ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
++
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/alpha/mm/fault.c linux-3.4-pax/arch/alpha/mm/fault.c
+--- linux-3.4/arch/alpha/mm/fault.c 2012-05-21 11:32:30.855926214 +0200
++++ linux-3.4-pax/arch/alpha/mm/fault.c 2012-05-21 12:10:08.416048828 +0200
+@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *
+ __reload_thread(pcb);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int ldah, ldq, jmp;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
++ jmp == 0x6BFB0000U)
++ {
++ unsigned long r27, addr;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
++
++ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ err = get_user(r27, (unsigned long *)addr);
++ if (err)
++ break;
++
++ regs->r27 = r27;
++ regs->pc = r27;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ldah, lda, br;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(lda, (unsigned int *)(regs->pc+4));
++ err |= get_user(br, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (lda & 0xFFFF0000U) == 0xA77B0000U &&
++ (br & 0xFFE00000U) == 0xC3E00000U)
++ {
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
++
++ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int br;
++
++ err = get_user(br, (unsigned int *)regs->pc);
++
++ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
++ unsigned int br2, ldq, nop, jmp;
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
++
++ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ err = get_user(br2, (unsigned int *)addr);
++ err |= get_user(ldq, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ err |= get_user(jmp, (unsigned int *)(addr+12));
++ err |= get_user(resolver, (unsigned long *)(addr+16));
++
++ if (err)
++ break;
++
++ if (br2 == 0xC3600000U &&
++ ldq == 0xA77B000CU &&
++ nop == 0x47FF041FU &&
++ jmp == 0x6B7B0000U)
++ {
++ regs->r28 = regs->pc+4;
++ regs->r27 = addr+16;
++ regs->pc = resolver;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
+
+ /*
+ * This routine handles page faults. It determines the address,
+@@ -130,8 +248,29 @@ do_page_fault(unsigned long address, uns
+ good_area:
+ si_code = SEGV_ACCERR;
+ if (cause < 0) {
+- if (!(vma->vm_flags & VM_EXEC))
++ if (!(vma->vm_flags & VM_EXEC)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
++ do_group_exit(SIGKILL);
++#else
+ goto bad_area;
++#endif
++
++ }
+ } else if (!cause) {
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/include/asm/atomic.h linux-3.4-pax/arch/arm/include/asm/atomic.h
+--- linux-3.4/arch/arm/include/asm/atomic.h 2012-05-21 11:32:31.575926252 +0200
++++ linux-3.4-pax/arch/arm/include/asm/atomic.h 2012-05-21 12:10:08.416048828 +0200
+@@ -17,17 +17,35 @@
+ #include <asm/barrier.h>
+ #include <asm/cmpxchg.h>
+
++#ifdef CONFIG_GENERIC_ATOMIC64
++#include <asm-generic/atomic64.h>
++#endif
++
+ #define ATOMIC_INIT(i) { (i) }
+
+ #ifdef __KERNEL__
+
++#define _ASM_EXTABLE(from, to) \
++" .pushsection __ex_table,\"a\"\n"\
++" .align 3\n" \
++" .long " #from ", " #to"\n" \
++" .popsection"
++
+ /*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
+ #define atomic_read(v) (*(volatile int *)&(v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return v->counter;
++}
+ #define atomic_set(v,i) (((v)->counter) = (i))
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
+
+ #if __LINUX_ARM_ARCH__ >= 6
+
+@@ -42,6 +60,35 @@ static inline void atomic_add(int i, ato
+ int result;
+
+ __asm__ __volatile__("@ atomic_add\n"
++"1: ldrex %1, [%3]\n"
++" adds %0, %1, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strex %1, %0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "Ir" (i)
++ : "cc");
++}
++
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ __asm__ __volatile__("@ atomic_add_unchecked\n"
+ "1: ldrex %0, [%3]\n"
+ " add %0, %0, %4\n"
+ " strex %1, %0, [%3]\n"
+@@ -60,6 +107,42 @@ static inline int atomic_add_return(int
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic_add_return\n"
++"1: ldrex %1, [%3]\n"
++" adds %0, %1, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++" mov %0, %1\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strex %1, %0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "Ir" (i)
++ : "cc");
++
++ smp_mb();
++
++ return result;
++}
++
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ smp_mb();
++
++ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
+ "1: ldrex %0, [%3]\n"
+ " add %0, %0, %4\n"
+ " strex %1, %0, [%3]\n"
+@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, ato
+ int result;
+
+ __asm__ __volatile__("@ atomic_sub\n"
++"1: ldrex %1, [%3]\n"
++" subs %0, %1, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strex %1, %0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "Ir" (i)
++ : "cc");
++}
++
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ __asm__ __volatile__("@ atomic_sub_unchecked\n"
+ "1: ldrex %0, [%3]\n"
+ " sub %0, %0, %4\n"
+ " strex %1, %0, [%3]\n"
+@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic_sub_return\n"
+-"1: ldrex %0, [%3]\n"
+-" sub %0, %0, %4\n"
++"1: ldrex %1, [%3]\n"
++" sub %0, %1, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++" mov %0, %1\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
+ " strex %1, %0, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_
+ return oldval;
+ }
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
++{
++ unsigned long oldval, res;
++
++ smp_mb();
++
++ do {
++ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
++ "ldrex %1, [%3]\n"
++ "mov %0, #0\n"
++ "teq %1, %4\n"
++ "strexeq %0, %5, [%3]\n"
++ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
++ : "r" (&ptr->counter), "Ir" (old), "r" (new)
++ : "cc");
++ } while (res);
++
++ smp_mb();
++
++ return oldval;
++}
++
+ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+ {
+ unsigned long tmp, tmp2;
+@@ -167,7 +315,9 @@ static inline int atomic_add_return(int
+
+ return val;
+ }
++#define atomic_add_return_unchecked(i, v) atomic_add_return(i, v)
+ #define atomic_add(i, v) (void) atomic_add_return(i, v)
++#define atomic_add_unchecked(i, v) (void) atomic_add_return_unchecked(i, v)
+
+ static inline int atomic_sub_return(int i, atomic_t *v)
+ {
+@@ -181,7 +331,9 @@ static inline int atomic_sub_return(int
+
+ return val;
+ }
++#define atomic_sub_return_unchecked(i, v) atomic_sub_return(i, v)
+ #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
++#define atomic_sub_unchecked(i, v) (void) atomic_sub_return_unchecked(i, v)
+
+ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+ {
+@@ -196,6 +348,7 @@ static inline int atomic_cmpxchg(atomic_
+
+ return ret;
+ }
++#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg(v, o, n)
+
+ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+ {
+@@ -209,6 +362,10 @@ static inline void atomic_clear_mask(uns
+ #endif /* __LINUX_ARM_ARCH__ */
+
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
+
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+@@ -221,11 +378,27 @@ static inline int __atomic_add_unless(at
+ }
+
+ #define atomic_inc(v) atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+ #define atomic_dec(v) atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+
+ #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v) == 0;
++}
+ #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
+ #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+@@ -241,6 +414,14 @@ typedef struct {
+ u64 __aligned(8) counter;
+ } atomic64_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ u64 __aligned(8) counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(i) { (i) }
+
+ static inline u64 atomic64_read(atomic64_t *v)
+@@ -256,6 +437,19 @@ static inline u64 atomic64_read(atomic64
+ return result;
+ }
+
++static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
++{
++ u64 result;
++
++ __asm__ __volatile__("@ atomic64_read_unchecked\n"
++" ldrexd %0, %H0, [%1]"
++ : "=&r" (result)
++ : "r" (&v->counter), "Qo" (v->counter)
++ );
++
++ return result;
++}
++
+ static inline void atomic64_set(atomic64_t *v, u64 i)
+ {
+ u64 tmp;
+@@ -270,6 +464,20 @@ static inline void atomic64_set(atomic64
+ : "cc");
+ }
+
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
++{
++ u64 tmp;
++
++ __asm__ __volatile__("@ atomic64_set_unchecked\n"
++"1: ldrexd %0, %H0, [%2]\n"
++" strexd %0, %3, %H3, [%2]\n"
++" teq %0, #0\n"
++" bne 1b"
++ : "=&r" (tmp), "=Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
++}
++
+ static inline void atomic64_add(u64 i, atomic64_t *v)
+ {
+ u64 result;
+@@ -278,6 +486,36 @@ static inline void atomic64_add(u64 i, a
+ __asm__ __volatile__("@ atomic64_add\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+ " adds %0, %0, %4\n"
++" adcs %H0, %H0, %H4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strexd %1, %0, %H0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
++}
++
++static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
++{
++ u64 result;
++ unsigned long tmp;
++
++ __asm__ __volatile__("@ atomic64_add_unchecked\n"
++"1: ldrexd %0, %H0, [%3]\n"
++" adds %0, %0, %4\n"
+ " adc %H0, %H0, %H4\n"
+ " strexd %1, %0, %H0, [%3]\n"
+ " teq %1, #0\n"
+@@ -289,12 +527,49 @@ static inline void atomic64_add(u64 i, a
+
+ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+ {
++ u64 result, tmp;
++
++ smp_mb();
++
++ __asm__ __volatile__("@ atomic64_add_return\n"
++"1: ldrexd %1, %H1, [%3]\n"
++" adds %0, %1, %4\n"
++" adcs %H0, %H1, %H4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++" mov %0, %1\n"
++" mov %H0, %H1\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strexd %1, %0, %H0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
++
++ smp_mb();
++
++ return result;
++}
++
++static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
++{
+ u64 result;
+ unsigned long tmp;
+
+ smp_mb();
+
+- __asm__ __volatile__("@ atomic64_add_return\n"
++ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+ " adds %0, %0, %4\n"
+ " adc %H0, %H0, %H4\n"
+@@ -318,23 +593,34 @@ static inline void atomic64_sub(u64 i, a
+ __asm__ __volatile__("@ atomic64_sub\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+ " subs %0, %0, %4\n"
+-" sbc %H0, %H0, %H4\n"
++" sbcs %H0, %H0, %H4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
+ " strexd %1, %0, %H0, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+ }
+
+-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
++static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
+ {
+ u64 result;
+ unsigned long tmp;
+
+- smp_mb();
+-
+- __asm__ __volatile__("@ atomic64_sub_return\n"
++ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+ " subs %0, %0, %4\n"
+ " sbc %H0, %H0, %H4\n"
+@@ -344,6 +630,39 @@ static inline u64 atomic64_sub_return(u6
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
++}
++
++static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
++{
++ u64 result, tmp;
++
++ smp_mb();
++
++ __asm__ __volatile__("@ atomic64_sub_return\n"
++"1: ldrexd %1, %H1, [%3]\n"
++" subs %0, %1, %4\n"
++" sbc %H0, %H1, %H4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++" mov %0, %1\n"
++" mov %H0, %H1\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strexd %1, %0, %H0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
+
+ smp_mb();
+
+@@ -374,6 +693,30 @@ static inline u64 atomic64_cmpxchg(atomi
+ return oldval;
+ }
+
++static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
++{
++ u64 oldval;
++ unsigned long res;
++
++ smp_mb();
++
++ do {
++ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
++ "ldrexd %1, %H1, [%3]\n"
++ "mov %0, #0\n"
++ "teq %1, %4\n"
++ "teqeq %H1, %H4\n"
++ "strexdeq %0, %5, %H5, [%3]"
++ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
++ : "r" (&ptr->counter), "r" (old), "r" (new)
++ : "cc");
++ } while (res);
++
++ smp_mb();
++
++ return oldval;
++}
++
+ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+ {
+ u64 result;
+@@ -397,21 +740,34 @@ static inline u64 atomic64_xchg(atomic64
+
+ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+ {
+- u64 result;
+- unsigned long tmp;
++ u64 result, tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_dec_if_positive\n"
+-"1: ldrexd %0, %H0, [%3]\n"
+-" subs %0, %0, #1\n"
+-" sbc %H0, %H0, #0\n"
++"1: ldrexd %1, %H1, [%3]\n"
++" subs %0, %1, #1\n"
++" sbc %H0, %H1, #0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++" mov %0, %1\n"
++" mov %H0, %H1\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
+ " teq %H0, #0\n"
+-" bmi 2f\n"
++" bmi 4f\n"
+ " strexd %1, %0, %H0, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b\n"
+-"2:"
++"4:\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter)
+ : "cc");
+@@ -434,13 +790,25 @@ static inline int atomic64_add_unless(at
+ " teq %0, %5\n"
+ " teqeq %H0, %H5\n"
+ " moveq %1, #0\n"
+-" beq 2f\n"
++" beq 4f\n"
+ " adds %0, %0, %6\n"
+ " adc %H0, %H0, %H6\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
+ " strexd %2, %0, %H0, [%4]\n"
+ " teq %2, #0\n"
+ " bne 1b\n"
+-"2:"
++"4:\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (u), "r" (a)
+ : "cc");
+@@ -453,10 +821,13 @@ static inline int atomic64_add_unless(at
+
+ #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+ #define atomic64_inc(v) atomic64_add(1LL, (v))
++#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
+ #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
++#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+ #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
+ #define atomic64_dec(v) atomic64_sub(1LL, (v))
++#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
+ #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
+ #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/include/asm/cacheflush.h linux-3.4-pax/arch/arm/include/asm/cacheflush.h
+--- linux-3.4/arch/arm/include/asm/cacheflush.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/arm/include/asm/cacheflush.h 2012-05-21 12:10:08.420048828 +0200
+@@ -108,7 +108,7 @@ struct cpu_cache_fns {
+ void (*dma_unmap_area)(const void *, size_t, int);
+
+ void (*dma_flush_range)(const void *, const void *);
+-};
++} __no_const;
+
+ /*
+ * Select the calling method
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/include/asm/cache.h linux-3.4-pax/arch/arm/include/asm/cache.h
+--- linux-3.4/arch/arm/include/asm/cache.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/arm/include/asm/cache.h 2012-05-21 12:10:08.420048828 +0200
+@@ -5,7 +5,7 @@
+ #define __ASMARM_CACHE_H
+
+ #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/include/asm/cmpxchg.h linux-3.4-pax/arch/arm/include/asm/cmpxchg.h
+--- linux-3.4/arch/arm/include/asm/cmpxchg.h 2012-05-21 11:32:31.623926256 +0200
++++ linux-3.4-pax/arch/arm/include/asm/cmpxchg.h 2012-05-21 12:10:08.424048829 +0200
+@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsig
+
+ #define xchg(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
++#define xchg_unchecked(ptr,x) \
++ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+ #include <asm-generic/cmpxchg-local.h>
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/include/asm/elf.h linux-3.4-pax/arch/arm/include/asm/elf.h
+--- linux-3.4/arch/arm/include/asm/elf.h 2012-05-21 11:32:31.723926261 +0200
++++ linux-3.4-pax/arch/arm/include/asm/elf.h 2012-05-21 12:10:08.424048829 +0200
+@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00008000UL
++
++#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#endif
+
+ /* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t
+ extern void elf_set_personality(const struct elf32_hdr *);
+ #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
+
+-struct mm_struct;
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/include/asm/kmap_types.h linux-3.4-pax/arch/arm/include/asm/kmap_types.h
+--- linux-3.4/arch/arm/include/asm/kmap_types.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/arm/include/asm/kmap_types.h 2012-05-21 12:10:08.428048829 +0200
+@@ -21,6 +21,7 @@ enum km_type {
+ KM_L1_CACHE,
+ KM_L2_CACHE,
+ KM_KDB,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/include/asm/outercache.h linux-3.4-pax/arch/arm/include/asm/outercache.h
+--- linux-3.4/arch/arm/include/asm/outercache.h 2012-01-08 19:47:31.027473972 +0100
++++ linux-3.4-pax/arch/arm/include/asm/outercache.h 2012-05-21 12:10:08.428048829 +0200
+@@ -35,7 +35,7 @@ struct outer_cache_fns {
+ #endif
+ void (*set_debug)(unsigned long);
+ void (*resume)(void);
+-};
++} __no_const;
+
+ #ifdef CONFIG_OUTER_CACHE
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/include/asm/page.h linux-3.4-pax/arch/arm/include/asm/page.h
+--- linux-3.4/arch/arm/include/asm/page.h 2012-05-21 11:32:31.911926271 +0200
++++ linux-3.4-pax/arch/arm/include/asm/page.h 2012-05-21 12:10:08.428048829 +0200
+@@ -123,7 +123,7 @@ struct cpu_user_fns {
+ void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
+ void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+-};
++} __no_const;
+
+ #ifdef MULTI_USER
+ extern struct cpu_user_fns cpu_user;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/include/asm/pgalloc.h linux-3.4-pax/arch/arm/include/asm/pgalloc.h
+--- linux-3.4/arch/arm/include/asm/pgalloc.h 2012-03-19 10:38:39.444050913 +0100
++++ linux-3.4-pax/arch/arm/include/asm/pgalloc.h 2012-05-21 12:10:08.432048829 +0200
+@@ -43,6 +43,11 @@ static inline void pud_populate(struct m
+ set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
+ }
+
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
++
+ #else /* !CONFIG_ARM_LPAE */
+
+ /*
+@@ -51,6 +56,7 @@ static inline void pud_populate(struct m
+ #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
+ #define pmd_free(mm, pmd) do { } while (0)
+ #define pud_populate(mm,pmd,pte) BUG()
++#define pud_populate_kernel(mm,pmd,pte) BUG()
+
+ #endif /* CONFIG_ARM_LPAE */
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/include/asm/uaccess.h linux-3.4-pax/arch/arm/include/asm/uaccess.h
+--- linux-3.4/arch/arm/include/asm/uaccess.h 2012-05-21 11:32:32.091926281 +0200
++++ linux-3.4-pax/arch/arm/include/asm/uaccess.h 2012-05-21 12:10:08.432048829 +0200
+@@ -22,6 +22,8 @@
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
++
+ /*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+@@ -387,8 +389,23 @@ do { \
+
+
+ #ifdef CONFIG_MMU
+-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
+-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
++extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
++extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
++
++static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ return ___copy_from_user(to, from, n);
++}
++
++static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ return ___copy_to_user(to, from, n);
++}
++
+ extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
+ extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
+
+ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_READ, from, n))
+ n = __copy_from_user(to, from, n);
+ else /* security hole - plug it */
+@@ -412,6 +432,9 @@ static inline unsigned long __must_check
+
+ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/kernel/armksyms.c linux-3.4-pax/arch/arm/kernel/armksyms.c
+--- linux-3.4/arch/arm/kernel/armksyms.c 2012-05-21 11:32:32.115926282 +0200
++++ linux-3.4-pax/arch/arm/kernel/armksyms.c 2012-05-21 12:10:08.436048829 +0200
+@@ -94,8 +94,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
+ #ifdef CONFIG_MMU
+ EXPORT_SYMBOL(copy_page);
+
+-EXPORT_SYMBOL(__copy_from_user);
+-EXPORT_SYMBOL(__copy_to_user);
++EXPORT_SYMBOL(___copy_from_user);
++EXPORT_SYMBOL(___copy_to_user);
+ EXPORT_SYMBOL(__clear_user);
+
+ EXPORT_SYMBOL(__get_user_1);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/kernel/process.c linux-3.4-pax/arch/arm/kernel/process.c
+--- linux-3.4/arch/arm/kernel/process.c 2012-05-21 11:32:32.251926290 +0200
++++ linux-3.4-pax/arch/arm/kernel/process.c 2012-05-21 12:10:08.436048829 +0200
+@@ -28,7 +28,6 @@
+ #include <linux/tick.h>
+ #include <linux/utsname.h>
+ #include <linux/uaccess.h>
+-#include <linux/random.h>
+ #include <linux/hw_breakpoint.h>
+ #include <linux/cpuidle.h>
+
+@@ -275,9 +274,10 @@ void machine_power_off(void)
+ machine_shutdown();
+ if (pm_power_off)
+ pm_power_off();
++ BUG();
+ }
+
+-void machine_restart(char *cmd)
++__noreturn void machine_restart(char *cmd)
+ {
+ machine_shutdown();
+
+@@ -519,12 +519,6 @@ unsigned long get_wchan(struct task_stru
+ return 0;
+ }
+
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long range_end = mm->brk + 0x02000000;
+- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+-}
+-
+ #ifdef CONFIG_MMU
+ /*
+ * The vectors page is always readable from user space for the
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/kernel/setup.c linux-3.4-pax/arch/arm/kernel/setup.c
+--- linux-3.4/arch/arm/kernel/setup.c 2012-05-21 11:32:32.267926291 +0200
++++ linux-3.4-pax/arch/arm/kernel/setup.c 2012-05-21 12:10:08.440048829 +0200
+@@ -111,13 +111,13 @@ struct processor processor __read_mostly
+ struct cpu_tlb_fns cpu_tlb __read_mostly;
+ #endif
+ #ifdef MULTI_USER
+-struct cpu_user_fns cpu_user __read_mostly;
++struct cpu_user_fns cpu_user __read_only;
+ #endif
+ #ifdef MULTI_CACHE
+-struct cpu_cache_fns cpu_cache __read_mostly;
++struct cpu_cache_fns cpu_cache __read_only;
+ #endif
+ #ifdef CONFIG_OUTER_CACHE
+-struct outer_cache_fns outer_cache __read_mostly;
++struct outer_cache_fns outer_cache __read_only;
+ EXPORT_SYMBOL(outer_cache);
+ #endif
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/lib/copy_from_user.S linux-3.4-pax/arch/arm/lib/copy_from_user.S
+--- linux-3.4/arch/arm/lib/copy_from_user.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/arm/lib/copy_from_user.S 2012-05-21 12:10:08.440048829 +0200
+@@ -16,7 +16,7 @@
+ /*
+ * Prototype:
+ *
+- * size_t __copy_from_user(void *to, const void *from, size_t n)
++ * size_t ___copy_from_user(void *to, const void *from, size_t n)
+ *
+ * Purpose:
+ *
+@@ -84,11 +84,11 @@
+
+ .text
+
+-ENTRY(__copy_from_user)
++ENTRY(___copy_from_user)
+
+ #include "copy_template.S"
+
+-ENDPROC(__copy_from_user)
++ENDPROC(___copy_from_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/lib/copy_page.S linux-3.4-pax/arch/arm/lib/copy_page.S
+--- linux-3.4/arch/arm/lib/copy_page.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/arm/lib/copy_page.S 2012-05-21 12:10:08.444048830 +0200
+@@ -10,6 +10,7 @@
+ * ASM optimised string functions
+ */
+ #include <linux/linkage.h>
++#include <linux/const.h>
+ #include <asm/assembler.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/cache.h>
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/lib/copy_to_user.S linux-3.4-pax/arch/arm/lib/copy_to_user.S
+--- linux-3.4/arch/arm/lib/copy_to_user.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/arm/lib/copy_to_user.S 2012-05-21 12:10:08.444048830 +0200
+@@ -16,7 +16,7 @@
+ /*
+ * Prototype:
+ *
+- * size_t __copy_to_user(void *to, const void *from, size_t n)
++ * size_t ___copy_to_user(void *to, const void *from, size_t n)
+ *
+ * Purpose:
+ *
+@@ -88,11 +88,11 @@
+ .text
+
+ ENTRY(__copy_to_user_std)
+-WEAK(__copy_to_user)
++WEAK(___copy_to_user)
+
+ #include "copy_template.S"
+
+-ENDPROC(__copy_to_user)
++ENDPROC(___copy_to_user)
+ ENDPROC(__copy_to_user_std)
+
+ .pushsection .fixup,"ax"
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/lib/uaccess.S linux-3.4-pax/arch/arm/lib/uaccess.S
+--- linux-3.4/arch/arm/lib/uaccess.S 2012-03-19 10:38:39.916050885 +0100
++++ linux-3.4-pax/arch/arm/lib/uaccess.S 2012-05-21 12:10:08.448048830 +0200
+@@ -20,7 +20,7 @@
+
+ #define PAGE_SHIFT 12
+
+-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
++/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
+ * Purpose : copy a block to user memory from kernel memory
+ * Params : to - user memory
+ * : from - kernel memory
+@@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ M
+ sub r2, r2, ip
+ b .Lc2u_dest_aligned
+
+-ENTRY(__copy_to_user)
++ENTRY(___copy_to_user)
+ stmfd sp!, {r2, r4 - r7, lr}
+ cmp r2, #4
+ blt .Lc2u_not_enough
+@@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ M
+ ldrgtb r3, [r1], #0
+ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+-ENDPROC(__copy_to_user)
++ENDPROC(___copy_to_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+ 9001: ldmfd sp!, {r0, r4 - r7, pc}
+ .popsection
+
+-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
++/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
+ * Purpose : copy a block from user memory to kernel memory
+ * Params : to - kernel memory
+ * : from - user memory
+@@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ M
+ sub r2, r2, ip
+ b .Lcfu_dest_aligned
+
+-ENTRY(__copy_from_user)
++ENTRY(___copy_from_user)
+ stmfd sp!, {r0, r2, r4 - r7, lr}
+ cmp r2, #4
+ blt .Lcfu_not_enough
+@@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ M
+ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+-ENDPROC(__copy_from_user)
++ENDPROC(___copy_from_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/lib/uaccess_with_memcpy.c linux-3.4-pax/arch/arm/lib/uaccess_with_memcpy.c
+--- linux-3.4/arch/arm/lib/uaccess_with_memcpy.c 2012-01-08 19:47:31.451473950 +0100
++++ linux-3.4-pax/arch/arm/lib/uaccess_with_memcpy.c 2012-05-21 12:10:08.448048830 +0200
+@@ -104,7 +104,7 @@ out:
+ }
+
+ unsigned long
+-__copy_to_user(void __user *to, const void *from, unsigned long n)
++___copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ /*
+ * This test is stubbed out of the main function above to keep
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/mach-omap2/board-n8x0.c linux-3.4-pax/arch/arm/mach-omap2/board-n8x0.c
+--- linux-3.4/arch/arm/mach-omap2/board-n8x0.c 2012-05-21 11:32:36.647926529 +0200
++++ linux-3.4-pax/arch/arm/mach-omap2/board-n8x0.c 2012-05-21 12:10:08.448048830 +0200
+@@ -596,7 +596,7 @@ static int n8x0_menelaus_late_init(struc
+ }
+ #endif
+
+-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
++static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
+ .late_init = n8x0_menelaus_late_init,
+ };
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/mm/fault.c linux-3.4-pax/arch/arm/mm/fault.c
+--- linux-3.4/arch/arm/mm/fault.c 2012-05-21 11:32:43.919926925 +0200
++++ linux-3.4-pax/arch/arm/mm/fault.c 2012-05-21 12:10:08.452048830 +0200
+@@ -174,6 +174,13 @@ __do_user_fault(struct task_struct *tsk,
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (fsr & FSR_LNX_PF) {
++ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ tsk->thread.address = addr;
+ tsk->thread.error_code = fsr;
+ tsk->thread.trap_no = 14;
+@@ -397,6 +404,33 @@ do_page_fault(unsigned long addr, unsign
+ }
+ #endif /* CONFIG_MMU */
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (__force unsigned char __user *)pc+i))
++ printk(KERN_CONT "?? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++
++ printk(KERN_ERR "PAX: bytes at SP-4: ");
++ for (i = -1; i < 20; i++) {
++ unsigned long c;
++ if (get_user(c, (__force unsigned long __user *)sp+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08lx ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * First Level Translation Fault Handler
+ *
+@@ -577,6 +611,20 @@ do_PrefetchAbort(unsigned long addr, uns
+ const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
+ struct siginfo info;
+
++#ifdef CONFIG_PAX_REFCOUNT
++ if (fsr_fs(ifsr) == 2) {
++ unsigned int bkpt;
++
++ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
++ current->thread.error_code = ifsr;
++ current->thread.trap_no = 0;
++ pax_report_refcount_overflow(regs);
++ fixup_exception(regs);
++ return;
++ }
++ }
++#endif
++
+ if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
+ return;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/mm/mmap.c linux-3.4-pax/arch/arm/mm/mmap.c
+--- linux-3.4/arch/arm/mm/mmap.c 2012-03-19 10:38:49.092050065 +0100
++++ linux-3.4-pax/arch/arm/mm/mmap.c 2012-05-21 12:10:08.456048830 +0200
+@@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = mm->mmap_base;
+- mm->cached_hole_size = 0;
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
+ }
+
+ full_search:
+@@ -124,14 +127,14 @@ full_search:
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_str
+
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/plat-samsung/include/plat/dma-ops.h linux-3.4-pax/arch/arm/plat-samsung/include/plat/dma-ops.h
+--- linux-3.4/arch/arm/plat-samsung/include/plat/dma-ops.h 2012-03-19 10:38:49.736050362 +0100
++++ linux-3.4-pax/arch/arm/plat-samsung/include/plat/dma-ops.h 2012-05-21 12:10:08.456048830 +0200
+@@ -43,7 +43,7 @@ struct samsung_dma_ops {
+ int (*started)(unsigned ch);
+ int (*flush)(unsigned ch);
+ int (*stop)(unsigned ch);
+-};
++} __no_const;
+
+ extern void *samsung_dmadev_get_ops(void);
+ extern void *s3c_dma_get_ops(void);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/arm/plat-samsung/include/plat/ehci.h linux-3.4-pax/arch/arm/plat-samsung/include/plat/ehci.h
+--- linux-3.4/arch/arm/plat-samsung/include/plat/ehci.h 2012-01-08 19:47:41.215473428 +0100
++++ linux-3.4-pax/arch/arm/plat-samsung/include/plat/ehci.h 2012-05-21 12:10:08.460048831 +0200
+@@ -14,7 +14,7 @@
+ struct s5p_ehci_platdata {
+ int (*phy_init)(struct platform_device *pdev, int type);
+ int (*phy_exit)(struct platform_device *pdev, int type);
+-};
++} __no_const;
+
+ extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/avr32/include/asm/elf.h linux-3.4-pax/arch/avr32/include/asm/elf.h
+--- linux-3.4/arch/avr32/include/asm/elf.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/avr32/include/asm/elf.h 2012-05-21 12:10:08.460048831 +0200
+@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00001000UL
++
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/avr32/include/asm/kmap_types.h linux-3.4-pax/arch/avr32/include/asm/kmap_types.h
+--- linux-3.4/arch/avr32/include/asm/kmap_types.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/avr32/include/asm/kmap_types.h 2012-05-21 12:10:08.464048831 +0200
+@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
+ D(11) KM_IRQ1,
+ D(12) KM_SOFTIRQ0,
+ D(13) KM_SOFTIRQ1,
+-D(14) KM_TYPE_NR
++D(14) KM_CLEARPAGE,
++D(15) KM_TYPE_NR
+ };
+
+ #undef D
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/avr32/mm/fault.c linux-3.4-pax/arch/avr32/mm/fault.c
+--- linux-3.4/arch/avr32/mm/fault.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/avr32/mm/fault.c 2012-05-21 12:10:08.464048831 +0200
+@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
+
+ int exception_trace = 1;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (unsigned char *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address and the
+ * problem, and then passes it off to one of the appropriate routines.
+@@ -156,6 +173,16 @@ bad_area:
+ up_read(&mm->mmap_sem);
+
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ if (exception_trace && printk_ratelimit())
+ printk("%s%s[%d]: segfault at %08lx pc %08lx "
+ "sp %08lx ecr %lu\n",
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/frv/include/asm/atomic.h linux-3.4-pax/arch/frv/include/asm/atomic.h
+--- linux-3.4/arch/frv/include/asm/atomic.h 2012-05-21 11:32:47.211927102 +0200
++++ linux-3.4-pax/arch/frv/include/asm/atomic.h 2012-05-21 12:10:08.468048831 +0200
+@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64
+ #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
+ #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+ int c, old;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/frv/include/asm/kmap_types.h linux-3.4-pax/arch/frv/include/asm/kmap_types.h
+--- linux-3.4/arch/frv/include/asm/kmap_types.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/frv/include/asm/kmap_types.h 2012-05-21 12:10:08.468048831 +0200
+@@ -23,6 +23,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/frv/mm/elf-fdpic.c linux-3.4-pax/arch/frv/mm/elf-fdpic.c
+--- linux-3.4/arch/frv/mm/elf-fdpic.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/frv/mm/elf-fdpic.c 2012-05-21 12:10:08.468048831 +0200
+@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(current->mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ goto success;
+ }
+
+@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+- if (addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ goto success;
+ addr = vma->vm_end;
+ }
+@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+- if (addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ goto success;
+ addr = vma->vm_end;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/ia64/include/asm/atomic.h linux-3.4-pax/arch/ia64/include/asm/atomic.h
+--- linux-3.4/arch/ia64/include/asm/atomic.h 2012-05-21 11:32:47.655927127 +0200
++++ linux-3.4-pax/arch/ia64/include/asm/atomic.h 2012-05-21 12:10:08.472048831 +0200
+@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64
+ #define atomic64_inc(v) atomic64_add(1, (v))
+ #define atomic64_dec(v) atomic64_sub(1, (v))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ /* Atomic operations are already serializing */
+ #define smp_mb__before_atomic_dec() barrier()
+ #define smp_mb__after_atomic_dec() barrier()
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/ia64/include/asm/elf.h linux-3.4-pax/arch/ia64/include/asm/elf.h
+--- linux-3.4/arch/ia64/include/asm/elf.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/ia64/include/asm/elf.h 2012-05-21 12:10:08.472048831 +0200
+@@ -42,6 +42,13 @@
+ */
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#endif
++
+ #define PT_IA_64_UNWIND 0x70000001
+
+ /* IA-64 relocations: */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/ia64/include/asm/pgalloc.h linux-3.4-pax/arch/ia64/include/asm/pgalloc.h
+--- linux-3.4/arch/ia64/include/asm/pgalloc.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/ia64/include/asm/pgalloc.h 2012-05-21 12:10:08.476048831 +0200
+@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t
+ pgd_val(*pgd_entry) = __pa(pud);
+ }
+
++static inline void
++pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
++{
++ pgd_populate(mm, pgd_entry, pud);
++}
++
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+ return quicklist_alloc(0, GFP_KERNEL, NULL);
+@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t
+ pud_val(*pud_entry) = __pa(pmd);
+ }
+
++static inline void
++pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
++{
++ pud_populate(mm, pud_entry, pmd);
++}
++
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+ return quicklist_alloc(0, GFP_KERNEL, NULL);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/ia64/include/asm/pgtable.h linux-3.4-pax/arch/ia64/include/asm/pgtable.h
+--- linux-3.4/arch/ia64/include/asm/pgtable.h 2012-05-21 11:32:47.763927133 +0200
++++ linux-3.4-pax/arch/ia64/include/asm/pgtable.h 2012-05-21 12:10:08.476048831 +0200
+@@ -12,7 +12,7 @@
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+-
++#include <linux/const.h>
+ #include <asm/mman.h>
+ #include <asm/page.h>
+ #include <asm/processor.h>
+@@ -142,6 +142,17 @@
+ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++# define PAGE_COPY_NOEXEC PAGE_COPY
++#endif
++
+ #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+ #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
+ #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/ia64/include/asm/spinlock.h linux-3.4-pax/arch/ia64/include/asm/spinlock.h
+--- linux-3.4/arch/ia64/include/asm/spinlock.h 2012-05-21 11:32:47.791927134 +0200
++++ linux-3.4-pax/arch/ia64/include/asm/spinlock.h 2012-05-21 12:10:08.480048832 +0200
+@@ -71,7 +71,7 @@ static __always_inline void __ticket_spi
+ unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
+
+ asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
++ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
+ }
+
+ static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/ia64/include/asm/uaccess.h linux-3.4-pax/arch/ia64/include/asm/uaccess.h
+--- linux-3.4/arch/ia64/include/asm/uaccess.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/ia64/include/asm/uaccess.h 2012-05-21 12:10:08.480048832 +0200
+@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
+ const void *__cu_from = (from); \
+ long __cu_len = (n); \
+ \
+- if (__access_ok(__cu_to, __cu_len, get_fs())) \
++ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
+ __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
+ __cu_len; \
+ })
+@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
+ long __cu_len = (n); \
+ \
+ __chk_user_ptr(__cu_from); \
+- if (__access_ok(__cu_from, __cu_len, get_fs())) \
++ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
+ __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
+ __cu_len; \
+ })
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/ia64/kernel/module.c linux-3.4-pax/arch/ia64/kernel/module.c
+--- linux-3.4/arch/ia64/kernel/module.c 2011-10-24 12:48:23.459091923 +0200
++++ linux-3.4-pax/arch/ia64/kernel/module.c 2012-05-21 12:10:08.484048832 +0200
+@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
+ void
+ module_free (struct module *mod, void *module_region)
+ {
+- if (mod && mod->arch.init_unw_table &&
+- module_region == mod->module_init) {
++ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
+ unw_remove_unwind_table(mod->arch.init_unw_table);
+ mod->arch.init_unw_table = NULL;
+ }
+@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
+ }
+
+ static inline int
++in_init_rx (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
++}
++
++static inline int
++in_init_rw (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
++}
++
++static inline int
+ in_init (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->module_init < mod->init_size;
++ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
++}
++
++static inline int
++in_core_rx (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
++}
++
++static inline int
++in_core_rw (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
+ }
+
+ static inline int
+ in_core (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->module_core < mod->core_size;
++ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
+ }
+
+ static inline int
+@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_
+ break;
+
+ case RV_BDREL:
+- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
++ if (in_init_rx(mod, val))
++ val -= (uint64_t) mod->module_init_rx;
++ else if (in_init_rw(mod, val))
++ val -= (uint64_t) mod->module_init_rw;
++ else if (in_core_rx(mod, val))
++ val -= (uint64_t) mod->module_core_rx;
++ else if (in_core_rw(mod, val))
++ val -= (uint64_t) mod->module_core_rw;
+ break;
+
+ case RV_LTV:
+@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
+ * addresses have been selected...
+ */
+ uint64_t gp;
+- if (mod->core_size > MAX_LTOFF)
++ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
+ /*
+ * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
+ * at the end of the module.
+ */
+- gp = mod->core_size - MAX_LTOFF / 2;
++ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
+ else
+- gp = mod->core_size / 2;
+- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
++ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
++ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
+ mod->arch.gp = gp;
+ DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/ia64/kernel/sys_ia64.c linux-3.4-pax/arch/ia64/kernel/sys_ia64.c
+--- linux-3.4/arch/ia64/kernel/sys_ia64.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/ia64/kernel/sys_ia64.c 2012-05-21 12:10:08.484048832 +0200
+@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
+ if (REGION_NUMBER(addr) == RGN_HPAGE)
+ addr = 0;
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ addr = mm->free_area_cache;
++ else
++#endif
++
+ if (!addr)
+ addr = mm->free_area_cache;
+
+@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
+- if (start_addr != TASK_UNMAPPED_BASE) {
++ if (start_addr != mm->mmap_base) {
+ /* Start a new search --- just in case we missed some holes. */
+- addr = TASK_UNMAPPED_BASE;
++ addr = mm->mmap_base;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* Remember the address where we stopped this search: */
+ mm->free_area_cache = addr + len;
+ return addr;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/ia64/kernel/vmlinux.lds.S linux-3.4-pax/arch/ia64/kernel/vmlinux.lds.S
+--- linux-3.4/arch/ia64/kernel/vmlinux.lds.S 2012-05-21 11:32:48.295927161 +0200
++++ linux-3.4-pax/arch/ia64/kernel/vmlinux.lds.S 2012-05-21 12:10:08.488048832 +0200
+@@ -198,7 +198,7 @@ SECTIONS {
+ /* Per-cpu data: */
+ . = ALIGN(PERCPU_PAGE_SIZE);
+ PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
+- __phys_per_cpu_start = __per_cpu_load;
++ __phys_per_cpu_start = per_cpu_load;
+ /*
+ * ensure percpu data fits
+ * into percpu page size
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/ia64/mm/fault.c linux-3.4-pax/arch/ia64/mm/fault.c
+--- linux-3.4/arch/ia64/mm/fault.c 2012-05-21 11:32:48.307927162 +0200
++++ linux-3.4-pax/arch/ia64/mm/fault.c 2012-05-21 12:10:08.488048832 +0200
+@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
+ return pte_present(pte);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ void __kprobes
+ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
+ {
+@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
+ mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
+ | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
+
+- if ((vma->vm_flags & mask) != mask)
++ if ((vma->vm_flags & mask) != mask) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
+
++ }
++
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+ * sure we exit gracefully rather than endlessly redo the
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/ia64/mm/hugetlbpage.c linux-3.4-pax/arch/ia64/mm/hugetlbpage.c
+--- linux-3.4/arch/ia64/mm/hugetlbpage.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/ia64/mm/hugetlbpage.c 2012-05-21 12:10:08.492048832 +0200
+@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+- if (!vmm || (addr + len) <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/ia64/mm/init.c linux-3.4-pax/arch/ia64/mm/init.c
+--- linux-3.4/arch/ia64/mm/init.c 2012-05-21 11:32:48.319927163 +0200
++++ linux-3.4-pax/arch/ia64/mm/init.c 2012-05-21 12:10:08.492048832 +0200
+@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
+ vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
+ vma->vm_end = vma->vm_start + PAGE_SIZE;
+ vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
++ vma->vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->mm->pax_flags & MF_PAX_MPROTECT)
++ vma->vm_flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ down_write(&current->mm->mmap_sem);
+ if (insert_vm_struct(current->mm, vma)) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/m32r/lib/usercopy.c linux-3.4-pax/arch/m32r/lib/usercopy.c
+--- linux-3.4/arch/m32r/lib/usercopy.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/m32r/lib/usercopy.c 2012-05-21 12:10:08.496048832 +0200
+@@ -14,6 +14,9 @@
+ unsigned long
+ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetch(from);
+ if (access_ok(VERIFY_WRITE, to, n))
+ __copy_user(to,from,n);
+@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
+ unsigned long
+ __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetchw(to);
+ if (access_ok(VERIFY_READ, from, n))
+ __copy_user_zeroing(to,from,n);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/mips/include/asm/atomic.h linux-3.4-pax/arch/mips/include/asm/atomic.h
+--- linux-3.4/arch/mips/include/asm/atomic.h 2012-05-21 11:32:50.479927280 +0200
++++ linux-3.4-pax/arch/mips/include/asm/atomic.h 2012-05-21 12:10:08.496048832 +0200
+@@ -21,6 +21,10 @@
+ #include <asm/cmpxchg.h>
+ #include <asm/war.h>
+
++#ifdef CONFIG_GENERIC_ATOMIC64
++#include <asm-generic/atomic64.h>
++#endif
++
+ #define ATOMIC_INIT(i) { (i) }
+
+ /*
+@@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unles
+ */
+ #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* CONFIG_64BIT */
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/mips/include/asm/elf.h linux-3.4-pax/arch/mips/include/asm/elf.h
+--- linux-3.4/arch/mips/include/asm/elf.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/mips/include/asm/elf.h 2012-05-21 12:10:08.500048833 +0200
+@@ -372,13 +372,16 @@ extern const char *__elf_platform;
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+ #endif
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+ struct linux_binprm;
+ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+
+-struct mm_struct;
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif /* _ASM_ELF_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/mips/include/asm/exec.h linux-3.4-pax/arch/mips/include/asm/exec.h
+--- linux-3.4/arch/mips/include/asm/exec.h 2012-05-21 11:32:50.519927282 +0200
++++ linux-3.4-pax/arch/mips/include/asm/exec.h 2012-05-21 12:10:08.500048833 +0200
+@@ -12,6 +12,6 @@
+ #ifndef _ASM_EXEC_H
+ #define _ASM_EXEC_H
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ #endif /* _ASM_EXEC_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/mips/include/asm/page.h linux-3.4-pax/arch/mips/include/asm/page.h
+--- linux-3.4/arch/mips/include/asm/page.h 2012-03-19 10:38:52.780050198 +0100
++++ linux-3.4-pax/arch/mips/include/asm/page.h 2012-05-21 12:10:08.504048833 +0200
+@@ -98,7 +98,7 @@ extern void copy_user_highpage(struct pa
+ #ifdef CONFIG_CPU_MIPS32
+ typedef struct { unsigned long pte_low, pte_high; } pte_t;
+ #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
++ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
+ #else
+ typedef struct { unsigned long long pte; } pte_t;
+ #define pte_val(x) ((x).pte)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/mips/include/asm/pgalloc.h linux-3.4-pax/arch/mips/include/asm/pgalloc.h
+--- linux-3.4/arch/mips/include/asm/pgalloc.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/mips/include/asm/pgalloc.h 2012-05-21 12:10:08.504048833 +0200
+@@ -37,6 +37,11 @@ static inline void pud_populate(struct m
+ {
+ set_pud(pud, __pud((unsigned long)pmd));
+ }
++
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
+ #endif
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/mips/kernel/binfmt_elfn32.c linux-3.4-pax/arch/mips/kernel/binfmt_elfn32.c
+--- linux-3.4/arch/mips/kernel/binfmt_elfn32.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/mips/kernel/binfmt_elfn32.c 2012-05-21 12:10:08.504048833 +0200
+@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+ #include <linux/elfcore.h>
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/mips/kernel/binfmt_elfo32.c linux-3.4-pax/arch/mips/kernel/binfmt_elfo32.c
+--- linux-3.4/arch/mips/kernel/binfmt_elfo32.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/mips/kernel/binfmt_elfo32.c 2012-05-21 12:10:08.508048833 +0200
+@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/mips/kernel/process.c linux-3.4-pax/arch/mips/kernel/process.c
+--- linux-3.4/arch/mips/kernel/process.c 2012-05-21 11:32:50.731927294 +0200
++++ linux-3.4-pax/arch/mips/kernel/process.c 2012-05-21 12:10:08.508048833 +0200
+@@ -480,15 +480,3 @@ unsigned long get_wchan(struct task_stru
+ out:
+ return pc;
+ }
+-
+-/*
+- * Don't forget that the stack pointer must be aligned on a 8 bytes
+- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
+- */
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+-
+- return sp & ALMASK;
+-}
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/mips/mm/fault.c linux-3.4-pax/arch/mips/mm/fault.c
+--- linux-3.4/arch/mips/mm/fault.c 2012-05-21 11:32:50.915927304 +0200
++++ linux-3.4-pax/arch/mips/mm/fault.c 2012-05-21 12:10:08.512048833 +0200
+@@ -27,6 +27,23 @@
+ #include <asm/highmem.h> /* For VMALLOC_END */
+ #include <linux/kdebug.h>
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/mips/mm/mmap.c linux-3.4-pax/arch/mips/mm/mmap.c
+--- linux-3.4/arch/mips/mm/mmap.c 2011-10-24 12:48:24.319091873 +0200
++++ linux-3.4-pax/arch/mips/mm/mmap.c 2012-05-21 12:10:08.512048833 +0200
+@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_a
+ do_color_align = 1;
+
+ /* requesting a specific address */
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_a
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_a
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = vma->vm_end;
+ if (do_color_align)
+@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_a
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr - len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* cache the address as a hint for next time */
+ return mm->free_area_cache = addr - len;
+ }
+@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_a
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* cache the address as a hint for next time */
+ return mm->free_area_cache = addr;
+ }
+@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_str
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+ }
+-
+-static inline unsigned long brk_rnd(void)
+-{
+- unsigned long rnd = get_random_int();
+-
+- rnd = rnd << PAGE_SHIFT;
+- /* 8MB for 32bit, 256MB for 64bit */
+- if (TASK_IS_32BIT_ADDR)
+- rnd = rnd & 0x7ffffful;
+- else
+- rnd = rnd & 0xffffffful;
+-
+- return rnd;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long base = mm->brk;
+- unsigned long ret;
+-
+- ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+-
+- return ret;
+-}
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/parisc/include/asm/atomic.h linux-3.4-pax/arch/parisc/include/asm/atomic.h
+--- linux-3.4/arch/parisc/include/asm/atomic.h 2012-05-21 11:32:51.427927332 +0200
++++ linux-3.4-pax/arch/parisc/include/asm/atomic.h 2012-05-21 12:10:08.516048834 +0200
+@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unles
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* !CONFIG_64BIT */
+
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/parisc/include/asm/elf.h linux-3.4-pax/arch/parisc/include/asm/elf.h
+--- linux-3.4/arch/parisc/include/asm/elf.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/parisc/include/asm/elf.h 2012-05-21 12:10:08.516048834 +0200
+@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/parisc/include/asm/pgalloc.h linux-3.4-pax/arch/parisc/include/asm/pgalloc.h
+--- linux-3.4/arch/parisc/include/asm/pgalloc.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/parisc/include/asm/pgalloc.h 2012-05-21 12:10:08.520048834 +0200
+@@ -61,6 +61,11 @@ static inline void pgd_populate(struct m
+ (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
+ }
+
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
++{
++ pgd_populate(mm, pgd, pmd);
++}
++
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+ {
+ pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
+@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_st
+ #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
+ #define pmd_free(mm, x) do { } while (0)
+ #define pgd_populate(mm, pmd, pte) BUG()
++#define pgd_populate_kernel(mm, pmd, pte) BUG()
+
+ #endif
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/parisc/include/asm/pgtable.h linux-3.4-pax/arch/parisc/include/asm/pgtable.h
+--- linux-3.4/arch/parisc/include/asm/pgtable.h 2012-05-21 11:32:51.471927334 +0200
++++ linux-3.4-pax/arch/parisc/include/asm/pgtable.h 2012-05-21 12:10:08.520048834 +0200
+@@ -212,6 +212,17 @@ struct vm_area_struct;
+ #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+ #define PAGE_COPY PAGE_EXECREAD
+ #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+ #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
+ #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/parisc/kernel/module.c linux-3.4-pax/arch/parisc/kernel/module.c
+--- linux-3.4/arch/parisc/kernel/module.c 2011-10-24 12:48:24.567091865 +0200
++++ linux-3.4-pax/arch/parisc/kernel/module.c 2012-05-21 12:10:08.524048834 +0200
+@@ -98,16 +98,38 @@
+
+ /* three functions to determine where in the module core
+ * or init pieces the location is */
++static inline int in_init_rx(struct module *me, void *loc)
++{
++ return (loc >= me->module_init_rx &&
++ loc < (me->module_init_rx + me->init_size_rx));
++}
++
++static inline int in_init_rw(struct module *me, void *loc)
++{
++ return (loc >= me->module_init_rw &&
++ loc < (me->module_init_rw + me->init_size_rw));
++}
++
+ static inline int in_init(struct module *me, void *loc)
+ {
+- return (loc >= me->module_init &&
+- loc <= (me->module_init + me->init_size));
++ return in_init_rx(me, loc) || in_init_rw(me, loc);
++}
++
++static inline int in_core_rx(struct module *me, void *loc)
++{
++ return (loc >= me->module_core_rx &&
++ loc < (me->module_core_rx + me->core_size_rx));
++}
++
++static inline int in_core_rw(struct module *me, void *loc)
++{
++ return (loc >= me->module_core_rw &&
++ loc < (me->module_core_rw + me->core_size_rw));
+ }
+
+ static inline int in_core(struct module *me, void *loc)
+ {
+- return (loc >= me->module_core &&
+- loc <= (me->module_core + me->core_size));
++ return in_core_rx(me, loc) || in_core_rw(me, loc);
+ }
+
+ static inline int in_local(struct module *me, void *loc)
+@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
+ }
+
+ /* align things a bit */
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.got_offset = me->core_size;
+- me->core_size += gots * sizeof(struct got_entry);
+-
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.fdesc_offset = me->core_size;
+- me->core_size += fdescs * sizeof(Elf_Fdesc);
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += gots * sizeof(struct got_entry);
++
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.fdesc_offset = me->core_size_rw;
++ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
+
+ me->arch.got_max = gots;
+ me->arch.fdesc_max = fdescs;
+@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
+
+ BUG_ON(value == 0);
+
+- got = me->module_core + me->arch.got_offset;
++ got = me->module_core_rw + me->arch.got_offset;
+ for (i = 0; got[i].addr; i++)
+ if (got[i].addr == value)
+ goto out;
+@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
+ #ifdef CONFIG_64BIT
+ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+ {
+- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
++ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
+
+ if (!value) {
+ printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
+@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
+
+ /* Create new one */
+ fdesc->addr = value;
+- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+ return (Elf_Addr)fdesc;
+ }
+ #endif /* CONFIG_64BIT */
+@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
+
+ table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
+ end = table + sechdrs[me->arch.unwind_section].sh_size;
+- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+
+ DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
+ me->arch.unwind_section, table, end, gp);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/parisc/kernel/sys_parisc.c linux-3.4-pax/arch/parisc/kernel/sys_parisc.c
+--- linux-3.4/arch/parisc/kernel/sys_parisc.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/parisc/kernel/sys_parisc.c 2012-05-21 12:10:08.524048834 +0200
+@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = vma->vm_end;
+ }
+@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
+ if (addr < vma->vm_end) /* handle wraparound */
+@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
+ if (flags & MAP_FIXED)
+ return addr;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ if (filp) {
+ addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/parisc/kernel/traps.c linux-3.4-pax/arch/parisc/kernel/traps.c
+--- linux-3.4/arch/parisc/kernel/traps.c 2012-05-21 11:32:51.563927339 +0200
++++ linux-3.4-pax/arch/parisc/kernel/traps.c 2012-05-21 12:10:08.528048834 +0200
+@@ -732,9 +732,7 @@ void notrace handle_interruption(int cod
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm,regs->iaoq[0]);
+- if (vma && (regs->iaoq[0] >= vma->vm_start)
+- && (vma->vm_flags & VM_EXEC)) {
+-
++ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
+ fault_address = regs->iaoq[0];
+ fault_space = regs->iasq[0];
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/parisc/mm/fault.c linux-3.4-pax/arch/parisc/mm/fault.c
+--- linux-3.4/arch/parisc/mm/fault.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/parisc/mm/fault.c 2012-05-21 12:10:08.528048834 +0200
+@@ -15,6 +15,7 @@
+ #include <linux/sched.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/unistd.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/traps.h>
+@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
+ static unsigned long
+ parisc_acctyp(unsigned long code, unsigned int inst)
+ {
+- if (code == 6 || code == 16)
++ if (code == 6 || code == 7 || code == 16)
+ return VM_EXEC;
+
+ switch (inst & 0xf0000000) {
+@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when rt_sigreturn trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int bl, depwi;
++
++ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
++
++ if (err)
++ break;
++
++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
++
++ err = get_user(ldw, (unsigned int *)addr);
++ err |= get_user(bv, (unsigned int *)(addr+4));
++ err |= get_user(ldw2, (unsigned int *)(addr+8));
++
++ if (err)
++ break;
++
++ if (ldw == 0x0E801096U &&
++ bv == 0xEAC0C000U &&
++ ldw2 == 0x0E881095U)
++ {
++ unsigned int resolver, map;
++
++ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
++ if (err)
++ break;
++
++ regs->gr[20] = instruction_pointer(regs)+8;
++ regs->gr[21] = map;
++ regs->gr[22] = resolver;
++ regs->iaoq[0] = resolver | 3UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++
++#ifndef CONFIG_PAX_EMUSIGRT
++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++ return 1;
++#endif
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned int ldi1, ldi2, bel, nop;
++
++ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
++
++ if (err)
++ break;
++
++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
++ ldi2 == 0x3414015AU &&
++ bel == 0xE4008200U &&
++ nop == 0x08000240U)
++ {
++ regs->gr[25] = (ldi1 & 2) >> 1;
++ regs->gr[20] = __NR_rt_sigreturn;
++ regs->gr[31] = regs->iaoq[1] + 16;
++ regs->sr[0] = regs->iasq[1];
++ regs->iaoq[0] = 0x100UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ regs->iasq[0] = regs->sr[2];
++ regs->iasq[1] = regs->sr[2];
++ return 2;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ int fixup_exception(struct pt_regs *regs)
+ {
+ const struct exception_table_entry *fix;
+@@ -192,8 +303,33 @@ good_area:
+
+ acc_type = parisc_acctyp(code,regs->iir);
+
+- if ((vma->vm_flags & acc_type) != acc_type)
++ if ((vma->vm_flags & acc_type) != acc_type) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
++ (address & ~3UL) == instruction_pointer(regs))
++ {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
++ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/include/asm/atomic.h linux-3.4-pax/arch/powerpc/include/asm/atomic.h
+--- linux-3.4/arch/powerpc/include/asm/atomic.h 2012-05-21 11:32:51.991927362 +0200
++++ linux-3.4-pax/arch/powerpc/include/asm/atomic.h 2012-05-21 12:10:08.532048834 +0200
+@@ -522,6 +522,16 @@ static __inline__ long atomic64_inc_not_
+ return t1;
+ }
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* __powerpc64__ */
+
+ #endif /* __KERNEL__ */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/include/asm/elf.h linux-3.4-pax/arch/powerpc/include/asm/elf.h
+--- linux-3.4/arch/powerpc/include/asm/elf.h 2011-10-24 12:48:24.695091853 +0200
++++ linux-3.4-pax/arch/powerpc/include/asm/elf.h 2012-05-21 12:10:08.532048834 +0200
+@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-extern unsigned long randomize_et_dyn(unsigned long base);
+-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
++#define ELF_ET_DYN_BASE (0x20000000)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
++
++#ifdef __powerpc64__
++#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
++#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
++#else
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
++#endif
+
+ /*
+ * Our registers are always unsigned longs, whether we're a 32 bit
+@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
+ (0x7ff >> (PAGE_SHIFT - 12)) : \
+ (0x3ffff >> (PAGE_SHIFT - 12)))
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif /* __KERNEL__ */
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/include/asm/exec.h linux-3.4-pax/arch/powerpc/include/asm/exec.h
+--- linux-3.4/arch/powerpc/include/asm/exec.h 2012-05-21 11:32:52.067927366 +0200
++++ linux-3.4-pax/arch/powerpc/include/asm/exec.h 2012-05-21 12:10:08.536048835 +0200
+@@ -4,6 +4,6 @@
+ #ifndef _ASM_POWERPC_EXEC_H
+ #define _ASM_POWERPC_EXEC_H
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ #endif /* _ASM_POWERPC_EXEC_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/include/asm/kmap_types.h linux-3.4-pax/arch/powerpc/include/asm/kmap_types.h
+--- linux-3.4/arch/powerpc/include/asm/kmap_types.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/powerpc/include/asm/kmap_types.h 2012-05-21 12:10:08.536048835 +0200
+@@ -27,6 +27,7 @@ enum km_type {
+ KM_PPC_SYNC_PAGE,
+ KM_PPC_SYNC_ICACHE,
+ KM_KDB,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/include/asm/mman.h linux-3.4-pax/arch/powerpc/include/asm/mman.h
+--- linux-3.4/arch/powerpc/include/asm/mman.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/powerpc/include/asm/mman.h 2012-05-21 12:10:08.540048835 +0200
+@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
+ }
+ #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
+
+-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
++static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
+ {
+ return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/include/asm/page_64.h linux-3.4-pax/arch/powerpc/include/asm/page_64.h
+--- linux-3.4/arch/powerpc/include/asm/page_64.h 2012-03-19 10:38:54.240050122 +0100
++++ linux-3.4-pax/arch/powerpc/include/asm/page_64.h 2012-05-21 12:10:08.540048835 +0200
+@@ -146,15 +146,18 @@ do { \
+ * stack by default, so in the absence of a PT_GNU_STACK program header
+ * we turn execute permission off.
+ */
+-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_STACK_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifndef CONFIG_PAX_PAGEEXEC
+ #define VM_STACK_DEFAULT_FLAGS \
+ (is_32bit_task() ? \
+ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
++#endif
+
+ #include <asm-generic/getorder.h>
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/include/asm/page.h linux-3.4-pax/arch/powerpc/include/asm/page.h
+--- linux-3.4/arch/powerpc/include/asm/page.h 2012-03-19 10:38:54.236050122 +0100
++++ linux-3.4-pax/arch/powerpc/include/asm/page.h 2012-05-21 12:10:08.540048835 +0200
+@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
+ * and needs to be executable. This means the whole heap ends
+ * up being executable.
+ */
+-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_DATA_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
+ #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
+ #endif
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ /*
+ * Use the top bit of the higher-level page table entries to indicate whether
+ * the entries we point to contain hugepages. This works because we know that
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/include/asm/pgalloc-64.h linux-3.4-pax/arch/powerpc/include/asm/pgalloc-64.h
+--- linux-3.4/arch/powerpc/include/asm/pgalloc-64.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/powerpc/include/asm/pgalloc-64.h 2012-05-21 12:10:08.544048835 +0200
+@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_st
+ #ifndef CONFIG_PPC_64K_PAGES
+
+ #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
++#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
+
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+@@ -67,6 +68,11 @@ static inline void pud_populate(struct m
+ pud_set(pud, (unsigned long)pmd);
+ }
+
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
++
+ #define pmd_populate(mm, pmd, pte_page) \
+ pmd_populate_kernel(mm, pmd, page_address(pte_page))
+ #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
+@@ -76,6 +82,7 @@ static inline void pud_populate(struct m
+ #else /* CONFIG_PPC_64K_PAGES */
+
+ #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
++#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
+
+ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/include/asm/pgtable.h linux-3.4-pax/arch/powerpc/include/asm/pgtable.h
+--- linux-3.4/arch/powerpc/include/asm/pgtable.h 2012-03-19 10:38:54.248050122 +0100
++++ linux-3.4-pax/arch/powerpc/include/asm/pgtable.h 2012-05-21 12:10:08.544048835 +0200
+@@ -2,6 +2,7 @@
+ #define _ASM_POWERPC_PGTABLE_H
+ #ifdef __KERNEL__
+
++#include <linux/const.h>
+ #ifndef __ASSEMBLY__
+ #include <asm/processor.h> /* For TASK_SIZE */
+ #include <asm/mmu.h>
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/include/asm/pte-hash32.h linux-3.4-pax/arch/powerpc/include/asm/pte-hash32.h
+--- linux-3.4/arch/powerpc/include/asm/pte-hash32.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/powerpc/include/asm/pte-hash32.h 2012-05-21 12:10:08.548048835 +0200
+@@ -21,6 +21,7 @@
+ #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
+ #define _PAGE_USER 0x004 /* usermode access allowed */
+ #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
++#define _PAGE_EXEC _PAGE_GUARDED
+ #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+ #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+ #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/include/asm/reg.h linux-3.4-pax/arch/powerpc/include/asm/reg.h
+--- linux-3.4/arch/powerpc/include/asm/reg.h 2012-05-21 11:32:52.227927375 +0200
++++ linux-3.4-pax/arch/powerpc/include/asm/reg.h 2012-05-21 12:10:08.548048835 +0200
+@@ -212,6 +212,7 @@
+ #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
+ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
+ #define DSISR_NOHPTE 0x40000000 /* no translation found */
++#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
+ #define DSISR_PROTFAULT 0x08000000 /* protection fault */
+ #define DSISR_ISSTORE 0x02000000 /* access was a store */
+ #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/include/asm/uaccess.h linux-3.4-pax/arch/powerpc/include/asm/uaccess.h
+--- linux-3.4/arch/powerpc/include/asm/uaccess.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/powerpc/include/asm/uaccess.h 2012-05-21 12:10:08.552048836 +0200
+@@ -13,6 +13,8 @@
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
++
+ /*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+@@ -327,52 +329,6 @@ do { \
+ extern unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size);
+
+-#ifndef __powerpc64__
+-
+-static inline unsigned long copy_from_user(void *to,
+- const void __user *from, unsigned long n)
+-{
+- unsigned long over;
+-
+- if (access_ok(VERIFY_READ, from, n))
+- return __copy_tofrom_user((__force void __user *)to, from, n);
+- if ((unsigned long)from < TASK_SIZE) {
+- over = (unsigned long)from + n - TASK_SIZE;
+- return __copy_tofrom_user((__force void __user *)to, from,
+- n - over) + over;
+- }
+- return n;
+-}
+-
+-static inline unsigned long copy_to_user(void __user *to,
+- const void *from, unsigned long n)
+-{
+- unsigned long over;
+-
+- if (access_ok(VERIFY_WRITE, to, n))
+- return __copy_tofrom_user(to, (__force void __user *)from, n);
+- if ((unsigned long)to < TASK_SIZE) {
+- over = (unsigned long)to + n - TASK_SIZE;
+- return __copy_tofrom_user(to, (__force void __user *)from,
+- n - over) + over;
+- }
+- return n;
+-}
+-
+-#else /* __powerpc64__ */
+-
+-#define __copy_in_user(to, from, size) \
+- __copy_tofrom_user((to), (from), (size))
+-
+-extern unsigned long copy_from_user(void *to, const void __user *from,
+- unsigned long n);
+-extern unsigned long copy_to_user(void __user *to, const void *from,
+- unsigned long n);
+-extern unsigned long copy_in_user(void __user *to, const void __user *from,
+- unsigned long n);
+-
+-#endif /* __powerpc64__ */
+-
+ static inline unsigned long __copy_from_user_inatomic(void *to,
+ const void __user *from, unsigned long n)
+ {
+@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
+ if (ret == 0)
+ return 0;
+ }
++
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++
+ return __copy_tofrom_user((__force void __user *)to, from, n);
+ }
+
+@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
+ if (ret == 0)
+ return 0;
+ }
++
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++
+ return __copy_tofrom_user(to, (__force const void __user *)from, n);
+ }
+
+@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
+ return __copy_to_user_inatomic(to, from, size);
+ }
+
++#ifndef __powerpc64__
++
++static inline unsigned long __must_check copy_from_user(void *to,
++ const void __user *from, unsigned long n)
++{
++ unsigned long over;
++
++ if ((long)n < 0)
++ return n;
++
++ if (access_ok(VERIFY_READ, from, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ return __copy_tofrom_user((__force void __user *)to, from, n);
++ }
++ if ((unsigned long)from < TASK_SIZE) {
++ over = (unsigned long)from + n - TASK_SIZE;
++ if (!__builtin_constant_p(n - over))
++ check_object_size(to, n - over, false);
++ return __copy_tofrom_user((__force void __user *)to, from,
++ n - over) + over;
++ }
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to,
++ const void *from, unsigned long n)
++{
++ unsigned long over;
++
++ if ((long)n < 0)
++ return n;
++
++ if (access_ok(VERIFY_WRITE, to, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ return __copy_tofrom_user(to, (__force void __user *)from, n);
++ }
++ if ((unsigned long)to < TASK_SIZE) {
++ over = (unsigned long)to + n - TASK_SIZE;
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n - over, true);
++ return __copy_tofrom_user(to, (__force void __user *)from,
++ n - over) + over;
++ }
++ return n;
++}
++
++#else /* __powerpc64__ */
++
++#define __copy_in_user(to, from, size) \
++ __copy_tofrom_user((to), (from), (size))
++
++static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ if ((long)n < 0 || n > INT_MAX)
++ return n;
++
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++
++ if (likely(access_ok(VERIFY_READ, from, n)))
++ n = __copy_from_user(to, from, n);
++ else
++ memset(to, 0, n);
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ if ((long)n < 0 || n > INT_MAX)
++ return n;
++
++ if (likely(access_ok(VERIFY_WRITE, to, n))) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ n = __copy_to_user(to, from, n);
++ }
++ return n;
++}
++
++extern unsigned long copy_in_user(void __user *to, const void __user *from,
++ unsigned long n);
++
++#endif /* __powerpc64__ */
++
+ extern unsigned long __clear_user(void __user *addr, unsigned long size);
+
+ static inline unsigned long clear_user(void __user *addr, unsigned long size)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/kernel/exceptions-64e.S linux-3.4-pax/arch/powerpc/kernel/exceptions-64e.S
+--- linux-3.4/arch/powerpc/kernel/exceptions-64e.S 2012-05-21 11:32:52.351927382 +0200
++++ linux-3.4-pax/arch/powerpc/kernel/exceptions-64e.S 2012-05-21 12:10:08.556048836 +0200
+@@ -661,6 +661,7 @@ storage_fault_common:
+ std r14,_DAR(r1)
+ std r15,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
++ bl .save_nvgprs
+ mr r4,r14
+ mr r5,r15
+ ld r14,PACA_EXGEN+EX_R14(r13)
+@@ -669,8 +670,7 @@ storage_fault_common:
+ cmpdi r3,0
+ bne- 1f
+ b .ret_from_except_lite
+-1: bl .save_nvgprs
+- mr r5,r3
++1: mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ld r4,_DAR(r1)
+ bl .bad_page_fault
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/kernel/exceptions-64s.S linux-3.4-pax/arch/powerpc/kernel/exceptions-64s.S
+--- linux-3.4/arch/powerpc/kernel/exceptions-64s.S 2012-05-21 11:32:52.355927382 +0200
++++ linux-3.4-pax/arch/powerpc/kernel/exceptions-64s.S 2012-05-21 12:10:08.556048836 +0200
+@@ -890,10 +890,10 @@ handle_page_fault:
+ 11: ld r4,_DAR(r1)
+ ld r5,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
++ bl .save_nvgprs
+ bl .do_page_fault
+ cmpdi r3,0
+ beq+ 12f
+- bl .save_nvgprs
+ mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ lwz r4,_DAR(r1)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/kernel/module_32.c linux-3.4-pax/arch/powerpc/kernel/module_32.c
+--- linux-3.4/arch/powerpc/kernel/module_32.c 2011-10-24 12:48:24.903091847 +0200
++++ linux-3.4-pax/arch/powerpc/kernel/module_32.c 2012-05-21 12:10:08.560048836 +0200
+@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
+ me->arch.core_plt_section = i;
+ }
+ if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
+- printk("Module doesn't contain .plt or .init.plt sections.\n");
++ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
+ return -ENOEXEC;
+ }
+
+@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *locati
+
+ DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
+ /* Init, or core PLT? */
+- if (location >= mod->module_core
+- && location < mod->module_core + mod->core_size)
++ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
++ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
+ entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
+- else
++ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
++ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
+ entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
++ else {
++ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
++ return ~0UL;
++ }
+
+ /* Find this entry, or if that fails, the next avail. entry */
+ while (entry->jump[0]) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/kernel/process.c linux-3.4-pax/arch/powerpc/kernel/process.c
+--- linux-3.4/arch/powerpc/kernel/process.c 2012-05-21 11:32:52.531927391 +0200
++++ linux-3.4-pax/arch/powerpc/kernel/process.c 2012-05-21 12:10:08.560048836 +0200
+@@ -1282,58 +1282,3 @@ void thread_info_cache_init(void)
+ }
+
+ #endif /* THREAD_SHIFT < PAGE_SHIFT */
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+- return sp & ~0xf;
+-}
+-
+-static inline unsigned long brk_rnd(void)
+-{
+- unsigned long rnd = 0;
+-
+- /* 8MB for 32bit, 1GB for 64bit */
+- if (is_32bit_task())
+- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
+- else
+- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
+-
+- return rnd << PAGE_SHIFT;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long base = mm->brk;
+- unsigned long ret;
+-
+-#ifdef CONFIG_PPC_STD_MMU_64
+- /*
+- * If we are using 1TB segments and we are allowed to randomise
+- * the heap, we can put it above 1TB so it is backed by a 1TB
+- * segment. Otherwise the heap will be in the bottom 1TB
+- * which always uses 256MB segments and this may result in a
+- * performance penalty.
+- */
+- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
+- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
+-#endif
+-
+- ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+-
+- return ret;
+-}
+-
+-unsigned long randomize_et_dyn(unsigned long base)
+-{
+- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < base)
+- return base;
+-
+- return ret;
+-}
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/kernel/signal_32.c linux-3.4-pax/arch/powerpc/kernel/signal_32.c
+--- linux-3.4/arch/powerpc/kernel/signal_32.c 2012-05-21 11:32:52.607927395 +0200
++++ linux-3.4-pax/arch/powerpc/kernel/signal_32.c 2012-05-21 12:10:08.564048836 +0200
+@@ -861,7 +861,7 @@ int handle_rt_signal32(unsigned long sig
+ /* Save user registers on the stack */
+ frame = &rt_sf->uc.uc_mcontext;
+ addr = frame;
+- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
++ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+ if (save_user_regs(regs, frame, 0, 1))
+ goto badframe;
+ regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/kernel/signal_64.c linux-3.4-pax/arch/powerpc/kernel/signal_64.c
+--- linux-3.4/arch/powerpc/kernel/signal_64.c 2012-05-21 11:32:52.607927395 +0200
++++ linux-3.4-pax/arch/powerpc/kernel/signal_64.c 2012-05-21 12:10:08.568048836 +0200
+@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
+ current->thread.fpscr.val = 0;
+
+ /* Set up to return from userspace. */
+- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
++ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+ regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
+ } else {
+ err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/kernel/vdso.c linux-3.4-pax/arch/powerpc/kernel/vdso.c
+--- linux-3.4/arch/powerpc/kernel/vdso.c 2012-05-21 11:32:52.643927397 +0200
++++ linux-3.4-pax/arch/powerpc/kernel/vdso.c 2012-05-21 12:10:08.568048836 +0200
+@@ -34,6 +34,7 @@
+ #include <asm/firmware.h>
+ #include <asm/vdso.h>
+ #include <asm/vdso_datapage.h>
++#include <asm/mman.h>
+
+ #include "setup.h"
+
+@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct l
+ vdso_base = VDSO32_MBASE;
+ #endif
+
+- current->mm->context.vdso_base = 0;
++ current->mm->context.vdso_base = ~0UL;
+
+ /* vDSO has a problem and was disabled, just don't "enable" it for the
+ * process
+@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct l
+ vdso_base = get_unmapped_area(NULL, vdso_base,
+ (vdso_pages << PAGE_SHIFT) +
+ ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
+- 0, 0);
++ 0, MAP_PRIVATE | MAP_EXECUTABLE);
+ if (IS_ERR_VALUE(vdso_base)) {
+ rc = vdso_base;
+ goto fail_mmapsem;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/lib/usercopy_64.c linux-3.4-pax/arch/powerpc/lib/usercopy_64.c
+--- linux-3.4/arch/powerpc/lib/usercopy_64.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/powerpc/lib/usercopy_64.c 2012-05-21 12:10:08.572048837 +0200
+@@ -9,22 +9,6 @@
+ #include <linux/module.h>
+ #include <asm/uaccess.h>
+
+-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_READ, from, n)))
+- n = __copy_from_user(to, from, n);
+- else
+- memset(to, 0, n);
+- return n;
+-}
+-
+-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_WRITE, to, n)))
+- n = __copy_to_user(to, from, n);
+- return n;
+-}
+-
+ unsigned long copy_in_user(void __user *to, const void __user *from,
+ unsigned long n)
+ {
+@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
+ return n;
+ }
+
+-EXPORT_SYMBOL(copy_from_user);
+-EXPORT_SYMBOL(copy_to_user);
+ EXPORT_SYMBOL(copy_in_user);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/mm/fault.c linux-3.4-pax/arch/powerpc/mm/fault.c
+--- linux-3.4/arch/powerpc/mm/fault.c 2012-05-21 11:32:53.151927425 +0200
++++ linux-3.4-pax/arch/powerpc/mm/fault.c 2012-05-21 12:10:08.572048837 +0200
+@@ -32,6 +32,10 @@
+ #include <linux/perf_event.h>
+ #include <linux/magic.h>
+ #include <linux/ratelimit.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/unistd.h>
+
+ #include <asm/firmware.h>
+ #include <asm/page.h>
+@@ -68,6 +72,33 @@ static inline int notify_page_fault(stru
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->nip = fault address)
++ *
++ * returns 1 when task should be killed
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int __user *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * Check whether the instruction at regs->nip is a store using
+ * an update addressing form which will update r1.
+@@ -215,7 +246,7 @@ int __kprobes do_page_fault(struct pt_re
+ * indicate errors in DSISR but can validly be set in SRR1.
+ */
+ if (trap == 0x400)
+- error_code &= 0x48200000;
++ error_code &= 0x58200000;
+ else
+ is_write = error_code & DSISR_ISSTORE;
+ #else
+@@ -366,7 +397,7 @@ good_area:
+ * "undefined". Of those that can be set, this is the only
+ * one which seems bad.
+ */
+- if (error_code & 0x10000000)
++ if (error_code & DSISR_GUARDED)
+ /* Guarded storage error. */
+ goto bad_area;
+ #endif /* CONFIG_8xx */
+@@ -381,7 +412,7 @@ good_area:
+ * processors use the same I/D cache coherency mechanism
+ * as embedded.
+ */
+- if (error_code & DSISR_PROTFAULT)
++ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
+ goto bad_area;
+ #endif /* CONFIG_PPC_STD_MMU */
+
+@@ -463,6 +494,23 @@ bad_area:
+ bad_area_nosemaphore:
+ /* User mode accesses cause a SIGSEGV */
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++#ifdef CONFIG_PPC_STD_MMU
++ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
++#else
++ if (is_exec && regs->nip == address) {
++#endif
++ switch (pax_handle_fetch_fault(regs)) {
++ }
++
++ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ _exception(SIGSEGV, regs, code, address);
+ return 0;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/mm/mmap_64.c linux-3.4-pax/arch/powerpc/mm/mmap_64.c
+--- linux-3.4/arch/powerpc/mm/mmap_64.c 2012-03-19 10:38:54.676050099 +0100
++++ linux-3.4-pax/arch/powerpc/mm/mmap_64.c 2012-05-21 12:10:08.572048837 +0200
+@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/powerpc/mm/slice.c linux-3.4-pax/arch/powerpc/mm/slice.c
+--- linux-3.4/arch/powerpc/mm/slice.c 2012-01-08 19:47:45.479473201 +0100
++++ linux-3.4-pax/arch/powerpc/mm/slice.c 2012-05-21 12:10:08.572048837 +0200
+@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
+ if ((mm->task_size - len) < addr)
+ return 0;
+ vma = find_vma(mm, addr);
+- return (!vma || (addr + len) <= vma->vm_start);
++ return check_heap_stack_gap(vma, addr, len);
+ }
+
+ static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
+@@ -256,7 +256,7 @@ full_search:
+ addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
+ continue;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
+ }
+ }
+
+- addr = mm->mmap_base;
+- while (addr > len) {
++ if (mm->mmap_base < len)
++ addr = -ENOMEM;
++ else
++ addr = mm->mmap_base - len;
++
++ while (!IS_ERR_VALUE(addr)) {
+ /* Go down by chunk size */
+- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
++ addr = _ALIGN_DOWN(addr, 1ul << pshift);
+
+ /* Check for hit with different page size */
+ mask = slice_range_to_mask(addr, len);
+@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || (addr + len) <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* remember the address as a hint for next time */
+ if (use_cache)
+ mm->free_area_cache = addr;
+@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start;
++ addr = skip_heap_stack_gap(vma, len);
+ }
+
+ /*
+@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
+ if (fixed && addr > (mm->task_size - len))
+ return -EINVAL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
++ addr = 0;
++#endif
++
+ /* If hint, make sure it matches our alignment restrictions */
+ if (!fixed && addr) {
+ addr = _ALIGN_UP(addr, 1ul << pshift);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/s390/include/asm/atomic.h linux-3.4-pax/arch/s390/include/asm/atomic.h
+--- linux-3.4/arch/s390/include/asm/atomic.h 2012-05-21 11:32:54.099927477 +0200
++++ linux-3.4-pax/arch/s390/include/asm/atomic.h 2012-05-21 12:10:08.576048837 +0200
+@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_
+ #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #define smp_mb__before_atomic_dec() smp_mb()
+ #define smp_mb__after_atomic_dec() smp_mb()
+ #define smp_mb__before_atomic_inc() smp_mb()
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/s390/include/asm/elf.h linux-3.4-pax/arch/s390/include/asm/elf.h
+--- linux-3.4/arch/s390/include/asm/elf.h 2012-05-21 11:32:54.107927476 +0200
++++ linux-3.4-pax/arch/s390/include/asm/elf.h 2012-05-21 12:10:08.576048837 +0200
+@@ -161,8 +161,14 @@ extern unsigned int vdso_enabled;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-extern unsigned long randomize_et_dyn(unsigned long base);
+-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
++#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. */
+@@ -210,7 +216,4 @@ struct linux_binprm;
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+ int arch_setup_additional_pages(struct linux_binprm *, int);
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/s390/include/asm/exec.h linux-3.4-pax/arch/s390/include/asm/exec.h
+--- linux-3.4/arch/s390/include/asm/exec.h 2012-05-21 11:32:54.107927476 +0200
++++ linux-3.4-pax/arch/s390/include/asm/exec.h 2012-05-21 12:10:08.576048837 +0200
+@@ -7,6 +7,6 @@
+ #ifndef __ASM_EXEC_H
+ #define __ASM_EXEC_H
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ #endif /* __ASM_EXEC_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/s390/include/asm/uaccess.h linux-3.4-pax/arch/s390/include/asm/uaccess.h
+--- linux-3.4/arch/s390/include/asm/uaccess.h 2012-05-21 11:32:54.167927480 +0200
++++ linux-3.4-pax/arch/s390/include/asm/uaccess.h 2012-05-21 12:10:08.580048837 +0200
+@@ -236,6 +236,10 @@ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
+@@ -261,6 +265,9 @@ copy_to_user(void __user *to, const void
+ static inline unsigned long __must_check
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n) && (n <= 256))
+ return uaccess.copy_from_user_small(n, from, to);
+ else
+@@ -295,6 +302,10 @@ copy_from_user(void *to, const void __us
+ unsigned int sz = __compiletime_object_size(to);
+
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (unlikely(sz != -1 && sz < n)) {
+ copy_from_user_overflow();
+ return n;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/s390/kernel/module.c linux-3.4-pax/arch/s390/kernel/module.c
+--- linux-3.4/arch/s390/kernel/module.c 2011-10-24 12:48:25.383091820 +0200
++++ linux-3.4-pax/arch/s390/kernel/module.c 2012-05-21 12:10:08.580048837 +0200
+@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
+
+ /* Increase core size by size of got & plt and set start
+ offsets for got and plt. */
+- me->core_size = ALIGN(me->core_size, 4);
+- me->arch.got_offset = me->core_size;
+- me->core_size += me->arch.got_size;
+- me->arch.plt_offset = me->core_size;
+- me->core_size += me->arch.plt_size;
++ me->core_size_rw = ALIGN(me->core_size_rw, 4);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += me->arch.got_size;
++ me->arch.plt_offset = me->core_size_rx;
++ me->core_size_rx += me->arch.plt_size;
+ return 0;
+ }
+
+@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ if (info->got_initialized == 0) {
+ Elf_Addr *gotent;
+
+- gotent = me->module_core + me->arch.got_offset +
++ gotent = me->module_core_rw + me->arch.got_offset +
+ info->got_offset;
+ *gotent = val;
+ info->got_initialized = 1;
+@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ else if (r_type == R_390_GOTENT ||
+ r_type == R_390_GOTPLTENT)
+ *(unsigned int *) loc =
+- (val + (Elf_Addr) me->module_core - loc) >> 1;
++ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
+ else if (r_type == R_390_GOT64 ||
+ r_type == R_390_GOTPLT64)
+ *(unsigned long *) loc = val;
+@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
+ if (info->plt_initialized == 0) {
+ unsigned int *ip;
+- ip = me->module_core + me->arch.plt_offset +
++ ip = me->module_core_rx + me->arch.plt_offset +
+ info->plt_offset;
+ #ifndef CONFIG_64BIT
+ ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
+@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ val - loc + 0xffffUL < 0x1ffffeUL) ||
+ (r_type == R_390_PLT32DBL &&
+ val - loc + 0xffffffffULL < 0x1fffffffeULL)))
+- val = (Elf_Addr) me->module_core +
++ val = (Elf_Addr) me->module_core_rx +
+ me->arch.plt_offset +
+ info->plt_offset;
+ val += rela->r_addend - loc;
+@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ case R_390_GOTOFF32: /* 32 bit offset to GOT. */
+ case R_390_GOTOFF64: /* 64 bit offset to GOT. */
+ val = val + rela->r_addend -
+- ((Elf_Addr) me->module_core + me->arch.got_offset);
++ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
+ if (r_type == R_390_GOTOFF16)
+ *(unsigned short *) loc = val;
+ else if (r_type == R_390_GOTOFF32)
+@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ break;
+ case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
+ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
+- val = (Elf_Addr) me->module_core + me->arch.got_offset +
++ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
+ rela->r_addend - loc;
+ if (r_type == R_390_GOTPC)
+ *(unsigned int *) loc = val;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/s390/kernel/process.c linux-3.4-pax/arch/s390/kernel/process.c
+--- linux-3.4/arch/s390/kernel/process.c 2012-05-21 11:32:54.259927485 +0200
++++ linux-3.4-pax/arch/s390/kernel/process.c 2012-05-21 12:10:08.580048837 +0200
+@@ -316,39 +316,3 @@ unsigned long get_wchan(struct task_stru
+ }
+ return 0;
+ }
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+- return sp & ~0xf;
+-}
+-
+-static inline unsigned long brk_rnd(void)
+-{
+- /* 8MB for 32bit, 1GB for 64bit */
+- if (is_32bit_task())
+- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+- else
+- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+- return ret;
+-}
+-
+-unsigned long randomize_et_dyn(unsigned long base)
+-{
+- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (!(current->flags & PF_RANDOMIZE))
+- return base;
+- if (ret < base)
+- return base;
+- return ret;
+-}
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/s390/mm/mmap.c linux-3.4-pax/arch/s390/mm/mmap.c
+--- linux-3.4/arch/s390/mm/mmap.c 2012-05-21 11:32:54.375927491 +0200
++++ linux-3.4-pax/arch/s390/mm/mmap.c 2012-05-21 12:10:08.584048837 +0200
+@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_str
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = s390_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = s390_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/score/include/asm/exec.h linux-3.4-pax/arch/score/include/asm/exec.h
+--- linux-3.4/arch/score/include/asm/exec.h 2012-05-21 11:32:54.415927494 +0200
++++ linux-3.4-pax/arch/score/include/asm/exec.h 2012-05-21 12:10:08.584048837 +0200
+@@ -1,6 +1,6 @@
+ #ifndef _ASM_SCORE_EXEC_H
+ #define _ASM_SCORE_EXEC_H
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) (x)
+
+ #endif /* _ASM_SCORE_EXEC_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/score/kernel/process.c linux-3.4-pax/arch/score/kernel/process.c
+--- linux-3.4/arch/score/kernel/process.c 2012-05-21 11:32:54.419927494 +0200
++++ linux-3.4-pax/arch/score/kernel/process.c 2012-05-21 12:10:08.588048837 +0200
+@@ -159,8 +159,3 @@ unsigned long get_wchan(struct task_stru
+
+ return task_pt_regs(task)->cp0_epc;
+ }
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- return sp;
+-}
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sh/mm/mmap.c linux-3.4-pax/arch/sh/mm/mmap.c
+--- linux-3.4/arch/sh/mm/mmap.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/sh/mm/mmap.c 2012-05-21 12:10:08.588048837 +0200
+@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -106,7 +105,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
+ if (unlikely(mm->mmap_base < len))
+ goto bottomup;
+
+- addr = mm->mmap_base-len;
+- if (do_colour_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
++ addr = mm->mmap_base - len;
+
+ do {
++ if (do_colour_align)
++ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start-len;
+- if (do_colour_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+- } while (likely(len < vma->vm_start));
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/include/asm/atomic_64.h linux-3.4-pax/arch/sparc/include/asm/atomic_64.h
+--- linux-3.4/arch/sparc/include/asm/atomic_64.h 2012-05-21 11:32:55.643927560 +0200
++++ linux-3.4-pax/arch/sparc/include/asm/atomic_64.h 2012-05-21 12:10:08.588048837 +0200
+@@ -14,18 +14,40 @@
+ #define ATOMIC64_INIT(i) { (i) }
+
+ #define atomic_read(v) (*(volatile int *)&(v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return v->counter;
++}
+ #define atomic64_read(v) (*(volatile long *)&(v)->counter)
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return v->counter;
++}
+
+ #define atomic_set(v, i) (((v)->counter) = i)
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
+ #define atomic64_set(v, i) (((v)->counter) = i)
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ v->counter = i;
++}
+
+ extern void atomic_add(int, atomic_t *);
++extern void atomic_add_unchecked(int, atomic_unchecked_t *);
+ extern void atomic64_add(long, atomic64_t *);
++extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
+ extern void atomic_sub(int, atomic_t *);
++extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
+ extern void atomic64_sub(long, atomic64_t *);
++extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
+
+ extern int atomic_add_ret(int, atomic_t *);
++extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
+ extern long atomic64_add_ret(long, atomic64_t *);
++extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
+ extern int atomic_sub_ret(int, atomic_t *);
+ extern long atomic64_sub_ret(long, atomic64_t *);
+
+@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
+ #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
+
+ #define atomic_inc_return(v) atomic_add_ret(1, v)
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_ret_unchecked(1, v);
++}
+ #define atomic64_inc_return(v) atomic64_add_ret(1, v)
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ return atomic64_add_ret_unchecked(1, v);
++}
+
+ #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
+ #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
+
+ #define atomic_add_return(i, v) atomic_add_ret(i, v)
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ return atomic_add_ret_unchecked(i, v);
++}
+ #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
++{
++ return atomic64_add_ret_unchecked(i, v);
++}
+
+ /*
+ * atomic_inc_and_test - increment and test
+@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
+ * other cases.
+ */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_inc_return_unchecked(v) == 0;
++}
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+ #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
+@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomi
+ #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
+
+ #define atomic_inc(v) atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+ #define atomic64_inc(v) atomic64_add(1, v)
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_add_unchecked(1, v);
++}
+
+ #define atomic_dec(v) atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+ #define atomic64_dec(v) atomic64_sub(1, v)
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_sub_unchecked(1, v);
++}
+
+ #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
+ #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
+
+ #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
+
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%icc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(at
+ #define atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+ #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++ return xchg(&v->counter, new);
++}
+
+ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%xcc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic64_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/include/asm/cache.h linux-3.4-pax/arch/sparc/include/asm/cache.h
+--- linux-3.4/arch/sparc/include/asm/cache.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/sparc/include/asm/cache.h 2012-05-21 12:10:08.592048838 +0200
+@@ -10,7 +10,7 @@
+ #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES 32
++#define L1_CACHE_BYTES 32UL
+
+ #ifdef CONFIG_SPARC32
+ #define SMP_CACHE_BYTES_SHIFT 5
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/include/asm/elf_32.h linux-3.4-pax/arch/sparc/include/asm/elf_32.h
+--- linux-3.4/arch/sparc/include/asm/elf_32.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/sparc/include/asm/elf_32.h 2012-05-21 12:10:08.592048838 +0200
+@@ -114,6 +114,13 @@ typedef struct {
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This can NOT be done in userspace
+ on Sparc. */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/include/asm/elf_64.h linux-3.4-pax/arch/sparc/include/asm/elf_64.h
+--- linux-3.4/arch/sparc/include/asm/elf_64.h 2011-10-24 12:48:25.651091805 +0200
++++ linux-3.4-pax/arch/sparc/include/asm/elf_64.h 2012-05-21 12:10:08.596048838 +0200
+@@ -180,6 +180,13 @@ typedef struct {
+ #define ELF_ET_DYN_BASE 0x0000010000000000UL
+ #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
++#endif
++
+ extern unsigned long sparc64_elf_hwcap;
+ #define ELF_HWCAP sparc64_elf_hwcap
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/include/asm/pgalloc_32.h linux-3.4-pax/arch/sparc/include/asm/pgalloc_32.h
+--- linux-3.4/arch/sparc/include/asm/pgalloc_32.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/sparc/include/asm/pgalloc_32.h 2012-05-21 12:10:08.596048838 +0200
+@@ -37,6 +37,7 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd
+ BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
+ #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
+ #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
++#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
+
+ BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
+ #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/include/asm/pgalloc_64.h linux-3.4-pax/arch/sparc/include/asm/pgalloc_64.h
+--- linux-3.4/arch/sparc/include/asm/pgalloc_64.h 2011-10-24 12:48:25.663091805 +0200
++++ linux-3.4-pax/arch/sparc/include/asm/pgalloc_64.h 2012-05-21 12:10:08.600048838 +0200
+@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_st
+ }
+
+ #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
++#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
+
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/include/asm/pgtable_32.h linux-3.4-pax/arch/sparc/include/asm/pgtable_32.h
+--- linux-3.4/arch/sparc/include/asm/pgtable_32.h 2012-05-21 11:32:55.723927565 +0200
++++ linux-3.4-pax/arch/sparc/include/asm/pgtable_32.h 2012-05-21 12:10:08.600048838 +0200
+@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
+ BTFIXUPDEF_INT(page_none)
+ BTFIXUPDEF_INT(page_copy)
+ BTFIXUPDEF_INT(page_readonly)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++BTFIXUPDEF_INT(page_shared_noexec)
++BTFIXUPDEF_INT(page_copy_noexec)
++BTFIXUPDEF_INT(page_readonly_noexec)
++#endif
++
+ BTFIXUPDEF_INT(page_kernel)
+
+ #define PMD_SHIFT SUN4C_PMD_SHIFT
+@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
+ #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
+ #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
+
++#ifdef CONFIG_PAX_PAGEEXEC
++extern pgprot_t PAGE_SHARED_NOEXEC;
++# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
++# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ extern unsigned long page_kernel;
+
+ #ifdef MODULE
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/include/asm/pgtsrmmu.h linux-3.4-pax/arch/sparc/include/asm/pgtsrmmu.h
+--- linux-3.4/arch/sparc/include/asm/pgtsrmmu.h 2011-10-24 12:48:25.667091805 +0200
++++ linux-3.4-pax/arch/sparc/include/asm/pgtsrmmu.h 2012-05-21 12:10:08.604048838 +0200
+@@ -115,6 +115,13 @@
+ SRMMU_EXEC | SRMMU_REF)
+ #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+ SRMMU_EXEC | SRMMU_REF)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#endif
++
+ #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
+ SRMMU_DIRTY | SRMMU_REF)
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/include/asm/spinlock_64.h linux-3.4-pax/arch/sparc/include/asm/spinlock_64.h
+--- linux-3.4/arch/sparc/include/asm/spinlock_64.h 2011-10-24 12:48:25.687091804 +0200
++++ linux-3.4-pax/arch/sparc/include/asm/spinlock_64.h 2012-05-21 12:10:08.604048838 +0200
+@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
+
+ /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
+
+-static void inline arch_read_lock(arch_rwlock_t *lock)
++static inline void arch_read_lock(arch_rwlock_t *lock)
+ {
+ unsigned long tmp1, tmp2;
+
+ __asm__ __volatile__ (
+ "1: ldsw [%2], %0\n"
+ " brlz,pn %0, 2f\n"
+-"4: add %0, 1, %1\n"
++"4: addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
+ " .previous"
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "r" (lock)
+- : "memory");
++ : "memory", "cc");
+ }
+
+-static int inline arch_read_trylock(arch_rwlock_t *lock)
++static inline int arch_read_trylock(arch_rwlock_t *lock)
+ {
+ int tmp1, tmp2;
+
+@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
+ "1: ldsw [%2], %0\n"
+ " brlz,a,pn %0, 2f\n"
+ " mov 0, %0\n"
+-" add %0, 1, %1\n"
++" addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
+ return tmp1;
+ }
+
+-static void inline arch_read_unlock(arch_rwlock_t *lock)
++static inline void arch_read_unlock(arch_rwlock_t *lock)
+ {
+ unsigned long tmp1, tmp2;
+
+ __asm__ __volatile__(
+ "1: lduw [%2], %0\n"
+-" sub %0, 1, %1\n"
++" subcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%xcc, 1b\n"
+@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
+ : "memory");
+ }
+
+-static void inline arch_write_lock(arch_rwlock_t *lock)
++static inline void arch_write_lock(arch_rwlock_t *lock)
+ {
+ unsigned long mask, tmp1, tmp2;
+
+@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
+ : "memory");
+ }
+
+-static void inline arch_write_unlock(arch_rwlock_t *lock)
++static inline void arch_write_unlock(arch_rwlock_t *lock)
+ {
+ __asm__ __volatile__(
+ " stw %%g0, [%0]"
+@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
+ : "memory");
+ }
+
+-static int inline arch_write_trylock(arch_rwlock_t *lock)
++static inline int arch_write_trylock(arch_rwlock_t *lock)
+ {
+ unsigned long mask, tmp1, tmp2, result;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/include/asm/thread_info_32.h linux-3.4-pax/arch/sparc/include/asm/thread_info_32.h
+--- linux-3.4/arch/sparc/include/asm/thread_info_32.h 2012-03-19 10:38:55.976050030 +0100
++++ linux-3.4-pax/arch/sparc/include/asm/thread_info_32.h 2012-05-21 12:10:08.608048839 +0200
+@@ -50,6 +50,8 @@ struct thread_info {
+ unsigned long w_saved;
+
+ struct restart_block restart_block;
++
++ unsigned long lowest_stack;
+ };
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/include/asm/thread_info_64.h linux-3.4-pax/arch/sparc/include/asm/thread_info_64.h
+--- linux-3.4/arch/sparc/include/asm/thread_info_64.h 2012-03-19 10:38:55.976050030 +0100
++++ linux-3.4-pax/arch/sparc/include/asm/thread_info_64.h 2012-05-21 12:10:08.608048839 +0200
+@@ -63,6 +63,8 @@ struct thread_info {
+ struct pt_regs *kern_una_regs;
+ unsigned int kern_una_insn;
+
++ unsigned long lowest_stack;
++
+ unsigned long fpregs[0] __attribute__ ((aligned(64)));
+ };
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/include/asm/uaccess_32.h linux-3.4-pax/arch/sparc/include/asm/uaccess_32.h
+--- linux-3.4/arch/sparc/include/asm/uaccess_32.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/sparc/include/asm/uaccess_32.h 2012-05-21 12:10:08.608048839 +0200
+@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
+
+ static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+- if (n && __access_ok((unsigned long) to, n))
++ if ((long)n < 0)
++ return n;
++
++ if (n && __access_ok((unsigned long) to, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
+ return __copy_user(to, (__force void __user *) from, n);
+- else
++ } else
+ return n;
+ }
+
+ static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++
+ return __copy_user(to, (__force void __user *) from, n);
+ }
+
+ static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- if (n && __access_ok((unsigned long) from, n))
++ if ((long)n < 0)
++ return n;
++
++ if (n && __access_ok((unsigned long) from, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
+ return __copy_user((__force void __user *) to, from, n);
+- else
++ } else
+ return n;
+ }
+
+ static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ return __copy_user((__force void __user *) to, from, n);
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/include/asm/uaccess_64.h linux-3.4-pax/arch/sparc/include/asm/uaccess_64.h
+--- linux-3.4/arch/sparc/include/asm/uaccess_64.h 2012-05-21 11:32:55.759927567 +0200
++++ linux-3.4-pax/arch/sparc/include/asm/uaccess_64.h 2012-05-21 12:10:08.612048839 +0200
+@@ -10,6 +10,7 @@
+ #include <linux/compiler.h>
+ #include <linux/string.h>
+ #include <linux/thread_info.h>
++#include <linux/kernel.h>
+ #include <asm/asi.h>
+ #include <asm/spitfire.h>
+ #include <asm-generic/uaccess-unaligned.h>
+@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long size)
+ {
+- unsigned long ret = ___copy_from_user(to, from, size);
++ unsigned long ret;
+
++ if ((long)size < 0 || size > INT_MAX)
++ return size;
++
++ if (!__builtin_constant_p(size))
++ check_object_size(to, size, false);
++
++ ret = ___copy_from_user(to, from, size);
+ if (unlikely(ret))
+ ret = copy_from_user_fixup(to, from, size);
+
+@@ -229,8 +237,15 @@ extern unsigned long copy_to_user_fixup(
+ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long size)
+ {
+- unsigned long ret = ___copy_to_user(to, from, size);
++ unsigned long ret;
++
++ if ((long)size < 0 || size > INT_MAX)
++ return size;
++
++ if (!__builtin_constant_p(size))
++ check_object_size(from, size, true);
+
++ ret = ___copy_to_user(to, from, size);
+ if (unlikely(ret))
+ ret = copy_to_user_fixup(to, from, size);
+ return ret;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/include/asm/uaccess.h linux-3.4-pax/arch/sparc/include/asm/uaccess.h
+--- linux-3.4/arch/sparc/include/asm/uaccess.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/sparc/include/asm/uaccess.h 2012-05-21 12:10:08.612048839 +0200
+@@ -1,5 +1,13 @@
+ #ifndef ___ASM_SPARC_UACCESS_H
+ #define ___ASM_SPARC_UACCESS_H
++
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++#include <linux/types.h>
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
++#endif
++#endif
++
+ #if defined(__sparc__) && defined(__arch64__)
+ #include <asm/uaccess_64.h>
+ #else
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/kernel/Makefile linux-3.4-pax/arch/sparc/kernel/Makefile
+--- linux-3.4/arch/sparc/kernel/Makefile 2011-10-24 12:48:25.691091803 +0200
++++ linux-3.4-pax/arch/sparc/kernel/Makefile 2012-05-21 12:10:08.616048839 +0200
+@@ -3,7 +3,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ extra-y := head_$(BITS).o
+ extra-y += init_task.o
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/kernel/sys_sparc_32.c linux-3.4-pax/arch/sparc/kernel/sys_sparc_32.c
+--- linux-3.4/arch/sparc/kernel/sys_sparc_32.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/sparc/kernel/sys_sparc_32.c 2012-05-21 12:10:08.616048839 +0200
+@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
+ if (ARCH_SUN4C && len > 0x20000000)
+ return -ENOMEM;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ if (flags & MAP_SHARED)
+ addr = COLOUR_ALIGN(addr);
+@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
+ }
+ if (TASK_SIZE - PAGE_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vmm || addr + len <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = vmm->vm_end;
+ if (flags & MAP_SHARED)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/kernel/sys_sparc_64.c linux-3.4-pax/arch/sparc/kernel/sys_sparc_64.c
+--- linux-3.4/arch/sparc/kernel/sys_sparc_64.c 2012-05-21 11:32:55.979927579 +0200
++++ linux-3.4-pax/arch/sparc/kernel/sys_sparc_64.c 2012-05-21 12:10:08.620048839 +0200
+@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ }
+
+@@ -174,14 +177,14 @@ full_search:
+ vma = find_vma(mm, VA_EXCLUDE_END);
+ }
+ if (unlikely(task_size < addr)) {
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
+ if (unlikely(mm->mmap_base < len))
+ goto bottomup;
+
+- addr = mm->mmap_base-len;
+- if (do_color_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
++ addr = mm->mmap_base - len;
+
+ do {
++ if (do_color_align)
++ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start-len;
+- if (do_color_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+- } while (likely(len < vma->vm_start));
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
+ gap == RLIM_INFINITY ||
+ sysctl_legacy_va_layout) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
+ gap = (task_size / 6 * 5);
+
+ mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/kernel/traps_64.c linux-3.4-pax/arch/sparc/kernel/traps_64.c
+--- linux-3.4/arch/sparc/kernel/traps_64.c 2012-05-21 11:32:56.011927580 +0200
++++ linux-3.4-pax/arch/sparc/kernel/traps_64.c 2012-05-21 12:10:08.620048839 +0200
+@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
+
+ lvl -= 0x100;
+ if (regs->tstate & TSTATE_PRIV) {
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ sprintf(buffer, "Kernel bad sw trap %lx", lvl);
+ die_if_kernel(buffer, regs);
+ }
+@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
+ void bad_trap_tl1(struct pt_regs *regs, long lvl)
+ {
+ char buffer[32];
+-
++
+ if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
+ 0, lvl, SIGTRAP) == NOTIFY_STOP)
+ return;
+
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+ sprintf (buffer, "Bad trap %lx at tl>0", lvl);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/lib/atomic_64.S linux-3.4-pax/arch/sparc/lib/atomic_64.S
+--- linux-3.4/arch/sparc/lib/atomic_64.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/sparc/lib/atomic_64.S 2012-05-21 12:10:08.624048839 +0200
+@@ -18,7 +18,12 @@
+ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_add, .-atomic_add
+
++ .globl atomic_add_unchecked
++ .type atomic_add_unchecked,#function
++atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ add %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_add_unchecked, .-atomic_add_unchecked
++
+ .globl atomic_sub
+ .type atomic_sub,#function
+ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_sub, .-atomic_sub
+
++ .globl atomic_sub_unchecked
++ .type atomic_sub_unchecked,#function
++atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ sub %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_sub_unchecked, .-atomic_sub_unchecked
++
+ .globl atomic_add_ret
+ .type atomic_add_ret,#function
+ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_add_ret, .-atomic_add_ret
+
++ .globl atomic_add_ret_unchecked
++ .type atomic_add_ret_unchecked,#function
++atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ addcc %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ add %g7, %o0, %g7
++ sra %g7, 0, %o0
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
++
+ .globl atomic_sub_ret
+ .type atomic_sub_ret,#function
+ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
+ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_add, .-atomic64_add
+
++ .globl atomic64_add_unchecked
++ .type atomic64_add_unchecked,#function
++atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: ldx [%o1], %g1
++ addcc %g1, %o0, %g7
++ casx [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %xcc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic64_add_unchecked, .-atomic64_add_unchecked
++
+ .globl atomic64_sub
+ .type atomic64_sub,#function
+ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_sub, .-atomic64_sub
+
++ .globl atomic64_sub_unchecked
++ .type atomic64_sub_unchecked,#function
++atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: ldx [%o1], %g1
++ subcc %g1, %o0, %g7
++ casx [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %xcc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
++
+ .globl atomic64_add_ret
+ .type atomic64_add_ret,#function
+ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_add_ret, .-atomic64_add_ret
+
++ .globl atomic64_add_ret_unchecked
++ .type atomic64_add_ret_unchecked,#function
++atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: ldx [%o1], %g1
++ addcc %g1, %o0, %g7
++ casx [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %xcc, 2f
++ add %g7, %o0, %g7
++ mov %g7, %o0
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
++
+ .globl atomic64_sub_ret
+ .type atomic64_sub_ret,#function
+ atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/lib/ksyms.c linux-3.4-pax/arch/sparc/lib/ksyms.c
+--- linux-3.4/arch/sparc/lib/ksyms.c 2012-03-19 10:38:56.172050019 +0100
++++ linux-3.4-pax/arch/sparc/lib/ksyms.c 2012-05-21 12:10:08.628048840 +0200
+@@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
+
+ /* Atomic counter implementation. */
+ EXPORT_SYMBOL(atomic_add);
++EXPORT_SYMBOL(atomic_add_unchecked);
+ EXPORT_SYMBOL(atomic_add_ret);
++EXPORT_SYMBOL(atomic_add_ret_unchecked);
+ EXPORT_SYMBOL(atomic_sub);
++EXPORT_SYMBOL(atomic_sub_unchecked);
+ EXPORT_SYMBOL(atomic_sub_ret);
+ EXPORT_SYMBOL(atomic64_add);
++EXPORT_SYMBOL(atomic64_add_unchecked);
+ EXPORT_SYMBOL(atomic64_add_ret);
++EXPORT_SYMBOL(atomic64_add_ret_unchecked);
+ EXPORT_SYMBOL(atomic64_sub);
++EXPORT_SYMBOL(atomic64_sub_unchecked);
+ EXPORT_SYMBOL(atomic64_sub_ret);
+
+ /* Atomic bit operations. */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/lib/Makefile linux-3.4-pax/arch/sparc/lib/Makefile
+--- linux-3.4/arch/sparc/lib/Makefile 2011-10-24 12:48:25.839091800 +0200
++++ linux-3.4-pax/arch/sparc/lib/Makefile 2012-05-21 12:10:08.628048840 +0200
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi -DST_DIV0=0x02
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
+ lib-$(CONFIG_SPARC32) += memcpy.o memset.o
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/mm/fault_32.c linux-3.4-pax/arch/sparc/mm/fault_32.c
+--- linux-3.4/arch/sparc/mm/fault_32.c 2012-05-21 11:32:56.055927583 +0200
++++ linux-3.4-pax/arch/sparc/mm/fault_32.c 2012-05-21 12:10:08.632048840 +0200
+@@ -21,6 +21,9 @@
+ #include <linux/perf_event.h>
+ #include <linux/interrupt.h>
+ #include <linux/kdebug.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -207,6 +210,268 @@ static unsigned long compute_si_addr(str
+ return safe_compute_effective_address(regs, insn);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ INIT_LIST_HEAD(&vma->anon_vma_chain);
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->pc);
++ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned int addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->pc);
++
++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ unsigned int addr;
++
++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ }
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(ba, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr, save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->pc = call_dl_resolve;
++ regs->npc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->pc-4));
++ err |= get_user(call, (unsigned int *)regs->pc);
++ err |= get_user(nop, (unsigned int *)(regs->pc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
++
++ regs->u_regs[UREG_RETPC] = regs->pc;
++ regs->pc = dl_resolve;
++ regs->npc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
+ int text_fault)
+ {
+@@ -282,6 +547,24 @@ good_area:
+ if(!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Allow reads even for write-only mappings */
+ if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/mm/fault_64.c linux-3.4-pax/arch/sparc/mm/fault_64.c
+--- linux-3.4/arch/sparc/mm/fault_64.c 2012-05-21 11:32:56.059927583 +0200
++++ linux-3.4-pax/arch/sparc/mm/fault_64.c 2012-05-21 12:10:08.632048840 +0200
+@@ -21,6 +21,9 @@
+ #include <linux/kprobes.h>
+ #include <linux/kdebug.h>
+ #include <linux/percpu.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
+ show_regs(regs);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ INIT_LIST_HEAD(&vma->anon_vma_chain);
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->tpc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->tpc);
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->tpc);
++
++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ unsigned long addr;
++
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ }
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #4 */
++ unsigned int sethi, mov1, call, mov2;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(call, (unsigned int *)(regs->tpc+8));
++ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ mov1 == 0x8210000FU &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ mov2 == 0x9E100001U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #5 */
++ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
++ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x82106000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x83287020U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #6 */
++ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
++ err |= get_user(or, (unsigned int *)(regs->tpc+16));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ sllx == 0x83287020U &&
++ (or & 0xFFFFE000U) == 0x8A116000U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++ unsigned int save, call;
++ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->tpc = call_dl_resolve;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++
++ /* PaX: 64-bit PLT stub */
++ err = get_user(sethi1, (unsigned int *)addr);
++ err |= get_user(sethi2, (unsigned int *)(addr+4));
++ err |= get_user(or1, (unsigned int *)(addr+8));
++ err |= get_user(or2, (unsigned int *)(addr+12));
++ err |= get_user(sllx, (unsigned int *)(addr+16));
++ err |= get_user(add, (unsigned int *)(addr+20));
++ err |= get_user(jmpl, (unsigned int *)(addr+24));
++ err |= get_user(nop, (unsigned int *)(addr+28));
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x88112000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x89293020U &&
++ add == 0x8A010005U &&
++ jmpl == 0x89C14000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G4] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
++ regs->u_regs[UREG_G4] = addr + 24;
++ addr = regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->tpc-4));
++ err |= get_user(call, (unsigned int *)regs->tpc);
++ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ dl_resolve &= 0xFFFFFFFFUL;
++
++ regs->u_regs[UREG_RETPC] = regs->tpc;
++ regs->tpc = dl_resolve;
++ regs->tnpc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (ba & 0xFFF00000U) == 0x30600000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ {
+ struct mm_struct *mm = current->mm;
+@@ -343,6 +797,29 @@ retry:
+ if (!vma)
+ goto bad_area;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ /* PaX: detect ITLB misses on non-exec pages */
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
++ {
++ if (address != regs->tpc)
++ goto good_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Pure DTLB misses do not tell us whether the fault causing
+ * load/store/atomic was a write or not, it only says that there
+ * was no match. So in such a case we (carefully) read the
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/mm/hugetlbpage.c linux-3.4-pax/arch/sparc/mm/hugetlbpage.c
+--- linux-3.4/arch/sparc/mm/hugetlbpage.c 2012-01-08 19:47:48.099473061 +0100
++++ linux-3.4-pax/arch/sparc/mm/hugetlbpage.c 2012-05-21 12:10:08.636048840 +0200
+@@ -67,7 +67,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct
+ if (unlikely(mm->mmap_base < len))
+ goto bottomup;
+
+- addr = (mm->mmap_base-len) & HPAGE_MASK;
++ addr = mm->mmap_base - len;
+
+ do {
++ addr &= HPAGE_MASK;
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = (vma->vm_start-len) & HPAGE_MASK;
+- } while (likely(len < vma->vm_start));
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *f
+ if (addr) {
+ addr = ALIGN(addr, HPAGE_SIZE);
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
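For orientation, each of the three hugetlbpage.c hunks above replaces the same open-coded test. The sketch below restates that pre-patch condition as a predicate; check_heap_stack_gap() is PaX's drop-in replacement and takes the same (vma, addr, len) arguments, but its actual definition lives elsewhere in the patch and is not reproduced in this excerpt.

#include <linux/mm.h>

/* Sketch only: the condition removed by the hunks above, not the PaX helper itself. */
static int fits_below_vma(const struct vm_area_struct *vma,
			  unsigned long addr, unsigned long len)
{
	return vma == NULL || addr + len <= vma->vm_start;
}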
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/mm/init_32.c linux-3.4-pax/arch/sparc/mm/init_32.c
+--- linux-3.4/arch/sparc/mm/init_32.c 2012-05-21 11:32:56.071927584 +0200
++++ linux-3.4-pax/arch/sparc/mm/init_32.c 2012-05-21 12:10:08.636048840 +0200
+@@ -315,6 +315,9 @@ extern void device_scan(void);
+ pgprot_t PAGE_SHARED __read_mostly;
+ EXPORT_SYMBOL(PAGE_SHARED);
+
++pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
++EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
++
+ void __init paging_init(void)
+ {
+ switch(sparc_cpu_model) {
+@@ -343,17 +346,17 @@ void __init paging_init(void)
+
+ /* Initialize the protection map with non-constant, MMU dependent values. */
+ protection_map[0] = PAGE_NONE;
+- protection_map[1] = PAGE_READONLY;
+- protection_map[2] = PAGE_COPY;
+- protection_map[3] = PAGE_COPY;
++ protection_map[1] = PAGE_READONLY_NOEXEC;
++ protection_map[2] = PAGE_COPY_NOEXEC;
++ protection_map[3] = PAGE_COPY_NOEXEC;
+ protection_map[4] = PAGE_READONLY;
+ protection_map[5] = PAGE_READONLY;
+ protection_map[6] = PAGE_COPY;
+ protection_map[7] = PAGE_COPY;
+ protection_map[8] = PAGE_NONE;
+- protection_map[9] = PAGE_READONLY;
+- protection_map[10] = PAGE_SHARED;
+- protection_map[11] = PAGE_SHARED;
++ protection_map[9] = PAGE_READONLY_NOEXEC;
++ protection_map[10] = PAGE_SHARED_NOEXEC;
++ protection_map[11] = PAGE_SHARED_NOEXEC;
+ protection_map[12] = PAGE_READONLY;
+ protection_map[13] = PAGE_READONLY;
+ protection_map[14] = PAGE_SHARED;
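The protection_map rewrite above only touches table slots whose index lacks the VM_EXEC bit. As a reminder of how that table is consulted (standard kernel behaviour restated here for context, and simplified: the real vm_get_page_prot() also ORs in architecture-specific bits):

#include <linux/mm.h>

/* Illustration: protection_map is indexed by the low four vm_flags bits,
 * VM_READ=0x1, VM_WRITE=0x2, VM_EXEC=0x4, VM_SHARED=0x8.  Entries 1-3 and
 * 9-11 are the read/write combinations without VM_EXEC, i.e. exactly the
 * slots the hunk above redirects to the *_NOEXEC protections. */
static pgprot_t prot_for(unsigned long vm_flags)
{
	return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}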
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/mm/Makefile linux-3.4-pax/arch/sparc/mm/Makefile
+--- linux-3.4/arch/sparc/mm/Makefile 2012-01-08 19:47:47.859473073 +0100
++++ linux-3.4-pax/arch/sparc/mm/Makefile 2012-05-21 12:10:08.640048840 +0200
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
+ obj-y += fault_$(BITS).o
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/sparc/mm/srmmu.c linux-3.4-pax/arch/sparc/mm/srmmu.c
+--- linux-3.4/arch/sparc/mm/srmmu.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/sparc/mm/srmmu.c 2012-05-21 12:10:08.644048841 +0200
+@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
+ PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
+ BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
+ BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
++ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
++ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
++#endif
++
+ BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
+ page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/tile/include/asm/atomic_64.h linux-3.4-pax/arch/tile/include/asm/atomic_64.h
+--- linux-3.4/arch/tile/include/asm/atomic_64.h 2012-05-21 11:32:56.123927586 +0200
++++ linux-3.4-pax/arch/tile/include/asm/atomic_64.h 2012-05-21 12:10:08.644048841 +0200
+@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(a
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ /* Atomic dec and inc don't implement barrier, so provide them if needed. */
+ #define smp_mb__before_atomic_dec() smp_mb()
+ #define smp_mb__after_atomic_dec() smp_mb()
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/um/include/asm/kmap_types.h linux-3.4-pax/arch/um/include/asm/kmap_types.h
+--- linux-3.4/arch/um/include/asm/kmap_types.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/um/include/asm/kmap_types.h 2012-05-21 12:10:08.648048841 +0200
+@@ -23,6 +23,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/um/include/asm/page.h linux-3.4-pax/arch/um/include/asm/page.h
+--- linux-3.4/arch/um/include/asm/page.h 2012-01-08 19:47:48.407473044 +0100
++++ linux-3.4-pax/arch/um/include/asm/page.h 2012-05-21 12:10:08.648048841 +0200
+@@ -14,6 +14,9 @@
+ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+ #define PAGE_MASK (~(PAGE_SIZE-1))
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #ifndef __ASSEMBLY__
+
+ struct page;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/um/include/asm/pgtable-3level.h linux-3.4-pax/arch/um/include/asm/pgtable-3level.h
+--- linux-3.4/arch/um/include/asm/pgtable-3level.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/um/include/asm/pgtable-3level.h 2012-05-21 12:10:08.648048841 +0200
+@@ -58,6 +58,7 @@
+ #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
+ #define pud_populate(mm, pud, pmd) \
+ set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
++#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
+
+ #ifdef CONFIG_64BIT
+ #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/um/kernel/process.c linux-3.4-pax/arch/um/kernel/process.c
+--- linux-3.4/arch/um/kernel/process.c 2012-05-21 11:32:56.483927606 +0200
++++ linux-3.4-pax/arch/um/kernel/process.c 2012-05-21 12:10:08.652048841 +0200
+@@ -404,22 +404,6 @@ int singlestepping(void * t)
+ return 2;
+ }
+
+-/*
+- * Only x86 and x86_64 have an arch_align_stack().
+- * All other arches have "#define arch_align_stack(x) (x)"
+- * in their asm/system.h
+- * As this is included in UML from asm-um/system-generic.h,
+- * we can use it to behave as the subarch does.
+- */
+-#ifndef arch_align_stack
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() % 8192;
+- return sp & ~0xf;
+-}
+-#endif
+-
+ unsigned long get_wchan(struct task_struct *p)
+ {
+ unsigned long stack_page, sp, ip;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/um/Makefile linux-3.4-pax/arch/um/Makefile
+--- linux-3.4/arch/um/Makefile 2012-05-21 11:32:56.355927600 +0200
++++ linux-3.4-pax/arch/um/Makefile 2012-05-21 12:10:08.652048841 +0200
+@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINE
+ $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
+ $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
+
++ifdef CONSTIFY_PLUGIN
++USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
++endif
++
+ #This will adjust *FLAGS accordingly to the platform.
+ include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/boot/bitops.h linux-3.4-pax/arch/x86/boot/bitops.h
+--- linux-3.4/arch/x86/boot/bitops.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/boot/bitops.h 2012-05-21 12:10:08.656048841 +0200
+@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
+ u8 v;
+ const u32 *p = (const u32 *)addr;
+
+- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
++ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+ return v;
+ }
+
+@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
+
+ static inline void set_bit(int nr, void *addr)
+ {
+- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
++ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
+ }
+
+ #endif /* BOOT_BITOPS_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/boot/boot.h linux-3.4-pax/arch/x86/boot/boot.h
+--- linux-3.4/arch/x86/boot/boot.h 2012-05-21 11:32:56.723927619 +0200
++++ linux-3.4-pax/arch/x86/boot/boot.h 2012-05-21 12:10:08.656048841 +0200
+@@ -85,7 +85,7 @@ static inline void io_delay(void)
+ static inline u16 ds(void)
+ {
+ u16 seg;
+- asm("movw %%ds,%0" : "=rm" (seg));
++ asm volatile("movw %%ds,%0" : "=rm" (seg));
+ return seg;
+ }
+
+@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
+ static inline int memcmp(const void *s1, const void *s2, size_t len)
+ {
+ u8 diff;
+- asm("repe; cmpsb; setnz %0"
++ asm volatile("repe; cmpsb; setnz %0"
+ : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+ return diff;
+ }
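The asm-to-asm-volatile hunks above (and the matching ones in cpucheck.c further down) keep the compiler from eliding or merging these statements. A hypothetical illustration, not taken from the kernel, of what volatile changes for an asm that has outputs:

/* Without volatile, an asm with outputs and no side-effect hint is treated as
 * a pure function of its inputs: it may be dropped if its result is unused,
 * hoisted out of loops, or merged with an identical statement.  Marking it
 * volatile, as the hunks above do, forces it to be emitted each time. */
static inline unsigned short read_ds_once(void)
{
	unsigned short seg;
	asm volatile("movw %%ds,%0" : "=rm" (seg));	/* always re-read %ds */
	return seg;
}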
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/boot/compressed/eboot.c linux-3.4-pax/arch/x86/boot/compressed/eboot.c
+--- linux-3.4/arch/x86/boot/compressed/eboot.c 2012-05-21 11:32:56.727927619 +0200
++++ linux-3.4-pax/arch/x86/boot/compressed/eboot.c 2012-05-21 12:10:08.660048841 +0200
+@@ -122,7 +122,6 @@ again:
+ *addr = max_addr;
+ }
+
+-free_pool:
+ efi_call_phys1(sys_table->boottime->free_pool, map);
+
+ fail:
+@@ -186,7 +185,6 @@ static efi_status_t low_alloc(unsigned l
+ if (i == map_size / desc_size)
+ status = EFI_NOT_FOUND;
+
+-free_pool:
+ efi_call_phys1(sys_table->boottime->free_pool, map);
+ fail:
+ return status;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/boot/compressed/head_32.S linux-3.4-pax/arch/x86/boot/compressed/head_32.S
+--- linux-3.4/arch/x86/boot/compressed/head_32.S 2012-05-21 11:32:56.727927619 +0200
++++ linux-3.4-pax/arch/x86/boot/compressed/head_32.S 2012-05-21 12:10:08.660048841 +0200
+@@ -106,7 +106,7 @@ preferred_addr:
+ notl %eax
+ andl %eax, %ebx
+ #else
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ #endif
+
+ /* Target address to relocate to for decompression */
+@@ -192,7 +192,7 @@ relocated:
+ * and where it was actually loaded.
+ */
+ movl %ebp, %ebx
+- subl $LOAD_PHYSICAL_ADDR, %ebx
++ subl $____LOAD_PHYSICAL_ADDR, %ebx
+ jz 2f /* Nothing to be done if loaded at compiled addr. */
+ /*
+ * Process relocations.
+@@ -200,8 +200,7 @@ relocated:
+
+ 1: subl $4, %edi
+ movl (%edi), %ecx
+- testl %ecx, %ecx
+- jz 2f
++ jecxz 2f
+ addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
+ jmp 1b
+ 2:
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/boot/compressed/head_64.S linux-3.4-pax/arch/x86/boot/compressed/head_64.S
+--- linux-3.4/arch/x86/boot/compressed/head_64.S 2012-05-21 11:32:56.727927619 +0200
++++ linux-3.4-pax/arch/x86/boot/compressed/head_64.S 2012-05-21 12:10:08.664048842 +0200
+@@ -91,7 +91,7 @@ ENTRY(startup_32)
+ notl %eax
+ andl %eax, %ebx
+ #else
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ #endif
+
+ /* Target address to relocate to for decompression */
+@@ -263,7 +263,7 @@ preferred_addr:
+ notq %rax
+ andq %rax, %rbp
+ #else
+- movq $LOAD_PHYSICAL_ADDR, %rbp
++ movq $____LOAD_PHYSICAL_ADDR, %rbp
+ #endif
+
+ /* Target address to relocate to for decompression */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/boot/compressed/Makefile linux-3.4-pax/arch/x86/boot/compressed/Makefile
+--- linux-3.4/arch/x86/boot/compressed/Makefile 2012-05-21 11:32:56.723927619 +0200
++++ linux-3.4-pax/arch/x86/boot/compressed/Makefile 2012-05-21 12:10:08.664048842 +0200
+@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
+ KBUILD_CFLAGS += $(cflags-y)
+ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
+ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
++ifdef CONSTIFY_PLUGIN
++KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
++endif
+
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/boot/compressed/misc.c linux-3.4-pax/arch/x86/boot/compressed/misc.c
+--- linux-3.4/arch/x86/boot/compressed/misc.c 2012-03-19 10:38:56.384050008 +0100
++++ linux-3.4-pax/arch/x86/boot/compressed/misc.c 2012-05-21 12:10:08.668048842 +0200
+@@ -310,7 +310,7 @@ static void parse_elf(void *output)
+ case PT_LOAD:
+ #ifdef CONFIG_RELOCATABLE
+ dest = output;
+- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
++ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
+ #else
+ dest = (void *)(phdr->p_paddr);
+ #endif
+@@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *
+ error("Destination address too large");
+ #endif
+ #ifndef CONFIG_RELOCATABLE
+- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
++ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
+ error("Wrong destination address");
+ #endif
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/boot/cpucheck.c linux-3.4-pax/arch/x86/boot/cpucheck.c
+--- linux-3.4/arch/x86/boot/cpucheck.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/boot/cpucheck.c 2012-05-21 12:10:09.268048874 +0200
+@@ -74,7 +74,7 @@ static int has_fpu(void)
+ u16 fcw = -1, fsw = -1;
+ u32 cr0;
+
+- asm("movl %%cr0,%0" : "=r" (cr0));
++ asm volatile("movl %%cr0,%0" : "=r" (cr0));
+ if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
+ cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
+ asm volatile("movl %0,%%cr0" : : "r" (cr0));
+@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
+ {
+ u32 f0, f1;
+
+- asm("pushfl ; "
++ asm volatile("pushfl ; "
+ "pushfl ; "
+ "popl %0 ; "
+ "movl %0,%1 ; "
+@@ -115,7 +115,7 @@ static void get_flags(void)
+ set_bit(X86_FEATURE_FPU, cpu.flags);
+
+ if (has_eflag(X86_EFLAGS_ID)) {
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (max_intel_level),
+ "=b" (cpu_vendor[0]),
+ "=d" (cpu_vendor[1]),
+@@ -124,7 +124,7 @@ static void get_flags(void)
+
+ if (max_intel_level >= 0x00000001 &&
+ max_intel_level <= 0x0000ffff) {
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (tfms),
+ "=c" (cpu.flags[4]),
+ "=d" (cpu.flags[0])
+@@ -136,7 +136,7 @@ static void get_flags(void)
+ cpu.model += ((tfms >> 16) & 0xf) << 4;
+ }
+
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (max_amd_level)
+ : "a" (0x80000000)
+ : "ebx", "ecx", "edx");
+@@ -144,7 +144,7 @@ static void get_flags(void)
+ if (max_amd_level >= 0x80000001 &&
+ max_amd_level <= 0x8000ffff) {
+ u32 eax = 0x80000001;
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "+a" (eax),
+ "=c" (cpu.flags[6]),
+ "=d" (cpu.flags[1])
+@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
+ u32 ecx = MSR_K7_HWCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax &= ~(1 << 15);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ get_flags(); /* Make sure it really did something */
+ err = check_flags();
+@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
+ u32 ecx = MSR_VIA_FCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax |= (1<<1)|(1<<7);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ set_bit(X86_FEATURE_CX8, cpu.flags);
+ err = check_flags();
+@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
+ u32 eax, edx;
+ u32 level = 1;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
+- asm("cpuid"
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
++ asm volatile("cpuid"
+ : "+a" (level), "=d" (cpu.flags[0])
+ : : "ecx", "ebx");
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ err = check_flags();
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/boot/header.S linux-3.4-pax/arch/x86/boot/header.S
+--- linux-3.4/arch/x86/boot/header.S 2012-03-19 10:38:56.384050008 +0100
++++ linux-3.4-pax/arch/x86/boot/header.S 2012-05-21 12:10:09.268048874 +0200
+@@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical
+ # single linked list of
+ # struct setup_data
+
+-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
++pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
+
+ #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
+ #define VO_INIT_SIZE (VO__end - VO__text)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/boot/Makefile linux-3.4-pax/arch/x86/boot/Makefile
+--- linux-3.4/arch/x86/boot/Makefile 2012-05-21 11:32:56.723927619 +0200
++++ linux-3.4-pax/arch/x86/boot/Makefile 2012-05-21 12:10:09.268048874 +0200
+@@ -64,6 +64,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
+ $(call cc-option, -fno-stack-protector) \
+ $(call cc-option, -mpreferred-stack-boundary=2)
+ KBUILD_CFLAGS += $(call cc-option, -m32)
++ifdef CONSTIFY_PLUGIN
++KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
++endif
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/boot/memory.c linux-3.4-pax/arch/x86/boot/memory.c
+--- linux-3.4/arch/x86/boot/memory.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/boot/memory.c 2012-05-21 12:10:09.272048875 +0200
+@@ -19,7 +19,7 @@
+
+ static int detect_memory_e820(void)
+ {
+- int count = 0;
++ unsigned int count = 0;
+ struct biosregs ireg, oreg;
+ struct e820entry *desc = boot_params.e820_map;
+ static struct e820entry buf; /* static so it is zeroed */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/boot/video.c linux-3.4-pax/arch/x86/boot/video.c
+--- linux-3.4/arch/x86/boot/video.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/boot/video.c 2012-05-21 12:10:09.272048875 +0200
+@@ -96,7 +96,7 @@ static void store_mode_params(void)
+ static unsigned int get_entry(void)
+ {
+ char entry_buf[4];
+- int i, len = 0;
++ unsigned int i, len = 0;
+ int key;
+ unsigned int v;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/boot/video-vesa.c linux-3.4-pax/arch/x86/boot/video-vesa.c
+--- linux-3.4/arch/x86/boot/video-vesa.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/boot/video-vesa.c 2012-05-21 12:10:09.276048875 +0200
+@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
+
+ boot_params.screen_info.vesapm_seg = oreg.es;
+ boot_params.screen_info.vesapm_off = oreg.di;
++ boot_params.screen_info.vesapm_size = oreg.cx;
+ }
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/crypto/aesni-intel_asm.S linux-3.4-pax/arch/x86/crypto/aesni-intel_asm.S
+--- linux-3.4/arch/x86/crypto/aesni-intel_asm.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/crypto/aesni-intel_asm.S 2012-05-21 12:10:09.280048875 +0200
+@@ -31,6 +31,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/inst.h>
++#include <asm/alternative-asm.h>
+
+ #ifdef __x86_64__
+ .data
+@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
+ pop %r14
+ pop %r13
+ pop %r12
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_gcm_dec)
+
+
+ /*****************************************************************************
+@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
+ pop %r14
+ pop %r13
+ pop %r12
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_gcm_enc)
+
+ #endif
+
+@@ -1714,6 +1719,7 @@ _key_expansion_256a:
+ pxor %xmm1, %xmm0
+ movaps %xmm0, (TKEYP)
+ add $0x10, TKEYP
++ pax_force_retaddr_bts
+ ret
+
+ .align 4
+@@ -1738,6 +1744,7 @@ _key_expansion_192a:
+ shufps $0b01001110, %xmm2, %xmm1
+ movaps %xmm1, 0x10(TKEYP)
+ add $0x20, TKEYP
++ pax_force_retaddr_bts
+ ret
+
+ .align 4
+@@ -1757,6 +1764,7 @@ _key_expansion_192b:
+
+ movaps %xmm0, (TKEYP)
+ add $0x10, TKEYP
++ pax_force_retaddr_bts
+ ret
+
+ .align 4
+@@ -1769,6 +1777,7 @@ _key_expansion_256b:
+ pxor %xmm1, %xmm2
+ movaps %xmm2, (TKEYP)
+ add $0x10, TKEYP
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
+ #ifndef __x86_64__
+ popl KEYP
+ #endif
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_set_key)
+
+ /*
+ * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
+ popl KLEN
+ popl KEYP
+ #endif
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_enc)
+
+ /*
+ * _aesni_enc1: internal ABI
+@@ -1959,6 +1972,7 @@ _aesni_enc1:
+ AESENC KEY STATE
+ movaps 0x70(TKEYP), KEY
+ AESENCLAST KEY STATE
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -2067,6 +2081,7 @@ _aesni_enc4:
+ AESENCLAST KEY STATE2
+ AESENCLAST KEY STATE3
+ AESENCLAST KEY STATE4
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
+ popl KLEN
+ popl KEYP
+ #endif
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_dec)
+
+ /*
+ * _aesni_dec1: internal ABI
+@@ -2146,6 +2163,7 @@ _aesni_dec1:
+ AESDEC KEY STATE
+ movaps 0x70(TKEYP), KEY
+ AESDECLAST KEY STATE
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -2254,6 +2272,7 @@ _aesni_dec4:
+ AESDECLAST KEY STATE2
+ AESDECLAST KEY STATE3
+ AESDECLAST KEY STATE4
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
+ popl KEYP
+ popl LEN
+ #endif
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_ecb_enc)
+
+ /*
+ * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
+ popl KEYP
+ popl LEN
+ #endif
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_ecb_dec)
+
+ /*
+ * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
+ popl LEN
+ popl IVP
+ #endif
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_cbc_enc)
+
+ /*
+ * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+@@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
+ popl LEN
+ popl IVP
+ #endif
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_cbc_dec)
+
+ #ifdef __x86_64__
+ .align 16
+@@ -2524,6 +2551,7 @@ _aesni_inc_init:
+ mov $1, TCTR_LOW
+ MOVQ_R64_XMM TCTR_LOW INC
+ MOVQ_R64_XMM CTR TCTR_LOW
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -2552,6 +2580,7 @@ _aesni_inc:
+ .Linc_low:
+ movaps CTR, IV
+ PSHUFB_XMM BSWAP_MASK IV
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
+ .Lctr_enc_ret:
+ movups IV, (IVP)
+ .Lctr_enc_just_ret:
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_ctr_enc)
+ #endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/crypto/aes-x86_64-asm_64.S linux-3.4-pax/arch/x86/crypto/aes-x86_64-asm_64.S
+--- linux-3.4/arch/x86/crypto/aes-x86_64-asm_64.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/crypto/aes-x86_64-asm_64.S 2012-05-21 12:10:09.284048875 +0200
+@@ -8,6 +8,8 @@
+ * including this sentence is retained in full.
+ */
+
++#include <asm/alternative-asm.h>
++
+ .extern crypto_ft_tab
+ .extern crypto_it_tab
+ .extern crypto_fl_tab
+@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
+ je B192; \
+ leaq 32(r9),r9;
+
++#define ret pax_force_retaddr 0, 1; ret
++
+ #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
+ movq r1,r2; \
+ movq r3,r4; \
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/crypto/blowfish-x86_64-asm_64.S linux-3.4-pax/arch/x86/crypto/blowfish-x86_64-asm_64.S
+--- linux-3.4/arch/x86/crypto/blowfish-x86_64-asm_64.S 2012-01-08 19:47:49.055473010 +0100
++++ linux-3.4-pax/arch/x86/crypto/blowfish-x86_64-asm_64.S 2012-05-21 12:10:09.284048875 +0200
+@@ -20,6 +20,8 @@
+ *
+ */
+
++#include <asm/alternative-asm.h>
++
+ .file "blowfish-x86_64-asm.S"
+ .text
+
+@@ -151,9 +153,11 @@ __blowfish_enc_blk:
+ jnz __enc_xor;
+
+ write_block();
++ pax_force_retaddr 0, 1
+ ret;
+ __enc_xor:
+ xor_block();
++ pax_force_retaddr 0, 1
+ ret;
+
+ .align 8
+@@ -188,6 +192,7 @@ blowfish_dec_blk:
+
+ movq %r11, %rbp;
+
++ pax_force_retaddr 0, 1
+ ret;
+
+ /**********************************************************************
+@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
+
+ popq %rbx;
+ popq %rbp;
++ pax_force_retaddr 0, 1
+ ret;
+
+ __enc_xor4:
+@@ -349,6 +355,7 @@ __enc_xor4:
+
+ popq %rbx;
+ popq %rbp;
++ pax_force_retaddr 0, 1
+ ret;
+
+ .align 8
+@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
+ popq %rbx;
+ popq %rbp;
+
++ pax_force_retaddr 0, 1
+ ret;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/crypto/camellia-x86_64-asm_64.S linux-3.4-pax/arch/x86/crypto/camellia-x86_64-asm_64.S
+--- linux-3.4/arch/x86/crypto/camellia-x86_64-asm_64.S 2012-05-21 11:32:56.755927621 +0200
++++ linux-3.4-pax/arch/x86/crypto/camellia-x86_64-asm_64.S 2012-05-21 12:10:09.288048876 +0200
+@@ -20,6 +20,8 @@
+ *
+ */
+
++#include <asm/alternative-asm.h>
++
+ .file "camellia-x86_64-asm_64.S"
+ .text
+
+@@ -229,12 +231,14 @@ __enc_done:
+ enc_outunpack(mov, RT1);
+
+ movq RRBP, %rbp;
++ pax_force_retaddr 0, 1
+ ret;
+
+ __enc_xor:
+ enc_outunpack(xor, RT1);
+
+ movq RRBP, %rbp;
++ pax_force_retaddr 0, 1
+ ret;
+
+ .global camellia_dec_blk;
+@@ -275,6 +279,7 @@ __dec_rounds16:
+ dec_outunpack();
+
+ movq RRBP, %rbp;
++ pax_force_retaddr 0, 1
+ ret;
+
+ /**********************************************************************
+@@ -468,6 +473,7 @@ __enc2_done:
+
+ movq RRBP, %rbp;
+ popq %rbx;
++ pax_force_retaddr 0, 1
+ ret;
+
+ __enc2_xor:
+@@ -475,6 +481,7 @@ __enc2_xor:
+
+ movq RRBP, %rbp;
+ popq %rbx;
++ pax_force_retaddr 0, 1
+ ret;
+
+ .global camellia_dec_blk_2way;
+@@ -517,4 +524,5 @@ __dec2_rounds16:
+
+ movq RRBP, %rbp;
+ movq RXOR, %rbx;
++ pax_force_retaddr 0, 1
+ ret;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/crypto/salsa20-x86_64-asm_64.S linux-3.4-pax/arch/x86/crypto/salsa20-x86_64-asm_64.S
+--- linux-3.4/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/crypto/salsa20-x86_64-asm_64.S 2012-05-21 12:10:09.288048876 +0200
+@@ -1,3 +1,5 @@
++#include <asm/alternative-asm.h>
++
+ # enter ECRYPT_encrypt_bytes
+ .text
+ .p2align 5
+@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
+ add %r11,%rsp
+ mov %rdi,%rax
+ mov %rsi,%rdx
++ pax_force_retaddr 0, 1
+ ret
+ # bytesatleast65:
+ ._bytesatleast65:
+@@ -891,6 +894,7 @@ ECRYPT_keysetup:
+ add %r11,%rsp
+ mov %rdi,%rax
+ mov %rsi,%rdx
++ pax_force_retaddr
+ ret
+ # enter ECRYPT_ivsetup
+ .text
+@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
+ add %r11,%rsp
+ mov %rdi,%rax
+ mov %rsi,%rdx
++ pax_force_retaddr
+ ret
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S linux-3.4-pax/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
+--- linux-3.4/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S 2012-05-21 11:32:56.775927622 +0200
++++ linux-3.4-pax/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S 2012-05-21 12:10:09.292048876 +0200
+@@ -24,6 +24,8 @@
+ *
+ */
+
++#include <asm/alternative-asm.h>
++
+ .file "serpent-sse2-x86_64-asm_64.S"
+ .text
+
+@@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
+ write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+ write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
+
++ pax_force_retaddr
+ ret;
+
+ __enc_xor8:
+ xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+ xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
+
++ pax_force_retaddr
+ ret;
+
+ .align 8
+@@ -755,4 +759,5 @@ serpent_dec_blk_8way:
+ write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
+ write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
+
++ pax_force_retaddr
+ ret;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/crypto/sha1_ssse3_asm.S linux-3.4-pax/arch/x86/crypto/sha1_ssse3_asm.S
+--- linux-3.4/arch/x86/crypto/sha1_ssse3_asm.S 2012-01-08 19:47:49.055473010 +0100
++++ linux-3.4-pax/arch/x86/crypto/sha1_ssse3_asm.S 2012-05-21 12:10:09.292048876 +0200
+@@ -28,6 +28,8 @@
+ * (at your option) any later version.
+ */
+
++#include <asm/alternative-asm.h>
++
+ #define CTX %rdi // arg1
+ #define BUF %rsi // arg2
+ #define CNT %rdx // arg3
+@@ -104,6 +106,7 @@
+ pop %r12
+ pop %rbp
+ pop %rbx
++ pax_force_retaddr 0, 1
+ ret
+
+ .size \name, .-\name
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/crypto/twofish-x86_64-asm_64-3way.S linux-3.4-pax/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+--- linux-3.4/arch/x86/crypto/twofish-x86_64-asm_64-3way.S 2012-01-08 19:47:49.063473009 +0100
++++ linux-3.4-pax/arch/x86/crypto/twofish-x86_64-asm_64-3way.S 2012-05-21 12:10:09.296048875 +0200
+@@ -20,6 +20,8 @@
+ *
+ */
+
++#include <asm/alternative-asm.h>
++
+ .file "twofish-x86_64-asm-3way.S"
+ .text
+
+@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
+ popq %r13;
+ popq %r14;
+ popq %r15;
++ pax_force_retaddr 0, 1
+ ret;
+
+ __enc_xor3:
+@@ -271,6 +274,7 @@ __enc_xor3:
+ popq %r13;
+ popq %r14;
+ popq %r15;
++ pax_force_retaddr 0, 1
+ ret;
+
+ .global twofish_dec_blk_3way
+@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
+ popq %r13;
+ popq %r14;
+ popq %r15;
++ pax_force_retaddr 0, 1
+ ret;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/crypto/twofish-x86_64-asm_64.S linux-3.4-pax/arch/x86/crypto/twofish-x86_64-asm_64.S
+--- linux-3.4/arch/x86/crypto/twofish-x86_64-asm_64.S 2012-01-08 19:47:49.063473009 +0100
++++ linux-3.4-pax/arch/x86/crypto/twofish-x86_64-asm_64.S 2012-05-21 12:10:09.296048875 +0200
+@@ -21,6 +21,7 @@
+ .text
+
+ #include <asm/asm-offsets.h>
++#include <asm/alternative-asm.h>
+
+ #define a_offset 0
+ #define b_offset 4
+@@ -268,6 +269,7 @@ twofish_enc_blk:
+
+ popq R1
+ movq $1,%rax
++ pax_force_retaddr 0, 1
+ ret
+
+ twofish_dec_blk:
+@@ -319,4 +321,5 @@ twofish_dec_blk:
+
+ popq R1
+ movq $1,%rax
++ pax_force_retaddr 0, 1
+ ret
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/ia32/ia32entry.S linux-3.4-pax/arch/x86/ia32/ia32entry.S
+--- linux-3.4/arch/x86/ia32/ia32entry.S 2012-03-19 10:38:56.404050007 +0100
++++ linux-3.4-pax/arch/x86/ia32/ia32entry.S 2012-05-21 12:10:09.300048875 +0200
+@@ -13,8 +13,10 @@
+ #include <asm/thread_info.h>
+ #include <asm/segment.h>
+ #include <asm/irqflags.h>
++#include <asm/pgtable.h>
+ #include <linux/linkage.h>
+ #include <linux/err.h>
++#include <asm/alternative-asm.h>
+
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
+ #include <linux/elf-em.h>
+@@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
+ ENDPROC(native_irq_enable_sysexit)
+ #endif
+
++ .macro pax_enter_kernel_user
++ pax_set_fptr_mask
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_enter_kernel_user
++#endif
++ .endm
++
++ .macro pax_exit_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_exit_kernel_user
++#endif
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushq %rax
++ pushq %r11
++ call pax_randomize_kstack
++ popq %r11
++ popq %rax
++#endif
++ .endm
++
++.macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++.endm
++
+ /*
+ * 32bit SYSENTER instruction entry.
+ *
+@@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
+ CFI_REGISTER rsp,rbp
+ SWAPGS_UNSAFE_STACK
+ movq PER_CPU_VAR(kernel_stack), %rsp
+- addq $(KERNEL_STACK_OFFSET),%rsp
+- /*
+- * No need to follow this irqs on/off section: the syscall
+- * disabled irqs, here we enable it straight after entry:
+- */
+- ENABLE_INTERRUPTS(CLBR_NONE)
+ movl %ebp,%ebp /* zero extension */
+ pushq_cfi $__USER32_DS
+ /*CFI_REL_OFFSET ss,0*/
+@@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
+ CFI_REL_OFFSET rsp,0
+ pushfq_cfi
+ /*CFI_REL_OFFSET rflags,0*/
+- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
+- CFI_REGISTER rip,r10
++ orl $X86_EFLAGS_IF,(%rsp)
++ GET_THREAD_INFO(%r11)
++ movl TI_sysenter_return(%r11), %r11d
++ CFI_REGISTER rip,r11
+ pushq_cfi $__USER32_CS
+ /*CFI_REL_OFFSET cs,0*/
+ movl %eax, %eax
+- pushq_cfi %r10
++ pushq_cfi %r11
+ CFI_REL_OFFSET rip,0
+ pushq_cfi %rax
+ cld
+ SAVE_ARGS 0,1,0
++ pax_enter_kernel_user
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs, here we enable it straight after entry:
++ */
++ ENABLE_INTERRUPTS(CLBR_NONE)
+ /* no need to do an access_ok check here because rbp has been
+ 32bit zero extended */
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov $PAX_USER_SHADOW_BASE,%r11
++ add %r11,%rbp
++#endif
++
+ 1: movl (%rbp),%ebp
+ .section __ex_table,"a"
+ .quad 1b,ia32_badarg
+ .previous
+- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++ GET_THREAD_INFO(%r11)
++ orl $TS_COMPAT,TI_status(%r11)
++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
+ CFI_REMEMBER_STATE
+ jnz sysenter_tracesys
+ cmpq $(IA32_NR_syscalls-1),%rax
+@@ -160,12 +197,15 @@ sysenter_do_call:
+ sysenter_dispatch:
+ call *ia32_sys_call_table(,%rax,8)
+ movq %rax,RAX-ARGOFFSET(%rsp)
++ GET_THREAD_INFO(%r11)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
+ jnz sysexit_audit
+ sysexit_from_sys_call:
+- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++ pax_exit_kernel_user
++ pax_erase_kstack
++ andl $~TS_COMPAT,TI_status(%r11)
+ /* clear IF, that popfq doesn't enable interrupts early */
+ andl $~0x200,EFLAGS-R11(%rsp)
+ movl RIP-R11(%rsp),%edx /* User %eip */
+@@ -191,6 +231,9 @@ sysexit_from_sys_call:
+ movl %eax,%esi /* 2nd arg: syscall number */
+ movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
+ call __audit_syscall_entry
++
++ pax_erase_kstack
++
+ movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja ia32_badsys
+@@ -202,7 +245,7 @@ sysexit_from_sys_call:
+ .endm
+
+ .macro auditsys_exit exit
+- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
+ jnz ia32_ret_from_sys_call
+ TRACE_IRQS_ON
+ sti
+@@ -213,11 +256,12 @@ sysexit_from_sys_call:
+ 1: setbe %al /* 1 if error, 0 if not */
+ movzbl %al,%edi /* zero-extend that into %edi */
+ call __audit_syscall_exit
++ GET_THREAD_INFO(%r11)
+ movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
+ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
+ cli
+ TRACE_IRQS_OFF
+- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++ testl %edi,TI_flags(%r11)
+ jz \exit
+ CLEAR_RREGS -ARGOFFSET
+ jmp int_with_check
+@@ -235,7 +279,7 @@ sysexit_audit:
+
+ sysenter_tracesys:
+ #ifdef CONFIG_AUDITSYSCALL
+- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
+ jz sysenter_auditsys
+ #endif
+ SAVE_REST
+@@ -243,6 +287,9 @@ sysenter_tracesys:
+ movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
+ movq %rsp,%rdi /* &pt_regs -> arg1 */
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
+ RESTORE_REST
+ cmpq $(IA32_NR_syscalls-1),%rax
+@@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
+ ENTRY(ia32_cstar_target)
+ CFI_STARTPROC32 simple
+ CFI_SIGNAL_FRAME
+- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
++ CFI_DEF_CFA rsp,0
+ CFI_REGISTER rip,rcx
+ /*CFI_REGISTER rflags,r11*/
+ SWAPGS_UNSAFE_STACK
+ movl %esp,%r8d
+ CFI_REGISTER rsp,r8
+ movq PER_CPU_VAR(kernel_stack),%rsp
++ SAVE_ARGS 8*6,0,0
++ pax_enter_kernel_user
+ /*
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs and here we enable it straight after entry:
+ */
+ ENABLE_INTERRUPTS(CLBR_NONE)
+- SAVE_ARGS 8,0,0
+ movl %eax,%eax /* zero extension */
+ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
+ movq %rcx,RIP-ARGOFFSET(%rsp)
+@@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
+ /* no need to do an access_ok check here because r8 has been
+ 32bit zero extended */
+ /* hardware stack frame is complete now */
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov $PAX_USER_SHADOW_BASE,%r11
++ add %r11,%r8
++#endif
++
+ 1: movl (%r8),%r9d
+ .section __ex_table,"a"
+ .quad 1b,ia32_badarg
+ .previous
+- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++ GET_THREAD_INFO(%r11)
++ orl $TS_COMPAT,TI_status(%r11)
++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
+ CFI_REMEMBER_STATE
+ jnz cstar_tracesys
+ cmpq $IA32_NR_syscalls-1,%rax
+@@ -317,12 +372,15 @@ cstar_do_call:
+ cstar_dispatch:
+ call *ia32_sys_call_table(,%rax,8)
+ movq %rax,RAX-ARGOFFSET(%rsp)
++ GET_THREAD_INFO(%r11)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
+ jnz sysretl_audit
+ sysretl_from_sys_call:
+- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++ pax_exit_kernel_user
++ pax_erase_kstack
++ andl $~TS_COMPAT,TI_status(%r11)
+ RESTORE_ARGS 0,-ARG_SKIP,0,0,0
+ movl RIP-ARGOFFSET(%rsp),%ecx
+ CFI_REGISTER rip,rcx
+@@ -350,7 +408,7 @@ sysretl_audit:
+
+ cstar_tracesys:
+ #ifdef CONFIG_AUDITSYSCALL
+- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
+ jz cstar_auditsys
+ #endif
+ xchgl %r9d,%ebp
+@@ -359,6 +417,9 @@ cstar_tracesys:
+ movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
+ movq %rsp,%rdi /* &pt_regs -> arg1 */
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
+ RESTORE_REST
+ xchgl %ebp,%r9d
+@@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
+ CFI_REL_OFFSET rip,RIP-RIP
+ PARAVIRT_ADJUST_EXCEPTION_FRAME
+ SWAPGS
+- /*
+- * No need to follow this irqs on/off section: the syscall
+- * disabled irqs and here we enable it straight after entry:
+- */
+- ENABLE_INTERRUPTS(CLBR_NONE)
+ movl %eax,%eax
+ pushq_cfi %rax
+ cld
+ /* note the registers are not zero extended to the sf.
+ this could be a problem. */
+ SAVE_ARGS 0,1,0
+- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++ pax_enter_kernel_user
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs and here we enable it straight after entry:
++ */
++ ENABLE_INTERRUPTS(CLBR_NONE)
++ GET_THREAD_INFO(%r11)
++ orl $TS_COMPAT,TI_status(%r11)
++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
+ jnz ia32_tracesys
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja ia32_badsys
+@@ -435,6 +498,9 @@ ia32_tracesys:
+ movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
+ movq %rsp,%rdi /* &pt_regs -> arg1 */
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
+ RESTORE_REST
+ cmpq $(IA32_NR_syscalls-1),%rax
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/ia32/ia32_signal.c linux-3.4-pax/arch/x86/ia32/ia32_signal.c
+--- linux-3.4/arch/x86/ia32/ia32_signal.c 2012-05-21 11:32:56.783927622 +0200
++++ linux-3.4-pax/arch/x86/ia32/ia32_signal.c 2012-05-21 12:10:09.300048875 +0200
+@@ -168,7 +168,7 @@ asmlinkage long sys32_sigaltstack(const
+ }
+ seg = get_fs();
+ set_fs(KERNEL_DS);
+- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
++ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
+ set_fs(seg);
+ if (ret >= 0 && uoss_ptr) {
+ if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
+@@ -369,7 +369,7 @@ static int ia32_setup_sigcontext(struct
+ */
+ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+ size_t frame_size,
+- void **fpstate)
++ void __user **fpstate)
+ {
+ unsigned long sp;
+
+@@ -390,7 +390,7 @@ static void __user *get_sigframe(struct
+
+ if (used_math()) {
+ sp = sp - sig_xstate_ia32_size;
+- *fpstate = (struct _fpstate_ia32 *) sp;
++ *fpstate = (struct _fpstate_ia32 __user *) sp;
+ if (save_i387_xstate_ia32(*fpstate) < 0)
+ return (void __user *) -1L;
+ }
+@@ -398,7 +398,7 @@ static void __user *get_sigframe(struct
+ sp -= frame_size;
+ /* Align the stack pointer according to the i386 ABI,
+ * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+- sp = ((sp + 4) & -16ul) - 4;
++ sp = ((sp - 12) & -16ul) - 4;
+ return (void __user *) sp;
+ }
+
+@@ -456,7 +456,7 @@ int ia32_setup_frame(int sig, struct k_s
+ * These are actually not used anymore, but left because some
+ * gdb versions depend on them as a marker.
+ */
+- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
+@@ -498,7 +498,7 @@ int ia32_setup_rt_frame(int sig, struct
+ 0xb8,
+ __NR_ia32_rt_sigreturn,
+ 0x80cd,
+- 0,
++ 0
+ };
+
+ frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
+@@ -528,16 +528,18 @@ int ia32_setup_rt_frame(int sig, struct
+
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
++ else if (current->mm->context.vdso)
++ /* Return stub is in 32bit vsyscall page */
++ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+ else
+- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
+- rt_sigreturn);
++ restorer = &frame->retcode;
+ put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
+
+ /*
+ * Not actually used anymore, but left because some gdb
+ * versions need it.
+ */
+- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/ia32/sys_ia32.c linux-3.4-pax/arch/x86/ia32/sys_ia32.c
+--- linux-3.4/arch/x86/ia32/sys_ia32.c 2012-05-21 11:32:56.795927623 +0200
++++ linux-3.4-pax/arch/x86/ia32/sys_ia32.c 2012-05-21 12:10:09.304048877 +0200
+@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsign
+ */
+ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
+ {
+- typeof(ubuf->st_uid) uid = 0;
+- typeof(ubuf->st_gid) gid = 0;
++ typeof(((struct stat64 *)0)->st_uid) uid = 0;
++ typeof(((struct stat64 *)0)->st_gid) gid = 0;
+ SET_UID(uid, stat->uid);
+ SET_GID(gid, stat->gid);
+ if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
+@@ -292,7 +292,7 @@ asmlinkage long sys32_alarm(unsigned int
+ return alarm_setitimer(seconds);
+ }
+
+-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
++asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
+ int options)
+ {
+ return compat_sys_wait4(pid, stat_addr, options, NULL);
+@@ -313,7 +313,7 @@ asmlinkage long sys32_sched_rr_get_inter
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
++ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
+ set_fs(old_fs);
+ if (put_compat_timespec(&t, interval))
+ return -EFAULT;
+@@ -329,7 +329,7 @@ asmlinkage long sys32_rt_sigpending(comp
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
++ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
+ set_fs(old_fs);
+ if (!ret) {
+ switch (_NSIG_WORDS) {
+@@ -354,7 +354,7 @@ asmlinkage long sys32_rt_sigqueueinfo(in
+ if (copy_siginfo_from_user32(&info, uinfo))
+ return -EFAULT;
+ set_fs(KERNEL_DS);
+- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
++ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
+ set_fs(old_fs);
+ return ret;
+ }
+@@ -399,7 +399,7 @@ asmlinkage long sys32_sendfile(int out_f
+ return -EFAULT;
+
+ set_fs(KERNEL_DS);
+- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
++ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
+ count);
+ set_fs(old_fs);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/alternative-asm.h linux-3.4-pax/arch/x86/include/asm/alternative-asm.h
+--- linux-3.4/arch/x86/include/asm/alternative-asm.h 2012-03-19 10:38:56.416050006 +0100
++++ linux-3.4-pax/arch/x86/include/asm/alternative-asm.h 2012-05-21 12:10:09.304048877 +0200
+@@ -15,6 +15,45 @@
+ .endm
+ #endif
+
++#ifdef KERNEXEC_PLUGIN
++ .macro pax_force_retaddr_bts rip=0
++ btsq $63,\rip(%rsp)
++ .endm
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
++ .macro pax_force_retaddr rip=0, reload=0
++ btsq $63,\rip(%rsp)
++ .endm
++ .macro pax_force_fptr ptr
++ btsq $63,\ptr
++ .endm
++ .macro pax_set_fptr_mask
++ .endm
++#endif
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ .macro pax_force_retaddr rip=0, reload=0
++ .if \reload
++ pax_set_fptr_mask
++ .endif
++ orq %r10,\rip(%rsp)
++ .endm
++ .macro pax_force_fptr ptr
++ orq %r10,\ptr
++ .endm
++ .macro pax_set_fptr_mask
++ movabs $0x8000000000000000,%r10
++ .endm
++#endif
++#else
++ .macro pax_force_retaddr rip=0, reload=0
++ .endm
++ .macro pax_force_fptr ptr
++ .endm
++ .macro pax_force_retaddr_bts rip=0
++ .endm
++ .macro pax_set_fptr_mask
++ .endm
++#endif
++
+ .macro altinstruction_entry orig alt feature orig_len alt_len
+ .long \orig - .
+ .long \alt - .
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/alternative.h linux-3.4-pax/arch/x86/include/asm/alternative.h
+--- linux-3.4/arch/x86/include/asm/alternative.h 2012-05-21 11:32:56.799927623 +0200
++++ linux-3.4-pax/arch/x86/include/asm/alternative.h 2012-05-21 12:10:09.308048878 +0200
+@@ -89,7 +89,7 @@ static inline int alternatives_text_rese
+ ".section .discard,\"aw\",@progbits\n" \
+ " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
+ ".previous\n" \
+- ".section .altinstr_replacement, \"ax\"\n" \
++ ".section .altinstr_replacement, \"a\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous"
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/apic.h linux-3.4-pax/arch/x86/include/asm/apic.h
+--- linux-3.4/arch/x86/include/asm/apic.h 2012-05-21 11:32:56.803927623 +0200
++++ linux-3.4-pax/arch/x86/include/asm/apic.h 2012-05-21 12:10:09.312048878 +0200
+@@ -44,7 +44,7 @@ static inline void generic_apic_probe(vo
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+
+-extern unsigned int apic_verbosity;
++extern int apic_verbosity;
+ extern int local_apic_timer_c2_ok;
+
+ extern int disable_apic;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/apm.h linux-3.4-pax/arch/x86/include/asm/apm.h
+--- linux-3.4/arch/x86/include/asm/apm.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/apm.h 2012-05-21 12:10:09.312048878 +0200
+@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%al\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%bl\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/atomic64_32.h linux-3.4-pax/arch/x86/include/asm/atomic64_32.h
+--- linux-3.4/arch/x86/include/asm/atomic64_32.h 2012-05-21 11:32:56.807927624 +0200
++++ linux-3.4-pax/arch/x86/include/asm/atomic64_32.h 2012-05-21 12:10:09.316048878 +0200
+@@ -12,6 +12,14 @@ typedef struct {
+ u64 __aligned(8) counter;
+ } atomic64_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ u64 __aligned(8) counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(val) { (val) }
+
+ #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
+@@ -37,21 +45,31 @@ typedef struct {
+ ATOMIC64_DECL_ONE(sym##_386)
+
+ ATOMIC64_DECL_ONE(add_386);
++ATOMIC64_DECL_ONE(add_unchecked_386);
+ ATOMIC64_DECL_ONE(sub_386);
++ATOMIC64_DECL_ONE(sub_unchecked_386);
+ ATOMIC64_DECL_ONE(inc_386);
++ATOMIC64_DECL_ONE(inc_unchecked_386);
+ ATOMIC64_DECL_ONE(dec_386);
++ATOMIC64_DECL_ONE(dec_unchecked_386);
+ #endif
+
+ #define alternative_atomic64(f, out, in...) \
+ __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
+
+ ATOMIC64_DECL(read);
++ATOMIC64_DECL(read_unchecked);
+ ATOMIC64_DECL(set);
++ATOMIC64_DECL(set_unchecked);
+ ATOMIC64_DECL(xchg);
+ ATOMIC64_DECL(add_return);
++ATOMIC64_DECL(add_return_unchecked);
+ ATOMIC64_DECL(sub_return);
++ATOMIC64_DECL(sub_return_unchecked);
+ ATOMIC64_DECL(inc_return);
++ATOMIC64_DECL(inc_return_unchecked);
+ ATOMIC64_DECL(dec_return);
++ATOMIC64_DECL(dec_return_unchecked);
+ ATOMIC64_DECL(dec_if_positive);
+ ATOMIC64_DECL(inc_not_zero);
+ ATOMIC64_DECL(add_unless);
+@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg
+ }
+
+ /**
++ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
++ * @p: pointer to type atomic64_unchecked_t
++ * @o: expected value
++ * @n: new value
++ *
++ * Atomically sets @v to @n if it was equal to @o and returns
++ * the old value.
++ */
++
++static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
++{
++ return cmpxchg64(&v->counter, o, n);
++}
++
++/**
+ * atomic64_xchg - xchg atomic64 variable
+ * @v: pointer to type atomic64_t
+ * @n: value to assign
+@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64
+ }
+
+ /**
++ * atomic64_set_unchecked - set atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ * @n: value to assign
++ *
++ * Atomically sets the value of @v to @n.
++ */
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
++{
++ unsigned high = (unsigned)(i >> 32);
++ unsigned low = (unsigned)i;
++ alternative_atomic64(set, /* no output */,
++ "S" (v), "b" (low), "c" (high)
++ : "eax", "edx", "memory");
++}
++
++/**
+ * atomic64_read - read atomic64 variable
+ * @v: pointer to type atomic64_t
+ *
+@@ -125,6 +174,19 @@ static inline long long atomic64_read(co
+ }
+
+ /**
++ * atomic64_read_unchecked - read atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically reads the value of @v and returns it.
++ */
++static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
++{
++ long long r;
++ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
++ return r;
++ }
++
++/**
+ * atomic64_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+@@ -139,6 +201,21 @@ static inline long long atomic64_add_ret
+ return i;
+ }
+
++/**
++ * atomic64_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + *@v
++ */
++static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
++{
++ alternative_atomic64(add_return_unchecked,
++ ASM_OUTPUT2("+A" (i), "+c" (v)),
++ ASM_NO_INPUT_CLOBBER("memory"));
++ return i;
++}
++
+ /*
+ * Other variants with different arithmetic operators:
+ */
+@@ -158,6 +235,14 @@ static inline long long atomic64_inc_ret
+ return a;
+ }
+
++static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ long long a;
++ alternative_atomic64(inc_return_unchecked, "=&A" (a),
++ "S" (v) : "memory", "ecx");
++ return a;
++}
++
+ static inline long long atomic64_dec_return(atomic64_t *v)
+ {
+ long long a;
+@@ -179,6 +264,21 @@ static inline long long atomic64_add(lon
+ ASM_OUTPUT2("+A" (i), "+c" (v)),
+ ASM_NO_INPUT_CLOBBER("memory"));
+ return i;
++}
++
++/**
++ * atomic64_add_unchecked - add integer to atomic64 variable
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
++{
++ __alternative_atomic64(add_unchecked, add_return_unchecked,
++ ASM_OUTPUT2("+A" (i), "+c" (v)),
++ ASM_NO_INPUT_CLOBBER("memory"));
++ return i;
+ }
+
+ /**
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/atomic64_64.h linux-3.4-pax/arch/x86/include/asm/atomic64_64.h
+--- linux-3.4/arch/x86/include/asm/atomic64_64.h 2012-01-08 19:47:49.095473007 +0100
++++ linux-3.4-pax/arch/x86/include/asm/atomic64_64.h 2012-05-21 12:10:09.316048878 +0200
+@@ -18,7 +18,19 @@
+ */
+ static inline long atomic64_read(const atomic64_t *v)
+ {
+- return (*(volatile long *)&(v)->counter);
++ return (*(volatile const long *)&(v)->counter);
++}
++
++/**
++ * atomic64_read_unchecked - read atomic64 variable
++ * @v: pointer of type atomic64_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ * Doesn't imply a read memory barrier.
++ */
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return (*(volatile const long *)&(v)->counter);
+ }
+
+ /**
+@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
+ }
+
+ /**
++ * atomic64_set_unchecked - set atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ v->counter = i;
++}
++
++/**
+ * atomic64_add - add integer to atomic64 variable
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
+ */
+ static inline void atomic64_add(long i, atomic64_t *v)
+ {
++ asm volatile(LOCK_PREFIX "addq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subq %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "er" (i), "m" (v->counter));
++}
++
++/**
++ * atomic64_add_unchecked - add integer to atomic64 variable
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
++{
+ asm volatile(LOCK_PREFIX "addq %1,%0"
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
+ */
+ static inline void atomic64_sub(long i, atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subq %1,%0"
++ asm volatile(LOCK_PREFIX "subq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addq %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "er" (i), "m" (v->counter));
++}
++
++/**
++ * atomic64_sub_unchecked - subtract the atomic64 variable
++ * @i: integer value to subtract
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "subq %1,%0\n"
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+ }
+@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addq %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "er" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
+ */
+ static inline void atomic64_inc(atomic64_t *v)
+ {
++ asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "m" (v->counter));
++}
++
++/**
++ * atomic64_inc_unchecked - increment atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
+ asm volatile(LOCK_PREFIX "incq %0"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
+ */
+ static inline void atomic64_dec(atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decq %0"
++ asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "m" (v->counter));
++}
++
++/**
++ * atomic64_dec_unchecked - decrement atomic64 variable
++ * @v: pointer to type atomic64_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "decq %0\n"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+ }
+@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decq %0; sete %1"
++ asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incq %0; sete %1"
++ asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subq %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "er" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(
+ */
+ static inline long atomic64_add_return(long i, atomic64_t *v)
+ {
++ return i + xadd_check_overflow(&v->counter, i);
++}
++
++/**
++ * atomic64_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
++{
+ return i + xadd(&v->counter, i);
+ }
+
+@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(l
+ }
+
+ #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ return atomic64_add_return_unchecked(1, v);
++}
+ #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
+
+ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atom
+ return cmpxchg(&v->counter, old, new);
+ }
+
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
+ static inline long atomic64_xchg(atomic64_t *v, long new)
+ {
+ return xchg(&v->counter, new);
+@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic6
+ */
+ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("add %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "sub %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a));
++
++ old = atomic64_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/atomic.h linux-3.4-pax/arch/x86/include/asm/atomic.h
+--- linux-3.4/arch/x86/include/asm/atomic.h 2012-01-08 19:47:49.091473008 +0100
++++ linux-3.4-pax/arch/x86/include/asm/atomic.h 2012-05-21 12:10:09.320048877 +0200
+@@ -22,7 +22,18 @@
+ */
+ static inline int atomic_read(const atomic_t *v)
+ {
+- return (*(volatile int *)&(v)->counter);
++ return (*(volatile const int *)&(v)->counter);
++}
++
++/**
++ * atomic_read_unchecked - read atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ */
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return (*(volatile const int *)&(v)->counter);
+ }
+
+ /**
+@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
+ }
+
+ /**
++ * atomic_set_unchecked - set atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
++
++/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
+ */
+ static inline void atomic_add(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "addl %1,%0"
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_add_unchecked - add integer to atomic variable
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
+ */
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subl %1,%0"
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_sub_unchecked - subtract integer from atomic variable
++ * @i: integer value to subtract
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
+ */
+ static inline void atomic_inc(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "incl %0"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter));
++}
++
++/**
++ * atomic_inc_unchecked - increment atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "incl %0\n"
+ : "+m" (v->counter));
+ }
+
+@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
+ */
+ static inline void atomic_dec(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decl %0"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter));
++}
++
++/**
++ * atomic_dec_unchecked - decrement atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "decl %0\n"
+ : "+m" (v->counter));
+ }
+
+@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decl %0; sete %1"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incl %0; sete %1"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
++ : "+m" (v->counter), "=qm" (c)
++ : : "memory");
++ return c != 0;
++}
++
++/**
++ * atomic_inc_and_test_unchecked - increment and test
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1
++ * and returns true if the result is zero, or false for all
++ * other cases.
++ */
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ unsigned char c;
++
++ asm volatile(LOCK_PREFIX "incl %0\n"
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -179,7 +341,7 @@ static inline int atomic_add_return(int
+ goto no_xadd;
+ #endif
+ /* Modern 486+ processor */
+- return i + xadd(&v->counter, i);
++ return i + xadd_check_overflow(&v->counter, i);
+
+ #ifdef CONFIG_M386
+ no_xadd: /* Legacy 386 processor */
+@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
+ }
+
+ /**
++ * atomic_add_return_unchecked - add integer and return
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++#ifdef CONFIG_M386
++ int __i;
++ unsigned long flags;
++ if (unlikely(boot_cpu_data.x86 <= 3))
++ goto no_xadd;
++#endif
++ /* Modern 486+ processor */
++ return i + xadd(&v->counter, i);
++
++#ifdef CONFIG_M386
++no_xadd: /* Legacy 386 processor */
++ raw_local_irq_save(flags);
++ __i = atomic_read_unchecked(v);
++ atomic_set_unchecked(v, i + __i);
++ raw_local_irq_restore(flags);
++ return i + __i;
++#endif
++}
++
++/**
+ * atomic_sub_return - subtract integer and return
+ * @v: pointer of type atomic_t
+ * @i: integer value to subtract
+@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int
+ }
+
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
+
+ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_
+ return cmpxchg(&v->counter, old, new);
+ }
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
+ static inline int atomic_xchg(atomic_t *v, int new)
+ {
+ return xchg(&v->counter, new);
+ }
+
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
++
+ /**
+ * __atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *
+ */
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "subl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a));
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(at
+ return c;
+ }
+
++/**
++ * atomic_inc_not_zero_hint - increment if not null
++ * @v: pointer of type atomic_t
++ * @hint: probable value of the atomic before the increment
++ *
++ * This version of atomic_inc_not_zero() gives a hint of probable
++ * value of the atomic. This helps processor to not read the memory
++ * before doing the atomic read/modify/write cycle, lowering
++ * number of bus transactions on some arches.
++ *
++ * Returns: 0 if increment was not done, 1 otherwise.
++ */
++#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
++static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
++{
++ int val, c = hint, new;
++
++ /* sanity test, should be removed by compiler if hint is a constant */
++ if (!hint)
++ return __atomic_add_unless(v, 1, 0);
++
++ do {
++ asm volatile("incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c));
++
++ val = atomic_cmpxchg(v, c, new);
++ if (val == c)
++ return 1;
++ c = val;
++ } while (c);
++
++ return 0;
++}
+
+ /*
+ * atomic_dec_if_positive - decrement by 1 if old value positive
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/bitops.h linux-3.4-pax/arch/x86/include/asm/bitops.h
+--- linux-3.4/arch/x86/include/asm/bitops.h 2012-03-19 10:38:56.420050006 +0100
++++ linux-3.4-pax/arch/x86/include/asm/bitops.h 2012-05-21 12:10:09.320048877 +0200
+@@ -38,7 +38,7 @@
+ * a mask operation on a byte.
+ */
+ #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
+-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
++#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
+ #define CONST_MASK(nr) (1 << ((nr) & 7))
+
+ /**
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/boot.h linux-3.4-pax/arch/x86/include/asm/boot.h
+--- linux-3.4/arch/x86/include/asm/boot.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/boot.h 2012-05-21 12:10:09.324048877 +0200
+@@ -11,10 +11,15 @@
+ #include <asm/pgtable_types.h>
+
+ /* Physical address where kernel should be loaded. */
+-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
++#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+ + (CONFIG_PHYSICAL_ALIGN - 1)) \
+ & ~(CONFIG_PHYSICAL_ALIGN - 1))
+
++#ifndef __ASSEMBLY__
++extern unsigned char __LOAD_PHYSICAL_ADDR[];
++#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
++#endif
++
+ /* Minimum kernel alignment, as a power of two */
+ #ifdef CONFIG_X86_64
+ #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/cacheflush.h linux-3.4-pax/arch/x86/include/asm/cacheflush.h
+--- linux-3.4/arch/x86/include/asm/cacheflush.h 2012-05-21 11:32:56.815927624 +0200
++++ linux-3.4-pax/arch/x86/include/asm/cacheflush.h 2012-05-21 12:10:09.324048877 +0200
+@@ -27,7 +27,7 @@ static inline unsigned long get_page_mem
+ unsigned long pg_flags = pg->flags & _PGMT_MASK;
+
+ if (pg_flags == _PGMT_DEFAULT)
+- return -1;
++ return ~0UL;
+ else if (pg_flags == _PGMT_WC)
+ return _PAGE_CACHE_WC;
+ else if (pg_flags == _PGMT_UC_MINUS)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/cache.h linux-3.4-pax/arch/x86/include/asm/cache.h
+--- linux-3.4/arch/x86/include/asm/cache.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/cache.h 2012-05-21 12:10:09.328048878 +0200
+@@ -5,12 +5,13 @@
+
+ /* L1 cache line size */
+ #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
++#define __read_only __attribute__((__section__(".data..read_only")))
+
+ #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
+-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
++#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
+
+ #ifdef CONFIG_X86_VSMP
+ #ifdef CONFIG_SMP
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/checksum_32.h linux-3.4-pax/arch/x86/include/asm/checksum_32.h
+--- linux-3.4/arch/x86/include/asm/checksum_32.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/checksum_32.h 2012-05-21 12:10:09.328048878 +0200
+@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
+
++asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
++ int len, __wsum sum,
++ int *src_err_ptr, int *dst_err_ptr);
++
++asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
++ int len, __wsum sum,
++ int *src_err_ptr, int *dst_err_ptr);
++
+ /*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
+ int *err_ptr)
+ {
+ might_sleep();
+- return csum_partial_copy_generic((__force void *)src, dst,
++ return csum_partial_copy_generic_from_user((__force void *)src, dst,
+ len, sum, err_ptr, NULL);
+ }
+
+@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
+ {
+ might_sleep();
+ if (access_ok(VERIFY_WRITE, dst, len))
+- return csum_partial_copy_generic(src, (__force void *)dst,
++ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
+ len, sum, NULL, err_ptr);
+
+ if (len)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/cmpxchg.h linux-3.4-pax/arch/x86/include/asm/cmpxchg.h
+--- linux-3.4/arch/x86/include/asm/cmpxchg.h 2012-05-21 11:32:56.819927624 +0200
++++ linux-3.4-pax/arch/x86/include/asm/cmpxchg.h 2012-05-21 12:10:09.332048878 +0200
+@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
+ __compiletime_error("Bad argument size for cmpxchg");
+ extern void __xadd_wrong_size(void)
+ __compiletime_error("Bad argument size for xadd");
++extern void __xadd_check_overflow_wrong_size(void)
++ __compiletime_error("Bad argument size for xadd_check_overflow");
+ extern void __add_wrong_size(void)
+ __compiletime_error("Bad argument size for add");
++extern void __add_check_overflow_wrong_size(void)
++ __compiletime_error("Bad argument size for add_check_overflow");
+
+ /*
+ * Constants for operation sizes. On 32-bit, the 64-bit size it set to
+@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
+ __ret; \
+ })
+
++#define __xchg_op_check_overflow(ptr, arg, op, lock) \
++ ({ \
++ __typeof__ (*(ptr)) __ret = (arg); \
++ switch (sizeof(*(ptr))) { \
++ case __X86_CASE_L: \
++ asm volatile (lock #op "l %0, %1\n" \
++ "jno 0f\n" \
++ "mov %0,%1\n" \
++ "int $4\n0:\n" \
++ _ASM_EXTABLE(0b, 0b) \
++ : "+r" (__ret), "+m" (*(ptr)) \
++ : : "memory", "cc"); \
++ break; \
++ case __X86_CASE_Q: \
++ asm volatile (lock #op "q %q0, %1\n" \
++ "jno 0f\n" \
++ "mov %0,%1\n" \
++ "int $4\n0:\n" \
++ _ASM_EXTABLE(0b, 0b) \
++ : "+r" (__ret), "+m" (*(ptr)) \
++ : : "memory", "cc"); \
++ break; \
++ default: \
++ __ ## op ## _check_overflow_wrong_size(); \
++ } \
++ __ret; \
++ })
++
+ /*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+ * Since this is generally used to protect other memory information, we
+@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
+ #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
+ #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
+
++#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
++#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
++
+ #define __add(ptr, inc, lock) \
+ ({ \
+ __typeof__ (*(ptr)) __ret = (inc); \
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/cpufeature.h linux-3.4-pax/arch/x86/include/asm/cpufeature.h
+--- linux-3.4/arch/x86/include/asm/cpufeature.h 2012-05-21 11:32:56.823927624 +0200
++++ linux-3.4-pax/arch/x86/include/asm/cpufeature.h 2012-05-21 12:10:09.332048878 +0200
+@@ -371,7 +371,7 @@ static __always_inline __pure bool __sta
+ ".section .discard,\"aw\",@progbits\n"
+ " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
+ ".previous\n"
+- ".section .altinstr_replacement,\"ax\"\n"
++ ".section .altinstr_replacement,\"a\"\n"
+ "3: movb $1,%0\n"
+ "4:\n"
+ ".previous\n"
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/desc_defs.h linux-3.4-pax/arch/x86/include/asm/desc_defs.h
+--- linux-3.4/arch/x86/include/asm/desc_defs.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/desc_defs.h 2012-05-21 12:10:09.336048878 +0200
+@@ -31,6 +31,12 @@ struct desc_struct {
+ unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
+ unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
+ };
++ struct {
++ u16 offset_low;
++ u16 seg;
++ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
++ unsigned offset_high: 16;
++ } gate;
+ };
+ } __attribute__((packed));
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/desc.h linux-3.4-pax/arch/x86/include/asm/desc.h
+--- linux-3.4/arch/x86/include/asm/desc.h 2012-03-19 10:38:56.428050005 +0100
++++ linux-3.4-pax/arch/x86/include/asm/desc.h 2012-05-21 12:10:09.336048878 +0200
+@@ -4,6 +4,7 @@
+ #include <asm/desc_defs.h>
+ #include <asm/ldt.h>
+ #include <asm/mmu.h>
++#include <asm/pgtable.h>
+
+ #include <linux/smp.h>
+
+@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
+
+ desc->type = (info->read_exec_only ^ 1) << 1;
+ desc->type |= info->contents << 2;
++ desc->type |= info->seg_not_present ^ 1;
+
+ desc->s = 1;
+ desc->dpl = 0x3;
+@@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_
+ }
+
+ extern struct desc_ptr idt_descr;
+-extern gate_desc idt_table[];
+ extern struct desc_ptr nmi_idt_descr;
+-extern gate_desc nmi_idt_table[];
+-
+-struct gdt_page {
+- struct desc_struct gdt[GDT_ENTRIES];
+-} __attribute__((aligned(PAGE_SIZE)));
+-
+-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
++extern gate_desc idt_table[256];
++extern gate_desc nmi_idt_table[256];
+
++extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
+ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+ {
+- return per_cpu(gdt_page, cpu).gdt;
++ return cpu_gdt_table[cpu];
+ }
+
+ #ifdef CONFIG_X86_64
+@@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *
+ unsigned long base, unsigned dpl, unsigned flags,
+ unsigned short seg)
+ {
+- gate->a = (seg << 16) | (base & 0xffff);
+- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
++ gate->gate.offset_low = base;
++ gate->gate.seg = seg;
++ gate->gate.reserved = 0;
++ gate->gate.type = type;
++ gate->gate.s = 0;
++ gate->gate.dpl = dpl;
++ gate->gate.p = 1;
++ gate->gate.offset_high = base >> 16;
+ }
+
+ #endif
+@@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(str
+
+ static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
+ {
++ pax_open_kernel();
+ memcpy(&idt[entry], gate, sizeof(*gate));
++ pax_close_kernel();
+ }
+
+ static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
+ {
++ pax_open_kernel();
+ memcpy(&ldt[entry], desc, 8);
++ pax_close_kernel();
+ }
+
+ static inline void
+@@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struc
+ default: size = sizeof(*gdt); break;
+ }
+
++ pax_open_kernel();
+ memcpy(&gdt[entry], desc, size);
++ pax_close_kernel();
+ }
+
+ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
+@@ -209,7 +218,9 @@ static inline void native_set_ldt(const
+
+ static inline void native_load_tr_desc(void)
+ {
++ pax_open_kernel();
+ asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
++ pax_close_kernel();
+ }
+
+ static inline void native_load_gdt(const struct desc_ptr *dtr)
+@@ -246,8 +257,10 @@ static inline void native_load_tls(struc
+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+ unsigned int i;
+
++ pax_open_kernel();
+ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+ gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
++ pax_close_kernel();
+ }
+
+ #define _LDT_empty(info) \
+@@ -310,7 +323,7 @@ static inline void set_desc_limit(struct
+ }
+
+ #ifdef CONFIG_X86_64
+-static inline void set_nmi_gate(int gate, void *addr)
++static inline void set_nmi_gate(int gate, const void *addr)
+ {
+ gate_desc s;
+
+@@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate
+ }
+ #endif
+
+-static inline void _set_gate(int gate, unsigned type, void *addr,
++static inline void _set_gate(int gate, unsigned type, const void *addr,
+ unsigned dpl, unsigned ist, unsigned seg)
+ {
+ gate_desc s;
+@@ -338,7 +351,7 @@ static inline void _set_gate(int gate, u
+ * Pentium F0 0F bugfix can have resulted in the mapped
+ * IDT being write-protected.
+ */
+-static inline void set_intr_gate(unsigned int n, void *addr)
++static inline void set_intr_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
+@@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsig
+ /*
+ * This routine sets up an interrupt gate at directory privilege level 3.
+ */
+-static inline void set_system_intr_gate(unsigned int n, void *addr)
++static inline void set_system_intr_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
+ }
+
+-static inline void set_system_trap_gate(unsigned int n, void *addr)
++static inline void set_system_trap_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
+ }
+
+-static inline void set_trap_gate(unsigned int n, void *addr)
++static inline void set_trap_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
+@@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigne
+ static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
++ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
+ }
+
+-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
++static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
+ }
+
+-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
++static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
+ }
+
++#ifdef CONFIG_X86_32
++static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
++{
++ struct desc_struct d;
++
++ if (likely(limit))
++ limit = (limit - 1UL) >> PAGE_SHIFT;
++ pack_descriptor(&d, base, limit, 0xFB, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
++}
++#endif
++
+ #endif /* _ASM_X86_DESC_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/e820.h linux-3.4-pax/arch/x86/include/asm/e820.h
+--- linux-3.4/arch/x86/include/asm/e820.h 2012-03-19 10:38:56.428050005 +0100
++++ linux-3.4-pax/arch/x86/include/asm/e820.h 2012-05-21 12:10:09.340048878 +0200
+@@ -69,7 +69,7 @@ struct e820map {
+ #define ISA_START_ADDRESS 0xa0000
+ #define ISA_END_ADDRESS 0x100000
+
+-#define BIOS_BEGIN 0x000a0000
++#define BIOS_BEGIN 0x000c0000
+ #define BIOS_END 0x00100000
+
+ #define BIOS_ROM_BASE 0xffe00000
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/elf.h linux-3.4-pax/arch/x86/include/asm/elf.h
+--- linux-3.4/arch/x86/include/asm/elf.h 2012-05-21 11:32:56.835927625 +0200
++++ linux-3.4-pax/arch/x86/include/asm/elf.h 2012-05-21 12:10:09.340048878 +0200
+@@ -243,7 +243,25 @@ extern int force_personality32;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
++#else
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++#ifdef CONFIG_X86_32
++#define PAX_ELF_ET_DYN_BASE 0x10000000UL
++
++#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#else
++#define PAX_ELF_ET_DYN_BASE 0x400000UL
++
++#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
++#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
++#endif
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+@@ -296,16 +314,12 @@ do { \
+
+ #define ARCH_DLINFO \
+ do { \
+- if (vdso_enabled) \
+- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+- (unsigned long)current->mm->context.vdso); \
++ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
+ } while (0)
+
+ #define ARCH_DLINFO_X32 \
+ do { \
+- if (vdso_enabled) \
+- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+- (unsigned long)current->mm->context.vdso); \
++ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
+ } while (0)
+
+ #define AT_SYSINFO 32
+@@ -320,7 +334,7 @@ else \
+
+ #endif /* !CONFIG_X86_32 */
+
+-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
++#define VDSO_CURRENT_BASE (current->mm->context.vdso)
+
+ #define VDSO_ENTRY \
+ ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
+@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(st
+ extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
+ #define compat_arch_setup_additional_pages syscall32_setup_pages
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ /*
+ * True on X86_32 or when emulating IA32 on X86_64
+ */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/emergency-restart.h linux-3.4-pax/arch/x86/include/asm/emergency-restart.h
+--- linux-3.4/arch/x86/include/asm/emergency-restart.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/emergency-restart.h 2012-05-21 12:10:09.344048879 +0200
+@@ -15,6 +15,6 @@ enum reboot_type {
+
+ extern enum reboot_type reboot_type;
+
+-extern void machine_emergency_restart(void);
++extern void machine_emergency_restart(void) __noreturn;
+
+ #endif /* _ASM_X86_EMERGENCY_RESTART_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/fpu-internal.h linux-3.4-pax/arch/x86/include/asm/fpu-internal.h
+--- linux-3.4/arch/x86/include/asm/fpu-internal.h 2012-05-21 11:32:56.839927625 +0200
++++ linux-3.4-pax/arch/x86/include/asm/fpu-internal.h 2012-05-21 12:10:09.348048879 +0200
+@@ -86,6 +86,11 @@ static inline int fxrstor_checking(struc
+ {
+ int err;
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
++ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
++#endif
++
+ /* See comment in fxsave() below. */
+ #ifdef CONFIG_AS_FXSAVEQ
+ asm volatile("1: fxrstorq %[fx]\n\t"
+@@ -115,6 +120,11 @@ static inline int fxsave_user(struct i38
+ {
+ int err;
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
++ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
++#endif
++
+ /*
+ * Clear the bytes not touched by the fxsave and reserved
+ * for the SW usage.
+@@ -271,7 +281,7 @@ static inline int restore_fpu_checking(s
+ "emms\n\t" /* clear stack tags */
+ "fildl %P[addr]", /* set F?P to defined value */
+ X86_FEATURE_FXSAVE_LEAK,
+- [addr] "m" (tsk->thread.fpu.has_fpu));
++ [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
+
+ return fpu_restore_checking(&tsk->thread.fpu);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/futex.h linux-3.4-pax/arch/x86/include/asm/futex.h
+--- linux-3.4/arch/x86/include/asm/futex.h 2012-05-21 11:32:56.839927625 +0200
++++ linux-3.4-pax/arch/x86/include/asm/futex.h 2012-05-21 12:10:09.348048879 +0200
+@@ -11,16 +11,18 @@
+ #include <asm/processor.h>
+
+ #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
++ typecheck(u32 __user *, uaddr); \
+ asm volatile("1:\t" insn "\n" \
+ "2:\t.section .fixup,\"ax\"\n" \
+ "3:\tmov\t%3, %1\n" \
+ "\tjmp\t2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
++ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
+ : "i" (-EFAULT), "0" (oparg), "1" (0))
+
+ #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
++ typecheck(u32 __user *, uaddr); \
+ asm volatile("1:\tmovl %2, %0\n" \
+ "\tmovl\t%0, %3\n" \
+ "\t" insn "\n" \
+@@ -33,7 +35,7 @@
+ _ASM_EXTABLE(1b, 4b) \
+ _ASM_EXTABLE(2b, 4b) \
+ : "=&a" (oldval), "=&r" (ret), \
+- "+m" (*uaddr), "=&r" (tem) \
++ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
+ : "r" (oparg), "i" (-EFAULT), "1" (0))
+
+ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+@@ -60,10 +62,10 @@ static inline int futex_atomic_op_inuser
+
+ switch (op) {
+ case FUTEX_OP_SET:
+- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
++ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
++ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
+ uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+@@ -122,13 +124,13 @@ static inline int futex_atomic_cmpxchg_i
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
++ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
+ "2:\t.section .fixup, \"ax\"\n"
+ "3:\tmov %3, %0\n"
+ "\tjmp 2b\n"
+ "\t.previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
++ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
+ : "i" (-EFAULT), "r" (newval), "1" (oldval)
+ : "memory"
+ );
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/hw_irq.h linux-3.4-pax/arch/x86/include/asm/hw_irq.h
+--- linux-3.4/arch/x86/include/asm/hw_irq.h 2012-01-08 19:47:49.127473006 +0100
++++ linux-3.4-pax/arch/x86/include/asm/hw_irq.h 2012-05-21 12:10:09.352048879 +0200
+@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
+ extern void enable_IO_APIC(void);
+
+ /* Statistics */
+-extern atomic_t irq_err_count;
+-extern atomic_t irq_mis_count;
++extern atomic_unchecked_t irq_err_count;
++extern atomic_unchecked_t irq_mis_count;
+
+ /* EISA */
+ extern void eisa_set_level_irq(unsigned int irq);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/io.h linux-3.4-pax/arch/x86/include/asm/io.h
+--- linux-3.4/arch/x86/include/asm/io.h 2011-10-24 12:48:26.091091780 +0200
++++ linux-3.4-pax/arch/x86/include/asm/io.h 2012-05-21 12:10:09.352048879 +0200
+@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
+
+ #include <linux/vmalloc.h>
+
++#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
++static inline int valid_phys_addr_range(unsigned long addr, size_t count)
++{
++ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
++static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
++{
++ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
+ /*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/irqflags.h linux-3.4-pax/arch/x86/include/asm/irqflags.h
+--- linux-3.4/arch/x86/include/asm/irqflags.h 2011-10-24 12:48:26.103091782 +0200
++++ linux-3.4-pax/arch/x86/include/asm/irqflags.h 2012-05-21 12:10:09.356048879 +0200
+@@ -141,6 +141,11 @@ static inline notrace unsigned long arch
+ sti; \
+ sysexit
+
++#define GET_CR0_INTO_RDI mov %cr0, %rdi
++#define SET_RDI_INTO_CR0 mov %rdi, %cr0
++#define GET_CR3_INTO_RDI mov %cr3, %rdi
++#define SET_RDI_INTO_CR3 mov %rdi, %cr3
++
+ #else
+ #define INTERRUPT_RETURN iret
+ #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/kprobes.h linux-3.4-pax/arch/x86/include/asm/kprobes.h
+--- linux-3.4/arch/x86/include/asm/kprobes.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/kprobes.h 2012-05-21 12:10:09.356048879 +0200
+@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
+ #define RELATIVEJUMP_SIZE 5
+ #define RELATIVECALL_OPCODE 0xe8
+ #define RELATIVE_ADDR_SIZE 4
+-#define MAX_STACK_SIZE 64
+-#define MIN_STACK_SIZE(ADDR) \
+- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
+- THREAD_SIZE - (unsigned long)(ADDR))) \
+- ? (MAX_STACK_SIZE) \
+- : (((unsigned long)current_thread_info()) + \
+- THREAD_SIZE - (unsigned long)(ADDR)))
++#define MAX_STACK_SIZE 64UL
++#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
+
+ #define flush_insn_slot(p) do { } while (0)
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/kvm_host.h linux-3.4-pax/arch/x86/include/asm/kvm_host.h
+--- linux-3.4/arch/x86/include/asm/kvm_host.h 2012-05-21 11:32:56.903927629 +0200
++++ linux-3.4-pax/arch/x86/include/asm/kvm_host.h 2012-05-22 15:28:29.991384691 +0200
+@@ -679,7 +679,7 @@ struct kvm_x86_ops {
+ int (*check_intercept)(struct kvm_vcpu *vcpu,
+ struct x86_instruction_info *info,
+ enum x86_intercept_stage stage);
+-};
++} __do_const;
+
+ struct kvm_arch_async_pf {
+ u32 token;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/local.h linux-3.4-pax/arch/x86/include/asm/local.h
+--- linux-3.4/arch/x86/include/asm/local.h 2012-05-21 11:32:56.915927629 +0200
++++ linux-3.4-pax/arch/x86/include/asm/local.h 2012-05-21 12:10:09.360048879 +0200
+@@ -17,26 +17,58 @@ typedef struct {
+
+ static inline void local_inc(local_t *l)
+ {
+- asm volatile(_ASM_INC "%0"
++ asm volatile(_ASM_INC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_DEC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (l->a.counter));
+ }
+
+ static inline void local_dec(local_t *l)
+ {
+- asm volatile(_ASM_DEC "%0"
++ asm volatile(_ASM_DEC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_INC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (l->a.counter));
+ }
+
+ static inline void local_add(long i, local_t *l)
+ {
+- asm volatile(_ASM_ADD "%1,%0"
++ asm volatile(_ASM_ADD "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_SUB "%1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (l->a.counter)
+ : "ir" (i));
+ }
+
+ static inline void local_sub(long i, local_t *l)
+ {
+- asm volatile(_ASM_SUB "%1,%0"
++ asm volatile(_ASM_SUB "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_ADD "%1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (l->a.counter)
+ : "ir" (i));
+ }
+@@ -54,7 +86,16 @@ static inline int local_sub_and_test(lon
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_SUB "%2,%0; sete %1"
++ asm volatile(_ASM_SUB "%2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_ADD "%2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -72,7 +113,16 @@ static inline int local_dec_and_test(loc
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_DEC "%0; sete %1"
++ asm volatile(_ASM_DEC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_INC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -90,7 +140,16 @@ static inline int local_inc_and_test(loc
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_INC "%0; sete %1"
++ asm volatile(_ASM_INC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_DEC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -109,7 +168,16 @@ static inline int local_add_negative(lon
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_ADD "%2,%0; sets %1"
++ asm volatile(_ASM_ADD "%2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_SUB "%2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -132,7 +200,15 @@ static inline long local_add_return(long
+ #endif
+ /* Modern 486+ processor */
+ __i = i;
+- asm volatile(_ASM_XADD "%0, %1;"
++ asm volatile(_ASM_XADD "%0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_MOV "%0,%1\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+r" (i), "+m" (l->a.counter)
+ : : "memory");
+ return i + __i;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/mman.h linux-3.4-pax/arch/x86/include/asm/mman.h
+--- linux-3.4/arch/x86/include/asm/mman.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/mman.h 2012-05-21 12:10:09.364048880 +0200
+@@ -5,4 +5,14 @@
+
+ #include <asm-generic/mman.h>
+
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++#ifdef CONFIG_X86_32
++#define arch_mmap_check i386_mmap_check
++int i386_mmap_check(unsigned long addr, unsigned long len,
++ unsigned long flags);
++#endif
++#endif
++#endif
++
+ #endif /* _ASM_X86_MMAN_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/mmu_context.h linux-3.4-pax/arch/x86/include/asm/mmu_context.h
+--- linux-3.4/arch/x86/include/asm/mmu_context.h 2011-10-24 12:48:26.115091779 +0200
++++ linux-3.4-pax/arch/x86/include/asm/mmu_context.h 2012-05-28 00:31:51.231163437 +0200
+@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
+
+ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+ {
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ unsigned int i;
++ pgd_t *pgd;
++
++ pax_open_kernel();
++ pgd = get_cpu_pgd(smp_processor_id());
++ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
++ set_pgd_batched(pgd+i, native_make_pgd(0));
++ pax_close_kernel();
++#endif
++
+ #ifdef CONFIG_SMP
+ if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+ percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
+ struct task_struct *tsk)
+ {
+ unsigned cpu = smp_processor_id();
++#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++ int tlbstate = TLBSTATE_OK;
++#endif
+
+ if (likely(prev != next)) {
+ #ifdef CONFIG_SMP
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++ tlbstate = percpu_read(cpu_tlbstate.state);
++#endif
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ percpu_write(cpu_tlbstate.active_mm, next);
+ #endif
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+ /* Re-load page tables */
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ pax_open_kernel();
++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
++ pax_close_kernel();
++ load_cr3(get_cpu_pgd(cpu));
++#else
+ load_cr3(next->pgd);
++#endif
+
+ /* stop flush ipis for the previous mm */
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
+ */
+ if (unlikely(prev->context.ldt != next->context.ldt))
+ load_LDT_nolock(&next->context);
+- }
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ if (!(__supported_pte_mask & _PAGE_NX)) {
++ smp_mb__before_clear_bit();
++ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
++ smp_mb__after_clear_bit();
++ cpu_set(cpu, next->context.cpu_user_cs_mask);
++ }
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
++ prev->context.user_cs_limit != next->context.user_cs_limit))
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
+ #ifdef CONFIG_SMP
++ else if (unlikely(tlbstate != TLBSTATE_OK))
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++#endif
++
++ }
+ else {
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ pax_open_kernel();
++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
++ pax_close_kernel();
++ load_cr3(get_cpu_pgd(cpu));
++#endif
++
++#ifdef CONFIG_SMP
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+
+@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
+ * tlb flush IPI delivery. We must reload CR3
+ * to make sure to use no freed page tables.
+ */
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ load_cr3(next->pgd);
++#endif
++
+ load_LDT_nolock(&next->context);
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (!(__supported_pte_mask & _PAGE_NX))
++ cpu_set(cpu, next->context.cpu_user_cs_mask);
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
++#endif
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++
+ }
+- }
+ #endif
++ }
+ }
+
+ #define activate_mm(prev, next) \
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/mmu.h linux-3.4-pax/arch/x86/include/asm/mmu.h
+--- linux-3.4/arch/x86/include/asm/mmu.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/mmu.h 2012-05-21 12:10:09.368048880 +0200
+@@ -9,7 +9,7 @@
+ * we put the segment information here.
+ */
+ typedef struct {
+- void *ldt;
++ struct desc_struct *ldt;
+ int size;
+
+ #ifdef CONFIG_X86_64
+@@ -18,7 +18,19 @@ typedef struct {
+ #endif
+
+ struct mutex lock;
+- void *vdso;
++ unsigned long vdso;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ unsigned long user_cs_base;
++ unsigned long user_cs_limit;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpumask_t cpu_user_cs_mask;
++#endif
++
++#endif
++#endif
+ } mm_context_t;
+
+ #ifdef CONFIG_SMP
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/module.h linux-3.4-pax/arch/x86/include/asm/module.h
+--- linux-3.4/arch/x86/include/asm/module.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/module.h 2012-05-21 12:10:09.368048880 +0200
+@@ -5,6 +5,7 @@
+
+ #ifdef CONFIG_X86_64
+ /* X86_64 does not define MODULE_PROC_FAMILY */
++#define MODULE_PROC_FAMILY ""
+ #elif defined CONFIG_M386
+ #define MODULE_PROC_FAMILY "386 "
+ #elif defined CONFIG_M486
+@@ -59,8 +60,20 @@
+ #error unknown processor family
+ #endif
+
+-#ifdef CONFIG_X86_32
+-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
++#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
++#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
++#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
++#else
++#define MODULE_PAX_KERNEXEC ""
+ #endif
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define MODULE_PAX_UDEREF "UDEREF "
++#else
++#define MODULE_PAX_UDEREF ""
++#endif
++
++#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
++
+ #endif /* _ASM_X86_MODULE_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/page_64_types.h linux-3.4-pax/arch/x86/include/asm/page_64_types.h
+--- linux-3.4/arch/x86/include/asm/page_64_types.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/page_64_types.h 2012-05-21 12:10:09.372048880 +0200
+@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
+
+ /* duplicated to the one in bootmem.h */
+ extern unsigned long max_pfn;
+-extern unsigned long phys_base;
++extern const unsigned long phys_base;
+
+ extern unsigned long __phys_addr(unsigned long);
+ #define __phys_reloc_hide(x) (x)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/paravirt.h linux-3.4-pax/arch/x86/include/asm/paravirt.h
+--- linux-3.4/arch/x86/include/asm/paravirt.h 2012-05-21 11:32:56.939927631 +0200
++++ linux-3.4-pax/arch/x86/include/asm/paravirt.h 2012-05-21 12:10:09.372048880 +0200
+@@ -668,6 +668,18 @@ static inline void set_pgd(pgd_t *pgdp,
+ val);
+ }
+
++static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
++{
++ pgdval_t val = native_pgd_val(pgd);
++
++ if (sizeof(pgdval_t) > sizeof(long))
++ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
++ val, (u64)val >> 32);
++ else
++ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
++ val);
++}
++
+ static inline void pgd_clear(pgd_t *pgdp)
+ {
+ set_pgd(pgdp, __pgd(0));
+@@ -749,6 +761,21 @@ static inline void __set_fixmap(unsigned
+ pv_mmu_ops.set_fixmap(idx, phys, flags);
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long pax_open_kernel(void)
++{
++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
++}
++
++static inline unsigned long pax_close_kernel(void)
++{
++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
++}
++#else
++static inline unsigned long pax_open_kernel(void) { return 0; }
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+
+ static inline int arch_spin_is_locked(struct arch_spinlock *lock)
+@@ -965,7 +992,7 @@ extern void default_banner(void);
+
+ #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
+ #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
+-#define PARA_INDIRECT(addr) *%cs:addr
++#define PARA_INDIRECT(addr) *%ss:addr
+ #endif
+
+ #define INTERRUPT_RETURN \
+@@ -1042,6 +1069,21 @@ extern void default_banner(void);
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
+ CLBR_NONE, \
+ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
++
++#define GET_CR0_INTO_RDI \
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
++ mov %rax,%rdi
++
++#define SET_RDI_INTO_CR0 \
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++
++#define GET_CR3_INTO_RDI \
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
++ mov %rax,%rdi
++
++#define SET_RDI_INTO_CR3 \
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
++
+ #endif /* CONFIG_X86_32 */
+
+ #endif /* __ASSEMBLY__ */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/paravirt_types.h linux-3.4-pax/arch/x86/include/asm/paravirt_types.h
+--- linux-3.4/arch/x86/include/asm/paravirt_types.h 2011-10-24 12:48:26.123091780 +0200
++++ linux-3.4-pax/arch/x86/include/asm/paravirt_types.h 2012-05-21 12:10:09.376048880 +0200
+@@ -84,20 +84,20 @@ struct pv_init_ops {
+ */
+ unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
+ unsigned long addr, unsigned len);
+-};
++} __no_const;
+
+
+ struct pv_lazy_ops {
+ /* Set deferred update mode, used for batching operations. */
+ void (*enter)(void);
+ void (*leave)(void);
+-};
++} __no_const;
+
+ struct pv_time_ops {
+ unsigned long long (*sched_clock)(void);
+ unsigned long long (*steal_clock)(int cpu);
+ unsigned long (*get_tsc_khz)(void);
+-};
++} __no_const;
+
+ struct pv_cpu_ops {
+ /* hooks for various privileged instructions */
+@@ -193,7 +193,7 @@ struct pv_cpu_ops {
+
+ void (*start_context_switch)(struct task_struct *prev);
+ void (*end_context_switch)(struct task_struct *next);
+-};
++} __no_const;
+
+ struct pv_irq_ops {
+ /*
+@@ -224,7 +224,7 @@ struct pv_apic_ops {
+ unsigned long start_eip,
+ unsigned long start_esp);
+ #endif
+-};
++} __no_const;
+
+ struct pv_mmu_ops {
+ unsigned long (*read_cr2)(void);
+@@ -313,6 +313,7 @@ struct pv_mmu_ops {
+ struct paravirt_callee_save make_pud;
+
+ void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
++ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
+ #endif /* PAGETABLE_LEVELS == 4 */
+ #endif /* PAGETABLE_LEVELS >= 3 */
+
+@@ -324,6 +325,12 @@ struct pv_mmu_ops {
+ an mfn. We can tell which is which from the index. */
+ void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
+ phys_addr_t phys, pgprot_t flags);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long (*pax_open_kernel)(void);
++ unsigned long (*pax_close_kernel)(void);
++#endif
++
+ };
+
+ struct arch_spinlock;
+@@ -334,7 +341,7 @@ struct pv_lock_ops {
+ void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
+ int (*spin_trylock)(struct arch_spinlock *lock);
+ void (*spin_unlock)(struct arch_spinlock *lock);
+-};
++} __no_const;
+
+ /* This contains all the paravirt structures: we get a convenient
+ * number for each function using the offset which we use to indicate
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/pgalloc.h linux-3.4-pax/arch/x86/include/asm/pgalloc.h
+--- linux-3.4/arch/x86/include/asm/pgalloc.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/pgalloc.h 2012-05-21 12:10:09.380048881 +0200
+@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
+ pmd_t *pmd, pte_t *pte)
+ {
+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
++}
++
++static inline void pmd_populate_user(struct mm_struct *mm,
++ pmd_t *pmd, pte_t *pte)
++{
++ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
+ set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
+ }
+
+@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct
+
+ #ifdef CONFIG_X86_PAE
+ extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
++{
++ pud_populate(mm, pudp, pmd);
++}
+ #else /* !CONFIG_X86_PAE */
+ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ {
+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
+ set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
+ }
++
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
++ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
++}
+ #endif /* CONFIG_X86_PAE */
+
+ #if PAGETABLE_LEVELS > 3
+@@ -114,6 +131,12 @@ static inline void pgd_populate(struct m
+ set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
+ }
+
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
++{
++ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
++ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
++}
++
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+ return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/pgtable-2level.h linux-3.4-pax/arch/x86/include/asm/pgtable-2level.h
+--- linux-3.4/arch/x86/include/asm/pgtable-2level.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/pgtable-2level.h 2012-05-21 12:10:09.380048881 +0200
+@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ *pmdp = pmd;
++ pax_close_kernel();
+ }
+
+ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/pgtable_32.h linux-3.4-pax/arch/x86/include/asm/pgtable_32.h
+--- linux-3.4/arch/x86/include/asm/pgtable_32.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/pgtable_32.h 2012-05-21 12:10:09.380048881 +0200
+@@ -25,9 +25,6 @@
+ struct mm_struct;
+ struct vm_area_struct;
+
+-extern pgd_t swapper_pg_dir[1024];
+-extern pgd_t initial_page_table[1024];
+-
+ static inline void pgtable_cache_init(void) { }
+ static inline void check_pgt_cache(void) { }
+ void paging_init(void);
+@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
+ # include <asm/pgtable-2level.h>
+ #endif
+
++extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
++extern pgd_t initial_page_table[PTRS_PER_PGD];
++#ifdef CONFIG_X86_PAE
++extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
++#endif
++
+ #if defined(CONFIG_HIGHPTE)
+ #define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
+@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
+ /* Clear a kernel PTE and flush it from the TLB */
+ #define kpte_clear_flush(ptep, vaddr) \
+ do { \
++ pax_open_kernel(); \
+ pte_clear(&init_mm, (vaddr), (ptep)); \
++ pax_close_kernel(); \
+ __flush_tlb_one((vaddr)); \
+ } while (0)
+
+@@ -74,6 +79,9 @@ do { \
+
+ #endif /* !__ASSEMBLY__ */
+
++#define HAVE_ARCH_UNMAPPED_AREA
++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
++
+ /*
+ * kern_addr_valid() is (1) for FLATMEM and (0) for
+ * SPARSEMEM and DISCONTIGMEM
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/pgtable_32_types.h linux-3.4-pax/arch/x86/include/asm/pgtable_32_types.h
+--- linux-3.4/arch/x86/include/asm/pgtable_32_types.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/pgtable_32_types.h 2012-05-21 12:10:09.384048881 +0200
+@@ -8,7 +8,7 @@
+ */
+ #ifdef CONFIG_X86_PAE
+ # include <asm/pgtable-3level_types.h>
+-# define PMD_SIZE (1UL << PMD_SHIFT)
++# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
+ # define PMD_MASK (~(PMD_SIZE - 1))
+ #else
+ # include <asm/pgtable-2level_types.h>
+@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
+ # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
+ #endif
+
++#ifdef CONFIG_PAX_KERNEXEC
++#ifndef __ASSEMBLY__
++extern unsigned char MODULES_EXEC_VADDR[];
++extern unsigned char MODULES_EXEC_END[];
++#endif
++#include <asm/boot.h>
++#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
++#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
++#else
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++#endif
++
+ #define MODULES_VADDR VMALLOC_START
+ #define MODULES_END VMALLOC_END
+ #define MODULES_LEN (MODULES_VADDR - MODULES_END)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/pgtable-3level.h linux-3.4-pax/arch/x86/include/asm/pgtable-3level.h
+--- linux-3.4/arch/x86/include/asm/pgtable-3level.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/pgtable-3level.h 2012-05-21 12:10:09.384048881 +0200
+@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
++ pax_close_kernel();
+ }
+
+ static inline void native_set_pud(pud_t *pudp, pud_t pud)
+ {
++ pax_open_kernel();
+ set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
++ pax_close_kernel();
+ }
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/pgtable_64.h linux-3.4-pax/arch/x86/include/asm/pgtable_64.h
+--- linux-3.4/arch/x86/include/asm/pgtable_64.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/pgtable_64.h 2012-05-21 12:10:09.388048880 +0200
+@@ -16,10 +16,14 @@
+
+ extern pud_t level3_kernel_pgt[512];
+ extern pud_t level3_ident_pgt[512];
++extern pud_t level3_vmalloc_start_pgt[512];
++extern pud_t level3_vmalloc_end_pgt[512];
++extern pud_t level3_vmemmap_pgt[512];
++extern pud_t level2_vmemmap_pgt[512];
+ extern pmd_t level2_kernel_pgt[512];
+ extern pmd_t level2_fixmap_pgt[512];
+-extern pmd_t level2_ident_pgt[512];
+-extern pgd_t init_level4_pgt[];
++extern pmd_t level2_ident_pgt[512*2];
++extern pgd_t init_level4_pgt[512];
+
+ #define swapper_pg_dir init_level4_pgt
+
+@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ *pmdp = pmd;
++ pax_close_kernel();
+ }
+
+ static inline void native_pmd_clear(pmd_t *pmd)
+@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_
+
+ static inline void native_set_pud(pud_t *pudp, pud_t pud)
+ {
++ pax_open_kernel();
+ *pudp = pud;
++ pax_close_kernel();
+ }
+
+ static inline void native_pud_clear(pud_t *pud)
+@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_
+
+ static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
+ {
++ pax_open_kernel();
++ *pgdp = pgd;
++ pax_close_kernel();
++}
++
++static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
++{
+ *pgdp = pgd;
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/pgtable_64_types.h linux-3.4-pax/arch/x86/include/asm/pgtable_64_types.h
+--- linux-3.4/arch/x86/include/asm/pgtable_64_types.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/pgtable_64_types.h 2012-05-21 12:10:09.388048880 +0200
+@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
+ #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
+ #define MODULES_END _AC(0xffffffffff000000, UL)
+ #define MODULES_LEN (MODULES_END - MODULES_VADDR)
++#define MODULES_EXEC_VADDR MODULES_VADDR
++#define MODULES_EXEC_END MODULES_END
++
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
+
+ #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/pgtable.h linux-3.4-pax/arch/x86/include/asm/pgtable.h
+--- linux-3.4/arch/x86/include/asm/pgtable.h 2012-03-19 10:38:56.460050004 +0100
++++ linux-3.4-pax/arch/x86/include/asm/pgtable.h 2012-05-30 02:30:59.578996029 +0200
+@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
+
+ #ifndef __PAGETABLE_PUD_FOLDED
+ #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
++#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
+ #define pgd_clear(pgd) native_pgd_clear(pgd)
+ #endif
+
+@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
+
+ #define arch_end_context_switch(prev) do {} while(0)
+
++#define pax_open_kernel() native_pax_open_kernel()
++#define pax_close_kernel() native_pax_close_kernel()
+ #endif /* CONFIG_PARAVIRT */
+
++#define __HAVE_ARCH_PAX_OPEN_KERNEL
++#define __HAVE_ARCH_PAX_CLOSE_KERNEL
++
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long native_pax_open_kernel(void)
++{
++ unsigned long cr0;
++
++ preempt_disable();
++ barrier();
++ cr0 = read_cr0() ^ X86_CR0_WP;
++ BUG_ON(unlikely(cr0 & X86_CR0_WP));
++ write_cr0(cr0);
++ return cr0 ^ X86_CR0_WP;
++}
++
++static inline unsigned long native_pax_close_kernel(void)
++{
++ unsigned long cr0;
++
++ cr0 = read_cr0() ^ X86_CR0_WP;
++ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
++ write_cr0(cr0);
++ barrier();
++ preempt_enable_no_resched();
++ return cr0 ^ X86_CR0_WP;
++}
++#else
++static inline unsigned long native_pax_open_kernel(void) { return 0; }
++static inline unsigned long native_pax_close_kernel(void) { return 0; }
++#endif
++
+ /*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
++static inline int pte_user(pte_t pte)
++{
++ return pte_val(pte) & _PAGE_USER;
++}
++
+ static inline int pte_dirty(pte_t pte)
+ {
+ return pte_flags(pte) & _PAGE_DIRTY;
+@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
+ return pte_clear_flags(pte, _PAGE_RW);
+ }
+
++static inline pte_t pte_mkread(pte_t pte)
++{
++ return __pte(pte_val(pte) | _PAGE_USER);
++}
++
+ static inline pte_t pte_mkexec(pte_t pte)
+ {
+- return pte_clear_flags(pte, _PAGE_NX);
++#ifdef CONFIG_X86_PAE
++ if (__supported_pte_mask & _PAGE_NX)
++ return pte_clear_flags(pte, _PAGE_NX);
++ else
++#endif
++ return pte_set_flags(pte, _PAGE_USER);
++}
++
++static inline pte_t pte_exprotect(pte_t pte)
++{
++#ifdef CONFIG_X86_PAE
++ if (__supported_pte_mask & _PAGE_NX)
++ return pte_set_flags(pte, _PAGE_NX);
++ else
++#endif
++ return pte_clear_flags(pte, _PAGE_USER);
+ }
+
+ static inline pte_t pte_mkdirty(pte_t pte)
+@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
+ #endif
+
+ #ifndef __ASSEMBLY__
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
++static inline pgd_t *get_cpu_pgd(unsigned int cpu)
++{
++ return cpu_pgd[cpu];
++}
++#endif
++
+ #include <linux/mm_types.h>
+
+ static inline int pte_none(pte_t pte)
+@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
+
+ static inline int pgd_bad(pgd_t pgd)
+ {
+- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
++ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
+ }
+
+ static inline int pgd_none(pgd_t pgd)
+@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
+ * pgd_offset() returns a (pgd_t *)
+ * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
+ */
+-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
++#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
++#endif
++
+ /*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
+ #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
+ #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
+
++#ifdef CONFIG_X86_32
++#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
++#else
++#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
++#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
++#else
++#define PAX_USER_SHADOW_BASE (_AC(0,UL))
++#endif
++
++#endif
++
+ #ifndef __ASSEMBLY__
+
+ extern int direct_gbpages;
+@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
+ * dst and src can be on the same page, but the range must not overlap,
+ * and must not cross a page boundary.
+ */
+-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
++static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
+ {
+- memcpy(dst, src, count * sizeof(pgd_t));
++ pax_open_kernel();
++ while (count--)
++ *dst++ = *src++;
++ pax_close_kernel();
+ }
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
++#endif
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
++#else
++static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
++#endif
+
+ #include <asm-generic/pgtable.h>
+ #endif /* __ASSEMBLY__ */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/pgtable_types.h linux-3.4-pax/arch/x86/include/asm/pgtable_types.h
+--- linux-3.4/arch/x86/include/asm/pgtable_types.h 2011-10-24 12:48:26.131091781 +0200
++++ linux-3.4-pax/arch/x86/include/asm/pgtable_types.h 2012-05-21 12:10:09.392048880 +0200
+@@ -16,13 +16,12 @@
+ #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
+ #define _PAGE_BIT_PAT 7 /* on 4KB pages */
+ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
++#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
+ #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
+ #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
+ #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
+-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
+-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
+-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
++#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
++#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
+ #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
+
+ /* If _PAGE_BIT_PRESENT is clear, we use these: */
+@@ -40,7 +39,6 @@
+ #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
+ #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
+ #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
+-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
+ #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
+ #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
+ #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+@@ -57,8 +55,10 @@
+
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
+-#else
++#elif defined(CONFIG_KMEMCHECK)
+ #define _PAGE_NX (_AT(pteval_t, 0))
++#else
++#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+ #endif
+
+ #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
+@@ -96,6 +96,9 @@
+ #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED)
+
++#define PAGE_READONLY_NOEXEC PAGE_READONLY
++#define PAGE_SHARED_NOEXEC PAGE_SHARED
++
+ #define __PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
+ #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
+@@ -106,7 +109,7 @@
+ #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
+ #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
+ #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
+-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
++#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
+ #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
+ #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
+ #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
+@@ -168,8 +171,8 @@
+ * bits are combined, this will alow user to access the high address mapped
+ * VDSO in the presence of CONFIG_COMPAT_VDSO
+ */
+-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
+-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
++#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
++#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
+ #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
+ #endif
+
+@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t p
+ {
+ return native_pgd_val(pgd) & PTE_FLAGS_MASK;
+ }
++#endif
+
++#if PAGETABLE_LEVELS == 3
++#include <asm-generic/pgtable-nopud.h>
++#endif
++
++#if PAGETABLE_LEVELS == 2
++#include <asm-generic/pgtable-nopmd.h>
++#endif
++
++#ifndef __ASSEMBLY__
+ #if PAGETABLE_LEVELS > 3
+ typedef struct { pudval_t pud; } pud_t;
+
+@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pu
+ return pud.pud;
+ }
+ #else
+-#include <asm-generic/pgtable-nopud.h>
+-
+ static inline pudval_t native_pud_val(pud_t pud)
+ {
+ return native_pgd_val(pud.pgd);
+@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pm
+ return pmd.pmd;
+ }
+ #else
+-#include <asm-generic/pgtable-nopmd.h>
+-
+ static inline pmdval_t native_pmd_val(pmd_t pmd)
+ {
+ return native_pgd_val(pmd.pud.pgd);
+@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
+
+ extern pteval_t __supported_pte_mask;
+ extern void set_nx(void);
+-extern int nx_enabled;
+
+ #define pgprot_writecombine pgprot_writecombine
+ extern pgprot_t pgprot_writecombine(pgprot_t prot);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/processor.h linux-3.4-pax/arch/x86/include/asm/processor.h
+--- linux-3.4/arch/x86/include/asm/processor.h 2012-05-21 11:32:56.963927632 +0200
++++ linux-3.4-pax/arch/x86/include/asm/processor.h 2012-05-30 02:17:46.999036899 +0200
+@@ -276,7 +276,7 @@ struct tss_struct {
+
+ } ____cacheline_aligned;
+
+-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
++extern struct tss_struct init_tss[NR_CPUS];
+
+ /*
+ * Save the original ist values for checking stack pointers during debugging
+@@ -807,11 +807,18 @@ static inline void spin_lock_prefetch(co
+ */
+ #define TASK_SIZE PAGE_OFFSET
+ #define TASK_SIZE_MAX TASK_SIZE
++
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
++#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
++#else
+ #define STACK_TOP TASK_SIZE
+-#define STACK_TOP_MAX STACK_TOP
++#endif
++
++#define STACK_TOP_MAX TASK_SIZE
+
+ #define INIT_THREAD { \
+- .sp0 = sizeof(init_stack) + (long)&init_stack, \
++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
+ .vm86_info = NULL, \
+ .sysenter_cs = __KERNEL_CS, \
+ .io_bitmap_ptr = NULL, \
+@@ -825,7 +832,7 @@ static inline void spin_lock_prefetch(co
+ */
+ #define INIT_TSS { \
+ .x86_tss = { \
+- .sp0 = sizeof(init_stack) + (long)&init_stack, \
++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
+ .ss0 = __KERNEL_DS, \
+ .ss1 = __KERNEL_CS, \
+ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
+@@ -836,11 +843,7 @@ static inline void spin_lock_prefetch(co
+ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+
+ #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
+-#define KSTK_TOP(info) \
+-({ \
+- unsigned long *__ptr = (unsigned long *)(info); \
+- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
+-})
++#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
+
+ /*
+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+@@ -855,7 +858,7 @@ extern unsigned long thread_saved_pc(str
+ #define task_pt_regs(task) \
+ ({ \
+ struct pt_regs *__regs__; \
+- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
++ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
+ __regs__ - 1; \
+ })
+
+@@ -865,13 +868,13 @@ extern unsigned long thread_saved_pc(str
+ /*
+ * User space process size. 47bits minus one guard page.
+ */
+-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
++#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
+
+ /* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+ #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
+- 0xc0000000 : 0xFFFFe000)
++ 0xc0000000 : 0xFFFFf000)
+
+ #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
+ IA32_PAGE_OFFSET : TASK_SIZE_MAX)
+@@ -882,11 +885,11 @@ extern unsigned long thread_saved_pc(str
+ #define STACK_TOP_MAX TASK_SIZE_MAX
+
+ #define INIT_THREAD { \
+- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
+ }
+
+ #define INIT_TSS { \
+- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
+ }
+
+ /*
+@@ -914,6 +917,10 @@ extern void start_thread(struct pt_regs
+ */
+ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
++#endif
++
+ #define KSTK_EIP(task) (task_pt_regs(task)->ip)
+
+ /* Get/set a process' ability to use the timestamp counter instruction */
+@@ -976,12 +983,12 @@ extern bool cpu_has_amd_erratum(const in
+
+ void cpu_idle_wait(void);
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+ extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+ void default_idle(void);
+ bool set_pm_idle_to_default(void);
+
+-void stop_this_cpu(void *dummy);
++void stop_this_cpu(void *dummy) __noreturn;
+
+ #endif /* _ASM_X86_PROCESSOR_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/ptrace.h linux-3.4-pax/arch/x86/include/asm/ptrace.h
+--- linux-3.4/arch/x86/include/asm/ptrace.h 2012-05-21 11:32:56.979927633 +0200
++++ linux-3.4-pax/arch/x86/include/asm/ptrace.h 2012-05-21 12:10:09.400048883 +0200
+@@ -155,28 +155,29 @@ static inline unsigned long regs_return_
+ }
+
+ /*
+- * user_mode_vm(regs) determines whether a register set came from user mode.
++ * user_mode(regs) determines whether a register set came from user mode.
+ * This is true if V8086 mode was enabled OR if the register set was from
+ * protected mode with RPL-3 CS value. This tricky test checks that with
+ * one comparison. Many places in the kernel can bypass this full check
+- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
++ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
++ * be used.
+ */
+-static inline int user_mode(struct pt_regs *regs)
++static inline int user_mode_novm(struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+ return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
+ #else
+- return !!(regs->cs & 3);
++ return !!(regs->cs & SEGMENT_RPL_MASK);
+ #endif
+ }
+
+-static inline int user_mode_vm(struct pt_regs *regs)
++static inline int user_mode(struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+ return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
+ USER_RPL;
+ #else
+- return user_mode(regs);
++ return user_mode_novm(regs);
+ #endif
+ }
+
+@@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_r
+ #ifdef CONFIG_X86_64
+ static inline bool user_64bit_mode(struct pt_regs *regs)
+ {
++ unsigned long cs = regs->cs & 0xffff;
+ #ifndef CONFIG_PARAVIRT
+ /*
+ * On non-paravirt systems, this is the only long mode CPL 3
+ * selector. We do not allow long mode selectors in the LDT.
+ */
+- return regs->cs == __USER_CS;
++ return cs == __USER_CS;
+ #else
+ /* Headers are too twisted for this to go in paravirt.h. */
+- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
++ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
+ #endif
+ }
+ #endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/reboot.h linux-3.4-pax/arch/x86/include/asm/reboot.h
+--- linux-3.4/arch/x86/include/asm/reboot.h 2012-01-08 19:47:49.199473002 +0100
++++ linux-3.4-pax/arch/x86/include/asm/reboot.h 2012-05-21 12:10:09.400048883 +0200
+@@ -6,19 +6,19 @@
+ struct pt_regs;
+
+ struct machine_ops {
+- void (*restart)(char *cmd);
+- void (*halt)(void);
+- void (*power_off)(void);
++ void (* __noreturn restart)(char *cmd);
++ void (* __noreturn halt)(void);
++ void (* __noreturn power_off)(void);
+ void (*shutdown)(void);
+ void (*crash_shutdown)(struct pt_regs *);
+- void (*emergency_restart)(void);
+-};
++ void (* __noreturn emergency_restart)(void);
++} __no_const;
+
+ extern struct machine_ops machine_ops;
+
+ void native_machine_crash_shutdown(struct pt_regs *regs);
+ void native_machine_shutdown(void);
+-void machine_real_restart(unsigned int type);
++void machine_real_restart(unsigned int type) __noreturn;
+ /* These must match dispatch_table in reboot_32.S */
+ #define MRR_BIOS 0
+ #define MRR_APM 1
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/rwsem.h linux-3.4-pax/arch/x86/include/asm/rwsem.h
+--- linux-3.4/arch/x86/include/asm/rwsem.h 2012-01-08 19:47:49.203473002 +0100
++++ linux-3.4-pax/arch/x86/include/asm/rwsem.h 2012-05-21 12:10:09.400048883 +0200
+@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
+ {
+ asm volatile("# beginning down_read\n\t"
+ LOCK_PREFIX _ASM_INC "(%1)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX _ASM_DEC "(%1)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* adds 0x00000001 */
+ " jns 1f\n"
+ " call call_rwsem_down_read_failed\n"
+@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
+ "1:\n\t"
+ " mov %1,%2\n\t"
+ " add %3,%2\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "sub %3,%2\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ " jle 2f\n\t"
+ LOCK_PREFIX " cmpxchg %2,%0\n\t"
+ " jnz 1b\n\t"
+@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
+ long tmp;
+ asm volatile("# beginning down_write\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "mov %1,(%2)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* adds 0xffff0001, returns the old value */
+ " test %1,%1\n\t"
+ /* was the count 0 before? */
+@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
+ long tmp;
+ asm volatile("# beginning __up_read\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "mov %1,(%2)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* subtracts 1, returns the old value */
+ " jns 1f\n\t"
+ " call call_rwsem_wake\n" /* expects old value in %edx */
+@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
+ long tmp;
+ asm volatile("# beginning __up_write\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "mov %1,(%2)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* subtracts 0xffff0001, returns the old value */
+ " jns 1f\n\t"
+ " call call_rwsem_wake\n" /* expects old value in %edx */
+@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
+ {
+ asm volatile("# beginning __downgrade_write\n\t"
+ LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /*
+ * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
+ * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
+@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
+ */
+ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
+ {
+- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
++ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX _ASM_SUB "%1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (sem->count)
+ : "er" (delta));
+ }
+@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long
+ */
+ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
+ {
+- return delta + xadd(&sem->count, delta);
++ return delta + xadd_check_overflow(&sem->count, delta);
+ }
+
+ #endif /* __KERNEL__ */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/segment.h linux-3.4-pax/arch/x86/include/asm/segment.h
+--- linux-3.4/arch/x86/include/asm/segment.h 2012-05-21 11:32:56.983927633 +0200
++++ linux-3.4-pax/arch/x86/include/asm/segment.h 2012-05-21 12:10:09.404048883 +0200
+@@ -64,10 +64,15 @@
+ * 26 - ESPFIX small SS
+ * 27 - per-cpu [ offset to per-cpu data area ]
+ * 28 - stack_canary-20 [ for stack protector ]
+- * 29 - unused
+- * 30 - unused
++ * 29 - PCI BIOS CS
++ * 30 - PCI BIOS DS
+ * 31 - TSS for double fault handler
+ */
++#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
++#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
++#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
++#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
++
+ #define GDT_ENTRY_TLS_MIN 6
+ #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
+@@ -79,6 +84,8 @@
+
+ #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
+
++#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
++
+ #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
+
+ #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
+@@ -104,6 +111,12 @@
+ #define __KERNEL_STACK_CANARY 0
+ #endif
+
++#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
++#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
++
++#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
++#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
++
+ #define GDT_ENTRY_DOUBLEFAULT_TSS 31
+
+ /*
+@@ -141,7 +154,7 @@
+ */
+
+ /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
+-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
++#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
+
+
+ #else
+@@ -165,6 +178,8 @@
+ #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
+ #define __USER32_DS __USER_DS
+
++#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
++
+ #define GDT_ENTRY_TSS 8 /* needs two entries */
+ #define GDT_ENTRY_LDT 10 /* needs two entries */
+ #define GDT_ENTRY_TLS_MIN 12
+@@ -185,6 +200,7 @@
+ #endif
+
+ #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
++#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
+ #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
+ #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
+ #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
+@@ -263,7 +279,7 @@ static inline unsigned long get_limit(un
+ {
+ unsigned long __limit;
+ asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
+- return __limit + 1;
++ return __limit;
+ }
+
+ #endif /* !__ASSEMBLY__ */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/smp.h linux-3.4-pax/arch/x86/include/asm/smp.h
+--- linux-3.4/arch/x86/include/asm/smp.h 2012-03-19 10:38:56.464050003 +0100
++++ linux-3.4-pax/arch/x86/include/asm/smp.h 2012-05-21 12:10:09.404048883 +0200
+@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
+ /* cpus sharing the last level cache: */
+ DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
+ DECLARE_PER_CPU(u16, cpu_llc_id);
+-DECLARE_PER_CPU(int, cpu_number);
++DECLARE_PER_CPU(unsigned int, cpu_number);
+
+ static inline struct cpumask *cpu_sibling_mask(int cpu)
+ {
+@@ -77,7 +77,7 @@ struct smp_ops {
+
+ void (*send_call_func_ipi)(const struct cpumask *mask);
+ void (*send_call_func_single_ipi)(int cpu);
+-};
++} __no_const;
+
+ /* Globals due to paravirt */
+ extern void set_cpu_sibling_map(int cpu);
+@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
+ extern int safe_smp_processor_id(void);
+
+ #elif defined(CONFIG_X86_64_SMP)
+-#define raw_smp_processor_id() (percpu_read(cpu_number))
+-
+-#define stack_smp_processor_id() \
+-({ \
+- struct thread_info *ti; \
+- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
+- ti->cpu; \
+-})
++#define raw_smp_processor_id() (percpu_read(cpu_number))
++#define stack_smp_processor_id() raw_smp_processor_id()
+ #define safe_smp_processor_id() smp_processor_id()
+
+ #endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/spinlock.h linux-3.4-pax/arch/x86/include/asm/spinlock.h
+--- linux-3.4/arch/x86/include/asm/spinlock.h 2012-05-21 11:32:56.995927634 +0200
++++ linux-3.4-pax/arch/x86/include/asm/spinlock.h 2012-05-21 12:10:09.408048883 +0200
+@@ -175,6 +175,14 @@ static inline int arch_write_can_lock(ar
+ static inline void arch_read_lock(arch_rwlock_t *rw)
+ {
+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ "jns 1f\n"
+ "call __read_lock_failed\n\t"
+ "1:\n"
+@@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_r
+ static inline void arch_write_lock(arch_rwlock_t *rw)
+ {
+ asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ "jz 1f\n"
+ "call __write_lock_failed\n\t"
+ "1:\n"
+@@ -213,13 +229,29 @@ static inline int arch_write_trylock(arc
+
+ static inline void arch_read_unlock(arch_rwlock_t *rw)
+ {
+- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
++ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ :"+m" (rw->lock) : : "memory");
+ }
+
+ static inline void arch_write_unlock(arch_rwlock_t *rw)
+ {
+- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
++ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/stackprotector.h linux-3.4-pax/arch/x86/include/asm/stackprotector.h
+--- linux-3.4/arch/x86/include/asm/stackprotector.h 2012-05-21 11:32:56.999927634 +0200
++++ linux-3.4-pax/arch/x86/include/asm/stackprotector.h 2012-05-21 12:10:09.408048883 +0200
+@@ -47,7 +47,7 @@
+ * head_32 for boot CPU and setup_per_cpu_areas() for others.
+ */
+ #define GDT_STACK_CANARY_INIT \
+- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
++ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
+
+ /*
+ * Initialize the stackprotector canary value.
+@@ -112,7 +112,7 @@ static inline void setup_stack_canary_se
+
+ static inline void load_stack_canary_segment(void)
+ {
+-#ifdef CONFIG_X86_32
++#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
+ asm volatile ("mov %0, %%gs" : : "r" (0));
+ #endif
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/stacktrace.h linux-3.4-pax/arch/x86/include/asm/stacktrace.h
+--- linux-3.4/arch/x86/include/asm/stacktrace.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/stacktrace.h 2012-05-21 12:10:09.412048882 +0200
+@@ -11,28 +11,20 @@
+
+ extern int kstack_depth_to_print;
+
+-struct thread_info;
++struct task_struct;
+ struct stacktrace_ops;
+
+-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
+- unsigned long *stack,
+- unsigned long bp,
+- const struct stacktrace_ops *ops,
+- void *data,
+- unsigned long *end,
+- int *graph);
+-
+-extern unsigned long
+-print_context_stack(struct thread_info *tinfo,
+- unsigned long *stack, unsigned long bp,
+- const struct stacktrace_ops *ops, void *data,
+- unsigned long *end, int *graph);
+-
+-extern unsigned long
+-print_context_stack_bp(struct thread_info *tinfo,
+- unsigned long *stack, unsigned long bp,
+- const struct stacktrace_ops *ops, void *data,
+- unsigned long *end, int *graph);
++typedef unsigned long walk_stack_t(struct task_struct *task,
++ void *stack_start,
++ unsigned long *stack,
++ unsigned long bp,
++ const struct stacktrace_ops *ops,
++ void *data,
++ unsigned long *end,
++ int *graph);
++
++extern walk_stack_t print_context_stack;
++extern walk_stack_t print_context_stack_bp;
+
+ /* Generic stack tracer with callbacks */
+
+@@ -40,7 +32,7 @@ struct stacktrace_ops {
+ void (*address)(void *data, unsigned long address, int reliable);
+ /* On negative return stop dumping */
+ int (*stack)(void *data, char *name);
+- walk_stack_t walk_stack;
++ walk_stack_t *walk_stack;
+ };
+
+ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/switch_to.h linux-3.4-pax/arch/x86/include/asm/switch_to.h
+--- linux-3.4/arch/x86/include/asm/switch_to.h 2012-05-21 11:32:57.003927634 +0200
++++ linux-3.4-pax/arch/x86/include/asm/switch_to.h 2012-05-21 12:10:09.412048882 +0200
+@@ -108,7 +108,7 @@ do { \
+ "call __switch_to\n\t" \
+ "movq "__percpu_arg([current_task])",%%rsi\n\t" \
+ __switch_canary \
+- "movq %P[thread_info](%%rsi),%%r8\n\t" \
++ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
+ "movq %%rax,%%rdi\n\t" \
+ "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
+ "jnz ret_from_fork\n\t" \
+@@ -119,7 +119,7 @@ do { \
+ [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
+ [ti_flags] "i" (offsetof(struct thread_info, flags)), \
+ [_tif_fork] "i" (_TIF_FORK), \
+- [thread_info] "i" (offsetof(struct task_struct, stack)), \
++ [thread_info] "m" (current_tinfo), \
+ [current_task] "m" (current_task) \
+ __switch_canary_iparam \
+ : "memory", "cc" __EXTRA_CLOBBER)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/sys_ia32.h linux-3.4-pax/arch/x86/include/asm/sys_ia32.h
+--- linux-3.4/arch/x86/include/asm/sys_ia32.h 2012-05-21 11:32:57.007927634 +0200
++++ linux-3.4-pax/arch/x86/include/asm/sys_ia32.h 2012-05-21 12:10:09.416048882 +0200
+@@ -40,7 +40,7 @@ asmlinkage long sys32_sigaction(int, str
+ struct old_sigaction32 __user *);
+ asmlinkage long sys32_alarm(unsigned int);
+
+-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
++asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
+ asmlinkage long sys32_sysfs(int, u32, u32);
+
+ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/thread_info.h linux-3.4-pax/arch/x86/include/asm/thread_info.h
+--- linux-3.4/arch/x86/include/asm/thread_info.h 2012-05-21 11:32:57.039927636 +0200
++++ linux-3.4-pax/arch/x86/include/asm/thread_info.h 2012-05-21 12:10:09.420048883 +0200
+@@ -10,6 +10,7 @@
+ #include <linux/compiler.h>
+ #include <asm/page.h>
+ #include <asm/types.h>
++#include <asm/percpu.h>
+
+ /*
+ * low level task data that entry.S needs immediate access to
+@@ -24,7 +25,6 @@ struct exec_domain;
+ #include <linux/atomic.h>
+
+ struct thread_info {
+- struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ __u32 flags; /* low level flags */
+ __u32 status; /* thread synchronous flags */
+@@ -34,19 +34,13 @@ struct thread_info {
+ mm_segment_t addr_limit;
+ struct restart_block restart_block;
+ void __user *sysenter_return;
+-#ifdef CONFIG_X86_32
+- unsigned long previous_esp; /* ESP of the previous stack in
+- case of nested (IRQ) stacks
+- */
+- __u8 supervisor_stack[0];
+-#endif
++ unsigned long lowest_stack;
+ unsigned int sig_on_uaccess_error:1;
+ unsigned int uaccess_err:1; /* uaccess failed */
+ };
+
+-#define INIT_THREAD_INFO(tsk) \
++#define INIT_THREAD_INFO \
+ { \
+- .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+@@ -57,7 +51,7 @@ struct thread_info {
+ }, \
+ }
+
+-#define init_thread_info (init_thread_union.thread_info)
++#define init_thread_info (init_thread_union.stack)
+ #define init_stack (init_thread_union.stack)
+
+ #else /* !__ASSEMBLY__ */
+@@ -173,6 +167,23 @@ struct thread_info {
+ ret; \
+ })
+
++#ifdef __ASSEMBLY__
++/* how to get the thread information struct from ASM */
++#define GET_THREAD_INFO(reg) \
++ mov PER_CPU_VAR(current_tinfo), reg
++
++/* use this one if reg already contains %esp */
++#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
++#else
++/* how to get the thread information struct from C */
++DECLARE_PER_CPU(struct thread_info *, current_tinfo);
++
++static __always_inline struct thread_info *current_thread_info(void)
++{
++ return percpu_read_stable(current_tinfo);
++}
++#endif
++
+ #ifdef CONFIG_X86_32
+
+ #define STACK_WARN (THREAD_SIZE/8)
+@@ -183,35 +194,13 @@ struct thread_info {
+ */
+ #ifndef __ASSEMBLY__
+
+-
+ /* how to get the current stack pointer from C */
+ register unsigned long current_stack_pointer asm("esp") __used;
+
+-/* how to get the thread information struct from C */
+-static inline struct thread_info *current_thread_info(void)
+-{
+- return (struct thread_info *)
+- (current_stack_pointer & ~(THREAD_SIZE - 1));
+-}
+-
+-#else /* !__ASSEMBLY__ */
+-
+-/* how to get the thread information struct from ASM */
+-#define GET_THREAD_INFO(reg) \
+- movl $-THREAD_SIZE, reg; \
+- andl %esp, reg
+-
+-/* use this one if reg already contains %esp */
+-#define GET_THREAD_INFO_WITH_ESP(reg) \
+- andl $-THREAD_SIZE, reg
+-
+ #endif
+
+ #else /* X86_32 */
+
+-#include <asm/percpu.h>
+-#define KERNEL_STACK_OFFSET (5*8)
+-
+ /*
+ * macros/functions for gaining access to the thread information structure
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+@@ -219,27 +208,8 @@ static inline struct thread_info *curren
+ #ifndef __ASSEMBLY__
+ DECLARE_PER_CPU(unsigned long, kernel_stack);
+
+-static inline struct thread_info *current_thread_info(void)
+-{
+- struct thread_info *ti;
+- ti = (void *)(percpu_read_stable(kernel_stack) +
+- KERNEL_STACK_OFFSET - THREAD_SIZE);
+- return ti;
+-}
+-
+-#else /* !__ASSEMBLY__ */
+-
+-/* how to get the thread information struct from ASM */
+-#define GET_THREAD_INFO(reg) \
+- movq PER_CPU_VAR(kernel_stack),reg ; \
+- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
+-
+-/*
+- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
+- * a certain register (to be used in assembler memory operands).
+- */
+-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
+-
++/* how to get the current stack pointer from C */
++register unsigned long current_stack_pointer asm("rsp") __used;
+ #endif
+
+ #endif /* !X86_32 */
+@@ -285,5 +255,16 @@ extern void arch_task_cache_init(void);
+ extern void free_thread_info(struct thread_info *ti);
+ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+ #define arch_task_cache_init arch_task_cache_init
++
++#define __HAVE_THREAD_FUNCTIONS
++#define task_thread_info(task) (&(task)->tinfo)
++#define task_stack_page(task) ((task)->stack)
++#define setup_thread_stack(p, org) do {} while (0)
++#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
++
++#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
++extern struct task_struct *alloc_task_struct_node(int node);
++extern void free_task_struct(struct task_struct *);
++
+ #endif
+ #endif /* _ASM_X86_THREAD_INFO_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/uaccess_32.h linux-3.4-pax/arch/x86/include/asm/uaccess_32.h
+--- linux-3.4/arch/x86/include/asm/uaccess_32.h 2012-05-21 11:32:57.055927637 +0200
++++ linux-3.4-pax/arch/x86/include/asm/uaccess_32.h 2012-05-22 15:50:06.395315521 +0200
+@@ -11,15 +11,15 @@
+ #include <asm/page.h>
+
+ unsigned long __must_check __copy_to_user_ll
+- (void __user *to, const void *from, unsigned long n);
++ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
+ unsigned long __must_check __copy_from_user_ll
+- (void *to, const void __user *from, unsigned long n);
++ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
+ unsigned long __must_check __copy_from_user_ll_nozero
+- (void *to, const void __user *from, unsigned long n);
++ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
+ unsigned long __must_check __copy_from_user_ll_nocache
+- (void *to, const void __user *from, unsigned long n);
++ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
+ unsigned long __must_check __copy_from_user_ll_nocache_nozero
+- (void *to, const void __user *from, unsigned long n);
++ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
+
+ /**
+ * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
+@@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_u
+ static __always_inline unsigned long __must_check
+ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to,
+ return ret;
+ }
+ }
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
+ return __copy_to_user_ll(to, from, n);
+ }
+
+@@ -82,12 +87,16 @@ static __always_inline unsigned long __m
+ __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ might_fault();
++
+ return __copy_to_user_inatomic(to, from, n);
+ }
+
+ static __always_inline unsigned long
+ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ /* Avoid zeroing the tail if the copy fails..
+ * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
+ * but as the zeroing behaviour is only significant when n is not
+@@ -137,6 +146,10 @@ static __always_inline unsigned long
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __
+ return ret;
+ }
+ }
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
+ return __copy_from_user_ll(to, from, n);
+ }
+
+@@ -159,6 +174,10 @@ static __always_inline unsigned long __c
+ const void __user *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -181,15 +200,19 @@ static __always_inline unsigned long
+ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
+ unsigned long n)
+ {
+- return __copy_from_user_ll_nocache_nozero(to, from, n);
+-}
++ if ((long)n < 0)
++ return n;
+
+-unsigned long __must_check copy_to_user(void __user *to,
+- const void *from, unsigned long n);
+-unsigned long __must_check _copy_from_user(void *to,
+- const void __user *from,
+- unsigned long n);
++ return __copy_from_user_ll_nocache_nozero(to, from, n);
++}
+
++extern void copy_to_user_overflow(void)
++#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
++ __compiletime_error("copy_to_user() buffer size is not provably correct")
++#else
++ __compiletime_warning("copy_to_user() buffer size is not provably correct")
++#endif
++;
+
+ extern void copy_from_user_overflow(void)
+ #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+@@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void
+ #endif
+ ;
+
+-static inline unsigned long __must_check copy_from_user(void *to,
+- const void __user *from,
+- unsigned long n)
++/**
++ * copy_to_user: - Copy a block of data into user space.
++ * @to: Destination address, in user space.
++ * @from: Source address, in kernel space.
++ * @n: Number of bytes to copy.
++ *
++ * Context: User context only. This function may sleep.
++ *
++ * Copy data from kernel space to user space.
++ *
++ * Returns number of bytes that could not be copied.
++ * On success, this will be zero.
++ */
++static inline unsigned long __must_check
++copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ int sz = __compiletime_object_size(from);
++
++ if (unlikely(sz != -1 && sz < n))
++ copy_to_user_overflow();
++ else if (access_ok(VERIFY_WRITE, to, n))
++ n = __copy_to_user(to, from, n);
++ return n;
++}
++
++/**
++ * copy_from_user: - Copy a block of data from user space.
++ * @to: Destination address, in kernel space.
++ * @from: Source address, in user space.
++ * @n: Number of bytes to copy.
++ *
++ * Context: User context only. This function may sleep.
++ *
++ * Copy data from user space to kernel space.
++ *
++ * Returns number of bytes that could not be copied.
++ * On success, this will be zero.
++ *
++ * If some data could not be copied, this function will pad the copied
++ * data to the requested size using zero bytes.
++ */
++static inline unsigned long __must_check
++copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ int sz = __compiletime_object_size(to);
+
+- if (likely(sz == -1 || sz >= n))
+- n = _copy_from_user(to, from, n);
+- else
++ if (unlikely(sz != -1 && sz < n))
+ copy_from_user_overflow();
+-
++ else if (access_ok(VERIFY_READ, from, n))
++ n = __copy_from_user(to, from, n);
++ else if ((long)n > 0) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ memset(to, 0, n);
++ }
+ return n;
+ }
+
+@@ -230,7 +297,7 @@ static inline unsigned long __must_check
+ #define strlen_user(str) strnlen_user(str, LONG_MAX)
+
+ long strnlen_user(const char __user *str, long n);
+-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
+-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
++unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
++unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
+
+ #endif /* _ASM_X86_UACCESS_32_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/uaccess_64.h linux-3.4-pax/arch/x86/include/asm/uaccess_64.h
+--- linux-3.4/arch/x86/include/asm/uaccess_64.h 2012-05-21 11:32:57.055927637 +0200
++++ linux-3.4-pax/arch/x86/include/asm/uaccess_64.h 2012-05-22 16:08:42.355255889 +0200
+@@ -10,6 +10,9 @@
+ #include <asm/alternative.h>
+ #include <asm/cpufeature.h>
+ #include <asm/page.h>
++#include <asm/pgtable.h>
++
++#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+ /*
+ * Copy To/From Userspace
+@@ -17,12 +20,14 @@
+
+ /* Handles exceptions in both to and from, but doesn't do access_ok */
+ __must_check unsigned long
+-copy_user_generic_string(void *to, const void *from, unsigned len);
++copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
+ __must_check unsigned long
+-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
++copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
+
+ static __always_inline __must_check unsigned long
+-copy_user_generic(void *to, const void *from, unsigned len)
++copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
++static __always_inline __must_check unsigned long
++copy_user_generic(void *to, const void *from, unsigned long len)
+ {
+ unsigned ret;
+
+@@ -32,142 +37,226 @@ copy_user_generic(void *to, const void *
+ ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
+ "=d" (len)),
+ "1" (to), "2" (from), "3" (len)
+- : "memory", "rcx", "r8", "r9", "r10", "r11");
++ : "memory", "rcx", "r8", "r9", "r11");
+ return ret;
+ }
+
++static __always_inline __must_check unsigned long
++__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
++static __always_inline __must_check unsigned long
++__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
+ __must_check unsigned long
+-_copy_to_user(void __user *to, const void *from, unsigned len);
+-__must_check unsigned long
+-_copy_from_user(void *to, const void __user *from, unsigned len);
+-__must_check unsigned long
+-copy_in_user(void __user *to, const void __user *from, unsigned len);
++copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
+
+ static inline unsigned long __must_check copy_from_user(void *to,
+ const void __user *from,
+ unsigned long n)
+ {
+- int sz = __compiletime_object_size(to);
+-
+ might_fault();
+- if (likely(sz == -1 || sz >= n))
+- n = _copy_from_user(to, from, n);
+-#ifdef CONFIG_DEBUG_VM
+- else
+- WARN(1, "Buffer overflow detected!\n");
+-#endif
++
++ if (access_ok(VERIFY_READ, from, n))
++ n = __copy_from_user(to, from, n);
++ else if (n < INT_MAX) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ memset(to, 0, n);
++ }
+ return n;
+ }
+
+ static __always_inline __must_check
+-int copy_to_user(void __user *dst, const void *src, unsigned size)
++int copy_to_user(void __user *dst, const void *src, unsigned long size)
+ {
+ might_fault();
+
+- return _copy_to_user(dst, src, size);
++ if (access_ok(VERIFY_WRITE, dst, size))
++ size = __copy_to_user(dst, src, size);
++ return size;
+ }
+
+ static __always_inline __must_check
+-int __copy_from_user(void *dst, const void __user *src, unsigned size)
++unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
+ {
+- int ret = 0;
++ int sz = __compiletime_object_size(dst);
++ unsigned ret = 0;
+
+ might_fault();
+- if (!__builtin_constant_p(size))
+- return copy_user_generic(dst, (__force void *)src, size);
++
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
++ if (unlikely(sz != -1 && sz < size)) {
++#ifdef CONFIG_DEBUG_VM
++ WARN(1, "Buffer overflow detected!\n");
++#endif
++ return size;
++ }
++
++ if (!__builtin_constant_p(size)) {
++ check_object_size(dst, size, false);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic(dst, (__force_kernel const void *)src, size);
++ }
+ switch (size) {
+- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
++ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
+ ret, "b", "b", "=q", 1);
+ return ret;
+- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
++ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
+ ret, "w", "w", "=r", 2);
+ return ret;
+- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
++ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
+ ret, "l", "k", "=r", 4);
+ return ret;
+- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+ ret, "q", "", "=r", 8);
+ return ret;
+ case 10:
+- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+ ret, "q", "", "=r", 10);
+ if (unlikely(ret))
+ return ret;
+ __get_user_asm(*(u16 *)(8 + (char *)dst),
+- (u16 __user *)(8 + (char __user *)src),
++ (const u16 __user *)(8 + (const char __user *)src),
+ ret, "w", "w", "=r", 2);
+ return ret;
+ case 16:
+- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+ ret, "q", "", "=r", 16);
+ if (unlikely(ret))
+ return ret;
+ __get_user_asm(*(u64 *)(8 + (char *)dst),
+- (u64 __user *)(8 + (char __user *)src),
++ (const u64 __user *)(8 + (const char __user *)src),
+ ret, "q", "", "=r", 8);
+ return ret;
+ default:
+- return copy_user_generic(dst, (__force void *)src, size);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic(dst, (__force_kernel const void *)src, size);
+ }
+ }
+
+ static __always_inline __must_check
+-int __copy_to_user(void __user *dst, const void *src, unsigned size)
++unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
+ {
+- int ret = 0;
++ int sz = __compiletime_object_size(src);
++ unsigned ret = 0;
+
+ might_fault();
+- if (!__builtin_constant_p(size))
+- return copy_user_generic((__force void *)dst, src, size);
++
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_WRITE, dst, size))
++ return size;
++#endif
++
++ if (unlikely(sz != -1 && sz < size)) {
++#ifdef CONFIG_DEBUG_VM
++ WARN(1, "Buffer overflow detected!\n");
++#endif
++ return size;
++ }
++
++ if (!__builtin_constant_p(size)) {
++ check_object_size(src, size, true);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic((__force_kernel void *)dst, src, size);
++ }
+ switch (size) {
+- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
++ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
+ ret, "b", "b", "iq", 1);
+ return ret;
+- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
++ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ return ret;
+- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
++ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
+ ret, "l", "k", "ir", 4);
+ return ret;
+- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 8);
+ return ret;
+ case 10:
+- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 10);
+ if (unlikely(ret))
+ return ret;
+ asm("":::"memory");
+- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
++ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ return ret;
+ case 16:
+- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 16);
+ if (unlikely(ret))
+ return ret;
+ asm("":::"memory");
+- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
++ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
+ ret, "q", "", "er", 8);
+ return ret;
+ default:
+- return copy_user_generic((__force void *)dst, src, size);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic((__force_kernel void *)dst, src, size);
+ }
+ }
+
+ static __always_inline __must_check
+-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
++unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
+ {
+- int ret = 0;
++ unsigned ret = 0;
+
+ might_fault();
+- if (!__builtin_constant_p(size))
+- return copy_user_generic((__force void *)dst,
+- (__force void *)src, size);
++
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++ if (!__access_ok(VERIFY_WRITE, dst, size))
++ return size;
++#endif
++
++ if (!__builtin_constant_p(size)) {
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic((__force_kernel void *)dst,
++ (__force_kernel const void *)src, size);
++ }
+ switch (size) {
+ case 1: {
+ u8 tmp;
+- __get_user_asm(tmp, (u8 __user *)src,
++ __get_user_asm(tmp, (const u8 __user *)src,
+ ret, "b", "b", "=q", 1);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u8 __user *)dst,
+@@ -176,7 +265,7 @@ int __copy_in_user(void __user *dst, con
+ }
+ case 2: {
+ u16 tmp;
+- __get_user_asm(tmp, (u16 __user *)src,
++ __get_user_asm(tmp, (const u16 __user *)src,
+ ret, "w", "w", "=r", 2);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u16 __user *)dst,
+@@ -186,7 +275,7 @@ int __copy_in_user(void __user *dst, con
+
+ case 4: {
+ u32 tmp;
+- __get_user_asm(tmp, (u32 __user *)src,
++ __get_user_asm(tmp, (const u32 __user *)src,
+ ret, "l", "k", "=r", 4);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u32 __user *)dst,
+@@ -195,7 +284,7 @@ int __copy_in_user(void __user *dst, con
+ }
+ case 8: {
+ u64 tmp;
+- __get_user_asm(tmp, (u64 __user *)src,
++ __get_user_asm(tmp, (const u64 __user *)src,
+ ret, "q", "", "=r", 8);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u64 __user *)dst,
+@@ -203,47 +292,92 @@ int __copy_in_user(void __user *dst, con
+ return ret;
+ }
+ default:
+- return copy_user_generic((__force void *)dst,
+- (__force void *)src, size);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic((__force_kernel void *)dst,
++ (__force_kernel const void *)src, size);
+ }
+ }
+
+ __must_check long strnlen_user(const char __user *str, long n);
+ __must_check long __strnlen_user(const char __user *str, long n);
+ __must_check long strlen_user(const char __user *str);
+-__must_check unsigned long clear_user(void __user *mem, unsigned long len);
+-__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
++__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
++__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
+
+ static __must_check __always_inline int
+-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
++__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
+ {
+- return copy_user_generic(dst, (__force const void *)src, size);
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic(dst, (__force_kernel const void *)src, size);
+ }
+
+-static __must_check __always_inline int
+-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
++static __must_check __always_inline unsigned long
++__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
+ {
+- return copy_user_generic((__force void *)dst, src, size);
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_WRITE, dst, size))
++ return size;
++
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic((__force_kernel void *)dst, src, size);
+ }
+
+-extern long __copy_user_nocache(void *dst, const void __user *src,
+- unsigned size, int zerorest);
++extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
++ unsigned long size, int zerorest) __size_overflow(3);
+
+-static inline int
+-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
++static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
+ {
+ might_sleep();
++
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
+ return __copy_user_nocache(dst, src, size, 1);
+ }
+
+-static inline int
+-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
+- unsigned size)
++static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
++ unsigned long size)
+ {
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
+ return __copy_user_nocache(dst, src, size, 0);
+ }
+
+-unsigned long
+-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
++extern unsigned long
++copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
+
+ #endif /* _ASM_X86_UACCESS_64_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/uaccess.h linux-3.4-pax/arch/x86/include/asm/uaccess.h
+--- linux-3.4/arch/x86/include/asm/uaccess.h 2012-05-21 11:32:57.051927637 +0200
++++ linux-3.4-pax/arch/x86/include/asm/uaccess.h 2012-05-21 12:10:09.428048883 +0200
+@@ -7,12 +7,15 @@
+ #include <linux/compiler.h>
+ #include <linux/thread_info.h>
+ #include <linux/string.h>
++#include <linux/sched.h>
+ #include <asm/asm.h>
+ #include <asm/page.h>
+
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
++
+ /*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+@@ -28,7 +31,12 @@
+
+ #define get_ds() (KERNEL_DS)
+ #define get_fs() (current_thread_info()->addr_limit)
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++void __set_fs(mm_segment_t x);
++void set_fs(mm_segment_t x);
++#else
+ #define set_fs(x) (current_thread_info()->addr_limit = (x))
++#endif
+
+ #define segment_eq(a, b) ((a).seg == (b).seg)
+
+@@ -76,7 +84,33 @@
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
++#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
++#define access_ok(type, addr, size) \
++({ \
++ long __size = size; \
++ unsigned long __addr = (unsigned long)addr; \
++ unsigned long __addr_ao = __addr & PAGE_MASK; \
++ unsigned long __end_ao = __addr + __size - 1; \
++ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
++ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
++ while(__addr_ao <= __end_ao) { \
++ char __c_ao; \
++ __addr_ao += PAGE_SIZE; \
++ if (__size > PAGE_SIZE) \
++ cond_resched(); \
++ if (__get_user(__c_ao, (char __user *)__addr)) \
++ break; \
++ if (type != VERIFY_WRITE) { \
++ __addr = __addr_ao; \
++ continue; \
++ } \
++ if (__put_user(__c_ao, (char __user *)__addr)) \
++ break; \
++ __addr = __addr_ao; \
++ } \
++ } \
++ __ret_ao; \
++})
+
+ /*
+ * The exception table consists of pairs of addresses: the first is the
+@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
+ asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+
+-
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg "gs;"
++#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
++#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
++#else
++#define __copyuser_seg
++#define __COPYUSER_SET_ES
++#define __COPYUSER_RESTORE_ES
++#endif
+
+ #ifdef CONFIG_X86_32
+ #define __put_user_asm_u64(x, addr, err, errret) \
+- asm volatile("1: movl %%eax,0(%2)\n" \
+- "2: movl %%edx,4(%2)\n" \
++ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
++ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: movl %3,%0\n" \
+@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
+ : "A" (x), "r" (addr), "i" (errret), "0" (err))
+
+ #define __put_user_asm_ex_u64(x, addr) \
+- asm volatile("1: movl %%eax,0(%1)\n" \
+- "2: movl %%edx,4(%1)\n" \
++ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
++ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
+ "3:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ _ASM_EXTABLE(2b, 3b - 2b) \
+@@ -252,7 +294,7 @@ extern void __put_user_8(void);
+ __typeof__(*(ptr)) __pu_val; \
+ __chk_user_ptr(ptr); \
+ might_fault(); \
+- __pu_val = x; \
++ __pu_val = (x); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __put_user_x(1, __pu_val, ptr, __ret_pu); \
+@@ -373,7 +415,7 @@ do { \
+ } while (0)
+
+ #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+@@ -381,7 +423,7 @@ do { \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+- : "=r" (err), ltype(x) \
++ : "=r" (err), ltype (x) \
+ : "m" (__m(addr)), "i" (errret), "0" (err))
+
+ #define __get_user_size_ex(x, ptr, size) \
+@@ -406,7 +448,7 @@ do { \
+ } while (0)
+
+ #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
+- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
+ "2:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ : ltype(x) : "m" (__m(addr)))
+@@ -423,13 +465,24 @@ do { \
+ int __gu_err; \
+ unsigned long __gu_val; \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+- (x) = (__force __typeof__(*(ptr)))__gu_val; \
++ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+ })
+
+ /* FIXME: this hack is definitely wrong -AK */
+ struct __large_struct { unsigned long buf[100]; };
+-#define __m(x) (*(struct __large_struct __user *)(x))
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define ____m(x) \
++({ \
++ unsigned long ____x = (unsigned long)(x); \
++ if (____x < PAX_USER_SHADOW_BASE) \
++ ____x += PAX_USER_SHADOW_BASE; \
++ (void __user *)____x; \
++})
++#else
++#define ____m(x) (x)
++#endif
++#define __m(x) (*(struct __large_struct __user *)____m(x))
+
+ /*
+ * Tell gcc we read from memory instead of writing: this is because
+@@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
+ * aliasing issues.
+ */
+ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+@@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r"(err) \
+- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
++ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
+
+ #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
+- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
+ "2:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ : : ltype(x), "m" (__m(addr)))
+@@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
+ * On error, the variable @x is set to zero.
+ */
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __get_user(x, ptr) get_user((x), (ptr))
++#else
+ #define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
++#endif
+
+ /**
+ * __put_user: - Write a simple value into user space, with less checking.
+@@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
+ * Returns zero on success, or -EFAULT on error.
+ */
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __put_user(x, ptr) put_user((x), (ptr))
++#else
+ #define __put_user(x, ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
++#endif
+
+ #define __get_user_unaligned __get_user
+ #define __put_user_unaligned __put_user
+@@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
+ #define get_user_ex(x, ptr) do { \
+ unsigned long __gue_val; \
+ __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
+- (x) = (__force __typeof__(*(ptr)))__gue_val; \
++ (x) = (__typeof__(*(ptr)))__gue_val; \
+ } while (0)
+
+ #ifdef CONFIG_X86_WP_WORKS_OK
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/vdso.h linux-3.4-pax/arch/x86/include/asm/vdso.h
+--- linux-3.4/arch/x86/include/asm/vdso.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/vdso.h 2012-05-21 12:10:09.428048883 +0200
+@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
+ #define VDSO32_SYMBOL(base, name) \
+ ({ \
+ extern const char VDSO32_##name[]; \
+- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
++ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
+ })
+ #endif
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/x86_init.h linux-3.4-pax/arch/x86/include/asm/x86_init.h
+--- linux-3.4/arch/x86/include/asm/x86_init.h 2012-05-21 11:32:57.071927638 +0200
++++ linux-3.4-pax/arch/x86/include/asm/x86_init.h 2012-05-21 12:10:09.432048883 +0200
+@@ -29,7 +29,7 @@ struct x86_init_mpparse {
+ void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
+ void (*find_smp_config)(void);
+ void (*get_smp_config)(unsigned int early);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_resources - platform specific resource related ops
+@@ -43,7 +43,7 @@ struct x86_init_resources {
+ void (*probe_roms)(void);
+ void (*reserve_resources)(void);
+ char *(*memory_setup)(void);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_irqs - platform specific interrupt setup
+@@ -56,7 +56,7 @@ struct x86_init_irqs {
+ void (*pre_vector_init)(void);
+ void (*intr_init)(void);
+ void (*trap_init)(void);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_oem - oem platform specific customizing functions
+@@ -66,7 +66,7 @@ struct x86_init_irqs {
+ struct x86_init_oem {
+ void (*arch_setup)(void);
+ void (*banner)(void);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_mapping - platform specific initial kernel pagetable setup
+@@ -77,7 +77,7 @@ struct x86_init_oem {
+ */
+ struct x86_init_mapping {
+ void (*pagetable_reserve)(u64 start, u64 end);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_paging - platform specific paging functions
+@@ -87,7 +87,7 @@ struct x86_init_mapping {
+ struct x86_init_paging {
+ void (*pagetable_setup_start)(pgd_t *base);
+ void (*pagetable_setup_done)(pgd_t *base);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_timers - platform specific timer setup
+@@ -102,7 +102,7 @@ struct x86_init_timers {
+ void (*tsc_pre_init)(void);
+ void (*timer_init)(void);
+ void (*wallclock_init)(void);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_iommu - platform specific iommu setup
+@@ -110,7 +110,7 @@ struct x86_init_timers {
+ */
+ struct x86_init_iommu {
+ int (*iommu_init)(void);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_pci - platform specific pci init functions
+@@ -124,7 +124,7 @@ struct x86_init_pci {
+ int (*init)(void);
+ void (*init_irq)(void);
+ void (*fixup_irqs)(void);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_ops - functions for platform specific setup
+@@ -140,7 +140,7 @@ struct x86_init_ops {
+ struct x86_init_timers timers;
+ struct x86_init_iommu iommu;
+ struct x86_init_pci pci;
+-};
++} __no_const;
+
+ /**
+ * struct x86_cpuinit_ops - platform specific cpu hotplug setups
+@@ -151,7 +151,7 @@ struct x86_cpuinit_ops {
+ void (*setup_percpu_clockev)(void);
+ void (*early_percpu_clock_init)(void);
+ void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
+-};
++} __no_const;
+
+ /**
+ * struct x86_platform_ops - platform specific runtime functions
+@@ -177,7 +177,7 @@ struct x86_platform_ops {
+ int (*i8042_detect)(void);
+ void (*save_sched_clock_state)(void);
+ void (*restore_sched_clock_state)(void);
+-};
++} __no_const;
+
+ struct pci_dev;
+
+@@ -186,7 +186,7 @@ struct x86_msi_ops {
+ void (*teardown_msi_irq)(unsigned int irq);
+ void (*teardown_msi_irqs)(struct pci_dev *dev);
+ void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
+-};
++} __no_const;
+
+ extern struct x86_init_ops x86_init;
+ extern struct x86_cpuinit_ops x86_cpuinit;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/include/asm/xsave.h linux-3.4-pax/arch/x86/include/asm/xsave.h
+--- linux-3.4/arch/x86/include/asm/xsave.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/include/asm/xsave.h 2012-05-21 12:10:09.432048883 +0200
+@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
+ {
+ int err;
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
++ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
++#endif
++
+ /*
+ * Clear the xsave header first, so that reserved fields are
+ * initialized to zero.
+@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsav
+ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
+ {
+ int err;
+- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
++ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
+ u32 lmask = mask;
+ u32 hmask = mask >> 32;
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
++ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
++#endif
++
+ __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/Kconfig linux-3.4-pax/arch/x86/Kconfig
+--- linux-3.4/arch/x86/Kconfig 2012-05-21 11:32:56.711927618 +0200
++++ linux-3.4-pax/arch/x86/Kconfig 2012-05-21 12:10:09.436048884 +0200
+@@ -229,7 +229,7 @@ config X86_HT
+
+ config X86_32_LAZY_GS
+ def_bool y
+- depends on X86_32 && !CC_STACKPROTECTOR
++ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
+
+ config ARCH_HWEIGHT_CFLAGS
+ string
+@@ -1133,7 +1133,7 @@ config PAGE_OFFSET
+ hex
+ default 0xB0000000 if VMSPLIT_3G_OPT
+ default 0x80000000 if VMSPLIT_2G
+- default 0x78000000 if VMSPLIT_2G_OPT
++ default 0x70000000 if VMSPLIT_2G_OPT
+ default 0x40000000 if VMSPLIT_1G
+ default 0xC0000000
+ depends on X86_32
+@@ -1523,6 +1523,7 @@ config SECCOMP
+
+ config CC_STACKPROTECTOR
+ bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
++ depends on X86_64 || !PAX_MEMORY_UDEREF
+ ---help---
+ This option turns on the -fstack-protector GCC feature. This
+ feature puts, at the beginning of functions, a canary value on
+@@ -1580,6 +1581,7 @@ config KEXEC_JUMP
+ config PHYSICAL_START
+ hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
+ default "0x1000000"
++ range 0x400000 0x40000000
+ ---help---
+ This gives the physical address where the kernel is loaded.
+
+@@ -1643,6 +1645,7 @@ config X86_NEED_RELOCS
+ config PHYSICAL_ALIGN
+ hex "Alignment value to which kernel should be aligned" if X86_32
+ default "0x1000000"
++ range 0x400000 0x1000000 if PAX_KERNEXEC
+ range 0x2000 0x1000000
+ ---help---
+ This value puts the alignment restrictions on physical address
+@@ -1674,7 +1677,7 @@ config HOTPLUG_CPU
+ Say N if you want to disable CPU hotplug.
+
+ config COMPAT_VDSO
+- def_bool y
++ def_bool n
+ prompt "Compat VDSO support"
+ depends on X86_32 || IA32_EMULATION
+ ---help---
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/Kconfig.cpu linux-3.4-pax/arch/x86/Kconfig.cpu
+--- linux-3.4/arch/x86/Kconfig.cpu 2012-05-21 11:32:56.715927619 +0200
++++ linux-3.4-pax/arch/x86/Kconfig.cpu 2012-05-21 12:10:09.440048884 +0200
+@@ -334,7 +334,7 @@ config X86_PPRO_FENCE
+
+ config X86_F00F_BUG
+ def_bool y
+- depends on M586MMX || M586TSC || M586 || M486 || M386
++ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
+
+ config X86_INVD_BUG
+ def_bool y
+@@ -358,7 +358,7 @@ config X86_POPAD_OK
+
+ config X86_ALIGNMENT_16
+ def_bool y
+- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+
+ config X86_INTEL_USERCOPY
+ def_bool y
+@@ -404,7 +404,7 @@ config X86_CMPXCHG64
+ # generates cmov.
+ config X86_CMOV
+ def_bool y
+- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+
+ config X86_MINIMUM_CPU_FAMILY
+ int
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/Kconfig.debug linux-3.4-pax/arch/x86/Kconfig.debug
+--- linux-3.4/arch/x86/Kconfig.debug 2012-03-19 10:38:56.376050007 +0100
++++ linux-3.4-pax/arch/x86/Kconfig.debug 2012-05-21 12:10:09.440048884 +0200
+@@ -84,7 +84,7 @@ config X86_PTDUMP
+ config DEBUG_RODATA
+ bool "Write protect kernel read-only data structures"
+ default y
+- depends on DEBUG_KERNEL
++ depends on DEBUG_KERNEL && BROKEN
+ ---help---
+ Mark the kernel read-only data as write-protected in the pagetables,
+ in order to catch accidental (and incorrect) writes to such const
+@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
+
+ config DEBUG_SET_MODULE_RONX
+ bool "Set loadable kernel module data as NX and text as RO"
+- depends on MODULES
++ depends on MODULES && BROKEN
+ ---help---
+ This option helps catch unintended modifications to loadable
+ kernel module's text and read-only data. It also prevents execution
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/acpi/realmode/Makefile linux-3.4-pax/arch/x86/kernel/acpi/realmode/Makefile
+--- linux-3.4/arch/x86/kernel/acpi/realmode/Makefile 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/kernel/acpi/realmode/Makefile 2012-05-21 12:10:09.444048884 +0200
+@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
+ $(call cc-option, -fno-stack-protector) \
+ $(call cc-option, -mpreferred-stack-boundary=2)
+ KBUILD_CFLAGS += $(call cc-option, -m32)
++ifdef CONSTIFY_PLUGIN
++KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
++endif
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/acpi/sleep.c linux-3.4-pax/arch/x86/kernel/acpi/sleep.c
+--- linux-3.4/arch/x86/kernel/acpi/sleep.c 2012-05-21 11:32:57.199927645 +0200
++++ linux-3.4-pax/arch/x86/kernel/acpi/sleep.c 2012-05-21 12:10:09.444048884 +0200
+@@ -98,8 +98,12 @@ int acpi_suspend_lowlevel(void)
+ header->trampoline_segment = trampoline_address() >> 4;
+ #ifdef CONFIG_SMP
+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
++
++ pax_open_kernel();
+ early_gdt_descr.address =
+ (unsigned long)get_cpu_gdt_table(smp_processor_id());
++ pax_close_kernel();
++
+ initial_gs = per_cpu_offset(smp_processor_id());
+ #endif
+ initial_code = (unsigned long)wakeup_long64;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/acpi/wakeup_32.S linux-3.4-pax/arch/x86/kernel/acpi/wakeup_32.S
+--- linux-3.4/arch/x86/kernel/acpi/wakeup_32.S 2012-05-21 11:32:57.207927645 +0200
++++ linux-3.4-pax/arch/x86/kernel/acpi/wakeup_32.S 2012-05-21 12:10:09.448048884 +0200
+@@ -30,13 +30,11 @@ wakeup_pmode_return:
+ # and restore the stack ... but you need gdt for this to work
+ movl saved_context_esp, %esp
+
+- movl %cs:saved_magic, %eax
+- cmpl $0x12345678, %eax
++ cmpl $0x12345678, saved_magic
+ jne bogus_magic
+
+ # jump to place where we left off
+- movl saved_eip, %eax
+- jmp *%eax
++ jmp *(saved_eip)
+
+ bogus_magic:
+ jmp bogus_magic
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/alternative.c linux-3.4-pax/arch/x86/kernel/alternative.c
+--- linux-3.4/arch/x86/kernel/alternative.c 2012-01-08 19:47:49.287472997 +0100
++++ linux-3.4-pax/arch/x86/kernel/alternative.c 2012-05-21 12:10:09.448048884 +0200
+@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives
+ */
+ for (a = start; a < end; a++) {
+ instr = (u8 *)&a->instr_offset + a->instr_offset;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
++ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++#endif
++
+ replacement = (u8 *)&a->repl_offset + a->repl_offset;
+ BUG_ON(a->replacementlen > a->instrlen);
+ BUG_ON(a->instrlen > sizeof(insnbuf));
+@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const
+ for (poff = start; poff < end; poff++) {
+ u8 *ptr = (u8 *)poff + *poff;
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
++ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++#endif
++
+ if (!*poff || ptr < text || ptr >= text_end)
+ continue;
+ /* turn DS segment override prefix into lock prefix */
+- if (*ptr == 0x3e)
++ if (*ktla_ktva(ptr) == 0x3e)
+ text_poke(ptr, ((unsigned char []){0xf0}), 1);
+ };
+ mutex_unlock(&text_mutex);
+@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(cons
+ for (poff = start; poff < end; poff++) {
+ u8 *ptr = (u8 *)poff + *poff;
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
++ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++#endif
++
+ if (!*poff || ptr < text || ptr >= text_end)
+ continue;
+ /* turn lock prefix into DS segment override prefix */
+- if (*ptr == 0xf0)
++ if (*ktla_ktva(ptr) == 0xf0)
+ text_poke(ptr, ((unsigned char []){0x3E}), 1);
+ };
+ mutex_unlock(&text_mutex);
+@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(str
+
+ BUG_ON(p->len > MAX_PATCH_LEN);
+ /* prep the buffer with the original instructions */
+- memcpy(insnbuf, p->instr, p->len);
++ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
+ used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
+ (unsigned long)p->instr, p->len);
+
+@@ -568,7 +587,7 @@ void __init alternative_instructions(voi
+ if (smp_alt_once)
+ free_init_pages("SMP alternatives",
+ (unsigned long)__smp_locks,
+- (unsigned long)__smp_locks_end);
++ PAGE_ALIGN((unsigned long)__smp_locks_end));
+
+ restart_nmi();
+ }
+@@ -585,13 +604,17 @@ void __init alternative_instructions(voi
+ * instructions. And on the local CPU you need to be protected again NMI or MCE
+ * handlers seeing an inconsistent instruction while you patch.
+ */
+-void *__init_or_module text_poke_early(void *addr, const void *opcode,
++void *__kprobes text_poke_early(void *addr, const void *opcode,
+ size_t len)
+ {
+ unsigned long flags;
+ local_irq_save(flags);
+- memcpy(addr, opcode, len);
++
++ pax_open_kernel();
++ memcpy(ktla_ktva(addr), opcode, len);
+ sync_core();
++ pax_close_kernel();
++
+ local_irq_restore(flags);
+ /* Could also do a CLFLUSH here to speed up CPU recovery; but
+ that causes hangs on some VIA CPUs. */
+@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(v
+ */
+ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
+ {
+- unsigned long flags;
+- char *vaddr;
++ unsigned char *vaddr = ktla_ktva(addr);
+ struct page *pages[2];
+- int i;
++ size_t i;
+
+ if (!core_kernel_text((unsigned long)addr)) {
+- pages[0] = vmalloc_to_page(addr);
+- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
++ pages[0] = vmalloc_to_page(vaddr);
++ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
+ } else {
+- pages[0] = virt_to_page(addr);
++ pages[0] = virt_to_page(vaddr);
+ WARN_ON(!PageReserved(pages[0]));
+- pages[1] = virt_to_page(addr + PAGE_SIZE);
++ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
+ }
+ BUG_ON(!pages[0]);
+- local_irq_save(flags);
+- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
+- if (pages[1])
+- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
+- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
+- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
+- clear_fixmap(FIX_TEXT_POKE0);
+- if (pages[1])
+- clear_fixmap(FIX_TEXT_POKE1);
+- local_flush_tlb();
+- sync_core();
+- /* Could also do a CLFLUSH here to speed up CPU recovery; but
+- that causes hangs on some VIA CPUs. */
++ text_poke_early(addr, opcode, len);
+ for (i = 0; i < len; i++)
+- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
+- local_irq_restore(flags);
++ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
+ return addr;
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/apic/apic.c linux-3.4-pax/arch/x86/kernel/apic/apic.c
+--- linux-3.4/arch/x86/kernel/apic/apic.c 2012-05-21 11:32:57.215927646 +0200
++++ linux-3.4-pax/arch/x86/kernel/apic/apic.c 2012-05-21 12:10:09.452048884 +0200
+@@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
+ /*
+ * Debug level, exported for io_apic.c
+ */
+-unsigned int apic_verbosity;
++int apic_verbosity;
+
+ int pic_mode;
+
+@@ -1917,7 +1917,7 @@ void smp_error_interrupt(struct pt_regs
+ apic_write(APIC_ESR, 0);
+ v1 = apic_read(APIC_ESR);
+ ack_APIC_irq();
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+
+ apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
+ smp_processor_id(), v0 , v1);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/apic/io_apic.c linux-3.4-pax/arch/x86/kernel/apic/io_apic.c
+--- linux-3.4/arch/x86/kernel/apic/io_apic.c 2012-05-21 11:32:57.251927648 +0200
++++ linux-3.4-pax/arch/x86/kernel/apic/io_apic.c 2012-05-21 12:10:09.456048885 +0200
+@@ -83,7 +83,9 @@ static struct io_apic_ops io_apic_ops =
+
+ void __init set_io_apic_ops(const struct io_apic_ops *ops)
+ {
+- io_apic_ops = *ops;
++ pax_open_kernel();
++ memcpy((void*)&io_apic_ops, ops, sizeof io_apic_ops);
++ pax_close_kernel();
+ }
+
+ /*
+@@ -1135,7 +1137,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
+ }
+ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
+
+-void lock_vector_lock(void)
++void lock_vector_lock(void) __acquires(vector_lock)
+ {
+ /* Used to the online set of cpus does not change
+ * during assign_irq_vector.
+@@ -1143,7 +1145,7 @@ void lock_vector_lock(void)
+ raw_spin_lock(&vector_lock);
+ }
+
+-void unlock_vector_lock(void)
++void unlock_vector_lock(void) __releases(vector_lock)
+ {
+ raw_spin_unlock(&vector_lock);
+ }
+@@ -2549,7 +2551,7 @@ static void ack_apic_edge(struct irq_dat
+ ack_APIC_irq();
+ }
+
+-atomic_t irq_mis_count;
++atomic_unchecked_t irq_mis_count;
+
+ #ifdef CONFIG_GENERIC_PENDING_IRQ
+ static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
+@@ -2667,7 +2669,7 @@ static void ack_apic_level(struct irq_da
+ * at the cpu.
+ */
+ if (!(v & (1 << (i & 0x1f)))) {
+- atomic_inc(&irq_mis_count);
++ atomic_inc_unchecked(&irq_mis_count);
+
+ eoi_ioapic_irq(irq, cfg);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/apm_32.c linux-3.4-pax/arch/x86/kernel/apm_32.c
+--- linux-3.4/arch/x86/kernel/apm_32.c 2012-05-21 11:32:57.311927651 +0200
++++ linux-3.4-pax/arch/x86/kernel/apm_32.c 2012-05-21 12:10:09.460048885 +0200
+@@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex);
+ * This is for buggy BIOS's that refer to (real mode) segment 0x40
+ * even though they are called in protected mode.
+ */
+-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
+ (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
+
+ static const char driver_version[] = "1.16ac"; /* no spaces */
+@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
+ BUG_ON(cpu != 0);
+ gdt = get_cpu_gdt_table(cpu);
+ save_desc_40 = gdt[0x40 / 8];
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = bad_bios_desc;
++ pax_close_kernel();
+
+ apm_irq_save(flags);
+ APM_DO_SAVE_SEGS;
+@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
+ &call->esi);
+ APM_DO_RESTORE_SEGS;
+ apm_irq_restore(flags);
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = save_desc_40;
++ pax_close_kernel();
++
+ put_cpu();
+
+ return call->eax & 0xff;
+@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
+ BUG_ON(cpu != 0);
+ gdt = get_cpu_gdt_table(cpu);
+ save_desc_40 = gdt[0x40 / 8];
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = bad_bios_desc;
++ pax_close_kernel();
+
+ apm_irq_save(flags);
+ APM_DO_SAVE_SEGS;
+@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
+ &call->eax);
+ APM_DO_RESTORE_SEGS;
+ apm_irq_restore(flags);
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = save_desc_40;
++ pax_close_kernel();
++
+ put_cpu();
+ return error;
+ }
+@@ -2345,12 +2359,15 @@ static int __init apm_init(void)
+ * code to that CPU.
+ */
+ gdt = get_cpu_gdt_table(0);
++
++ pax_open_kernel();
+ set_desc_base(&gdt[APM_CS >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
+ set_desc_base(&gdt[APM_CS_16 >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
+ set_desc_base(&gdt[APM_DS >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
++ pax_close_kernel();
+
+ proc_create("apm", 0, NULL, &apm_file_ops);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/asm-offsets_64.c linux-3.4-pax/arch/x86/kernel/asm-offsets_64.c
+--- linux-3.4/arch/x86/kernel/asm-offsets_64.c 2012-05-21 11:32:57.315927651 +0200
++++ linux-3.4-pax/arch/x86/kernel/asm-offsets_64.c 2012-05-21 12:10:09.464048885 +0200
+@@ -76,6 +76,7 @@ int main(void)
+ BLANK();
+ #undef ENTRY
+
++ DEFINE(TSS_size, sizeof(struct tss_struct));
+ OFFSET(TSS_ist, tss_struct, x86_tss.ist);
+ BLANK();
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/asm-offsets.c linux-3.4-pax/arch/x86/kernel/asm-offsets.c
+--- linux-3.4/arch/x86/kernel/asm-offsets.c 2012-03-19 10:38:56.508050001 +0100
++++ linux-3.4-pax/arch/x86/kernel/asm-offsets.c 2012-05-21 12:10:09.468048885 +0200
+@@ -33,6 +33,8 @@ void common(void) {
+ OFFSET(TI_status, thread_info, status);
+ OFFSET(TI_addr_limit, thread_info, addr_limit);
+ OFFSET(TI_preempt_count, thread_info, preempt_count);
++ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
++ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
+
+ BLANK();
+ OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
+@@ -53,8 +55,26 @@ void common(void) {
+ OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
+ OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
++ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
++#ifdef CONFIG_X86_64
++ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
++#endif
+ #endif
+
++#endif
++
++ BLANK();
++ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
++ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
++ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
++
+ #ifdef CONFIG_XEN
+ BLANK();
+ OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/cpu/amd.c linux-3.4-pax/arch/x86/kernel/cpu/amd.c
+--- linux-3.4/arch/x86/kernel/cpu/amd.c 2012-05-21 11:32:57.327927652 +0200
++++ linux-3.4-pax/arch/x86/kernel/cpu/amd.c 2012-05-21 12:10:09.468048885 +0200
+@@ -691,7 +691,7 @@ static unsigned int __cpuinit amd_size_c
+ unsigned int size)
+ {
+ /* AMD errata T13 (order #21922) */
+- if ((c->x86 == 6)) {
++ if (c->x86 == 6) {
+ /* Duron Rev A0 */
+ if (c->x86_model == 3 && c->x86_mask == 0)
+ size = 64;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/cpu/common.c linux-3.4-pax/arch/x86/kernel/cpu/common.c
+--- linux-3.4/arch/x86/kernel/cpu/common.c 2012-05-21 11:32:57.331927652 +0200
++++ linux-3.4-pax/arch/x86/kernel/cpu/common.c 2012-05-21 12:10:09.472048886 +0200
+@@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitcon
+
+ static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
+
+-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
+-#ifdef CONFIG_X86_64
+- /*
+- * We need valid kernel segments for data and code in long mode too
+- * IRET will check the segment types kkeil 2000/10/28
+- * Also sysret mandates a special GDT layout
+- *
+- * TLS descriptors are currently at a different place compared to i386.
+- * Hopefully nobody expects them at a fixed place (Wine?)
+- */
+- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
+- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
+- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
+-#else
+- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
+- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
+- /*
+- * Segments used for calling PnP BIOS have byte granularity.
+- * They code segments and data segments have fixed 64k limits,
+- * the transfer segment sizes are set at run time.
+- */
+- /* 32-bit code */
+- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
+- /* 16-bit code */
+- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
+- /*
+- * The APM segments have byte granularity and their bases
+- * are set at run time. All have 64k limits.
+- */
+- /* 32-bit code */
+- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
+- /* 16-bit code */
+- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
+- /* data */
+- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
+-
+- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+- GDT_STACK_CANARY_INIT
+-#endif
+-} };
+-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+-
+ static int __init x86_xsave_setup(char *s)
+ {
+ setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+@@ -374,7 +320,7 @@ void switch_to_new_gdt(int cpu)
+ {
+ struct desc_ptr gdt_descr;
+
+- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
++ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+ /* Reload the per-cpu base */
+@@ -841,6 +787,10 @@ static void __cpuinit identify_cpu(struc
+ /* Filter out anything that depends on CPUID levels we don't have */
+ filter_cpuid_features(c, true);
+
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
++ setup_clear_cpu_cap(X86_FEATURE_SEP);
++#endif
++
+ /* If the model name is still unset, do table lookup. */
+ if (!c->x86_model_id[0]) {
+ const char *p;
+@@ -1021,10 +971,12 @@ static __init int setup_disablecpuid(cha
+ }
+ __setup("clearcpuid=", setup_disablecpuid);
+
++DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
++EXPORT_PER_CPU_SYMBOL(current_tinfo);
++
+ #ifdef CONFIG_X86_64
+ struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
+-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
+- (unsigned long) nmi_idt_table };
++struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
+
+ DEFINE_PER_CPU_FIRST(union irq_stack_union,
+ irq_stack_union) __aligned(PAGE_SIZE);
+@@ -1038,7 +990,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
+ EXPORT_PER_CPU_SYMBOL(current_task);
+
+ DEFINE_PER_CPU(unsigned long, kernel_stack) =
+- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
++ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
+ EXPORT_PER_CPU_SYMBOL(kernel_stack);
+
+ DEFINE_PER_CPU(char *, irq_stack_ptr) =
+@@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(str
+ {
+ memset(regs, 0, sizeof(struct pt_regs));
+ regs->fs = __KERNEL_PERCPU;
+- regs->gs = __KERNEL_STACK_CANARY;
++ savesegment(gs, regs->gs);
+
+ return regs;
+ }
+@@ -1181,7 +1133,7 @@ void __cpuinit cpu_init(void)
+ int i;
+
+ cpu = stack_smp_processor_id();
+- t = &per_cpu(init_tss, cpu);
++ t = init_tss + cpu;
+ oist = &per_cpu(orig_ist, cpu);
+
+ #ifdef CONFIG_NUMA
+@@ -1207,7 +1159,7 @@ void __cpuinit cpu_init(void)
+ switch_to_new_gdt(cpu);
+ loadsegment(fs, 0);
+
+- load_idt((const struct desc_ptr *)&idt_descr);
++ load_idt(&idt_descr);
+
+ memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
+ syscall_init();
+@@ -1216,7 +1168,6 @@ void __cpuinit cpu_init(void)
+ wrmsrl(MSR_KERNEL_GS_BASE, 0);
+ barrier();
+
+- x86_configure_nx();
+ if (cpu != 0)
+ enable_x2apic();
+
+@@ -1272,7 +1223,7 @@ void __cpuinit cpu_init(void)
+ {
+ int cpu = smp_processor_id();
+ struct task_struct *curr = current;
+- struct tss_struct *t = &per_cpu(init_tss, cpu);
++ struct tss_struct *t = init_tss + cpu;
+ struct thread_struct *thread = &curr->thread;
+
+ if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/cpu/intel.c linux-3.4-pax/arch/x86/kernel/cpu/intel.c
+--- linux-3.4/arch/x86/kernel/cpu/intel.c 2012-03-19 10:38:56.516050001 +0100
++++ linux-3.4-pax/arch/x86/kernel/cpu/intel.c 2012-05-21 12:10:09.476048886 +0200
+@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug
+ * Update the IDT descriptor and reload the IDT so that
+ * it uses the read-only mapped virtual address.
+ */
+- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
++ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
+ load_idt(&idt_descr);
+ }
+ #endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/cpu/Makefile linux-3.4-pax/arch/x86/kernel/cpu/Makefile
+--- linux-3.4/arch/x86/kernel/cpu/Makefile 2012-05-21 11:32:57.323927652 +0200
++++ linux-3.4-pax/arch/x86/kernel/cpu/Makefile 2012-05-21 12:10:09.476048886 +0200
+@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
+ CFLAGS_REMOVE_perf_event.o = -pg
+ endif
+
+-# Make sure load_percpu_segment has no stackprotector
+-nostackp := $(call cc-option, -fno-stack-protector)
+-CFLAGS_common.o := $(nostackp)
+-
+ obj-y := intel_cacheinfo.o scattered.o topology.o
+ obj-y += proc.o capflags.o powerflags.o common.o
+ obj-y += vmware.o hypervisor.o sched.o mshyperv.o
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/cpu/mcheck/mce.c linux-3.4-pax/arch/x86/kernel/cpu/mcheck/mce.c
+--- linux-3.4/arch/x86/kernel/cpu/mcheck/mce.c 2012-05-21 11:32:57.387927655 +0200
++++ linux-3.4-pax/arch/x86/kernel/cpu/mcheck/mce.c 2012-05-21 12:10:09.484048885 +0200
+@@ -42,6 +42,7 @@
+ #include <asm/processor.h>
+ #include <asm/mce.h>
+ #include <asm/msr.h>
++#include <asm/local.h>
+
+ #include "mce-internal.h"
+
+@@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
+ !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
+ m->cs, m->ip);
+
+- if (m->cs == __KERNEL_CS)
++ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
+ print_symbol("{%s}", m->ip);
+ pr_cont("\n");
+ }
+@@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
+
+ #define PANIC_TIMEOUT 5 /* 5 seconds */
+
+-static atomic_t mce_paniced;
++static atomic_unchecked_t mce_paniced;
+
+ static int fake_panic;
+-static atomic_t mce_fake_paniced;
++static atomic_unchecked_t mce_fake_paniced;
+
+ /* Panic in progress. Enable interrupts and wait for final IPI */
+ static void wait_for_panic(void)
+@@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct
+ /*
+ * Make sure only one CPU runs in machine check panic
+ */
+- if (atomic_inc_return(&mce_paniced) > 1)
++ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
+ wait_for_panic();
+ barrier();
+
+@@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct
+ console_verbose();
+ } else {
+ /* Don't log too much for fake panic */
+- if (atomic_inc_return(&mce_fake_paniced) > 1)
++ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
+ return;
+ }
+ /* First print corrected ones that are still unlogged */
+@@ -676,7 +677,7 @@ static int mce_timed_out(u64 *t)
+ * might have been modified by someone else.
+ */
+ rmb();
+- if (atomic_read(&mce_paniced))
++ if (atomic_read_unchecked(&mce_paniced))
+ wait_for_panic();
+ if (!monarch_timeout)
+ goto out;
+@@ -1527,7 +1528,7 @@ static void unexpected_machine_check(str
+ }
+
+ /* Call the installed machine check handler for this CPU setup. */
+-void (*machine_check_vector)(struct pt_regs *, long error_code) =
++void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
+ unexpected_machine_check;
+
+ /*
+@@ -1550,7 +1551,9 @@ void __cpuinit mcheck_cpu_init(struct cp
+ return;
+ }
+
++ pax_open_kernel();
+ machine_check_vector = do_machine_check;
++ pax_close_kernel();
+
+ __mcheck_cpu_init_generic();
+ __mcheck_cpu_init_vendor(c);
+@@ -1564,7 +1567,7 @@ void __cpuinit mcheck_cpu_init(struct cp
+ */
+
+ static DEFINE_SPINLOCK(mce_chrdev_state_lock);
+-static int mce_chrdev_open_count; /* #times opened */
++static local_t mce_chrdev_open_count; /* #times opened */
+ static int mce_chrdev_open_exclu; /* already open exclusive? */
+
+ static int mce_chrdev_open(struct inode *inode, struct file *file)
+@@ -1572,7 +1575,7 @@ static int mce_chrdev_open(struct inode
+ spin_lock(&mce_chrdev_state_lock);
+
+ if (mce_chrdev_open_exclu ||
+- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
++ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
+ spin_unlock(&mce_chrdev_state_lock);
+
+ return -EBUSY;
+@@ -1580,7 +1583,7 @@ static int mce_chrdev_open(struct inode
+
+ if (file->f_flags & O_EXCL)
+ mce_chrdev_open_exclu = 1;
+- mce_chrdev_open_count++;
++ local_inc(&mce_chrdev_open_count);
+
+ spin_unlock(&mce_chrdev_state_lock);
+
+@@ -1591,7 +1594,7 @@ static int mce_chrdev_release(struct ino
+ {
+ spin_lock(&mce_chrdev_state_lock);
+
+- mce_chrdev_open_count--;
++ local_dec(&mce_chrdev_open_count);
+ mce_chrdev_open_exclu = 0;
+
+ spin_unlock(&mce_chrdev_state_lock);
+@@ -2316,7 +2319,7 @@ struct dentry *mce_get_debugfs_dir(void)
+ static void mce_reset(void)
+ {
+ cpu_missing = 0;
+- atomic_set(&mce_fake_paniced, 0);
++ atomic_set_unchecked(&mce_fake_paniced, 0);
+ atomic_set(&mce_executing, 0);
+ atomic_set(&mce_callin, 0);
+ atomic_set(&global_nwo, 0);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/cpu/mcheck/p5.c linux-3.4-pax/arch/x86/kernel/cpu/mcheck/p5.c
+--- linux-3.4/arch/x86/kernel/cpu/mcheck/p5.c 2012-05-21 11:32:57.395927656 +0200
++++ linux-3.4-pax/arch/x86/kernel/cpu/mcheck/p5.c 2012-05-21 12:10:09.488048887 +0200
+@@ -11,6 +11,7 @@
+ #include <asm/processor.h>
+ #include <asm/mce.h>
+ #include <asm/msr.h>
++#include <asm/pgtable.h>
+
+ /* By default disabled */
+ int mce_p5_enabled __read_mostly;
+@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo
+ if (!cpu_has(c, X86_FEATURE_MCE))
+ return;
+
++ pax_open_kernel();
+ machine_check_vector = pentium_machine_check;
++ pax_close_kernel();
+ /* Make sure the vector pointer is visible before we enable MCEs: */
+ wmb();
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/cpu/mcheck/winchip.c linux-3.4-pax/arch/x86/kernel/cpu/mcheck/winchip.c
+--- linux-3.4/arch/x86/kernel/cpu/mcheck/winchip.c 2012-05-21 11:32:57.399927656 +0200
++++ linux-3.4-pax/arch/x86/kernel/cpu/mcheck/winchip.c 2012-05-21 12:10:09.492048888 +0200
+@@ -10,6 +10,7 @@
+ #include <asm/processor.h>
+ #include <asm/mce.h>
+ #include <asm/msr.h>
++#include <asm/pgtable.h>
+
+ /* Machine check handler for WinChip C6: */
+ static void winchip_machine_check(struct pt_regs *regs, long error_code)
+@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_
+ {
+ u32 lo, hi;
+
++ pax_open_kernel();
+ machine_check_vector = winchip_machine_check;
++ pax_close_kernel();
+ /* Make sure the vector pointer is visible before we enable MCEs: */
+ wmb();
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/cpu/mtrr/main.c linux-3.4-pax/arch/x86/kernel/cpu/mtrr/main.c
+--- linux-3.4/arch/x86/kernel/cpu/mtrr/main.c 2011-10-24 12:48:26.227091775 +0200
++++ linux-3.4-pax/arch/x86/kernel/cpu/mtrr/main.c 2012-05-21 12:10:09.496048888 +0200
+@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
+ u64 size_or_mask, size_and_mask;
+ static bool mtrr_aps_delayed_init;
+
+-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
++static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
+
+ const struct mtrr_ops *mtrr_if;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.4-pax/arch/x86/kernel/cpu/mtrr/mtrr.h
+--- linux-3.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/kernel/cpu/mtrr/mtrr.h 2012-05-21 12:10:09.500048888 +0200
+@@ -25,7 +25,7 @@ struct mtrr_ops {
+ int (*validate_add_page)(unsigned long base, unsigned long size,
+ unsigned int type);
+ int (*have_wrcomb)(void);
+-};
++} __do_const;
+
+ extern int generic_get_free_region(unsigned long base, unsigned long size,
+ int replace_reg);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/cpu/perf_event.c linux-3.4-pax/arch/x86/kernel/cpu/perf_event.c
+--- linux-3.4/arch/x86/kernel/cpu/perf_event.c 2012-05-21 11:32:57.407927655 +0200
++++ linux-3.4-pax/arch/x86/kernel/cpu/perf_event.c 2012-05-21 12:10:09.504048887 +0200
+@@ -1835,7 +1835,7 @@ perf_callchain_user(struct perf_callchai
+ break;
+
+ perf_callchain_store(entry, frame.return_address);
+- fp = frame.next_frame;
++ fp = (const void __force_user *)frame.next_frame;
+ }
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/crash.c linux-3.4-pax/arch/x86/kernel/crash.c
+--- linux-3.4/arch/x86/kernel/crash.c 2012-01-08 19:47:49.395472991 +0100
++++ linux-3.4-pax/arch/x86/kernel/crash.c 2012-05-21 12:10:09.504048887 +0200
+@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu,
+ {
+ #ifdef CONFIG_X86_32
+ struct pt_regs fixed_regs;
+-#endif
+
+-#ifdef CONFIG_X86_32
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ crash_fixup_ss_esp(&fixed_regs, regs);
+ regs = &fixed_regs;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/doublefault_32.c linux-3.4-pax/arch/x86/kernel/doublefault_32.c
+--- linux-3.4/arch/x86/kernel/doublefault_32.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/kernel/doublefault_32.c 2012-05-21 12:10:09.508048887 +0200
+@@ -11,7 +11,7 @@
+
+ #define DOUBLEFAULT_STACKSIZE (1024)
+ static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
+-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
++#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
+
+ #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
+
+@@ -21,7 +21,7 @@ static void doublefault_fn(void)
+ unsigned long gdt, tss;
+
+ store_gdt(&gdt_desc);
+- gdt = gdt_desc.address;
++ gdt = (unsigned long)gdt_desc.address;
+
+ printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
+
+@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
+ /* 0x2 bit is always set */
+ .flags = X86_EFLAGS_SF | 0x2,
+ .sp = STACK_START,
+- .es = __USER_DS,
++ .es = __KERNEL_DS,
+ .cs = __KERNEL_CS,
+ .ss = __KERNEL_DS,
+- .ds = __USER_DS,
++ .ds = __KERNEL_DS,
+ .fs = __KERNEL_PERCPU,
+
+ .__cr3 = __pa_nodebug(swapper_pg_dir),
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/dumpstack_32.c linux-3.4-pax/arch/x86/kernel/dumpstack_32.c
+--- linux-3.4/arch/x86/kernel/dumpstack_32.c 2012-05-21 11:32:57.471927660 +0200
++++ linux-3.4-pax/arch/x86/kernel/dumpstack_32.c 2012-05-21 12:10:09.508048887 +0200
+@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
+ bp = stack_frame(task, regs);
+
+ for (;;) {
+- struct thread_info *context;
++ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
+
+- context = (struct thread_info *)
+- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
++ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
+
+- stack = (unsigned long *)context->previous_esp;
+- if (!stack)
++ if (stack_start == task_stack_page(task))
+ break;
++ stack = *(unsigned long **)stack_start;
+ if (ops->stack(data, "IRQ") < 0)
+ break;
+ touch_nmi_watchdog();
+@@ -87,7 +85,7 @@ void show_registers(struct pt_regs *regs
+ int i;
+
+ print_modules();
+- __show_regs(regs, !user_mode_vm(regs));
++ __show_regs(regs, !user_mode(regs));
+
+ printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
+ TASK_COMM_LEN, current->comm, task_pid_nr(current),
+@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
+ * When in-kernel, we also print out the stack and code at the
+ * time of the fault..
+ */
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ unsigned int code_prologue = code_bytes * 43 / 64;
+ unsigned int code_len = code_bytes;
+ unsigned char c;
+ u8 *ip;
++ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
+
+ printk(KERN_EMERG "Stack:\n");
+ show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
+
+ printk(KERN_EMERG "Code: ");
+
+- ip = (u8 *)regs->ip - code_prologue;
++ ip = (u8 *)regs->ip - code_prologue + cs_base;
+ if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
+ /* try starting at IP */
+- ip = (u8 *)regs->ip;
++ ip = (u8 *)regs->ip + cs_base;
+ code_len = code_len - code_prologue + 1;
+ }
+ for (i = 0; i < code_len; i++, ip++) {
+@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
+ printk(KERN_CONT " Bad EIP value.");
+ break;
+ }
+- if (ip == (u8 *)regs->ip)
++ if (ip == (u8 *)regs->ip + cs_base)
+ printk(KERN_CONT "<%02x> ", c);
+ else
+ printk(KERN_CONT "%02x ", c);
+@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
+ {
+ unsigned short ud2;
+
++ ip = ktla_ktva(ip);
+ if (ip < PAGE_OFFSET)
+ return 0;
+ if (probe_kernel_address((unsigned short *)ip, ud2))
+@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
+
+ return ud2 == 0x0b0f;
+ }
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++void pax_check_alloca(unsigned long size)
++{
++ unsigned long sp = (unsigned long)&sp, stack_left;
++
++ /* all kernel stacks are of the same size */
++ stack_left = sp & (THREAD_SIZE - 1);
++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
++}
++EXPORT_SYMBOL(pax_check_alloca);
++#endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/dumpstack_64.c linux-3.4-pax/arch/x86/kernel/dumpstack_64.c
+--- linux-3.4/arch/x86/kernel/dumpstack_64.c 2012-03-19 10:38:56.536050000 +0100
++++ linux-3.4-pax/arch/x86/kernel/dumpstack_64.c 2012-05-25 08:41:01.570799108 +0200
+@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task
+ unsigned long *irq_stack_end =
+ (unsigned long *)per_cpu(irq_stack_ptr, cpu);
+ unsigned used = 0;
+- struct thread_info *tinfo;
+ int graph = 0;
+ unsigned long dummy;
++ void *stack_start;
+
+ if (!task)
+ task = current;
+@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task
+ * current stack address. If the stacks consist of nested
+ * exceptions
+ */
+- tinfo = task_thread_info(task);
+ for (;;) {
+ char *id;
+ unsigned long *estack_end;
++
+ estack_end = in_exception_stack(cpu, (unsigned long)stack,
+ &used, &id);
+
+@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task
+ if (ops->stack(data, id) < 0)
+ break;
+
+- bp = ops->walk_stack(tinfo, stack, bp, ops,
++ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
+ data, estack_end, &graph);
+ ops->stack(data, "<EOE>");
+ /*
+@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task
+ * second-to-last pointer (index -2 to end) in the
+ * exception stack:
+ */
++ if ((u16)estack_end[-1] != __KERNEL_DS)
++ goto out;
+ stack = (unsigned long *) estack_end[-2];
+ continue;
+ }
+@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task
+ if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
+ if (ops->stack(data, "IRQ") < 0)
+ break;
+- bp = ops->walk_stack(tinfo, stack, bp,
++ bp = ops->walk_stack(task, irq_stack, stack, bp,
+ ops, data, irq_stack_end, &graph);
+ /*
+ * We link to the next stack (which would be
+@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task
+ /*
+ * This handles the process stack:
+ */
+- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
++ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
++ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
++out:
+ put_cpu();
+ }
+ EXPORT_SYMBOL(dump_trace);
+@@ -305,3 +309,50 @@ int is_valid_bugaddr(unsigned long ip)
+
+ return ud2 == 0x0b0f;
+ }
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++void pax_check_alloca(unsigned long size)
++{
++ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
++ unsigned cpu, used;
++ char *id;
++
++ /* check the process stack first */
++ stack_start = (unsigned long)task_stack_page(current);
++ stack_end = stack_start + THREAD_SIZE;
++ if (likely(stack_start <= sp && sp < stack_end)) {
++ unsigned long stack_left = sp & (THREAD_SIZE - 1);
++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
++ return;
++ }
++
++ cpu = get_cpu();
++
++ /* check the irq stacks */
++ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
++ stack_start = stack_end - IRQ_STACK_SIZE;
++ if (stack_start <= sp && sp < stack_end) {
++ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
++ put_cpu();
++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
++ return;
++ }
++
++ /* check the exception stacks */
++ used = 0;
++ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
++ stack_start = stack_end - EXCEPTION_STKSZ;
++ if (stack_end && stack_start <= sp && sp < stack_end) {
++ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
++ put_cpu();
++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
++ return;
++ }
++
++ put_cpu();
++
++ /* unknown stack */
++ BUG();
++}
++EXPORT_SYMBOL(pax_check_alloca);
++#endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/dumpstack.c linux-3.4-pax/arch/x86/kernel/dumpstack.c
+--- linux-3.4/arch/x86/kernel/dumpstack.c 2012-05-21 11:32:57.467927659 +0200
++++ linux-3.4-pax/arch/x86/kernel/dumpstack.c 2012-05-21 12:10:09.516048888 +0200
+@@ -35,16 +35,14 @@ void printk_address(unsigned long addres
+ static void
+ print_ftrace_graph_addr(unsigned long addr, void *data,
+ const struct stacktrace_ops *ops,
+- struct thread_info *tinfo, int *graph)
++ struct task_struct *task, int *graph)
+ {
+- struct task_struct *task;
+ unsigned long ret_addr;
+ int index;
+
+ if (addr != (unsigned long)return_to_handler)
+ return;
+
+- task = tinfo->task;
+ index = task->curr_ret_stack;
+
+ if (!task->ret_stack || index < *graph)
+@@ -61,7 +59,7 @@ print_ftrace_graph_addr(unsigned long ad
+ static inline void
+ print_ftrace_graph_addr(unsigned long addr, void *data,
+ const struct stacktrace_ops *ops,
+- struct thread_info *tinfo, int *graph)
++ struct task_struct *task, int *graph)
+ { }
+ #endif
+
+@@ -72,10 +70,8 @@ print_ftrace_graph_addr(unsigned long ad
+ * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
+ */
+
+-static inline int valid_stack_ptr(struct thread_info *tinfo,
+- void *p, unsigned int size, void *end)
++static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
+ {
+- void *t = tinfo;
+ if (end) {
+ if (p < end && p >= (end-THREAD_SIZE))
+ return 1;
+@@ -86,14 +82,14 @@ static inline int valid_stack_ptr(struct
+ }
+
+ unsigned long
+-print_context_stack(struct thread_info *tinfo,
++print_context_stack(struct task_struct *task, void *stack_start,
+ unsigned long *stack, unsigned long bp,
+ const struct stacktrace_ops *ops, void *data,
+ unsigned long *end, int *graph)
+ {
+ struct stack_frame *frame = (struct stack_frame *)bp;
+
+- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
++ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
+ unsigned long addr;
+
+ addr = *stack;
+@@ -105,7 +101,7 @@ print_context_stack(struct thread_info *
+ } else {
+ ops->address(data, addr, 0);
+ }
+- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
++ print_ftrace_graph_addr(addr, data, ops, task, graph);
+ }
+ stack++;
+ }
+@@ -114,7 +110,7 @@ print_context_stack(struct thread_info *
+ EXPORT_SYMBOL_GPL(print_context_stack);
+
+ unsigned long
+-print_context_stack_bp(struct thread_info *tinfo,
++print_context_stack_bp(struct task_struct *task, void *stack_start,
+ unsigned long *stack, unsigned long bp,
+ const struct stacktrace_ops *ops, void *data,
+ unsigned long *end, int *graph)
+@@ -122,7 +118,7 @@ print_context_stack_bp(struct thread_inf
+ struct stack_frame *frame = (struct stack_frame *)bp;
+ unsigned long *ret_addr = &frame->return_address;
+
+- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
++ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
+ unsigned long addr = *ret_addr;
+
+ if (!__kernel_text_address(addr))
+@@ -131,7 +127,7 @@ print_context_stack_bp(struct thread_inf
+ ops->address(data, addr, 1);
+ frame = frame->next_frame;
+ ret_addr = &frame->return_address;
+- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
++ print_ftrace_graph_addr(addr, data, ops, task, graph);
+ }
+
+ return (unsigned long)frame;
+@@ -189,7 +185,7 @@ void dump_stack(void)
+
+ bp = stack_frame(current, NULL);
+ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+- current->pid, current->comm, print_tainted(),
++ task_pid_nr(current), current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+@@ -246,7 +242,7 @@ void __kprobes oops_end(unsigned long fl
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
+- do_exit(signr);
++ do_group_exit(signr);
+ }
+
+ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+@@ -273,7 +269,7 @@ int __kprobes __die(const char *str, str
+
+ show_registers(regs);
+ #ifdef CONFIG_X86_32
+- if (user_mode_vm(regs)) {
++ if (user_mode(regs)) {
+ sp = regs->sp;
+ ss = regs->ss & 0xffff;
+ } else {
+@@ -301,7 +297,7 @@ void die(const char *str, struct pt_regs
+ unsigned long flags = oops_begin();
+ int sig = SIGSEGV;
+
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ report_bug(regs->ip, regs);
+
+ if (__die(str, regs, err))
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/early_printk.c linux-3.4-pax/arch/x86/kernel/early_printk.c
+--- linux-3.4/arch/x86/kernel/early_printk.c 2012-03-19 10:38:56.540049999 +0100
++++ linux-3.4-pax/arch/x86/kernel/early_printk.c 2012-05-21 12:10:09.516048888 +0200
+@@ -7,6 +7,7 @@
+ #include <linux/pci_regs.h>
+ #include <linux/pci_ids.h>
+ #include <linux/errno.h>
++#include <linux/sched.h>
+ #include <asm/io.h>
+ #include <asm/processor.h>
+ #include <asm/fcntl.h>
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/entry_32.S linux-3.4-pax/arch/x86/kernel/entry_32.S
+--- linux-3.4/arch/x86/kernel/entry_32.S 2012-05-21 11:32:57.475927660 +0200
++++ linux-3.4-pax/arch/x86/kernel/entry_32.S 2012-05-21 12:10:09.520048888 +0200
+@@ -179,13 +179,146 @@
+ /*CFI_REL_OFFSET gs, PT_GS*/
+ .endm
+ .macro SET_KERNEL_GS reg
++
++#ifdef CONFIG_CC_STACKPROTECTOR
+ movl $(__KERNEL_STACK_CANARY), \reg
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++ movl $(__USER_DS), \reg
++#else
++ xorl \reg, \reg
++#endif
++
+ movl \reg, %gs
+ .endm
+
+ #endif /* CONFIG_X86_32_LAZY_GS */
+
+-.macro SAVE_ALL
++.macro pax_enter_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_enter_kernel
++#endif
++.endm
++
++.macro pax_exit_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_exit_kernel
++#endif
++.endm
++
++#ifdef CONFIG_PAX_KERNEXEC
++ENTRY(pax_enter_kernel)
++#ifdef CONFIG_PARAVIRT
++ pushl %eax
++ pushl %ecx
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
++ mov %eax, %esi
++#else
++ mov %cr0, %esi
++#endif
++ bts $16, %esi
++ jnc 1f
++ mov %cs, %esi
++ cmp $__KERNEL_CS, %esi
++ jz 3f
++ ljmp $__KERNEL_CS, $3f
++1: ljmp $__KERNEXEC_KERNEL_CS, $2f
++2:
++#ifdef CONFIG_PARAVIRT
++ mov %esi, %eax
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++#else
++ mov %esi, %cr0
++#endif
++3:
++#ifdef CONFIG_PARAVIRT
++ popl %ecx
++ popl %eax
++#endif
++ ret
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++#ifdef CONFIG_PARAVIRT
++ pushl %eax
++ pushl %ecx
++#endif
++ mov %cs, %esi
++ cmp $__KERNEXEC_KERNEL_CS, %esi
++ jnz 2f
++#ifdef CONFIG_PARAVIRT
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
++ mov %eax, %esi
++#else
++ mov %cr0, %esi
++#endif
++ btr $16, %esi
++ ljmp $__KERNEL_CS, $1f
++1:
++#ifdef CONFIG_PARAVIRT
++ mov %esi, %eax
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
++#else
++ mov %esi, %cr0
++#endif
++2:
++#ifdef CONFIG_PARAVIRT
++ popl %ecx
++ popl %eax
++#endif
++ ret
++ENDPROC(pax_exit_kernel)
++#endif
++
++.macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++.endm
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++/*
++ * ebp: thread_info
++ * ecx, edx: can be clobbered
++ */
++ENTRY(pax_erase_kstack)
++ pushl %edi
++ pushl %eax
++
++ mov TI_lowest_stack(%ebp), %edi
++ mov $0xB4DD00D5, %eax
++ std
++
++1: mov %edi, %ecx
++ and $THREAD_SIZE_asm - 1, %ecx
++ shr $2, %ecx
++ repne scasl
++ jecxz 2f
++
++ cmp $2*16, %ecx
++ jc 2f
++
++ mov $2*16, %ecx
++ repe scasl
++ jecxz 2f
++ jne 1b
++
++2: cld
++ mov %esp, %ecx
++ sub %edi, %ecx
++ shr $2, %ecx
++ rep stosl
++
++ mov TI_task_thread_sp0(%ebp), %edi
++ sub $128, %edi
++ mov %edi, TI_lowest_stack(%ebp)
++
++ popl %eax
++ popl %edi
++ ret
++ENDPROC(pax_erase_kstack)
++#endif
++
++.macro __SAVE_ALL _DS
+ cld
+ PUSH_GS
+ pushl_cfi %fs
+@@ -208,7 +341,7 @@
+ CFI_REL_OFFSET ecx, 0
+ pushl_cfi %ebx
+ CFI_REL_OFFSET ebx, 0
+- movl $(__USER_DS), %edx
++ movl $\_DS, %edx
+ movl %edx, %ds
+ movl %edx, %es
+ movl $(__KERNEL_PERCPU), %edx
+@@ -216,6 +349,15 @@
+ SET_KERNEL_GS %edx
+ .endm
+
++.macro SAVE_ALL
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ __SAVE_ALL __KERNEL_DS
++ pax_enter_kernel
++#else
++ __SAVE_ALL __USER_DS
++#endif
++.endm
++
+ .macro RESTORE_INT_REGS
+ popl_cfi %ebx
+ CFI_RESTORE ebx
+@@ -301,7 +443,7 @@ ENTRY(ret_from_fork)
+ popfl_cfi
+ jmp syscall_exit
+ CFI_ENDPROC
+-END(ret_from_fork)
++ENDPROC(ret_from_fork)
+
+ /*
+ * Interrupt exit functions should be protected against kprobes
+@@ -335,7 +477,15 @@ resume_userspace_sig:
+ andl $SEGMENT_RPL_MASK, %eax
+ #endif
+ cmpl $USER_RPL, %eax
++
++#ifdef CONFIG_PAX_KERNEXEC
++ jae resume_userspace
++
++ pax_exit_kernel
++ jmp resume_kernel
++#else
+ jb resume_kernel # not returning to v8086 or userspace
++#endif
+
+ ENTRY(resume_userspace)
+ LOCKDEP_SYS_EXIT
+@@ -347,8 +497,8 @@ ENTRY(resume_userspace)
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
+ # int/exception return?
+ jne work_pending
+- jmp restore_all
+-END(ret_from_exception)
++ jmp restore_all_pax
++ENDPROC(ret_from_exception)
+
+ #ifdef CONFIG_PREEMPT
+ ENTRY(resume_kernel)
+@@ -363,7 +513,7 @@ need_resched:
+ jz restore_all
+ call preempt_schedule_irq
+ jmp need_resched
+-END(resume_kernel)
++ENDPROC(resume_kernel)
+ #endif
+ CFI_ENDPROC
+ /*
+@@ -397,23 +547,34 @@ sysenter_past_esp:
+ /*CFI_REL_OFFSET cs, 0*/
+ /*
+ * Push current_thread_info()->sysenter_return to the stack.
+- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
+- * pushed above; +8 corresponds to copy_thread's esp0 setting.
+ */
+- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
++ pushl_cfi $0
+ CFI_REL_OFFSET eip, 0
+
+ pushl_cfi %eax
+ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ movl TI_sysenter_return(%ebp),%ebp
++ movl %ebp,PT_EIP(%esp)
+ ENABLE_INTERRUPTS(CLBR_NONE)
+
+ /*
+ * Load the potential sixth argument from user stack.
+ * Careful about security.
+ */
++ movl PT_OLDESP(%esp),%ebp
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov PT_OLDSS(%esp),%ds
++1: movl %ds:(%ebp),%ebp
++ push %ss
++ pop %ds
++#else
+ cmpl $__PAGE_OFFSET-3,%ebp
+ jae syscall_fault
+ 1: movl (%ebp),%ebp
++#endif
++
+ movl %ebp,PT_EBP(%esp)
+ .section __ex_table,"a"
+ .align 4
+@@ -436,12 +597,24 @@ sysenter_do_call:
+ testl $_TIF_ALLWORK_MASK, %ecx
+ jne sysexit_audit
+ sysenter_exit:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushl_cfi %eax
++ movl %esp, %eax
++ call pax_randomize_kstack
++ popl_cfi %eax
++#endif
++
++ pax_erase_kstack
++
+ /* if something modifies registers it must also disable sysexit */
+ movl PT_EIP(%esp), %edx
+ movl PT_OLDESP(%esp), %ecx
+ xorl %ebp,%ebp
+ TRACE_IRQS_ON
+ 1: mov PT_FS(%esp), %fs
++2: mov PT_DS(%esp), %ds
++3: mov PT_ES(%esp), %es
+ PTGS_TO_GS
+ ENABLE_INTERRUPTS_SYSEXIT
+
+@@ -458,6 +631,9 @@ sysenter_audit:
+ movl %eax,%edx /* 2nd arg: syscall number */
+ movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
+ call __audit_syscall_entry
++
++ pax_erase_kstack
++
+ pushl_cfi %ebx
+ movl PT_EAX(%esp),%eax /* reload syscall number */
+ jmp sysenter_do_call
+@@ -483,11 +659,17 @@ sysexit_audit:
+
+ CFI_ENDPROC
+ .pushsection .fixup,"ax"
+-2: movl $0,PT_FS(%esp)
++4: movl $0,PT_FS(%esp)
++ jmp 1b
++5: movl $0,PT_DS(%esp)
++ jmp 1b
++6: movl $0,PT_ES(%esp)
+ jmp 1b
+ .section __ex_table,"a"
+ .align 4
+- .long 1b,2b
++ .long 1b,4b
++ .long 2b,5b
++ .long 3b,6b
+ .popsection
+ PTGS_TO_GS_EX
+ ENDPROC(ia32_sysenter_target)
+@@ -520,6 +702,15 @@ syscall_exit:
+ testl $_TIF_ALLWORK_MASK, %ecx # current->work
+ jne syscall_exit_work
+
++restore_all_pax:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ movl %esp, %eax
++ call pax_randomize_kstack
++#endif
++
++ pax_erase_kstack
++
+ restore_all:
+ TRACE_IRQS_IRET
+ restore_all_notrace:
+@@ -579,14 +770,34 @@ ldt_ss:
+ * compensating for the offset by changing to the ESPFIX segment with
+ * a base address that matches for the difference.
+ */
+-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
++#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
+ mov %esp, %edx /* load kernel esp */
+ mov PT_OLDESP(%esp), %eax /* load userspace esp */
+ mov %dx, %ax /* eax: new kernel esp */
+ sub %eax, %edx /* offset (low word is 0) */
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
+ shr $16, %edx
+- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
+- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ mov %cr0, %esi
++ btr $16, %esi
++ mov %esi, %cr0
++#endif
++
++ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
++ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ bts $16, %esi
++ mov %esi, %cr0
++#endif
++
+ pushl_cfi $__ESPFIX_SS
+ pushl_cfi %eax /* new kernel esp */
+ /* Disable interrupts, but do not irqtrace this section: we
+@@ -615,38 +826,30 @@ work_resched:
+ movl TI_flags(%ebp), %ecx
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
+ # than syscall tracing?
+- jz restore_all
++ jz restore_all_pax
+ testb $_TIF_NEED_RESCHED, %cl
+ jnz work_resched
+
+ work_notifysig: # deal with pending signals and
+ # notify-resume requests
++ movl %esp, %eax
+ #ifdef CONFIG_VM86
+ testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
+- movl %esp, %eax
+- jne work_notifysig_v86 # returning to kernel-space or
++ jz 1f # returning to kernel-space or
+ # vm86-space
+- TRACE_IRQS_ON
+- ENABLE_INTERRUPTS(CLBR_NONE)
+- xorl %edx, %edx
+- call do_notify_resume
+- jmp resume_userspace_sig
+
+- ALIGN
+-work_notifysig_v86:
+ pushl_cfi %ecx # save ti_flags for do_notify_resume
+ call save_v86_state # %eax contains pt_regs pointer
+ popl_cfi %ecx
+ movl %eax, %esp
+-#else
+- movl %esp, %eax
++1:
+ #endif
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
+ xorl %edx, %edx
+ call do_notify_resume
+ jmp resume_userspace_sig
+-END(work_pending)
++ENDPROC(work_pending)
+
+ # perform syscall exit tracing
+ ALIGN
+@@ -654,11 +857,14 @@ syscall_trace_entry:
+ movl $-ENOSYS,PT_EAX(%esp)
+ movl %esp, %eax
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ /* What it returned is what we'll actually use. */
+ cmpl $(NR_syscalls), %eax
+ jnae syscall_call
+ jmp syscall_exit
+-END(syscall_trace_entry)
++ENDPROC(syscall_trace_entry)
+
+ # perform syscall exit tracing
+ ALIGN
+@@ -671,20 +877,24 @@ syscall_exit_work:
+ movl %esp, %eax
+ call syscall_trace_leave
+ jmp resume_userspace
+-END(syscall_exit_work)
++ENDPROC(syscall_exit_work)
+ CFI_ENDPROC
+
+ RING0_INT_FRAME # can't unwind into user space anyway
+ syscall_fault:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ push %ss
++ pop %ds
++#endif
+ GET_THREAD_INFO(%ebp)
+ movl $-EFAULT,PT_EAX(%esp)
+ jmp resume_userspace
+-END(syscall_fault)
++ENDPROC(syscall_fault)
+
+ syscall_badsys:
+ movl $-ENOSYS,PT_EAX(%esp)
+ jmp resume_userspace
+-END(syscall_badsys)
++ENDPROC(syscall_badsys)
+ CFI_ENDPROC
+ /*
+ * End of kprobes section
+@@ -756,6 +966,36 @@ ENTRY(ptregs_clone)
+ CFI_ENDPROC
+ ENDPROC(ptregs_clone)
+
++ ALIGN;
++ENTRY(kernel_execve)
++ CFI_STARTPROC
++ pushl_cfi %ebp
++ sub $PT_OLDSS+4,%esp
++ pushl_cfi %edi
++ pushl_cfi %ecx
++ pushl_cfi %eax
++ lea 3*4(%esp),%edi
++ mov $PT_OLDSS/4+1,%ecx
++ xorl %eax,%eax
++ rep stosl
++ popl_cfi %eax
++ popl_cfi %ecx
++ popl_cfi %edi
++ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
++ pushl_cfi %esp
++ call sys_execve
++ add $4,%esp
++ CFI_ADJUST_CFA_OFFSET -4
++ GET_THREAD_INFO(%ebp)
++ test %eax,%eax
++ jz syscall_exit
++ add $PT_OLDSS+4,%esp
++ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
++ popl_cfi %ebp
++ ret
++ CFI_ENDPROC
++ENDPROC(kernel_execve)
++
+ .macro FIXUP_ESPFIX_STACK
+ /*
+ * Switch back for ESPFIX stack to the normal zerobased stack
+@@ -765,8 +1005,15 @@ ENDPROC(ptregs_clone)
+ * normal stack and adjusts ESP with the matching offset.
+ */
+ /* fixup the stack */
+- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
+- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
++ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
++ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
+ shl $16, %eax
+ addl %esp, %eax /* the adjusted stack pointer */
+ pushl_cfi $__KERNEL_DS
+@@ -819,7 +1066,7 @@ vector=vector+1
+ .endr
+ 2: jmp common_interrupt
+ .endr
+-END(irq_entries_start)
++ENDPROC(irq_entries_start)
+
+ .previous
+ END(interrupt)
+@@ -867,7 +1114,7 @@ ENTRY(coprocessor_error)
+ pushl_cfi $do_coprocessor_error
+ jmp error_code
+ CFI_ENDPROC
+-END(coprocessor_error)
++ENDPROC(coprocessor_error)
+
+ ENTRY(simd_coprocessor_error)
+ RING0_INT_FRAME
+@@ -888,7 +1135,7 @@ ENTRY(simd_coprocessor_error)
+ #endif
+ jmp error_code
+ CFI_ENDPROC
+-END(simd_coprocessor_error)
++ENDPROC(simd_coprocessor_error)
+
+ ENTRY(device_not_available)
+ RING0_INT_FRAME
+@@ -896,7 +1143,7 @@ ENTRY(device_not_available)
+ pushl_cfi $do_device_not_available
+ jmp error_code
+ CFI_ENDPROC
+-END(device_not_available)
++ENDPROC(device_not_available)
+
+ #ifdef CONFIG_PARAVIRT
+ ENTRY(native_iret)
+@@ -905,12 +1152,12 @@ ENTRY(native_iret)
+ .align 4
+ .long native_iret, iret_exc
+ .previous
+-END(native_iret)
++ENDPROC(native_iret)
+
+ ENTRY(native_irq_enable_sysexit)
+ sti
+ sysexit
+-END(native_irq_enable_sysexit)
++ENDPROC(native_irq_enable_sysexit)
+ #endif
+
+ ENTRY(overflow)
+@@ -919,7 +1166,7 @@ ENTRY(overflow)
+ pushl_cfi $do_overflow
+ jmp error_code
+ CFI_ENDPROC
+-END(overflow)
++ENDPROC(overflow)
+
+ ENTRY(bounds)
+ RING0_INT_FRAME
+@@ -927,7 +1174,7 @@ ENTRY(bounds)
+ pushl_cfi $do_bounds
+ jmp error_code
+ CFI_ENDPROC
+-END(bounds)
++ENDPROC(bounds)
+
+ ENTRY(invalid_op)
+ RING0_INT_FRAME
+@@ -935,7 +1182,7 @@ ENTRY(invalid_op)
+ pushl_cfi $do_invalid_op
+ jmp error_code
+ CFI_ENDPROC
+-END(invalid_op)
++ENDPROC(invalid_op)
+
+ ENTRY(coprocessor_segment_overrun)
+ RING0_INT_FRAME
+@@ -943,35 +1190,35 @@ ENTRY(coprocessor_segment_overrun)
+ pushl_cfi $do_coprocessor_segment_overrun
+ jmp error_code
+ CFI_ENDPROC
+-END(coprocessor_segment_overrun)
++ENDPROC(coprocessor_segment_overrun)
+
+ ENTRY(invalid_TSS)
+ RING0_EC_FRAME
+ pushl_cfi $do_invalid_TSS
+ jmp error_code
+ CFI_ENDPROC
+-END(invalid_TSS)
++ENDPROC(invalid_TSS)
+
+ ENTRY(segment_not_present)
+ RING0_EC_FRAME
+ pushl_cfi $do_segment_not_present
+ jmp error_code
+ CFI_ENDPROC
+-END(segment_not_present)
++ENDPROC(segment_not_present)
+
+ ENTRY(stack_segment)
+ RING0_EC_FRAME
+ pushl_cfi $do_stack_segment
+ jmp error_code
+ CFI_ENDPROC
+-END(stack_segment)
++ENDPROC(stack_segment)
+
+ ENTRY(alignment_check)
+ RING0_EC_FRAME
+ pushl_cfi $do_alignment_check
+ jmp error_code
+ CFI_ENDPROC
+-END(alignment_check)
++ENDPROC(alignment_check)
+
+ ENTRY(divide_error)
+ RING0_INT_FRAME
+@@ -979,7 +1226,7 @@ ENTRY(divide_error)
+ pushl_cfi $do_divide_error
+ jmp error_code
+ CFI_ENDPROC
+-END(divide_error)
++ENDPROC(divide_error)
+
+ #ifdef CONFIG_X86_MCE
+ ENTRY(machine_check)
+@@ -988,7 +1235,7 @@ ENTRY(machine_check)
+ pushl_cfi machine_check_vector
+ jmp error_code
+ CFI_ENDPROC
+-END(machine_check)
++ENDPROC(machine_check)
+ #endif
+
+ ENTRY(spurious_interrupt_bug)
+@@ -997,7 +1244,7 @@ ENTRY(spurious_interrupt_bug)
+ pushl_cfi $do_spurious_interrupt_bug
+ jmp error_code
+ CFI_ENDPROC
+-END(spurious_interrupt_bug)
++ENDPROC(spurious_interrupt_bug)
+ /*
+ * End of kprobes section
+ */
+@@ -1112,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector
+
+ ENTRY(mcount)
+ ret
+-END(mcount)
++ENDPROC(mcount)
+
+ ENTRY(ftrace_caller)
+ cmpl $0, function_trace_stop
+@@ -1141,7 +1388,7 @@ ftrace_graph_call:
+ .globl ftrace_stub
+ ftrace_stub:
+ ret
+-END(ftrace_caller)
++ENDPROC(ftrace_caller)
+
+ #else /* ! CONFIG_DYNAMIC_FTRACE */
+
+@@ -1177,7 +1424,7 @@ trace:
+ popl %ecx
+ popl %eax
+ jmp ftrace_stub
+-END(mcount)
++ENDPROC(mcount)
+ #endif /* CONFIG_DYNAMIC_FTRACE */
+ #endif /* CONFIG_FUNCTION_TRACER */
+
+@@ -1198,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
+ popl %ecx
+ popl %eax
+ ret
+-END(ftrace_graph_caller)
++ENDPROC(ftrace_graph_caller)
+
+ .globl return_to_handler
+ return_to_handler:
+@@ -1253,15 +1500,18 @@ error_code:
+ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
+ REG_TO_PTGS %ecx
+ SET_KERNEL_GS %ecx
+- movl $(__USER_DS), %ecx
++ movl $(__KERNEL_DS), %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
++
++ pax_enter_kernel
++
+ TRACE_IRQS_OFF
+ movl %esp,%eax # pt_regs pointer
+ call *%edi
+ jmp ret_from_exception
+ CFI_ENDPROC
+-END(page_fault)
++ENDPROC(page_fault)
+
+ /*
+ * Debug traps and NMI can happen at the one SYSENTER instruction
+@@ -1303,7 +1553,7 @@ debug_stack_correct:
+ call do_debug
+ jmp ret_from_exception
+ CFI_ENDPROC
+-END(debug)
++ENDPROC(debug)
+
+ /*
+ * NMI is doubly nasty. It can happen _while_ we're handling
+@@ -1340,6 +1590,9 @@ nmi_stack_correct:
+ xorl %edx,%edx # zero error code
+ movl %esp,%eax # pt_regs pointer
+ call do_nmi
++
++ pax_exit_kernel
++
+ jmp restore_all_notrace
+ CFI_ENDPROC
+
+@@ -1376,12 +1629,15 @@ nmi_espfix_stack:
+ FIXUP_ESPFIX_STACK # %eax == %esp
+ xorl %edx,%edx # zero error code
+ call do_nmi
++
++ pax_exit_kernel
++
+ RESTORE_REGS
+ lss 12+4(%esp), %esp # back to espfix stack
+ CFI_ADJUST_CFA_OFFSET -24
+ jmp irq_return
+ CFI_ENDPROC
+-END(nmi)
++ENDPROC(nmi)
+
+ ENTRY(int3)
+ RING0_INT_FRAME
+@@ -1393,14 +1649,14 @@ ENTRY(int3)
+ call do_int3
+ jmp ret_from_exception
+ CFI_ENDPROC
+-END(int3)
++ENDPROC(int3)
+
+ ENTRY(general_protection)
+ RING0_EC_FRAME
+ pushl_cfi $do_general_protection
+ jmp error_code
+ CFI_ENDPROC
+-END(general_protection)
++ENDPROC(general_protection)
+
+ #ifdef CONFIG_KVM_GUEST
+ ENTRY(async_page_fault)
+@@ -1408,7 +1664,7 @@ ENTRY(async_page_fault)
+ pushl_cfi $do_async_page_fault
+ jmp error_code
+ CFI_ENDPROC
+-END(async_page_fault)
++ENDPROC(async_page_fault)
+ #endif
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/entry_64.S linux-3.4-pax/arch/x86/kernel/entry_64.S
+--- linux-3.4/arch/x86/kernel/entry_64.S 2012-05-21 11:32:57.479927660 +0200
++++ linux-3.4-pax/arch/x86/kernel/entry_64.S 2012-05-21 12:10:09.524048888 +0200
+@@ -56,6 +56,8 @@
+ #include <asm/ftrace.h>
+ #include <asm/percpu.h>
+ #include <linux/err.h>
++#include <asm/pgtable.h>
++#include <asm/alternative-asm.h>
+
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
+ #include <linux/elf-em.h>
+@@ -69,8 +71,9 @@
+ #ifdef CONFIG_FUNCTION_TRACER
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ ENTRY(mcount)
++ pax_force_retaddr
+ retq
+-END(mcount)
++ENDPROC(mcount)
+
+ ENTRY(ftrace_caller)
+ cmpl $0, function_trace_stop
+@@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
+ #endif
+
+ GLOBAL(ftrace_stub)
++ pax_force_retaddr
+ retq
+-END(ftrace_caller)
++ENDPROC(ftrace_caller)
+
+ #else /* ! CONFIG_DYNAMIC_FTRACE */
+ ENTRY(mcount)
+@@ -113,6 +117,7 @@ ENTRY(mcount)
+ #endif
+
+ GLOBAL(ftrace_stub)
++ pax_force_retaddr
+ retq
+
+ trace:
+@@ -122,12 +127,13 @@ trace:
+ movq 8(%rbp), %rsi
+ subq $MCOUNT_INSN_SIZE, %rdi
+
++ pax_force_fptr ftrace_trace_function
+ call *ftrace_trace_function
+
+ MCOUNT_RESTORE_FRAME
+
+ jmp ftrace_stub
+-END(mcount)
++ENDPROC(mcount)
+ #endif /* CONFIG_DYNAMIC_FTRACE */
+ #endif /* CONFIG_FUNCTION_TRACER */
+
+@@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
+
+ MCOUNT_RESTORE_FRAME
+
++ pax_force_retaddr
+ retq
+-END(ftrace_graph_caller)
++ENDPROC(ftrace_graph_caller)
+
+ GLOBAL(return_to_handler)
+ subq $24, %rsp
+@@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
+ movq 8(%rsp), %rdx
+ movq (%rsp), %rax
+ addq $24, %rsp
++ pax_force_fptr %rdi
+ jmp *%rdi
+ #endif
+
+@@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
+ ENDPROC(native_usergs_sysret64)
+ #endif /* CONFIG_PARAVIRT */
+
++ .macro ljmpq sel, off
++#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
++ .byte 0x48; ljmp *1234f(%rip)
++ .pushsection .rodata
++ .align 16
++ 1234: .quad \off; .word \sel
++ .popsection
++#else
++ pushq $\sel
++ pushq $\off
++ lretq
++#endif
++ .endm
++
++ .macro pax_enter_kernel
++ pax_set_fptr_mask
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_enter_kernel
++#endif
++ .endm
++
++ .macro pax_exit_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_exit_kernel
++#endif
++ .endm
++
++#ifdef CONFIG_PAX_KERNEXEC
++ENTRY(pax_enter_kernel)
++ pushq %rdi
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ GET_CR0_INTO_RDI
++ bts $16,%rdi
++ jnc 3f
++ mov %cs,%edi
++ cmp $__KERNEL_CS,%edi
++ jnz 2f
++1:
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ popq %rdi
++ pax_force_retaddr
++ retq
++
++2: ljmpq __KERNEL_CS,1f
++3: ljmpq __KERNEXEC_KERNEL_CS,4f
++4: SET_RDI_INTO_CR0
++ jmp 1b
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++ pushq %rdi
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ mov %cs,%rdi
++ cmp $__KERNEXEC_KERNEL_CS,%edi
++ jz 2f
++1:
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI);
++#endif
++
++ popq %rdi
++ pax_force_retaddr
++ retq
++
++2: GET_CR0_INTO_RDI
++ btr $16,%rdi
++ ljmpq __KERNEL_CS,3f
++3: SET_RDI_INTO_CR0
++ jmp 1b
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI);
++#endif
++
++ popq %rdi
++ pax_force_retaddr
++ retq
++ENDPROC(pax_exit_kernel)
++#endif
++
++ .macro pax_enter_kernel_user
++ pax_set_fptr_mask
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_enter_kernel_user
++#endif
++ .endm
++
++ .macro pax_exit_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_exit_kernel_user
++#endif
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushq %rax
++ call pax_randomize_kstack
++ popq %rax
++#endif
++ .endm
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ENTRY(pax_enter_kernel_user)
++ pushq %rdi
++ pushq %rbx
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ GET_CR3_INTO_RDI
++ mov %rdi,%rbx
++ add $__START_KERNEL_map,%rbx
++ sub phys_base(%rip),%rbx
++
++#ifdef CONFIG_PARAVIRT
++ pushq %rdi
++ cmpl $0, pv_info+PARAVIRT_enabled
++ jz 1f
++ i = 0
++ .rept USER_PGD_PTRS
++ mov i*8(%rbx),%rsi
++ mov $0,%sil
++ lea i*8(%rbx),%rdi
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
++ i = i + 1
++ .endr
++ jmp 2f
++1:
++#endif
++
++ i = 0
++ .rept USER_PGD_PTRS
++ movb $0,i*8(%rbx)
++ i = i + 1
++ .endr
++
++#ifdef CONFIG_PARAVIRT
++2: popq %rdi
++#endif
++ SET_RDI_INTO_CR3
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ bts $16,%rdi
++ SET_RDI_INTO_CR0
++#endif
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ popq %rbx
++ popq %rdi
++ pax_force_retaddr
++ retq
++ENDPROC(pax_enter_kernel_user)
++
++ENTRY(pax_exit_kernel_user)
++ push %rdi
++
++#ifdef CONFIG_PARAVIRT
++ pushq %rbx
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ btr $16,%rdi
++ SET_RDI_INTO_CR0
++#endif
++
++ GET_CR3_INTO_RDI
++ add $__START_KERNEL_map,%rdi
++ sub phys_base(%rip),%rdi
++
++#ifdef CONFIG_PARAVIRT
++ cmpl $0, pv_info+PARAVIRT_enabled
++ jz 1f
++ mov %rdi,%rbx
++ i = 0
++ .rept USER_PGD_PTRS
++ mov i*8(%rbx),%rsi
++ mov $0x67,%sil
++ lea i*8(%rbx),%rdi
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
++ i = i + 1
++ .endr
++ jmp 2f
++1:
++#endif
++
++ i = 0
++ .rept USER_PGD_PTRS
++ movb $0x67,i*8(%rdi)
++ i = i + 1
++ .endr
++
++#ifdef CONFIG_PARAVIRT
++2: PV_RESTORE_REGS(CLBR_RDI)
++ popq %rbx
++#endif
++
++ popq %rdi
++ pax_force_retaddr
++ retq
++ENDPROC(pax_exit_kernel_user)
++#endif
++
++.macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++.endm
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++/*
++ * r11: thread_info
++ * rcx, rdx: can be clobbered
++ */
++ENTRY(pax_erase_kstack)
++ pushq %rdi
++ pushq %rax
++ pushq %r11
++
++ GET_THREAD_INFO(%r11)
++ mov TI_lowest_stack(%r11), %rdi
++ mov $0xB4DD00D5BADBABE5, %rax
++ std
++
++1: mov %edi, %ecx
++ and $THREAD_SIZE_asm - 1, %ecx
++ shr $3, %ecx
++ repne scasq
++ jecxz 2f
++
++ cmp $2*8, %ecx
++ jc 2f
++
++ mov $2*8, %ecx
++ repe scasq
++ jecxz 2f
++ jne 1b
++
++2: cld
++ mov %esp, %ecx
++ sub %edi, %ecx
++
++ cmp $THREAD_SIZE_asm, %rcx
++ jb 3f
++ ud2
++3:
++
++ shr $3, %ecx
++ rep stosq
++
++ mov TI_task_thread_sp0(%r11), %rdi
++ sub $256, %rdi
++ mov %rdi, TI_lowest_stack(%r11)
++
++ popq %r11
++ popq %rax
++ popq %rdi
++ pax_force_retaddr
++ ret
++ENDPROC(pax_erase_kstack)
++#endif
+
+ .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
+ #ifdef CONFIG_TRACE_IRQFLAGS
+@@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
+ .endm
+
+ .macro UNFAKE_STACK_FRAME
+- addq $8*6, %rsp
+- CFI_ADJUST_CFA_OFFSET -(6*8)
++ addq $8*6 + ARG_SKIP, %rsp
++ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
+ .endm
+
+ /*
+@@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
+ movq %rsp, %rsi
+
+ leaq -RBP(%rsp),%rdi /* arg1 for handler */
+- testl $3, CS-RBP(%rsi)
++ testb $3, CS-RBP(%rsi)
+ je 1f
+ SWAPGS
+ /*
+@@ -355,9 +639,10 @@ ENTRY(save_rest)
+ movq_cfi r15, R15+16
+ movq %r11, 8(%rsp) /* return address */
+ FIXUP_TOP_OF_STACK %r11, 16
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-END(save_rest)
++ENDPROC(save_rest)
+
+ /* save complete stack frame */
+ .pushsection .kprobes.text, "ax"
+@@ -386,9 +671,10 @@ ENTRY(save_paranoid)
+ js 1f /* negative -> in kernel */
+ SWAPGS
+ xorl %ebx,%ebx
+-1: ret
++1: pax_force_retaddr_bts
++ ret
+ CFI_ENDPROC
+-END(save_paranoid)
++ENDPROC(save_paranoid)
+ .popsection
+
+ /*
+@@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
+
+ RESTORE_REST
+
+- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
++ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
+ jz retint_restore_args
+
+ testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
+@@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
+ jmp ret_from_sys_call # go to the SYSRET fastpath
+
+ CFI_ENDPROC
+-END(ret_from_fork)
++ENDPROC(ret_from_fork)
+
+ /*
+ * System call entry. Up to 6 arguments in registers are supported.
+@@ -456,7 +742,7 @@ END(ret_from_fork)
+ ENTRY(system_call)
+ CFI_STARTPROC simple
+ CFI_SIGNAL_FRAME
+- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
++ CFI_DEF_CFA rsp,0
+ CFI_REGISTER rip,rcx
+ /*CFI_REGISTER rflags,r11*/
+ SWAPGS_UNSAFE_STACK
+@@ -469,16 +755,18 @@ GLOBAL(system_call_after_swapgs)
+
+ movq %rsp,PER_CPU_VAR(old_rsp)
+ movq PER_CPU_VAR(kernel_stack),%rsp
++ SAVE_ARGS 8*6,0
++ pax_enter_kernel_user
+ /*
+ * No need to follow this irqs off/on section - it's straight
+ * and short:
+ */
+ ENABLE_INTERRUPTS(CLBR_NONE)
+- SAVE_ARGS 8,0
+ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
+ movq %rcx,RIP-ARGOFFSET(%rsp)
+ CFI_REL_OFFSET rip,RIP-ARGOFFSET
+- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++ GET_THREAD_INFO(%rcx)
++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
+ jnz tracesys
+ system_call_fastpath:
+ #if __SYSCALL_MASK == ~0
+@@ -488,7 +776,7 @@ system_call_fastpath:
+ cmpl $__NR_syscall_max,%eax
+ #endif
+ ja badsys
+- movq %r10,%rcx
++ movq R10-ARGOFFSET(%rsp),%rcx
+ call *sys_call_table(,%rax,8) # XXX: rip relative
+ movq %rax,RAX-ARGOFFSET(%rsp)
+ /*
+@@ -502,10 +790,13 @@ sysret_check:
+ LOCKDEP_SYS_EXIT
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
++ GET_THREAD_INFO(%rcx)
++ movl TI_flags(%rcx),%edx
+ andl %edi,%edx
+ jnz sysret_careful
+ CFI_REMEMBER_STATE
++ pax_exit_kernel_user
++ pax_erase_kstack
+ /*
+ * sysretq will re-enable interrupts:
+ */
+@@ -557,14 +848,18 @@ badsys:
+ * jump back to the normal fast path.
+ */
+ auditsys:
+- movq %r10,%r9 /* 6th arg: 4th syscall arg */
++ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
+ movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
+ movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
+ movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
+ movq %rax,%rsi /* 2nd arg: syscall number */
+ movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
+ call __audit_syscall_entry
++
++ pax_erase_kstack
++
+ LOAD_ARGS 0 /* reload call-clobbered registers */
++ pax_set_fptr_mask
+ jmp system_call_fastpath
+
+ /*
+@@ -585,7 +880,7 @@ sysret_audit:
+ /* Do syscall tracing */
+ tracesys:
+ #ifdef CONFIG_AUDITSYSCALL
+- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
+ jz auditsys
+ #endif
+ SAVE_REST
+@@ -593,12 +888,16 @@ tracesys:
+ FIXUP_TOP_OF_STACK %rdi
+ movq %rsp,%rdi
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ /*
+ * Reload arg registers from stack in case ptrace changed them.
+ * We don't reload %rax because syscall_trace_enter() returned
+ * the value it wants us to use in the table lookup.
+ */
+ LOAD_ARGS ARGOFFSET, 1
++ pax_set_fptr_mask
+ RESTORE_REST
+ #if __SYSCALL_MASK == ~0
+ cmpq $__NR_syscall_max,%rax
+@@ -607,7 +906,7 @@ tracesys:
+ cmpl $__NR_syscall_max,%eax
+ #endif
+ ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
+- movq %r10,%rcx /* fixup for C */
++ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
+ call *sys_call_table(,%rax,8)
+ movq %rax,RAX-ARGOFFSET(%rsp)
+ /* Use IRET because user could have changed frame */
+@@ -628,6 +927,7 @@ GLOBAL(int_with_check)
+ andl %edi,%edx
+ jnz int_careful
+ andl $~TS_COMPAT,TI_status(%rcx)
++ pax_erase_kstack
+ jmp retint_swapgs
+
+ /* Either reschedule or signal or syscall exit tracking needed. */
+@@ -674,7 +974,7 @@ int_restore_rest:
+ TRACE_IRQS_OFF
+ jmp int_with_check
+ CFI_ENDPROC
+-END(system_call)
++ENDPROC(system_call)
+
+ /*
+ * Certain special system calls that need to save a complete full stack frame.
+@@ -690,7 +990,7 @@ ENTRY(\label)
+ call \func
+ jmp ptregscall_common
+ CFI_ENDPROC
+-END(\label)
++ENDPROC(\label)
+ .endm
+
+ PTREGSCALL stub_clone, sys_clone, %r8
+@@ -708,9 +1008,10 @@ ENTRY(ptregscall_common)
+ movq_cfi_restore R12+8, r12
+ movq_cfi_restore RBP+8, rbp
+ movq_cfi_restore RBX+8, rbx
++ pax_force_retaddr
+ ret $REST_SKIP /* pop extended registers */
+ CFI_ENDPROC
+-END(ptregscall_common)
++ENDPROC(ptregscall_common)
+
+ ENTRY(stub_execve)
+ CFI_STARTPROC
+@@ -725,7 +1026,7 @@ ENTRY(stub_execve)
+ RESTORE_REST
+ jmp int_ret_from_sys_call
+ CFI_ENDPROC
+-END(stub_execve)
++ENDPROC(stub_execve)
+
+ /*
+ * sigreturn is special because it needs to restore all registers on return.
+@@ -743,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
+ RESTORE_REST
+ jmp int_ret_from_sys_call
+ CFI_ENDPROC
+-END(stub_rt_sigreturn)
++ENDPROC(stub_rt_sigreturn)
+
+ #ifdef CONFIG_X86_X32_ABI
+ PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
+@@ -812,7 +1113,7 @@ vector=vector+1
+ 2: jmp common_interrupt
+ .endr
+ CFI_ENDPROC
+-END(irq_entries_start)
++ENDPROC(irq_entries_start)
+
+ .previous
+ END(interrupt)
+@@ -832,6 +1133,16 @@ END(interrupt)
+ subq $ORIG_RAX-RBP, %rsp
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
+ SAVE_ARGS_IRQ
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rdi)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ call \func
+ .endm
+
+@@ -863,7 +1174,7 @@ ret_from_intr:
+
+ exit_intr:
+ GET_THREAD_INFO(%rcx)
+- testl $3,CS-ARGOFFSET(%rsp)
++ testb $3,CS-ARGOFFSET(%rsp)
+ je retint_kernel
+
+ /* Interrupt came from user space */
+@@ -885,12 +1196,15 @@ retint_swapgs: /* return to user-space
+ * The iretq could re-enable interrupts:
+ */
+ DISABLE_INTERRUPTS(CLBR_ANY)
++ pax_exit_kernel_user
+ TRACE_IRQS_IRETQ
+ SWAPGS
+ jmp restore_args
+
+ retint_restore_args: /* return to kernel space */
+ DISABLE_INTERRUPTS(CLBR_ANY)
++ pax_exit_kernel
++ pax_force_retaddr RIP-ARGOFFSET
+ /*
+ * The iretq could re-enable interrupts:
+ */
+@@ -979,7 +1293,7 @@ ENTRY(retint_kernel)
+ #endif
+
+ CFI_ENDPROC
+-END(common_interrupt)
++ENDPROC(common_interrupt)
+ /*
+ * End of kprobes section
+ */
+@@ -996,7 +1310,7 @@ ENTRY(\sym)
+ interrupt \do_sym
+ jmp ret_from_intr
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ #ifdef CONFIG_SMP
+@@ -1069,12 +1383,22 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call error_entry
+ DEFAULT_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ xorl %esi,%esi /* no error code */
+ call \do_sym
+ jmp error_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ .macro paranoidzeroentry sym do_sym
+@@ -1086,15 +1410,25 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call save_paranoid
+ TRACE_IRQS_OFF
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ xorl %esi,%esi /* no error code */
+ call \do_sym
+ jmp paranoid_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
++#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
+ .macro paranoidzeroentry_ist sym do_sym ist
+ ENTRY(\sym)
+ INTR_FRAME
+@@ -1104,14 +1438,30 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call save_paranoid
+ TRACE_IRQS_OFF
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ xorl %esi,%esi /* no error code */
++#ifdef CONFIG_SMP
++ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
++ lea init_tss(%r12), %r12
++#else
++ lea init_tss(%rip), %r12
++#endif
+ subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
+ call \do_sym
+ addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
+ jmp paranoid_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ .macro errorentry sym do_sym
+@@ -1122,13 +1472,23 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call error_entry
+ DEFAULT_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ movq ORIG_RAX(%rsp),%rsi /* get error code */
+ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
+ call \do_sym
+ jmp error_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ /* error code is on the stack already */
+@@ -1141,13 +1501,23 @@ ENTRY(\sym)
+ call save_paranoid
+ DEFAULT_FRAME 0
+ TRACE_IRQS_OFF
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ movq ORIG_RAX(%rsp),%rsi /* get error code */
+ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
+ call \do_sym
+ jmp paranoid_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ zeroentry divide_error do_divide_error
+@@ -1177,9 +1547,10 @@ gs_change:
+ 2: mfence /* workaround */
+ SWAPGS
+ popfq_cfi
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-END(native_load_gs_index)
++ENDPROC(native_load_gs_index)
+
+ .section __ex_table,"a"
+ .align 8
+@@ -1201,13 +1572,14 @@ ENTRY(kernel_thread_helper)
+ * Here we are in the child and the registers are set as they were
+ * at kernel_thread() invocation in the parent.
+ */
++ pax_force_fptr %rsi
+ call *%rsi
+ # exit
+ mov %eax, %edi
+ call do_exit
+ ud2 # padding for call trace
+ CFI_ENDPROC
+-END(kernel_thread_helper)
++ENDPROC(kernel_thread_helper)
+
+ /*
+ * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
+@@ -1234,11 +1606,11 @@ ENTRY(kernel_execve)
+ RESTORE_REST
+ testq %rax,%rax
+ je int_ret_from_sys_call
+- RESTORE_ARGS
+ UNFAKE_STACK_FRAME
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-END(kernel_execve)
++ENDPROC(kernel_execve)
+
+ /* Call softirq on interrupt stack. Interrupts are off. */
+ ENTRY(call_softirq)
+@@ -1256,9 +1628,10 @@ ENTRY(call_softirq)
+ CFI_DEF_CFA_REGISTER rsp
+ CFI_ADJUST_CFA_OFFSET -8
+ decl PER_CPU_VAR(irq_count)
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-END(call_softirq)
++ENDPROC(call_softirq)
+
+ #ifdef CONFIG_XEN
+ zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
+@@ -1296,7 +1669,7 @@ ENTRY(xen_do_hypervisor_callback) # do
+ decl PER_CPU_VAR(irq_count)
+ jmp error_exit
+ CFI_ENDPROC
+-END(xen_do_hypervisor_callback)
++ENDPROC(xen_do_hypervisor_callback)
+
+ /*
+ * Hypervisor uses this for application faults while it executes.
+@@ -1355,7 +1728,7 @@ ENTRY(xen_failsafe_callback)
+ SAVE_ALL
+ jmp error_exit
+ CFI_ENDPROC
+-END(xen_failsafe_callback)
++ENDPROC(xen_failsafe_callback)
+
+ apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
+ xen_hvm_callback_vector xen_evtchn_do_upcall
+@@ -1404,16 +1777,31 @@ ENTRY(paranoid_exit)
+ TRACE_IRQS_OFF
+ testl %ebx,%ebx /* swapgs needed? */
+ jnz paranoid_restore
+- testl $3,CS(%rsp)
++ testb $3,CS(%rsp)
+ jnz paranoid_userspace
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel
++ TRACE_IRQS_IRETQ 0
++ SWAPGS_UNSAFE_STACK
++ RESTORE_ALL 8
++ pax_force_retaddr_bts
++ jmp irq_return
++#endif
+ paranoid_swapgs:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel_user
++#else
++ pax_exit_kernel
++#endif
+ TRACE_IRQS_IRETQ 0
+ SWAPGS_UNSAFE_STACK
+ RESTORE_ALL 8
+ jmp irq_return
+ paranoid_restore:
++ pax_exit_kernel
+ TRACE_IRQS_IRETQ 0
+ RESTORE_ALL 8
++ pax_force_retaddr_bts
+ jmp irq_return
+ paranoid_userspace:
+ GET_THREAD_INFO(%rcx)
+@@ -1442,7 +1830,7 @@ paranoid_schedule:
+ TRACE_IRQS_OFF
+ jmp paranoid_userspace
+ CFI_ENDPROC
+-END(paranoid_exit)
++ENDPROC(paranoid_exit)
+
+ /*
+ * Exception entry point. This expects an error code/orig_rax on the stack.
+@@ -1469,12 +1857,13 @@ ENTRY(error_entry)
+ movq_cfi r14, R14+8
+ movq_cfi r15, R15+8
+ xorl %ebx,%ebx
+- testl $3,CS+8(%rsp)
++ testb $3,CS+8(%rsp)
+ je error_kernelspace
+ error_swapgs:
+ SWAPGS
+ error_sti:
+ TRACE_IRQS_OFF
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -1501,7 +1890,7 @@ bstep_iret:
+ movq %rcx,RIP+8(%rsp)
+ jmp error_swapgs
+ CFI_ENDPROC
+-END(error_entry)
++ENDPROC(error_entry)
+
+
+ /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
+@@ -1521,7 +1910,7 @@ ENTRY(error_exit)
+ jnz retint_careful
+ jmp retint_swapgs
+ CFI_ENDPROC
+-END(error_exit)
++ENDPROC(error_exit)
+
+ /*
+ * Test if a given stack is an NMI stack or not.
+@@ -1579,9 +1968,11 @@ ENTRY(nmi)
+ * If %cs was not the kernel segment, then the NMI triggered in user
+ * space, which means it is definitely not nested.
+ */
++ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
++ je 1f
+ cmpl $__KERNEL_CS, 16(%rsp)
+ jne first_nmi
+-
++1:
+ /*
+ * Check the special variable on the stack to see if NMIs are
+ * executing.
+@@ -1728,6 +2119,16 @@ end_repeat_nmi:
+ */
+ call save_paranoid
+ DEFAULT_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
+ movq %rsp,%rdi
+ movq $-1,%rsi
+@@ -1735,21 +2136,32 @@ end_repeat_nmi:
+ testl %ebx,%ebx /* swapgs needed? */
+ jnz nmi_restore
+ nmi_swapgs:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel_user
++#else
++ pax_exit_kernel
++#endif
+ SWAPGS_UNSAFE_STACK
++ RESTORE_ALL 8
++ /* Clear the NMI executing stack variable */
++ movq $0, 10*8(%rsp)
++ jmp irq_return
+ nmi_restore:
++ pax_exit_kernel
+ RESTORE_ALL 8
++ pax_force_retaddr_bts
+ /* Clear the NMI executing stack variable */
+ movq $0, 10*8(%rsp)
+ jmp irq_return
+ CFI_ENDPROC
+-END(nmi)
++ENDPROC(nmi)
+
+ ENTRY(ignore_sysret)
+ CFI_STARTPROC
+ mov $-ENOSYS,%eax
+ sysret
+ CFI_ENDPROC
+-END(ignore_sysret)
++ENDPROC(ignore_sysret)
+
+ /*
+ * End of kprobes section
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/ftrace.c linux-3.4-pax/arch/x86/kernel/ftrace.c
+--- linux-3.4/arch/x86/kernel/ftrace.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/kernel/ftrace.c 2012-05-21 12:10:09.528048889 +0200
+@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
+ static const void *mod_code_newcode; /* holds the text to write to the IP */
+
+ static unsigned nmi_wait_count;
+-static atomic_t nmi_update_count = ATOMIC_INIT(0);
++static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
+
+ int ftrace_arch_read_dyn_info(char *buf, int size)
+ {
+@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
+
+ r = snprintf(buf, size, "%u %u",
+ nmi_wait_count,
+- atomic_read(&nmi_update_count));
++ atomic_read_unchecked(&nmi_update_count));
+ return r;
+ }
+
+@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
+
+ if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
+ smp_rmb();
++ pax_open_kernel();
+ ftrace_mod_code();
+- atomic_inc(&nmi_update_count);
++ pax_close_kernel();
++ atomic_inc_unchecked(&nmi_update_count);
+ }
+ /* Must have previous changes seen before executions */
+ smp_mb();
+@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
+ {
+ unsigned char replaced[MCOUNT_INSN_SIZE];
+
++ ip = ktla_ktva(ip);
++
+ /*
+ * Note: Due to modules and __init, code can
+ * disappear and change, we need to protect against faulting
+@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
+ unsigned char old[MCOUNT_INSN_SIZE], *new;
+ int ret;
+
+- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
++ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
+ new = ftrace_call_replace(ip, (unsigned long)func);
+ ret = ftrace_modify_code(ip, old, new);
+
+@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
+ {
+ unsigned char code[MCOUNT_INSN_SIZE];
+
++ ip = ktla_ktva(ip);
++
+ if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/head32.c linux-3.4-pax/arch/x86/kernel/head32.c
+--- linux-3.4/arch/x86/kernel/head32.c 2012-03-19 10:38:56.544049999 +0100
++++ linux-3.4-pax/arch/x86/kernel/head32.c 2012-05-21 12:10:09.528048889 +0200
+@@ -19,6 +19,7 @@
+ #include <asm/io_apic.h>
+ #include <asm/bios_ebda.h>
+ #include <asm/tlbflush.h>
++#include <asm/boot.h>
+
+ static void __init i386_default_early_setup(void)
+ {
+@@ -31,8 +32,7 @@ static void __init i386_default_early_se
+
+ void __init i386_start_kernel(void)
+ {
+- memblock_reserve(__pa_symbol(&_text),
+- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
++ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+ /* Reserve INITRD */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/head_32.S linux-3.4-pax/arch/x86/kernel/head_32.S
+--- linux-3.4/arch/x86/kernel/head_32.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/kernel/head_32.S 2012-05-21 12:10:09.532048889 +0200
+@@ -25,6 +25,12 @@
+ /* Physical address */
+ #define pa(X) ((X) - __PAGE_OFFSET)
+
++#ifdef CONFIG_PAX_KERNEXEC
++#define ta(X) (X)
++#else
++#define ta(X) ((X) - __PAGE_OFFSET)
++#endif
++
+ /*
+ * References to members of the new_cpu_data structure.
+ */
+@@ -54,11 +60,7 @@
+ * and small than max_low_pfn, otherwise will waste some page table entries
+ */
+
+-#if PTRS_PER_PMD > 1
+-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
+-#else
+-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
+-#endif
++#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
+
+ /* Number of possible pages in the lowmem region */
+ LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
+@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
+ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+
+ /*
++ * Real beginning of normal "text" segment
++ */
++ENTRY(stext)
++ENTRY(_stext)
++
++/*
+ * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
+ * %esi points to the real-mode code as a 32-bit pointer.
+ * CS and DS must be 4 GB flat segments, but we don't depend on
+@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+ * can.
+ */
+ __HEAD
++
++#ifdef CONFIG_PAX_KERNEXEC
++ jmp startup_32
++/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
++.fill PAGE_SIZE-5,1,0xcc
++#endif
++
+ ENTRY(startup_32)
+ movl pa(stack_start),%ecx
+
+@@ -105,6 +120,57 @@ ENTRY(startup_32)
+ 2:
+ leal -__PAGE_OFFSET(%ecx),%esp
+
++#ifdef CONFIG_SMP
++ movl $pa(cpu_gdt_table),%edi
++ movl $__per_cpu_load,%eax
++ movw %ax,__KERNEL_PERCPU + 2(%edi)
++ rorl $16,%eax
++ movb %al,__KERNEL_PERCPU + 4(%edi)
++ movb %ah,__KERNEL_PERCPU + 7(%edi)
++ movl $__per_cpu_end - 1,%eax
++ subl $__per_cpu_start,%eax
++ movw %ax,__KERNEL_PERCPU + 0(%edi)
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ movl $NR_CPUS,%ecx
++ movl $pa(cpu_gdt_table),%edi
++1:
++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
++ addl $PAGE_SIZE_asm,%edi
++ loop 1b
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ movl $pa(boot_gdt),%edi
++ movl $__LOAD_PHYSICAL_ADDR,%eax
++ movw %ax,__BOOT_CS + 2(%edi)
++ rorl $16,%eax
++ movb %al,__BOOT_CS + 4(%edi)
++ movb %ah,__BOOT_CS + 7(%edi)
++ rorl $16,%eax
++
++ ljmp $(__BOOT_CS),$1f
++1:
++
++ movl $NR_CPUS,%ecx
++ movl $pa(cpu_gdt_table),%edi
++ addl $__PAGE_OFFSET,%eax
++1:
++ movw %ax,__KERNEL_CS + 2(%edi)
++ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
++ rorl $16,%eax
++ movb %al,__KERNEL_CS + 4(%edi)
++ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
++ movb %ah,__KERNEL_CS + 7(%edi)
++ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
++ rorl $16,%eax
++ addl $PAGE_SIZE_asm,%edi
++ loop 1b
++#endif
++
+ /*
+ * Clear BSS first so that there are no surprises...
+ */
+@@ -195,8 +261,11 @@ ENTRY(startup_32)
+ movl %eax, pa(max_pfn_mapped)
+
+ /* Do early initialization of the fixmap area */
+- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
++#ifdef CONFIG_COMPAT_VDSO
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
++#else
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
++#endif
+ #else /* Not PAE */
+
+ page_pde_offset = (__PAGE_OFFSET >> 20);
+@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
+ movl %eax, pa(max_pfn_mapped)
+
+ /* Do early initialization of the fixmap area */
+- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+- movl %eax,pa(initial_page_table+0xffc)
++#ifdef CONFIG_COMPAT_VDSO
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
++#else
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
++#endif
+ #endif
+
+ #ifdef CONFIG_PARAVIRT
+@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
+ cmpl $num_subarch_entries, %eax
+ jae bad_subarch
+
+- movl pa(subarch_entries)(,%eax,4), %eax
+- subl $__PAGE_OFFSET, %eax
+- jmp *%eax
++ jmp *pa(subarch_entries)(,%eax,4)
+
+ bad_subarch:
+ WEAK(lguest_entry)
+@@ -255,10 +325,10 @@ WEAK(xen_entry)
+ __INITDATA
+
+ subarch_entries:
+- .long default_entry /* normal x86/PC */
+- .long lguest_entry /* lguest hypervisor */
+- .long xen_entry /* Xen hypervisor */
+- .long default_entry /* Moorestown MID */
++ .long ta(default_entry) /* normal x86/PC */
++ .long ta(lguest_entry) /* lguest hypervisor */
++ .long ta(xen_entry) /* Xen hypervisor */
++ .long ta(default_entry) /* Moorestown MID */
+ num_subarch_entries = (. - subarch_entries) / 4
+ .previous
+ #else
+@@ -312,6 +382,7 @@ default_entry:
+ orl %edx,%eax
+ movl %eax,%cr4
+
++#ifdef CONFIG_X86_PAE
+ testb $X86_CR4_PAE, %al # check if PAE is enabled
+ jz 6f
+
+@@ -340,6 +411,9 @@ default_entry:
+ /* Make changes effective */
+ wrmsr
+
++ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
++#endif
++
+ 6:
+
+ /*
+@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
+ 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
+ movl %eax,%ss # after changing gdt.
+
+- movl $(__USER_DS),%eax # DS/ES contains default USER segment
++# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
+ movl %eax,%ds
+ movl %eax,%es
+
+@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
+ */
+ cmpb $0,ready
+ jne 1f
+- movl $gdt_page,%eax
++ movl $cpu_gdt_table,%eax
+ movl $stack_canary,%ecx
++#ifdef CONFIG_SMP
++ addl $__per_cpu_load,%ecx
++#endif
+ movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
+ shrl $16, %ecx
+ movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
+ movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
+ 1:
+-#endif
+ movl $(__KERNEL_STACK_CANARY),%eax
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++ movl $(__USER_DS),%eax
++#else
++ xorl %eax,%eax
++#endif
+ movl %eax,%gs
+
+ xorl %eax,%eax # Clear LDT
+@@ -558,22 +639,22 @@ early_page_fault:
+ jmp early_fault
+
+ early_fault:
+- cld
+ #ifdef CONFIG_PRINTK
++ cmpl $1,%ss:early_recursion_flag
++ je hlt_loop
++ incl %ss:early_recursion_flag
++ cld
+ pusha
+ movl $(__KERNEL_DS),%eax
+ movl %eax,%ds
+ movl %eax,%es
+- cmpl $2,early_recursion_flag
+- je hlt_loop
+- incl early_recursion_flag
+ movl %cr2,%eax
+ pushl %eax
+ pushl %edx /* trapno */
+ pushl $fault_msg
+ call printk
++; call dump_stack
+ #endif
+- call dump_stack
+ hlt_loop:
+ hlt
+ jmp hlt_loop
+@@ -581,8 +662,11 @@ hlt_loop:
+ /* This is the default interrupt "handler" :-) */
+ ALIGN
+ ignore_int:
+- cld
+ #ifdef CONFIG_PRINTK
++ cmpl $2,%ss:early_recursion_flag
++ je hlt_loop
++ incl %ss:early_recursion_flag
++ cld
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+@@ -591,9 +675,6 @@ ignore_int:
+ movl $(__KERNEL_DS),%eax
+ movl %eax,%ds
+ movl %eax,%es
+- cmpl $2,early_recursion_flag
+- je hlt_loop
+- incl early_recursion_flag
+ pushl 16(%esp)
+ pushl 24(%esp)
+ pushl 32(%esp)
+@@ -622,29 +703,43 @@ ENTRY(initial_code)
+ /*
+ * BSS section
+ */
+-__PAGE_ALIGNED_BSS
+- .align PAGE_SIZE
+ #ifdef CONFIG_X86_PAE
++.section .initial_pg_pmd,"a",@progbits
+ initial_pg_pmd:
+ .fill 1024*KPMDS,4,0
+ #else
++.section .initial_page_table,"a",@progbits
+ ENTRY(initial_page_table)
+ .fill 1024,4,0
+ #endif
++.section .initial_pg_fixmap,"a",@progbits
+ initial_pg_fixmap:
+ .fill 1024,4,0
++.section .empty_zero_page,"a",@progbits
+ ENTRY(empty_zero_page)
+ .fill 4096,1,0
++.section .swapper_pg_dir,"a",@progbits
+ ENTRY(swapper_pg_dir)
++#ifdef CONFIG_X86_PAE
++ .fill 4,8,0
++#else
+ .fill 1024,4,0
++#endif
++
++/*
++ * The IDT has to be page-aligned to simplify the Pentium
++ * F0 0F bug workaround.. We have a special link segment
++ * for this.
++ */
++.section .idt,"a",@progbits
++ENTRY(idt_table)
++ .fill 256,8,0
+
+ /*
+ * This starts the data section.
+ */
+ #ifdef CONFIG_X86_PAE
+-__PAGE_ALIGNED_DATA
+- /* Page-aligned for the benefit of paravirt? */
+- .align PAGE_SIZE
++.section .initial_page_table,"a",@progbits
+ ENTRY(initial_page_table)
+ .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
+ # if KPMDS == 3
+@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
+ # error "Kernel PMDs should be 1, 2 or 3"
+ # endif
+ .align PAGE_SIZE /* needs to be page-sized too */
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ENTRY(cpu_pgd)
++ .rept NR_CPUS
++ .fill 4,8,0
++ .endr
++#endif
++
+ #endif
+
+ .data
+ .balign 4
+ ENTRY(stack_start)
+- .long init_thread_union+THREAD_SIZE
++ .long init_thread_union+THREAD_SIZE-8
++
++ready: .byte 0
+
++.section .rodata,"a",@progbits
+ early_recursion_flag:
+ .long 0
+
+-ready: .byte 0
+-
+ int_msg:
+ .asciz "Unknown interrupt or fault at: %p %p %p\n"
+
+@@ -707,7 +811,7 @@ fault_msg:
+ .word 0 # 32 bit align gdt_desc.address
+ boot_gdt_descr:
+ .word __BOOT_DS+7
+- .long boot_gdt - __PAGE_OFFSET
++ .long pa(boot_gdt)
+
+ .word 0 # 32-bit align idt_desc.address
+ idt_descr:
+@@ -718,7 +822,7 @@ idt_descr:
+ .word 0 # 32 bit align gdt_desc.address
+ ENTRY(early_gdt_descr)
+ .word GDT_ENTRIES*8-1
+- .long gdt_page /* Overwritten for secondary CPUs */
++ .long cpu_gdt_table /* Overwritten for secondary CPUs */
+
+ /*
+ * The boot_gdt must mirror the equivalent in setup.S and is
+@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
+ .align L1_CACHE_BYTES
+ ENTRY(boot_gdt)
+ .fill GDT_ENTRY_BOOT_CS,8,0
+- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
+- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
++ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
++ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
++
++ .align PAGE_SIZE_asm
++ENTRY(cpu_gdt_table)
++ .rept NR_CPUS
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x0000000000000000 /* 0x0b reserved */
++ .quad 0x0000000000000000 /* 0x13 reserved */
++ .quad 0x0000000000000000 /* 0x1b reserved */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
++#else
++ .quad 0x0000000000000000 /* 0x20 unused */
++#endif
++
++ .quad 0x0000000000000000 /* 0x28 unused */
++ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
++ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
++ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
++ .quad 0x0000000000000000 /* 0x4b reserved */
++ .quad 0x0000000000000000 /* 0x53 reserved */
++ .quad 0x0000000000000000 /* 0x5b reserved */
++
++ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
++ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
++ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
++ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
++
++ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
++ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
++
++ /*
++ * Segments used for calling PnP BIOS have byte granularity.
++ * The code segments and data segments have fixed 64k limits,
++ * the transfer segment sizes are set at run time.
++ */
++ .quad 0x00409b000000ffff /* 0x90 32-bit code */
++ .quad 0x00009b000000ffff /* 0x98 16-bit code */
++ .quad 0x000093000000ffff /* 0xa0 16-bit data */
++ .quad 0x0000930000000000 /* 0xa8 16-bit data */
++ .quad 0x0000930000000000 /* 0xb0 16-bit data */
++
++ /*
++ * The APM segments have byte granularity and their bases
++ * are set at run time. All have 64k limits.
++ */
++ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
++ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
++ .quad 0x004093000000ffff /* 0xc8 APM DS data */
++
++ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
++ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
++ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
++ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
++ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
++ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
++
++ /* Be sure this is zeroed to avoid false validations in Xen */
++ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
++ .endr
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/head_64.S linux-3.4-pax/arch/x86/kernel/head_64.S
+--- linux-3.4/arch/x86/kernel/head_64.S 2012-03-19 10:38:56.544049999 +0100
++++ linux-3.4-pax/arch/x86/kernel/head_64.S 2012-05-21 12:10:09.536048889 +0200
+@@ -19,6 +19,8 @@
+ #include <asm/cache.h>
+ #include <asm/processor-flags.h>
+ #include <asm/percpu.h>
++#include <asm/cpufeature.h>
++#include <asm/alternative-asm.h>
+
+ #ifdef CONFIG_PARAVIRT
+ #include <asm/asm-offsets.h>
+@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
+ L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
+ L4_START_KERNEL = pgd_index(__START_KERNEL_map)
+ L3_START_KERNEL = pud_index(__START_KERNEL_map)
++L4_VMALLOC_START = pgd_index(VMALLOC_START)
++L3_VMALLOC_START = pud_index(VMALLOC_START)
++L4_VMALLOC_END = pgd_index(VMALLOC_END)
++L3_VMALLOC_END = pud_index(VMALLOC_END)
++L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
++L3_VMEMMAP_START = pud_index(VMEMMAP_START)
+
+ .text
+ __HEAD
+@@ -85,35 +93,23 @@ startup_64:
+ */
+ addq %rbp, init_level4_pgt + 0(%rip)
+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
++ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
++ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
++ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
+
+ addq %rbp, level3_ident_pgt + 0(%rip)
++#ifndef CONFIG_XEN
++ addq %rbp, level3_ident_pgt + 8(%rip)
++#endif
+
+- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
+- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
++ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
+
+- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
+
+- /* Add an Identity mapping if I am above 1G */
+- leaq _text(%rip), %rdi
+- andq $PMD_PAGE_MASK, %rdi
+-
+- movq %rdi, %rax
+- shrq $PUD_SHIFT, %rax
+- andq $(PTRS_PER_PUD - 1), %rax
+- jz ident_complete
+-
+- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
+- leaq level3_ident_pgt(%rip), %rbx
+- movq %rdx, 0(%rbx, %rax, 8)
+-
+- movq %rdi, %rax
+- shrq $PMD_SHIFT, %rax
+- andq $(PTRS_PER_PMD - 1), %rax
+- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
+- leaq level2_spare_pgt(%rip), %rbx
+- movq %rdx, 0(%rbx, %rax, 8)
+-ident_complete:
++ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
++ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
+
+ /*
+ * Fixup the kernel text+data virtual addresses. Note that
+@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
+ * after the boot processor executes this code.
+ */
+
+- /* Enable PAE mode and PGE */
+- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
++ /* Enable PAE mode and PSE/PGE */
++ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
+ movq %rax, %cr4
+
+ /* Setup early boot stage 4 level pagetables. */
+@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_SCE, %eax /* Enable System Call */
+- btl $20,%edi /* No Execute supported? */
++ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
+ jnc 1f
+ btsl $_EFER_NX, %eax
++ leaq init_level4_pgt(%rip), %rdi
++#ifndef CONFIG_EFI
++ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
++#endif
++ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
++ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
++ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
++ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
+ 1: wrmsr /* Make changes effective */
+
+ /* Setup cr0 */
+@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
+ * jump. In addition we need to ensure %cs is set so we make this
+ * a far return.
+ */
++ pax_set_fptr_mask
+ movq initial_code(%rip),%rax
+ pushq $0 # fake return address to stop unwinder
+ pushq $__KERNEL_CS # set correct cs
+@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
+ bad_address:
+ jmp bad_address
+
+- .section ".init.text","ax"
++ __INIT
+ #ifdef CONFIG_EARLY_PRINTK
+ .globl early_idt_handlers
+ early_idt_handlers:
+@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
+ #endif /* EARLY_PRINTK */
+ 1: hlt
+ jmp 1b
++ .previous
+
+ #ifdef CONFIG_EARLY_PRINTK
++ __INITDATA
+ early_recursion_flag:
+ .long 0
++ .previous
+
++ .section .rodata,"a",@progbits
+ early_idt_msg:
+ .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
+ early_idt_ripmsg:
+ .asciz "RIP %s\n"
+-#endif /* CONFIG_EARLY_PRINTK */
+ .previous
++#endif /* CONFIG_EARLY_PRINTK */
+
++ .section .rodata,"a",@progbits
+ #define NEXT_PAGE(name) \
+ .balign PAGE_SIZE; \
+ ENTRY(name)
+@@ -338,7 +348,6 @@ ENTRY(name)
+ i = i + 1 ; \
+ .endr
+
+- .data
+ /*
+ * This default setting generates an ident mapping at address 0x100000
+ * and a mapping for the kernel that precisely maps virtual address
+@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
+ .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
+ .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
++ .org init_level4_pgt + L4_VMALLOC_START*8, 0
++ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
++ .org init_level4_pgt + L4_VMALLOC_END*8, 0
++ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
++ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
++ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .org init_level4_pgt + L4_START_KERNEL*8, 0
+ /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
+ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++NEXT_PAGE(cpu_pgd)
++ .rept NR_CPUS
++ .fill 512,8,0
++ .endr
++#endif
++
+ NEXT_PAGE(level3_ident_pgt)
+ .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
++#ifdef CONFIG_XEN
+ .fill 511,8,0
++#else
++ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
++ .fill 510,8,0
++#endif
++
++NEXT_PAGE(level3_vmalloc_start_pgt)
++ .fill 512,8,0
++
++NEXT_PAGE(level3_vmalloc_end_pgt)
++ .fill 512,8,0
++
++NEXT_PAGE(level3_vmemmap_pgt)
++ .fill L3_VMEMMAP_START,8,0
++ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
+
+ NEXT_PAGE(level3_kernel_pgt)
+ .fill L3_START_KERNEL,8,0
+@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
+ .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+
++NEXT_PAGE(level2_vmemmap_pgt)
++ .fill 512,8,0
++
+ NEXT_PAGE(level2_fixmap_pgt)
+- .fill 506,8,0
+- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
+- .fill 5,8,0
++ .fill 507,8,0
++ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
++ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
++ .fill 4,8,0
+
+-NEXT_PAGE(level1_fixmap_pgt)
++NEXT_PAGE(level1_vsyscall_pgt)
+ .fill 512,8,0
+
+-NEXT_PAGE(level2_ident_pgt)
+- /* Since I easily can, map the first 1G.
++ /* Since I easily can, map the first 2G.
+ * Don't set NX because code runs from these pages.
+ */
+- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
++NEXT_PAGE(level2_ident_pgt)
++ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
+
+ NEXT_PAGE(level2_kernel_pgt)
+ /*
+@@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
+ * If you want to increase this then increase MODULES_VADDR
+ * too.)
+ */
+- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
+- KERNEL_IMAGE_SIZE/PMD_SIZE)
+-
+-NEXT_PAGE(level2_spare_pgt)
+- .fill 512, 8, 0
++ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
+
+ #undef PMDS
+ #undef NEXT_PAGE
+
+- .data
++ .align PAGE_SIZE
++ENTRY(cpu_gdt_table)
++ .rept NR_CPUS
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
++ .quad 0x00af9b000000ffff /* __KERNEL_CS */
++ .quad 0x00cf93000000ffff /* __KERNEL_DS */
++ .quad 0x00cffb000000ffff /* __USER32_CS */
++ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
++ .quad 0x00affb000000ffff /* __USER_CS */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
++#else
++ .quad 0x0 /* unused */
++#endif
++
++ .quad 0,0 /* TSS */
++ .quad 0,0 /* LDT */
++ .quad 0,0,0 /* three TLS descriptors */
++ .quad 0x0000f40000000000 /* node/CPU stored in limit */
++ /* asm/segment.h:GDT_ENTRIES must match this */
++
++ /* zero the remaining page */
++ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
++ .endr
++
+ .align 16
+ .globl early_gdt_descr
+ early_gdt_descr:
+ .word GDT_ENTRIES*8-1
+ early_gdt_descr_base:
+- .quad INIT_PER_CPU_VAR(gdt_page)
++ .quad cpu_gdt_table
+
+ ENTRY(phys_base)
+ /* This must match the first entry in level2_kernel_pgt */
+ .quad 0x0000000000000000
+
+ #include "../../x86/xen/xen-head.S"
+-
+- .section .bss, "aw", @nobits
++
++ .section .rodata,"a",@progbits
+ .align L1_CACHE_BYTES
+ ENTRY(idt_table)
+- .skip IDT_ENTRIES * 16
++ .fill 512,8,0
+
+ .align L1_CACHE_BYTES
+ ENTRY(nmi_idt_table)
+- .skip IDT_ENTRIES * 16
++ .fill 512,8,0
+
+ __PAGE_ALIGNED_BSS
+ .align PAGE_SIZE
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/i386_ksyms_32.c linux-3.4-pax/arch/x86/kernel/i386_ksyms_32.c
+--- linux-3.4/arch/x86/kernel/i386_ksyms_32.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/kernel/i386_ksyms_32.c 2012-05-21 12:10:09.536048889 +0200
+@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
+ EXPORT_SYMBOL(cmpxchg8b_emu);
+ #endif
+
++EXPORT_SYMBOL_GPL(cpu_gdt_table);
++
+ /* Networking helper routines. */
+ EXPORT_SYMBOL(csum_partial_copy_generic);
++EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
++EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
+
+ EXPORT_SYMBOL(__get_user_1);
+ EXPORT_SYMBOL(__get_user_2);
+@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
+
+ EXPORT_SYMBOL(csum_partial);
+ EXPORT_SYMBOL(empty_zero_page);
++
++#ifdef CONFIG_PAX_KERNEXEC
++EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
++#endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/i387.c linux-3.4-pax/arch/x86/kernel/i387.c
+--- linux-3.4/arch/x86/kernel/i387.c 2012-05-21 11:32:57.483927660 +0200
++++ linux-3.4-pax/arch/x86/kernel/i387.c 2012-05-22 15:28:30.007384690 +0200
+@@ -59,7 +59,7 @@ static inline bool interrupted_kernel_fp
+ static inline bool interrupted_user_mode(void)
+ {
+ struct pt_regs *regs = get_irq_regs();
+- return regs && user_mode_vm(regs);
++ return regs && user_mode(regs);
+ }
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/i8259.c linux-3.4-pax/arch/x86/kernel/i8259.c
+--- linux-3.4/arch/x86/kernel/i8259.c 2012-05-21 11:32:57.487927661 +0200
++++ linux-3.4-pax/arch/x86/kernel/i8259.c 2012-05-21 12:10:09.544048889 +0200
+@@ -209,7 +209,7 @@ spurious_8259A_irq:
+ "spurious 8259A interrupt: IRQ%d.\n", irq);
+ spurious_irq_mask |= irqmask;
+ }
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ /*
+ * Theoretically we do not have to handle this IRQ,
+ * but in Linux this does not cause problems and is
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/init_task.c linux-3.4-pax/arch/x86/kernel/init_task.c
+--- linux-3.4/arch/x86/kernel/init_task.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/kernel/init_task.c 2012-05-21 12:10:09.544048889 +0200
+@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+-union thread_union init_thread_union __init_task_data =
+- { INIT_THREAD_INFO(init_task) };
++union thread_union init_thread_union __init_task_data;
+
+ /*
+ * Initial task structure.
+@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
+-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
+-
++struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
++EXPORT_SYMBOL(init_tss);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/ioport.c linux-3.4-pax/arch/x86/kernel/ioport.c
+--- linux-3.4/arch/x86/kernel/ioport.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/kernel/ioport.c 2012-05-21 12:10:09.548048890 +0200
+@@ -54,7 +54,7 @@ asmlinkage long sys_ioperm(unsigned long
+ * because the ->io_bitmap_max value must match the bitmap
+ * contents:
+ */
+- tss = &per_cpu(init_tss, get_cpu());
++ tss = init_tss + get_cpu();
+
+ if (turn_on)
+ bitmap_clear(t->io_bitmap_ptr, from, num);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/irq_32.c linux-3.4-pax/arch/x86/kernel/irq_32.c
+--- linux-3.4/arch/x86/kernel/irq_32.c 2012-05-21 11:32:57.495927660 +0200
++++ linux-3.4-pax/arch/x86/kernel/irq_32.c 2012-05-21 12:10:09.548048890 +0200
+@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
+ __asm__ __volatile__("andl %%esp,%0" :
+ "=r" (sp) : "0" (THREAD_SIZE - 1));
+
+- return sp < (sizeof(struct thread_info) + STACK_WARN);
++ return sp < STACK_WARN;
+ }
+
+ static void print_stack_overflow(void)
+@@ -59,8 +59,8 @@ static inline void print_stack_overflow(
+ * per-CPU IRQ handling contexts (thread information and stack)
+ */
+ union irq_ctx {
+- struct thread_info tinfo;
+- u32 stack[THREAD_SIZE/sizeof(u32)];
++ unsigned long previous_esp;
++ u32 stack[THREAD_SIZE/sizeof(u32)];
+ } __attribute__((aligned(THREAD_SIZE)));
+
+ static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
+@@ -80,10 +80,9 @@ static void call_on_stack(void *func, vo
+ static inline int
+ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+ {
+- union irq_ctx *curctx, *irqctx;
++ union irq_ctx *irqctx;
+ u32 *isp, arg1, arg2;
+
+- curctx = (union irq_ctx *) current_thread_info();
+ irqctx = __this_cpu_read(hardirq_ctx);
+
+ /*
+@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struc
+ * handler) we can't do that and just have to keep using the
+ * current stack (which is the irq stack already after all)
+ */
+- if (unlikely(curctx == irqctx))
++ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
+ return 0;
+
+ /* build the stack frame on the IRQ stack */
+- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
+- irqctx->tinfo.task = curctx->tinfo.task;
+- irqctx->tinfo.previous_esp = current_stack_pointer;
++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
++ irqctx->previous_esp = current_stack_pointer;
+
+- /* Copy the preempt_count so that the [soft]irq checks work. */
+- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(MAKE_MM_SEG(0));
++#endif
+
+ if (unlikely(overflow))
+ call_on_stack(print_stack_overflow, isp);
+@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struc
+ : "0" (irq), "1" (desc), "2" (isp),
+ "D" (desc->handle_irq)
+ : "memory", "cc", "ecx");
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ return 1;
+ }
+
+@@ -121,29 +125,11 @@ execute_on_irq_stack(int overflow, struc
+ */
+ void __cpuinit irq_ctx_init(int cpu)
+ {
+- union irq_ctx *irqctx;
+-
+ if (per_cpu(hardirq_ctx, cpu))
+ return;
+
+- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+- THREAD_FLAGS,
+- THREAD_ORDER));
+- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
+- irqctx->tinfo.cpu = cpu;
+- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
+- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+-
+- per_cpu(hardirq_ctx, cpu) = irqctx;
+-
+- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+- THREAD_FLAGS,
+- THREAD_ORDER));
+- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
+- irqctx->tinfo.cpu = cpu;
+- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+-
+- per_cpu(softirq_ctx, cpu) = irqctx;
++ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
++ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
+
+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
+@@ -152,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
+ asmlinkage void do_softirq(void)
+ {
+ unsigned long flags;
+- struct thread_info *curctx;
+ union irq_ctx *irqctx;
+ u32 *isp;
+
+@@ -162,15 +147,22 @@ asmlinkage void do_softirq(void)
+ local_irq_save(flags);
+
+ if (local_softirq_pending()) {
+- curctx = current_thread_info();
+ irqctx = __this_cpu_read(softirq_ctx);
+- irqctx->tinfo.task = curctx->task;
+- irqctx->tinfo.previous_esp = current_stack_pointer;
++ irqctx->previous_esp = current_stack_pointer;
+
+ /* build the stack frame on the softirq stack */
+- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(MAKE_MM_SEG(0));
++#endif
+
+ call_on_stack(__do_softirq, isp);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ /*
+ * Shouldn't happen, we returned above if in_interrupt():
+ */
+@@ -191,7 +183,7 @@ bool handle_irq(unsigned irq, struct pt_
+ if (unlikely(!desc))
+ return false;
+
+- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
++ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
+ if (unlikely(overflow))
+ print_stack_overflow();
+ desc->handle_irq(irq, desc);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/irq_64.c linux-3.4-pax/arch/x86/kernel/irq_64.c
+--- linux-3.4/arch/x86/kernel/irq_64.c 2012-03-19 10:38:56.548049999 +0100
++++ linux-3.4-pax/arch/x86/kernel/irq_64.c 2012-05-21 12:10:09.552048890 +0200
+@@ -44,7 +44,7 @@ static inline void stack_overflow_check(
+ u64 estack_top, estack_bottom;
+ u64 curbase = (u64)task_stack_page(current);
+
+- if (user_mode_vm(regs))
++ if (user_mode(regs))
+ return;
+
+ if (regs->sp >= curbase + sizeof(struct thread_info) +
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/irq.c linux-3.4-pax/arch/x86/kernel/irq.c
+--- linux-3.4/arch/x86/kernel/irq.c 2012-05-21 11:32:57.491927661 +0200
++++ linux-3.4-pax/arch/x86/kernel/irq.c 2012-05-21 12:10:09.552048890 +0200
+@@ -18,7 +18,7 @@
+ #include <asm/mce.h>
+ #include <asm/hw_irq.h>
+
+-atomic_t irq_err_count;
++atomic_unchecked_t irq_err_count;
+
+ /* Function pointer for generic interrupt vector handling */
+ void (*x86_platform_ipi_callback)(void) = NULL;
+@@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file
+ seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
+ seq_printf(p, " Machine check polls\n");
+ #endif
+- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
++ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
+ #if defined(CONFIG_X86_IO_APIC)
+- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
++ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
+ #endif
+ return 0;
+ }
+@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
+
+ u64 arch_irq_stat(void)
+ {
+- u64 sum = atomic_read(&irq_err_count);
++ u64 sum = atomic_read_unchecked(&irq_err_count);
+
+ #ifdef CONFIG_X86_IO_APIC
+- sum += atomic_read(&irq_mis_count);
++ sum += atomic_read_unchecked(&irq_mis_count);
+ #endif
+ return sum;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/kdebugfs.c linux-3.4-pax/arch/x86/kernel/kdebugfs.c
+--- linux-3.4/arch/x86/kernel/kdebugfs.c 2012-05-21 11:32:57.507927663 +0200
++++ linux-3.4-pax/arch/x86/kernel/kdebugfs.c 2012-05-21 12:10:09.556048890 +0200
+@@ -28,6 +28,8 @@ struct setup_data_node {
+ };
+
+ static ssize_t setup_data_read(struct file *file, char __user *user_buf,
++ size_t count, loff_t *ppos) __size_overflow(3);
++static ssize_t setup_data_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+ {
+ struct setup_data_node *node = file->private_data;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/kgdb.c linux-3.4-pax/arch/x86/kernel/kgdb.c
+--- linux-3.4/arch/x86/kernel/kgdb.c 2012-05-21 11:32:57.511927663 +0200
++++ linux-3.4-pax/arch/x86/kernel/kgdb.c 2012-05-21 12:10:09.560048890 +0200
+@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem,
+ #ifdef CONFIG_X86_32
+ switch (regno) {
+ case GDB_SS:
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ *(unsigned long *)mem = __KERNEL_DS;
+ break;
+ case GDB_SP:
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ *(unsigned long *)mem = kernel_stack_pointer(regs);
+ break;
+ case GDB_GS:
+@@ -476,12 +476,12 @@ int kgdb_arch_handle_exception(int e_vec
+ case 'k':
+ /* clear the trace bit */
+ linux_regs->flags &= ~X86_EFLAGS_TF;
+- atomic_set(&kgdb_cpu_doing_single_step, -1);
++ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
+
+ /* set the trace bit if we're stepping */
+ if (remcomInBuffer[0] == 's') {
+ linux_regs->flags |= X86_EFLAGS_TF;
+- atomic_set(&kgdb_cpu_doing_single_step,
++ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
+ raw_smp_processor_id());
+ }
+
+@@ -546,7 +546,7 @@ static int __kgdb_notify(struct die_args
+
+ switch (cmd) {
+ case DIE_DEBUG:
+- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
+ if (user_mode(regs))
+ return single_step_cont(regs, args);
+ break;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/kprobes.c linux-3.4-pax/arch/x86/kernel/kprobes.c
+--- linux-3.4/arch/x86/kernel/kprobes.c 2012-05-21 11:32:57.527927663 +0200
++++ linux-3.4-pax/arch/x86/kernel/kprobes.c 2012-05-21 12:10:09.560048890 +0200
+@@ -120,8 +120,11 @@ static void __kprobes __synthesize_relat
+ } __attribute__((packed)) *insn;
+
+ insn = (struct __arch_relative_insn *)from;
++
++ pax_open_kernel();
+ insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
+ insn->op = op;
++ pax_close_kernel();
+ }
+
+ /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
+@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t
+ kprobe_opcode_t opcode;
+ kprobe_opcode_t *orig_opcodes = opcodes;
+
+- if (search_exception_tables((unsigned long)opcodes))
++ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
+ return 0; /* Page fault may occur on this address. */
+
+ retry:
+@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *des
+ /* Another subsystem puts a breakpoint, failed to recover */
+ if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+ return 0;
++ pax_open_kernel();
+ memcpy(dest, insn.kaddr, insn.length);
++ pax_close_kernel();
+
+ #ifdef CONFIG_X86_64
+ if (insn_rip_relative(&insn)) {
+@@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *des
+ newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
+ BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
+ disp = (u8 *) dest + insn_offset_displacement(&insn);
++ pax_open_kernel();
+ *(s32 *) disp = (s32) newdisp;
++ pax_close_kernel();
+ }
+ #endif
+ return insn.length;
+@@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struc
+ * nor set current_kprobe, because it doesn't use single
+ * stepping.
+ */
+- regs->ip = (unsigned long)p->ainsn.insn;
++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ preempt_enable_no_resched();
+ return;
+ }
+@@ -504,7 +511,7 @@ setup_singlestep(struct kprobe *p, struc
+ if (p->opcode == BREAKPOINT_INSTRUCTION)
+ regs->ip = (unsigned long)p->addr;
+ else
+- regs->ip = (unsigned long)p->ainsn.insn;
++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ }
+
+ /*
+@@ -583,7 +590,7 @@ static int __kprobes kprobe_handler(stru
+ setup_singlestep(p, regs, kcb, 0);
+ return 1;
+ }
+- } else if (*addr != BREAKPOINT_INSTRUCTION) {
++ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+@@ -628,6 +635,9 @@ static void __used __kprobes kretprobe_t
+ " movq %rax, 152(%rsp)\n"
+ RESTORE_REGS_STRING
+ " popfq\n"
++#ifdef KERNEXEC_PLUGIN
++ " btsq $63,(%rsp)\n"
++#endif
+ #else
+ " pushf\n"
+ SAVE_REGS_STRING
+@@ -765,7 +775,7 @@ static void __kprobes
+ resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+ {
+ unsigned long *tos = stack_addr(regs);
+- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
++ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ unsigned long orig_ip = (unsigned long)p->addr;
+ kprobe_opcode_t *insn = p->ainsn.insn;
+
+@@ -947,7 +957,7 @@ kprobe_exceptions_notify(struct notifier
+ struct die_args *args = data;
+ int ret = NOTIFY_DONE;
+
+- if (args->regs && user_mode_vm(args->regs))
++ if (args->regs && user_mode(args->regs))
+ return ret;
+
+ switch (val) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/kprobes-opt.c linux-3.4-pax/arch/x86/kernel/kprobes-opt.c
+--- linux-3.4/arch/x86/kernel/kprobes-opt.c 2012-05-21 11:32:57.515927663 +0200
++++ linux-3.4-pax/arch/x86/kernel/kprobes-opt.c 2012-05-21 12:10:09.564048891 +0200
+@@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kpr
+ * Verify if the address gap is in 2GB range, because this uses
+ * a relative jump.
+ */
+- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
++ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
+ if (abs(rel) > 0x7fffffff)
+ return -ERANGE;
+
+@@ -359,11 +359,11 @@ int __kprobes arch_prepare_optimized_kpr
+ synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
+
+ /* Set probe function call */
+- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
++ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
+
+ /* Set returning jmp instruction at the tail of out-of-line buffer */
+ synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
+- (u8 *)op->kp.addr + op->optinsn.size);
++ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
+
+ flush_icache_range((unsigned long) buf,
+ (unsigned long) buf + TMPL_END_IDX +
+@@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kpr
+ ((long)op->kp.addr + RELATIVEJUMP_SIZE));
+
+ /* Backup instructions which will be replaced by jump address */
+- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
++ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
+ RELATIVE_ADDR_SIZE);
+
+ insn_buf[0] = RELATIVEJUMP_OPCODE;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/ldt.c linux-3.4-pax/arch/x86/kernel/ldt.c
+--- linux-3.4/arch/x86/kernel/ldt.c 2012-05-21 11:32:57.551927664 +0200
++++ linux-3.4-pax/arch/x86/kernel/ldt.c 2012-05-22 15:28:30.055384687 +0200
+@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
+ if (reload) {
+ #ifdef CONFIG_SMP
+ preempt_disable();
+- load_LDT(pc);
++ load_LDT_nolock(pc);
+ if (!cpumask_equal(mm_cpumask(current->mm),
+ cpumask_of(smp_processor_id())))
+ smp_call_function(flush_ldt, current->mm, 1);
+ preempt_enable();
+ #else
+- load_LDT(pc);
++ load_LDT_nolock(pc);
+ #endif
+ }
+ if (oldsize) {
+@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
+ return err;
+
+ for (i = 0; i < old->size; i++)
+- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
++ write_ldt_entry(new->ldt, i, old->ldt + i);
+ return 0;
+ }
+
+@@ -115,6 +115,24 @@ int init_new_context(struct task_struct
+ retval = copy_ldt(&mm->context, &old_mm->context);
+ mutex_unlock(&old_mm->context.lock);
+ }
++
++ if (tsk == current) {
++ mm->context.vdso = 0;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ mm->context.user_cs_base = 0UL;
++ mm->context.user_cs_limit = ~0UL;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpus_clear(mm->context.cpu_user_cs_mask);
++#endif
++
++#endif
++#endif
++
++ }
++
+ return retval;
+ }
+
+@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
+ }
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
++ error = -EINVAL;
++ goto out_unlock;
++ }
++#endif
++
+ fill_ldt(&ldt, &ldt_info);
+ if (oldmode)
+ ldt.avl = 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/machine_kexec_32.c linux-3.4-pax/arch/x86/kernel/machine_kexec_32.c
+--- linux-3.4/arch/x86/kernel/machine_kexec_32.c 2012-05-21 11:32:57.551927664 +0200
++++ linux-3.4-pax/arch/x86/kernel/machine_kexec_32.c 2012-05-21 12:10:09.568048891 +0200
+@@ -26,7 +26,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/debugreg.h>
+
+-static void set_idt(void *newidt, __u16 limit)
++static void set_idt(struct desc_struct *newidt, __u16 limit)
+ {
+ struct desc_ptr curidt;
+
+@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
+ }
+
+
+-static void set_gdt(void *newgdt, __u16 limit)
++static void set_gdt(struct desc_struct *newgdt, __u16 limit)
+ {
+ struct desc_ptr curgdt;
+
+@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
+ }
+
+ control_page = page_address(image->control_code_page);
+- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
++ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
+
+ relocate_kernel_ptr = control_page;
+ page_list[PA_CONTROL_PAGE] = __pa(control_page);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/microcode_intel.c linux-3.4-pax/arch/x86/kernel/microcode_intel.c
+--- linux-3.4/arch/x86/kernel/microcode_intel.c 2012-05-21 11:32:57.571927665 +0200
++++ linux-3.4-pax/arch/x86/kernel/microcode_intel.c 2012-05-22 15:32:56.323370470 +0200
+@@ -430,13 +430,13 @@ static enum ucode_state request_microcod
+
+ static int get_ucode_user(void *to, const void *from, size_t n)
+ {
+- return copy_from_user(to, from, n);
++ return copy_from_user(to, (const void __force_user *)from, n);
+ }
+
+ static enum ucode_state
+ request_microcode_user(int cpu, const void __user *buf, size_t size)
+ {
+- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
++ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
+ }
+
+ static void microcode_fini_cpu(int cpu)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/module.c linux-3.4-pax/arch/x86/kernel/module.c
+--- linux-3.4/arch/x86/kernel/module.c 2012-05-21 11:32:57.583927666 +0200
++++ linux-3.4-pax/arch/x86/kernel/module.c 2012-05-22 15:33:37.911368250 +0200
+@@ -35,15 +35,60 @@
+ #define DEBUGP(fmt...)
+ #endif
+
+-void *module_alloc(unsigned long size)
++static inline void *__module_alloc(unsigned long size, pgprot_t prot)
+ {
+- if (PAGE_ALIGN(size) > MODULES_LEN)
++ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
+ return NULL;
+ return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
++ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
+ -1, __builtin_return_address(0));
+ }
+
++void *module_alloc(unsigned long size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++ return __module_alloc(size, PAGE_KERNEL);
++#else
++ return __module_alloc(size, PAGE_KERNEL_EXEC);
++#endif
++
++}
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_32
++void *module_alloc_exec(unsigned long size)
++{
++ struct vm_struct *area;
++
++ if (size == 0)
++ return NULL;
++
++ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
++ return area ? area->addr : NULL;
++}
++EXPORT_SYMBOL(module_alloc_exec);
++
++void module_free_exec(struct module *mod, void *module_region)
++{
++ vunmap(module_region);
++}
++EXPORT_SYMBOL(module_free_exec);
++#else
++void module_free_exec(struct module *mod, void *module_region)
++{
++ module_free(mod, module_region);
++}
++EXPORT_SYMBOL(module_free_exec);
++
++void *module_alloc_exec(unsigned long size)
++{
++ return __module_alloc(size, PAGE_KERNEL_RX);
++}
++EXPORT_SYMBOL(module_alloc_exec);
++#endif
++#endif
++
+ #ifdef CONFIG_X86_32
+ int apply_relocate(Elf32_Shdr *sechdrs,
+ const char *strtab,
+@@ -54,14 +99,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
+ unsigned int i;
+ Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+- uint32_t *location;
++ uint32_t *plocation, location;
+
+ DEBUGP("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+- + rel[i].r_offset;
++ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
++ location = (uint32_t)plocation;
++ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
++ plocation = ktla_ktva((void *)plocation);
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+@@ -70,11 +117,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
+ switch (ELF32_R_TYPE(rel[i].r_info)) {
+ case R_386_32:
+ /* We add the value into the location given */
+- *location += sym->st_value;
++ pax_open_kernel();
++ *plocation += sym->st_value;
++ pax_close_kernel();
+ break;
+ case R_386_PC32:
+ /* Add the value, subtract its postition */
+- *location += sym->st_value - (uint32_t)location;
++ pax_open_kernel();
++ *plocation += sym->st_value - location;
++ pax_close_kernel();
+ break;
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+@@ -119,21 +170,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
+ case R_X86_64_NONE:
+ break;
+ case R_X86_64_64:
++ pax_open_kernel();
+ *(u64 *)loc = val;
++ pax_close_kernel();
+ break;
+ case R_X86_64_32:
++ pax_open_kernel();
+ *(u32 *)loc = val;
++ pax_close_kernel();
+ if (val != *(u32 *)loc)
+ goto overflow;
+ break;
+ case R_X86_64_32S:
++ pax_open_kernel();
+ *(s32 *)loc = val;
++ pax_close_kernel();
+ if ((s64)val != *(s32 *)loc)
+ goto overflow;
+ break;
+ case R_X86_64_PC32:
+ val -= (u64)loc;
++ pax_open_kernel();
+ *(u32 *)loc = val;
++ pax_close_kernel();
++
+ #if 0
+ if ((s64)val != *(s32 *)loc)
+ goto overflow;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/nmi.c linux-3.4-pax/arch/x86/kernel/nmi.c
+--- linux-3.4/arch/x86/kernel/nmi.c 2012-03-19 10:38:56.556049998 +0100
++++ linux-3.4-pax/arch/x86/kernel/nmi.c 2012-05-21 12:10:09.580048892 +0200
+@@ -505,6 +505,17 @@ static inline void nmi_nesting_postproce
+ dotraplinkage notrace __kprobes void
+ do_nmi(struct pt_regs *regs, long error_code)
+ {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (!user_mode(regs)) {
++ unsigned long cs = regs->cs & 0xFFFF;
++ unsigned long ip = ktva_ktla(regs->ip);
++
++ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
++ regs->ip = ip;
++ }
++#endif
++
+ nmi_nesting_preprocess(regs);
+
+ nmi_enter();
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/paravirt.c linux-3.4-pax/arch/x86/kernel/paravirt.c
+--- linux-3.4/arch/x86/kernel/paravirt.c 2012-05-21 11:32:57.599927668 +0200
++++ linux-3.4-pax/arch/x86/kernel/paravirt.c 2012-05-21 12:10:09.584048893 +0200
+@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
+ {
+ return x;
+ }
++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
++PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
++#endif
+
+ void __init default_banner(void)
+ {
+@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type,
+ if (opfunc == NULL)
+ /* If there's no function, patch it with a ud2a (BUG) */
+ ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
+- else if (opfunc == _paravirt_nop)
++ else if (opfunc == (void *)_paravirt_nop)
+ /* If the operation is a nop, then nop the callsite */
+ ret = paravirt_patch_nop();
+
+ /* identity functions just return their single argument */
+- else if (opfunc == _paravirt_ident_32)
++ else if (opfunc == (void *)_paravirt_ident_32)
+ ret = paravirt_patch_ident_32(insnbuf, len);
+- else if (opfunc == _paravirt_ident_64)
++ else if (opfunc == (void *)_paravirt_ident_64)
++ ret = paravirt_patch_ident_64(insnbuf, len);
++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
++ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
+ ret = paravirt_patch_ident_64(insnbuf, len);
++#endif
+
+ else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
+ type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
+@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insn
+ if (insn_len > len || start == NULL)
+ insn_len = len;
+ else
+- memcpy(insnbuf, start, insn_len);
++ memcpy(insnbuf, ktla_ktva(start), insn_len);
+
+ return insn_len;
+ }
+@@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
+ preempt_enable();
+ }
+
+-struct pv_info pv_info = {
++struct pv_info pv_info __read_only = {
+ .name = "bare hardware",
+ .paravirt_enabled = 0,
+ .kernel_rpl = 0,
+@@ -315,16 +322,16 @@ struct pv_info pv_info = {
+ #endif
+ };
+
+-struct pv_init_ops pv_init_ops = {
++struct pv_init_ops pv_init_ops __read_only = {
+ .patch = native_patch,
+ };
+
+-struct pv_time_ops pv_time_ops = {
++struct pv_time_ops pv_time_ops __read_only = {
+ .sched_clock = native_sched_clock,
+ .steal_clock = native_steal_clock,
+ };
+
+-struct pv_irq_ops pv_irq_ops = {
++struct pv_irq_ops pv_irq_ops __read_only = {
+ .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
+ .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
+ .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
+@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
+ #endif
+ };
+
+-struct pv_cpu_ops pv_cpu_ops = {
++struct pv_cpu_ops pv_cpu_ops __read_only = {
+ .cpuid = native_cpuid,
+ .get_debugreg = native_get_debugreg,
+ .set_debugreg = native_set_debugreg,
+@@ -397,21 +404,26 @@ struct pv_cpu_ops pv_cpu_ops = {
+ .end_context_switch = paravirt_nop,
+ };
+
+-struct pv_apic_ops pv_apic_ops = {
++struct pv_apic_ops pv_apic_ops __read_only = {
+ #ifdef CONFIG_X86_LOCAL_APIC
+ .startup_ipi_hook = paravirt_nop,
+ #endif
+ };
+
+-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
++#ifdef CONFIG_X86_32
++#ifdef CONFIG_X86_PAE
++/* 64-bit pagetable entries */
++#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
++#else
+ /* 32-bit pagetable entries */
+ #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
++#endif
+ #else
+ /* 64-bit pagetable entries */
+ #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
+ #endif
+
+-struct pv_mmu_ops pv_mmu_ops = {
++struct pv_mmu_ops pv_mmu_ops __read_only = {
+
+ .read_cr2 = native_read_cr2,
+ .write_cr2 = native_write_cr2,
+@@ -461,6 +473,7 @@ struct pv_mmu_ops pv_mmu_ops = {
+ .make_pud = PTE_IDENT,
+
+ .set_pgd = native_set_pgd,
++ .set_pgd_batched = native_set_pgd_batched,
+ #endif
+ #endif /* PAGETABLE_LEVELS >= 3 */
+
+@@ -480,6 +493,12 @@ struct pv_mmu_ops pv_mmu_ops = {
+ },
+
+ .set_fixmap = native_set_fixmap,
++
++#ifdef CONFIG_PAX_KERNEXEC
++ .pax_open_kernel = native_pax_open_kernel,
++ .pax_close_kernel = native_pax_close_kernel,
++#endif
++
+ };
+
+ EXPORT_SYMBOL_GPL(pv_time_ops);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/paravirt-spinlocks.c linux-3.4-pax/arch/x86/kernel/paravirt-spinlocks.c
+--- linux-3.4/arch/x86/kernel/paravirt-spinlocks.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/kernel/paravirt-spinlocks.c 2012-05-21 12:10:09.584048893 +0200
+@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
+ arch_spin_lock(lock);
+ }
+
+-struct pv_lock_ops pv_lock_ops = {
++struct pv_lock_ops pv_lock_ops __read_only = {
+ #ifdef CONFIG_SMP
+ .spin_is_locked = __ticket_spin_is_locked,
+ .spin_is_contended = __ticket_spin_is_contended,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/pci-iommu_table.c linux-3.4-pax/arch/x86/kernel/pci-iommu_table.c
+--- linux-3.4/arch/x86/kernel/pci-iommu_table.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/kernel/pci-iommu_table.c 2012-05-21 12:10:09.588048893 +0200
+@@ -2,7 +2,7 @@
+ #include <asm/iommu_table.h>
+ #include <linux/string.h>
+ #include <linux/kallsyms.h>
+-
++#include <linux/sched.h>
+
+ #define DEBUG 1
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/process_32.c linux-3.4-pax/arch/x86/kernel/process_32.c
+--- linux-3.4/arch/x86/kernel/process_32.c 2012-05-21 11:32:57.619927668 +0200
++++ linux-3.4-pax/arch/x86/kernel/process_32.c 2012-05-21 12:10:09.588048893 +0200
+@@ -64,6 +64,7 @@ asmlinkage void ret_from_fork(void) __as
+ unsigned long thread_saved_pc(struct task_struct *tsk)
+ {
+ return ((unsigned long *)tsk->thread.sp)[3];
++//XXX return tsk->thread.eip;
+ }
+
+ void __show_regs(struct pt_regs *regs, int all)
+@@ -73,15 +74,14 @@ void __show_regs(struct pt_regs *regs, i
+ unsigned long sp;
+ unsigned short ss, gs;
+
+- if (user_mode_vm(regs)) {
++ if (user_mode(regs)) {
+ sp = regs->sp;
+ ss = regs->ss & 0xffff;
+- gs = get_user_gs(regs);
+ } else {
+ sp = kernel_stack_pointer(regs);
+ savesegment(ss, ss);
+- savesegment(gs, gs);
+ }
++ gs = get_user_gs(regs);
+
+ show_regs_common();
+
+@@ -143,13 +143,14 @@ int copy_thread(unsigned long clone_flag
+ struct task_struct *tsk;
+ int err;
+
+- childregs = task_pt_regs(p);
++ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
+ *childregs = *regs;
+ childregs->ax = 0;
+ childregs->sp = sp;
+
+ p->thread.sp = (unsigned long) childregs;
+ p->thread.sp0 = (unsigned long) (childregs+1);
++ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
+
+ p->thread.ip = (unsigned long) ret_from_fork;
+
+@@ -240,7 +241,7 @@ __switch_to(struct task_struct *prev_p,
+ struct thread_struct *prev = &prev_p->thread,
+ *next = &next_p->thread;
+ int cpu = smp_processor_id();
+- struct tss_struct *tss = &per_cpu(init_tss, cpu);
++ struct tss_struct *tss = init_tss + cpu;
+ fpu_switch_t fpu;
+
+ /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+@@ -264,6 +265,10 @@ __switch_to(struct task_struct *prev_p,
+ */
+ lazy_save_gs(prev->gs);
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(task_thread_info(next_p)->addr_limit);
++#endif
++
+ /*
+ * Load the per-thread Thread-Local Storage descriptor.
+ */
+@@ -294,6 +299,9 @@ __switch_to(struct task_struct *prev_p,
+ */
+ arch_end_context_switch(next_p);
+
++ percpu_write(current_task, next_p);
++ percpu_write(current_tinfo, &next_p->tinfo);
++
+ /*
+ * Restore %gs if needed (which is common)
+ */
+@@ -302,8 +310,6 @@ __switch_to(struct task_struct *prev_p,
+
+ switch_fpu_finish(next_p, fpu);
+
+- percpu_write(current_task, next_p);
+-
+ return prev_p;
+ }
+
+@@ -333,4 +339,3 @@ unsigned long get_wchan(struct task_stru
+ } while (count++ < 16);
+ return 0;
+ }
+-
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/process_64.c linux-3.4-pax/arch/x86/kernel/process_64.c
+--- linux-3.4/arch/x86/kernel/process_64.c 2012-05-21 11:32:57.623927668 +0200
++++ linux-3.4-pax/arch/x86/kernel/process_64.c 2012-05-21 12:10:09.592048893 +0200
+@@ -162,8 +162,7 @@ int copy_thread(unsigned long clone_flag
+ struct pt_regs *childregs;
+ struct task_struct *me = current;
+
+- childregs = ((struct pt_regs *)
+- (THREAD_SIZE + task_stack_page(p))) - 1;
++ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
+ *childregs = *regs;
+
+ childregs->ax = 0;
+@@ -175,6 +174,7 @@ int copy_thread(unsigned long clone_flag
+ p->thread.sp = (unsigned long) childregs;
+ p->thread.sp0 = (unsigned long) (childregs+1);
+ p->thread.usersp = me->thread.usersp;
++ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
+
+ set_tsk_thread_flag(p, TIF_FORK);
+
+@@ -280,7 +280,7 @@ __switch_to(struct task_struct *prev_p,
+ struct thread_struct *prev = &prev_p->thread;
+ struct thread_struct *next = &next_p->thread;
+ int cpu = smp_processor_id();
+- struct tss_struct *tss = &per_cpu(init_tss, cpu);
++ struct tss_struct *tss = init_tss + cpu;
+ unsigned fsindex, gsindex;
+ fpu_switch_t fpu;
+
+@@ -362,10 +362,9 @@ __switch_to(struct task_struct *prev_p,
+ prev->usersp = percpu_read(old_rsp);
+ percpu_write(old_rsp, next->usersp);
+ percpu_write(current_task, next_p);
++ percpu_write(current_tinfo, &next_p->tinfo);
+
+- percpu_write(kernel_stack,
+- (unsigned long)task_stack_page(next_p) +
+- THREAD_SIZE - KERNEL_STACK_OFFSET);
++ percpu_write(kernel_stack, next->sp0);
+
+ /*
+ * Now maybe reload the debug registers and handle I/O bitmaps
+@@ -434,12 +433,11 @@ unsigned long get_wchan(struct task_stru
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+ stack = (unsigned long)task_stack_page(p);
+- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
++ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
+ return 0;
+ fp = *(u64 *)(p->thread.sp);
+ do {
+- if (fp < (unsigned long)stack ||
+- fp >= (unsigned long)stack+THREAD_SIZE)
++ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
+ return 0;
+ ip = *(u64 *)(fp+8);
+ if (!in_sched_functions(ip))
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/process.c linux-3.4-pax/arch/x86/kernel/process.c
+--- linux-3.4/arch/x86/kernel/process.c 2012-05-21 11:32:57.615927667 +0200
++++ linux-3.4-pax/arch/x86/kernel/process.c 2012-05-21 12:10:09.596048892 +0200
+@@ -69,16 +69,33 @@ void free_thread_xstate(struct task_stru
+
+ void free_thread_info(struct thread_info *ti)
+ {
+- free_thread_xstate(ti->task);
+ free_pages((unsigned long)ti, THREAD_ORDER);
+ }
+
++static struct kmem_cache *task_struct_cachep;
++
+ void arch_task_cache_init(void)
+ {
+- task_xstate_cachep =
+- kmem_cache_create("task_xstate", xstate_size,
++ /* create a slab on which task_structs can be allocated */
++ task_struct_cachep =
++ kmem_cache_create("task_struct", sizeof(struct task_struct),
++ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
++
++ task_xstate_cachep =
++ kmem_cache_create("task_xstate", xstate_size,
+ __alignof__(union thread_xstate),
+- SLAB_PANIC | SLAB_NOTRACK, NULL);
++ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
++}
++
++struct task_struct *alloc_task_struct_node(int node)
++{
++ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
++}
++
++void free_task_struct(struct task_struct *task)
++{
++ free_thread_xstate(task);
++ kmem_cache_free(task_struct_cachep, task);
+ }
+
+ /*
+@@ -91,7 +108,7 @@ void exit_thread(void)
+ unsigned long *bp = t->io_bitmap_ptr;
+
+ if (bp) {
+- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
++ struct tss_struct *tss = init_tss + get_cpu();
+
+ t->io_bitmap_ptr = NULL;
+ clear_thread_flag(TIF_IO_BITMAP);
+@@ -127,7 +144,7 @@ void show_regs_common(void)
+
+ printk(KERN_CONT "\n");
+ printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
+- current->pid, current->comm, print_tainted(),
++ task_pid_nr(current), current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+@@ -141,6 +158,9 @@ void flush_thread(void)
+ {
+ struct task_struct *tsk = current;
+
++#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
++ loadsegment(gs, 0);
++#endif
+ flush_ptrace_hw_breakpoint(tsk);
+ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+ /*
+@@ -303,10 +323,10 @@ int kernel_thread(int (*fn)(void *), voi
+ regs.di = (unsigned long) arg;
+
+ #ifdef CONFIG_X86_32
+- regs.ds = __USER_DS;
+- regs.es = __USER_DS;
++ regs.ds = __KERNEL_DS;
++ regs.es = __KERNEL_DS;
+ regs.fs = __KERNEL_PERCPU;
+- regs.gs = __KERNEL_STACK_CANARY;
++ savesegment(gs, regs.gs);
+ #else
+ regs.ss = __KERNEL_DS;
+ #endif
+@@ -392,7 +412,7 @@ static void __exit_idle(void)
+ void exit_idle(void)
+ {
+ /* idle loop has pid 0 */
+- if (current->pid)
++ if (task_pid_nr(current))
+ return;
+ __exit_idle();
+ }
+@@ -501,7 +521,7 @@ bool set_pm_idle_to_default(void)
+
+ return ret;
+ }
+-void stop_this_cpu(void *dummy)
++__noreturn void stop_this_cpu(void *dummy)
+ {
+ local_irq_disable();
+ /*
+@@ -743,16 +763,37 @@ static int __init idle_setup(char *str)
+ }
+ early_param("idle", idle_setup);
+
+-unsigned long arch_align_stack(unsigned long sp)
++#ifdef CONFIG_PAX_RANDKSTACK
++void pax_randomize_kstack(struct pt_regs *regs)
+ {
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() % 8192;
+- return sp & ~0xf;
+-}
++ struct thread_struct *thread = &current->thread;
++ unsigned long time;
+
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long range_end = mm->brk + 0x02000000;
+- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+-}
++ if (!randomize_va_space)
++ return;
++
++ if (v8086_mode(regs))
++ return;
++
++ rdtscl(time);
++
++ /* P4 seems to return a 0 LSB, ignore it */
++#ifdef CONFIG_MPENTIUM4
++ time &= 0x3EUL;
++ time <<= 2;
++#elif defined(CONFIG_X86_64)
++ time &= 0xFUL;
++ time <<= 4;
++#else
++ time &= 0x1FUL;
++ time <<= 3;
++#endif
++
++ thread->sp0 ^= time;
++ load_sp0(init_tss + smp_processor_id(), thread);
+
++#ifdef CONFIG_X86_64
++ percpu_write(kernel_stack, thread->sp0);
++#endif
++}
++#endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/ptrace.c linux-3.4-pax/arch/x86/kernel/ptrace.c
+--- linux-3.4/arch/x86/kernel/ptrace.c 2012-05-21 11:32:57.627927668 +0200
++++ linux-3.4-pax/arch/x86/kernel/ptrace.c 2012-05-22 15:28:30.071384686 +0200
+@@ -824,7 +824,7 @@ long arch_ptrace(struct task_struct *chi
+ unsigned long addr, unsigned long data)
+ {
+ int ret;
+- unsigned long __user *datap = (unsigned long __user *)data;
++ unsigned long __user *datap = (__force unsigned long __user *)data;
+
+ switch (request) {
+ /* read the word at location addr in the USER area. */
+@@ -909,14 +909,14 @@ long arch_ptrace(struct task_struct *chi
+ if ((int) addr < 0)
+ return -EIO;
+ ret = do_get_thread_area(child, addr,
+- (struct user_desc __user *)data);
++ (__force struct user_desc __user *) data);
+ break;
+
+ case PTRACE_SET_THREAD_AREA:
+ if ((int) addr < 0)
+ return -EIO;
+ ret = do_set_thread_area(child, addr,
+- (struct user_desc __user *)data, 0);
++ (__force struct user_desc __user *) data, 0);
+ break;
+ #endif
+
+@@ -1432,7 +1432,7 @@ static void fill_sigtrap_info(struct tas
+ memset(info, 0, sizeof(*info));
+ info->si_signo = SIGTRAP;
+ info->si_code = si_code;
+- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
++ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
+ }
+
+ void user_single_step_siginfo(struct task_struct *tsk,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/pvclock.c linux-3.4-pax/arch/x86/kernel/pvclock.c
+--- linux-3.4/arch/x86/kernel/pvclock.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/kernel/pvclock.c 2012-05-21 12:10:09.600048892 +0200
+@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
+ return pv_tsc_khz;
+ }
+
+-static atomic64_t last_value = ATOMIC64_INIT(0);
++static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
+
+ void pvclock_resume(void)
+ {
+- atomic64_set(&last_value, 0);
++ atomic64_set_unchecked(&last_value, 0);
+ }
+
+ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
+ * updating at the same time, and one of them could be slightly behind,
+ * making the assumption that last_value always go forward fail to hold.
+ */
+- last = atomic64_read(&last_value);
++ last = atomic64_read_unchecked(&last_value);
+ do {
+ if (ret < last)
+ return last;
+- last = atomic64_cmpxchg(&last_value, last, ret);
++ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
+ } while (unlikely(last != ret));
+
+ return ret;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/reboot.c linux-3.4-pax/arch/x86/kernel/reboot.c
+--- linux-3.4/arch/x86/kernel/reboot.c 2012-03-19 10:38:56.564049998 +0100
++++ linux-3.4-pax/arch/x86/kernel/reboot.c 2012-05-21 12:10:09.604048893 +0200
+@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
+ EXPORT_SYMBOL(pm_power_off);
+
+ static const struct desc_ptr no_idt = {};
+-static int reboot_mode;
++static unsigned short reboot_mode;
+ enum reboot_type reboot_type = BOOT_ACPI;
+ int reboot_force;
+
+@@ -335,13 +335,17 @@ core_initcall(reboot_init);
+ extern const unsigned char machine_real_restart_asm[];
+ extern const u64 machine_real_restart_gdt[3];
+
+-void machine_real_restart(unsigned int type)
++__noreturn void machine_real_restart(unsigned int type)
+ {
+ void *restart_va;
+ unsigned long restart_pa;
+- void (*restart_lowmem)(unsigned int);
++ void (* __noreturn restart_lowmem)(unsigned int);
+ u64 *lowmem_gdt;
+
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
++ struct desc_struct *gdt;
++#endif
++
+ local_irq_disable();
+
+ /* Write zero to CMOS register number 0x0f, which the BIOS POST
+@@ -367,14 +371,14 @@ void machine_real_restart(unsigned int t
+ boot)". This seems like a fairly standard thing that gets set by
+ REBOOT.COM programs, and the previous reset routine did this
+ too. */
+- *((unsigned short *)0x472) = reboot_mode;
++ *(unsigned short *)(__va(0x472)) = reboot_mode;
+
+ /* Patch the GDT in the low memory trampoline */
+ lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
+
+ restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
+ restart_pa = virt_to_phys(restart_va);
+- restart_lowmem = (void (*)(unsigned int))restart_pa;
++ restart_lowmem = (void *)restart_pa;
+
+ /* GDT[0]: GDT self-pointer */
+ lowmem_gdt[0] =
+@@ -385,7 +389,33 @@ void machine_real_restart(unsigned int t
+ GDT_ENTRY(0x009b, restart_pa, 0xffff);
+
+ /* Jump to the identity-mapped low memory code */
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
++ gdt = get_cpu_gdt_table(smp_processor_id());
++ pax_open_kernel();
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
++ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
++ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
++#endif
++#ifdef CONFIG_PAX_KERNEXEC
++ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
++ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
++ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
++ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
++ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
++ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
++#endif
++ pax_close_kernel();
++#endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
++ unreachable();
++#else
+ restart_lowmem(type);
++#endif
++
+ }
+ #ifdef CONFIG_APM_MODULE
+ EXPORT_SYMBOL(machine_real_restart);
+@@ -556,7 +586,7 @@ void __attribute__((weak)) mach_reboot_f
+ * try to force a triple fault and then cycle between hitting the keyboard
+ * controller and doing that
+ */
+-static void native_machine_emergency_restart(void)
++__noreturn static void native_machine_emergency_restart(void)
+ {
+ int i;
+ int attempt = 0;
+@@ -680,13 +710,13 @@ void native_machine_shutdown(void)
+ #endif
+ }
+
+-static void __machine_emergency_restart(int emergency)
++static __noreturn void __machine_emergency_restart(int emergency)
+ {
+ reboot_emergency = emergency;
+ machine_ops.emergency_restart();
+ }
+
+-static void native_machine_restart(char *__unused)
++static __noreturn void native_machine_restart(char *__unused)
+ {
+ printk("machine restart\n");
+
+@@ -695,7 +725,7 @@ static void native_machine_restart(char
+ __machine_emergency_restart(0);
+ }
+
+-static void native_machine_halt(void)
++static __noreturn void native_machine_halt(void)
+ {
+ /* stop other cpus and apics */
+ machine_shutdown();
+@@ -706,7 +736,7 @@ static void native_machine_halt(void)
+ stop_this_cpu(NULL);
+ }
+
+-static void native_machine_power_off(void)
++__noreturn static void native_machine_power_off(void)
+ {
+ if (pm_power_off) {
+ if (!reboot_force)
+@@ -715,6 +745,7 @@ static void native_machine_power_off(voi
+ }
+ /* a fallback in case there is no PM info available */
+ tboot_shutdown(TB_SHUTDOWN_HALT);
++ unreachable();
+ }
+
+ struct machine_ops machine_ops = {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/relocate_kernel_64.S linux-3.4-pax/arch/x86/kernel/relocate_kernel_64.S
+--- linux-3.4/arch/x86/kernel/relocate_kernel_64.S 2011-10-24 12:48:26.271091772 +0200
++++ linux-3.4-pax/arch/x86/kernel/relocate_kernel_64.S 2012-05-21 12:10:09.608048893 +0200
+@@ -11,6 +11,7 @@
+ #include <asm/kexec.h>
+ #include <asm/processor-flags.h>
+ #include <asm/pgtable_types.h>
++#include <asm/alternative-asm.h>
+
+ /*
+ * Must be relocatable PIC code callable as a C function
+@@ -160,13 +161,14 @@ identity_mapped:
+ xorq %rbp, %rbp
+ xorq %r8, %r8
+ xorq %r9, %r9
+- xorq %r10, %r9
++ xorq %r10, %r10
+ xorq %r11, %r11
+ xorq %r12, %r12
+ xorq %r13, %r13
+ xorq %r14, %r14
+ xorq %r15, %r15
+
++ pax_force_retaddr 0, 1
+ ret
+
+ 1:
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/setup.c linux-3.4-pax/arch/x86/kernel/setup.c
+--- linux-3.4/arch/x86/kernel/setup.c 2012-05-21 11:32:57.635927669 +0200
++++ linux-3.4-pax/arch/x86/kernel/setup.c 2012-05-21 12:10:09.608048893 +0200
+@@ -447,7 +447,7 @@ static void __init parse_setup_data(void
+
+ switch (data->type) {
+ case SETUP_E820_EXT:
+- parse_e820_ext(data);
++ parse_e820_ext((struct setup_data __force_kernel *)data);
+ break;
+ case SETUP_DTB:
+ add_dtb(pa_data);
+@@ -639,7 +639,7 @@ static void __init trim_bios_range(void)
+ * area (640->1Mb) as ram even though it is not.
+ * take them out.
+ */
+- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
++ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
+ sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ }
+
+@@ -763,14 +763,14 @@ void __init setup_arch(char **cmdline_p)
+
+ if (!boot_params.hdr.root_flags)
+ root_mountflags &= ~MS_RDONLY;
+- init_mm.start_code = (unsigned long) _text;
+- init_mm.end_code = (unsigned long) _etext;
++ init_mm.start_code = ktla_ktva((unsigned long) _text);
++ init_mm.end_code = ktla_ktva((unsigned long) _etext);
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = _brk_end;
+
+- code_resource.start = virt_to_phys(_text);
+- code_resource.end = virt_to_phys(_etext)-1;
+- data_resource.start = virt_to_phys(_etext);
++ code_resource.start = virt_to_phys(ktla_ktva(_text));
++ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
++ data_resource.start = virt_to_phys(_sdata);
+ data_resource.end = virt_to_phys(_edata)-1;
+ bss_resource.start = virt_to_phys(&__bss_start);
+ bss_resource.end = virt_to_phys(&__bss_stop)-1;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/setup_percpu.c linux-3.4-pax/arch/x86/kernel/setup_percpu.c
+--- linux-3.4/arch/x86/kernel/setup_percpu.c 2012-05-21 11:32:57.635927669 +0200
++++ linux-3.4-pax/arch/x86/kernel/setup_percpu.c 2012-05-22 15:28:30.075384686 +0200
+@@ -21,19 +21,17 @@
+ #include <asm/cpu.h>
+ #include <asm/stackprotector.h>
+
+-DEFINE_PER_CPU(int, cpu_number);
++#ifdef CONFIG_SMP
++DEFINE_PER_CPU(unsigned int, cpu_number);
+ EXPORT_PER_CPU_SYMBOL(cpu_number);
++#endif
+
+-#ifdef CONFIG_X86_64
+ #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
+-#else
+-#define BOOT_PERCPU_OFFSET 0
+-#endif
+
+ DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+ EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+
+-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
++unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
+ [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
+ };
+ EXPORT_SYMBOL(__per_cpu_offset);
+@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
+ {
+ #ifdef CONFIG_X86_32
+ struct desc_struct gdt;
++ unsigned long base = per_cpu_offset(cpu);
+
+- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
+- 0x2 | DESCTYPE_S, 0x8);
+- gdt.s = 1;
++ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
++ 0x83 | DESCTYPE_S, 0xC);
+ write_gdt_entry(get_cpu_gdt_table(cpu),
+ GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
+ #endif
+@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
+ /* alrighty, percpu areas up and running */
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu) {
++#ifdef CONFIG_CC_STACKPROTECTOR
++#ifdef CONFIG_X86_32
++ unsigned long canary = per_cpu(stack_canary.canary, cpu);
++#endif
++#endif
+ per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
+ per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
+ per_cpu(cpu_number, cpu) = cpu;
+@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
+ */
+ set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
+ #endif
++#ifdef CONFIG_CC_STACKPROTECTOR
++#ifdef CONFIG_X86_32
++ if (!cpu)
++ per_cpu(stack_canary.canary, cpu) = canary;
++#endif
++#endif
+ /*
+ * Up to this point, the boot CPU has been using .init.data
+ * area. Reload any changed state for the boot CPU.
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/signal.c linux-3.4-pax/arch/x86/kernel/signal.c
+--- linux-3.4/arch/x86/kernel/signal.c 2012-05-21 11:32:57.639927669 +0200
++++ linux-3.4-pax/arch/x86/kernel/signal.c 2012-05-21 12:10:09.616048893 +0200
+@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsi
+ * Align the stack pointer according to the i386 ABI,
+ * i.e. so that on function entry ((sp + 4) & 15) == 0.
+ */
+- sp = ((sp + 4) & -16ul) - 4;
++ sp = ((sp - 12) & -16ul) - 4;
+ #else /* !CONFIG_X86_32 */
+ sp = round_down(sp, 16) - 8;
+ #endif
+@@ -241,11 +241,11 @@ get_sigframe(struct k_sigaction *ka, str
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+ if (onsigstack && !likely(on_sig_stack(sp)))
+- return (void __user *)-1L;
++ return (__force void __user *)-1L;
+
+ /* save i387 state */
+ if (used_math() && save_i387_xstate(*fpstate) < 0)
+- return (void __user *)-1L;
++ return (__force void __user *)-1L;
+
+ return (void __user *)sp;
+ }
+@@ -300,9 +300,9 @@ __setup_frame(int sig, struct k_sigactio
+ }
+
+ if (current->mm->context.vdso)
+- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
++ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
+ else
+- restorer = &frame->retcode;
++ restorer = (void __user *)&frame->retcode;
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
+
+@@ -316,7 +316,7 @@ __setup_frame(int sig, struct k_sigactio
+ * reasons and because gdb uses it as a signature to notice
+ * signal handler stack frames.
+ */
+- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
++ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
+
+ if (err)
+ return -EFAULT;
+@@ -370,7 +370,10 @@ static int __setup_rt_frame(int sig, str
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ /* Set up to return from userspace. */
+- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
++ if (current->mm->context.vdso)
++ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
++ else
++ restorer = (void __user *)&frame->retcode;
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
+ put_user_ex(restorer, &frame->pretcode);
+@@ -382,7 +385,7 @@ static int __setup_rt_frame(int sig, str
+ * reasons and because gdb uses it as a signature to notice
+ * signal handler stack frames.
+ */
+- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
++ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
+@@ -773,7 +776,7 @@ static void do_signal(struct pt_regs *re
+ * X86_32: vm86 regs switched out by assembly code before reaching
+ * here, so testing against kernel CS suffices.
+ */
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ return;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/smpboot.c linux-3.4-pax/arch/x86/kernel/smpboot.c
+--- linux-3.4/arch/x86/kernel/smpboot.c 2012-05-21 11:32:57.651927669 +0200
++++ linux-3.4-pax/arch/x86/kernel/smpboot.c 2012-05-21 12:10:09.616048893 +0200
+@@ -699,17 +699,20 @@ static int __cpuinit do_boot_cpu(int api
+ set_idle_for_cpu(cpu, c_idle.idle);
+ do_rest:
+ per_cpu(current_task, cpu) = c_idle.idle;
++ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
+ #ifdef CONFIG_X86_32
+ /* Stack for startup_32 can be just as for start_secondary onwards */
+ irq_ctx_init(cpu);
+ #else
+ clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
+ initial_gs = per_cpu_offset(cpu);
+- per_cpu(kernel_stack, cpu) =
+- (unsigned long)task_stack_page(c_idle.idle) -
+- KERNEL_STACK_OFFSET + THREAD_SIZE;
++ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
+ #endif
++
++ pax_open_kernel();
+ early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
++ pax_close_kernel();
++
+ initial_code = (unsigned long)start_secondary;
+ stack_start = c_idle.idle->thread.sp;
+
+@@ -851,6 +854,12 @@ int __cpuinit native_cpu_up(unsigned int
+
+ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
++ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++ KERNEL_PGD_PTRS);
++#endif
++
+ err = do_boot_cpu(apicid, cpu);
+ if (err) {
+ pr_debug("do_boot_cpu failed %d\n", err);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/step.c linux-3.4-pax/arch/x86/kernel/step.c
+--- linux-3.4/arch/x86/kernel/step.c 2011-10-24 12:48:26.279091772 +0200
++++ linux-3.4-pax/arch/x86/kernel/step.c 2012-05-21 12:10:09.620048894 +0200
+@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
+ struct desc_struct *desc;
+ unsigned long base;
+
+- seg &= ~7UL;
++ seg >>= 3;
+
+ mutex_lock(&child->mm->context.lock);
+- if (unlikely((seg >> 3) >= child->mm->context.size))
++ if (unlikely(seg >= child->mm->context.size))
+ addr = -1L; /* bogus selector, access would fault */
+ else {
+ desc = child->mm->context.ldt + seg;
+@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
+ addr += base;
+ }
+ mutex_unlock(&child->mm->context.lock);
+- }
++ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
++ addr = ktla_ktva(addr);
+
+ return addr;
+ }
+@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
+ unsigned char opcode[15];
+ unsigned long addr = convert_ip_to_linear(child, regs);
+
++ if (addr == -EINVAL)
++ return 0;
++
+ copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
+ for (i = 0; i < copied; i++) {
+ switch (opcode[i]) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/sys_i386_32.c linux-3.4-pax/arch/x86/kernel/sys_i386_32.c
+--- linux-3.4/arch/x86/kernel/sys_i386_32.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/kernel/sys_i386_32.c 2012-05-21 12:10:09.624048894 +0200
+@@ -24,17 +24,224 @@
+
+ #include <asm/syscalls.h>
+
+-/*
+- * Do a system call from kernel instead of calling sys_execve so we
+- * end up with proper pt_regs.
+- */
+-int kernel_execve(const char *filename,
+- const char *const argv[],
+- const char *const envp[])
++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
+ {
+- long __res;
+- asm volatile ("int $0x80"
+- : "=a" (__res)
+- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
+- return __res;
++ unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ if (len > pax_task_size || addr > pax_task_size - len)
++ return -EINVAL;
++
++ return 0;
++}
++
++unsigned long
++arch_get_unmapped_area(struct file *filp, unsigned long addr,
++ unsigned long len, unsigned long pgoff, unsigned long flags)
++{
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma;
++ unsigned long start_addr, pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
++
++ if (len > pax_task_size)
++ return -ENOMEM;
++
++ if (flags & MAP_FIXED)
++ return addr;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
++ if (addr) {
++ addr = PAGE_ALIGN(addr);
++ if (pax_task_size - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
++ }
++ if (len > mm->cached_hole_size) {
++ start_addr = addr = mm->free_area_cache;
++ } else {
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
++ }
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
++ start_addr = 0x00110000UL;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ start_addr += mm->delta_mmap & 0x03FFF000UL;
++#endif
++
++ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
++ start_addr = addr = mm->mmap_base;
++ else
++ addr = start_addr;
++ }
++#endif
++
++full_search:
++ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
++ /* At this point: (!vma || addr < vma->vm_end). */
++ if (pax_task_size - len < addr) {
++ /*
++ * Start a new search - just in case we missed
++ * some holes.
++ */
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
++ goto full_search;
++ }
++ return -ENOMEM;
++ }
++ if (check_heap_stack_gap(vma, addr, len))
++ break;
++ if (addr + mm->cached_hole_size < vma->vm_start)
++ mm->cached_hole_size = vma->vm_start - addr;
++ addr = vma->vm_end;
++ if (mm->start_brk <= addr && addr < mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
++ goto full_search;
++ }
++ }
++
++ /*
++ * Remember the place where we stopped the search:
++ */
++ mm->free_area_cache = addr + len;
++ return addr;
++}
++
++unsigned long
++arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
++ const unsigned long len, const unsigned long pgoff,
++ const unsigned long flags)
++{
++ struct vm_area_struct *vma;
++ struct mm_struct *mm = current->mm;
++ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
++
++ /* requested length too big for entire address space */
++ if (len > pax_task_size)
++ return -ENOMEM;
++
++ if (flags & MAP_FIXED)
++ return addr;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
++ goto bottomup;
++#endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
++ /* requesting a specific address */
++ if (addr) {
++ addr = PAGE_ALIGN(addr);
++ if (pax_task_size - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
++ }
++
++ /* check if free_area_cache is useful for us */
++ if (len <= mm->cached_hole_size) {
++ mm->cached_hole_size = 0;
++ mm->free_area_cache = mm->mmap_base;
++ }
++
++ /* either no address requested or can't fit in requested address hole */
++ addr = mm->free_area_cache;
++
++ /* make sure it can fit in the remaining address space */
++ if (addr > len) {
++ vma = find_vma(mm, addr-len);
++ if (check_heap_stack_gap(vma, addr - len, len))
++ /* remember the address as a hint for next time */
++ return (mm->free_area_cache = addr-len);
++ }
++
++ if (mm->mmap_base < len)
++ goto bottomup;
++
++ addr = mm->mmap_base-len;
++
++ do {
++ /*
++ * Lookup failure means no vma is above this address,
++ * else if new region fits below vma->vm_start,
++ * return with success:
++ */
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ /* remember the address as a hint for next time */
++ return (mm->free_area_cache = addr);
++
++ /* remember the largest hole we saw so far */
++ if (addr + mm->cached_hole_size < vma->vm_start)
++ mm->cached_hole_size = vma->vm_start - addr;
++
++ /* try just below the current vma->vm_start */
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
++
++bottomup:
++ /*
++ * A failed mmap() very likely causes application failure,
++ * so fall back to the bottom-up function here. This scenario
++ * can happen with large stack limits and large mmap()
++ * allocations.
++ */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
++ else
++#endif
++
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
++ mm->cached_hole_size = ~0UL;
++ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
++ /*
++ * Restore the topdown base:
++ */
++ mm->mmap_base = base;
++ mm->free_area_cache = base;
++ mm->cached_hole_size = ~0UL;
++
++ return addr;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/sys_x86_64.c linux-3.4-pax/arch/x86/kernel/sys_x86_64.c
+--- linux-3.4/arch/x86/kernel/sys_x86_64.c 2012-05-21 11:32:57.659927670 +0200
++++ linux-3.4-pax/arch/x86/kernel/sys_x86_64.c 2012-05-21 12:10:09.624048894 +0200
+@@ -95,8 +95,8 @@ out:
+ return error;
+ }
+
+-static void find_start_end(unsigned long flags, unsigned long *begin,
+- unsigned long *end)
++static void find_start_end(struct mm_struct *mm, unsigned long flags,
++ unsigned long *begin, unsigned long *end)
+ {
+ if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
+ unsigned long new_begin;
+@@ -115,7 +115,7 @@ static void find_start_end(unsigned long
+ *begin = new_begin;
+ }
+ } else {
+- *begin = TASK_UNMAPPED_BASE;
++ *begin = mm->mmap_base;
+ *end = TASK_SIZE;
+ }
+ }
+@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp
+ if (flags & MAP_FIXED)
+ return addr;
+
+- find_start_end(flags, &begin, &end);
++ find_start_end(mm, flags, &begin, &end);
+
+ if (len > end)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+- if (end - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
+@@ -172,7 +175,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct fi
+ {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+- unsigned long addr = addr0, start_addr;
++ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE)
+@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct fi
+ if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
+ goto bottomup;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+- vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
+- return addr;
++ if (TASK_SIZE - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
+ }
+
+ /* check if free_area_cache is useful for us */
+@@ -240,7 +248,7 @@ try_again:
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || addr+len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ /* remember the address as a hint for next time */
+ return mm->free_area_cache = addr;
+
+@@ -249,8 +257,8 @@ try_again:
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start-len;
+- } while (len < vma->vm_start);
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
+
+ fail:
+ /*
+@@ -270,13 +278,21 @@ bottomup:
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+- mm->free_area_cache = TASK_UNMAPPED_BASE;
+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+ /*
+ * Restore the topdown base:
+ */
+- mm->free_area_cache = mm->mmap_base;
++ mm->mmap_base = base;
++ mm->free_area_cache = base;
+ mm->cached_hole_size = ~0UL;
+
+ return addr;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/tboot.c linux-3.4-pax/arch/x86/kernel/tboot.c
+--- linux-3.4/arch/x86/kernel/tboot.c 2012-05-21 11:32:57.663927670 +0200
++++ linux-3.4-pax/arch/x86/kernel/tboot.c 2012-05-21 12:10:09.628048894 +0200
+@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
+
+ void tboot_shutdown(u32 shutdown_type)
+ {
+- void (*shutdown)(void);
++ void (* __noreturn shutdown)(void);
+
+ if (!tboot_enabled())
+ return;
+@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
+
+ switch_to_tboot_pt();
+
+- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
++ shutdown = (void *)tboot->shutdown_entry;
+ shutdown();
+
+ /* should not reach here */
+@@ -299,7 +299,7 @@ static int tboot_sleep(u8 sleep_state, u
+ return 0;
+ }
+
+-static atomic_t ap_wfs_count;
++static atomic_unchecked_t ap_wfs_count;
+
+ static int tboot_wait_for_aps(int num_aps)
+ {
+@@ -323,9 +323,9 @@ static int __cpuinit tboot_cpu_callback(
+ {
+ switch (action) {
+ case CPU_DYING:
+- atomic_inc(&ap_wfs_count);
++ atomic_inc_unchecked(&ap_wfs_count);
+ if (num_online_cpus() == 1)
+- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
++ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
+ return NOTIFY_BAD;
+ break;
+ }
+@@ -344,7 +344,7 @@ static __init int tboot_late_init(void)
+
+ tboot_create_trampoline();
+
+- atomic_set(&ap_wfs_count, 0);
++ atomic_set_unchecked(&ap_wfs_count, 0);
+ register_hotcpu_notifier(&tboot_cpu_notifier);
+
+ acpi_os_set_prepare_sleep(&tboot_sleep);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/time.c linux-3.4-pax/arch/x86/kernel/time.c
+--- linux-3.4/arch/x86/kernel/time.c 2012-05-21 11:32:57.675927671 +0200
++++ linux-3.4-pax/arch/x86/kernel/time.c 2012-05-21 12:10:09.628048894 +0200
+@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs
+ {
+ unsigned long pc = instruction_pointer(regs);
+
+- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++ if (!user_mode(regs) && in_lock_functions(pc)) {
+ #ifdef CONFIG_FRAME_POINTER
+- return *(unsigned long *)(regs->bp + sizeof(long));
++ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
+ #else
+ unsigned long *sp =
+ (unsigned long *)kernel_stack_pointer(regs);
+@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs
+ * or above a saved flags. Eflags has bits 22-31 zero,
+ * kernel addresses don't.
+ */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ return ktla_ktva(sp[0]);
++#else
+ if (sp[0] >> 22)
+ return sp[0];
+ if (sp[1] >> 22)
+ return sp[1];
+ #endif
++
++#endif
+ }
+ return pc;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/tls.c linux-3.4-pax/arch/x86/kernel/tls.c
+--- linux-3.4/arch/x86/kernel/tls.c 2012-05-21 11:32:57.679927670 +0200
++++ linux-3.4-pax/arch/x86/kernel/tls.c 2012-05-21 12:10:09.632048894 +0200
+@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struc
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
++ return -EINVAL;
++#endif
++
+ set_tls_desc(p, idx, &info, 1);
+
+ return 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/trampoline_32.S linux-3.4-pax/arch/x86/kernel/trampoline_32.S
+--- linux-3.4/arch/x86/kernel/trampoline_32.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/kernel/trampoline_32.S 2012-05-21 12:10:09.636048894 +0200
+@@ -32,6 +32,12 @@
+ #include <asm/segment.h>
+ #include <asm/page_types.h>
+
++#ifdef CONFIG_PAX_KERNEXEC
++#define ta(X) (X)
++#else
++#define ta(X) ((X) - __PAGE_OFFSET)
++#endif
++
+ #ifdef CONFIG_SMP
+
+ .section ".x86_trampoline","a"
+@@ -62,7 +68,7 @@ r_base = .
+ inc %ax # protected mode (PE) bit
+ lmsw %ax # into protected mode
+ # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
+- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
++ ljmpl $__BOOT_CS, $ta(startup_32_smp)
+
+ # These need to be in the same 64K segment as the above;
+ # hence we don't use the boot_gdt_descr defined in head.S
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/trampoline_64.S linux-3.4-pax/arch/x86/kernel/trampoline_64.S
+--- linux-3.4/arch/x86/kernel/trampoline_64.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/kernel/trampoline_64.S 2012-05-21 12:10:09.640048895 +0200
+@@ -90,7 +90,7 @@ startup_32:
+ movl $__KERNEL_DS, %eax # Initialize the %ds segment register
+ movl %eax, %ds
+
+- movl $X86_CR4_PAE, %eax
++ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
+ movl %eax, %cr4 # Enable PAE mode
+
+ # Setup trampoline 4 level pagetables
+@@ -138,7 +138,7 @@ tidt:
+ # so the kernel can live anywhere
+ .balign 4
+ tgdt:
+- .short tgdt_end - tgdt # gdt limit
++ .short tgdt_end - tgdt - 1 # gdt limit
+ .long tgdt - r_base
+ .short 0
+ .quad 0x00cf9b000000ffff # __KERNEL32_CS
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/traps.c linux-3.4-pax/arch/x86/kernel/traps.c
+--- linux-3.4/arch/x86/kernel/traps.c 2012-05-21 11:32:57.683927670 +0200
++++ linux-3.4-pax/arch/x86/kernel/traps.c 2012-05-21 12:10:09.640048895 +0200
+@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
+
+ /* Do we ignore FPU interrupts ? */
+ char ignore_fpu_irq;
+-
+-/*
+- * The IDT has to be page-aligned to simplify the Pentium
+- * F0 0F bug workaround.
+- */
+-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
+ #endif
+
+ DECLARE_BITMAP(used_vectors, NR_VECTORS);
+@@ -108,13 +102,13 @@ static inline void preempt_conditional_c
+ }
+
+ static void __kprobes
+-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
++do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
+ long error_code, siginfo_t *info)
+ {
+ struct task_struct *tsk = current;
+
+ #ifdef CONFIG_X86_32
+- if (regs->flags & X86_VM_MASK) {
++ if (v8086_mode(regs)) {
+ /*
+ * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
+ * On nmi (interrupt 2), do_trap should not be called.
+@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str
+ }
+ #endif
+
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ goto kernel_trap;
+
+ #ifdef CONFIG_X86_32
+@@ -148,7 +142,7 @@ trap_signal:
+ printk_ratelimit()) {
+ printk(KERN_INFO
+ "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
+- tsk->comm, tsk->pid, str,
++ tsk->comm, task_pid_nr(tsk), str,
+ regs->ip, regs->sp, error_code);
+ print_vma_addr(" in ", regs->ip);
+ printk("\n");
+@@ -165,8 +159,20 @@ kernel_trap:
+ if (!fixup_exception(regs)) {
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_nr = trapnr;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
++ str = "PAX: suspicious stack segment fault";
++#endif
++
+ die(str, regs, error_code);
+ }
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (trapnr == 4)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ return;
+
+ #ifdef CONFIG_X86_32
+@@ -259,14 +265,30 @@ do_general_protection(struct pt_regs *re
+ conditional_sti(regs);
+
+ #ifdef CONFIG_X86_32
+- if (regs->flags & X86_VM_MASK)
++ if (v8086_mode(regs))
+ goto gp_in_vm86;
+ #endif
+
+ tsk = current;
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ goto gp_in_kernel;
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
++ struct mm_struct *mm = tsk->mm;
++ unsigned long limit;
++
++ down_write(&mm->mmap_sem);
++ limit = mm->context.user_cs_limit;
++ if (limit < TASK_SIZE) {
++ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
++ up_write(&mm->mmap_sem);
++ return;
++ }
++ up_write(&mm->mmap_sem);
++ }
++#endif
++
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_nr = X86_TRAP_GP;
+
+@@ -299,6 +321,13 @@ gp_in_kernel:
+ if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
+ X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
+ return;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
++ die("PAX: suspicious general protection fault", regs, error_code);
++ else
++#endif
++
+ die("general protection fault", regs, error_code);
+ }
+
+@@ -425,7 +454,7 @@ dotraplinkage void __kprobes do_debug(st
+ /* It's safe to allow irq's after DR6 has been saved */
+ preempt_conditional_sti(regs);
+
+- if (regs->flags & X86_VM_MASK) {
++ if (v8086_mode(regs)) {
+ handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
+ X86_TRAP_DB);
+ preempt_conditional_cli(regs);
+@@ -440,7 +469,7 @@ dotraplinkage void __kprobes do_debug(st
+ * We already checked v86 mode above, so we can check for kernel mode
+ * by just checking the CPL of CS.
+ */
+- if ((dr6 & DR_STEP) && !user_mode(regs)) {
++ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
+ tsk->thread.debugreg6 &= ~DR_STEP;
+ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
+ regs->flags &= ~X86_EFLAGS_TF;
+@@ -471,7 +500,7 @@ void math_error(struct pt_regs *regs, in
+ return;
+ conditional_sti(regs);
+
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ {
+ if (!fixup_exception(regs)) {
+ task->thread.error_code = error_code;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/vm86_32.c linux-3.4-pax/arch/x86/kernel/vm86_32.c
+--- linux-3.4/arch/x86/kernel/vm86_32.c 2012-05-21 11:32:57.687927672 +0200
++++ linux-3.4-pax/arch/x86/kernel/vm86_32.c 2012-05-22 15:28:30.091384685 +0200
+@@ -148,7 +148,7 @@ struct pt_regs *save_v86_state(struct ke
+ do_exit(SIGSEGV);
+ }
+
+- tss = &per_cpu(init_tss, get_cpu());
++ tss = init_tss + get_cpu();
+ current->thread.sp0 = current->thread.saved_sp0;
+ current->thread.sysenter_cs = __KERNEL_CS;
+ load_sp0(tss, &current->thread);
+@@ -326,7 +326,7 @@ static void do_sys_vm86(struct kernel_vm
+ tsk->thread.saved_fs = info->regs32->fs;
+ tsk->thread.saved_gs = get_user_gs(info->regs32);
+
+- tss = &per_cpu(init_tss, get_cpu());
++ tss = init_tss + get_cpu();
+ tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
+ if (cpu_has_sep)
+ tsk->thread.sysenter_cs = 0;
+@@ -533,7 +533,7 @@ static void do_int(struct kernel_vm86_re
+ goto cannot_handle;
+ if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
+ goto cannot_handle;
+- intr_ptr = (unsigned long __user *) (i << 2);
++ intr_ptr = (__force unsigned long __user *) (i << 2);
+ if (get_user(segoffs, intr_ptr))
+ goto cannot_handle;
+ if ((segoffs >> 16) == BIOSSEG)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/vmlinux.lds.S linux-3.4-pax/arch/x86/kernel/vmlinux.lds.S
+--- linux-3.4/arch/x86/kernel/vmlinux.lds.S 2011-10-24 12:48:26.291091771 +0200
++++ linux-3.4-pax/arch/x86/kernel/vmlinux.lds.S 2012-05-21 12:10:09.648048895 +0200
+@@ -26,6 +26,13 @@
+ #include <asm/page_types.h>
+ #include <asm/cache.h>
+ #include <asm/boot.h>
++#include <asm/segment.h>
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
++#else
++#define __KERNEL_TEXT_OFFSET 0
++#endif
+
+ #undef i386 /* in case the preprocessor is a 32bit one */
+
+@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
+
+ PHDRS {
+ text PT_LOAD FLAGS(5); /* R_E */
++#ifdef CONFIG_X86_32
++ module PT_LOAD FLAGS(5); /* R_E */
++#endif
++#ifdef CONFIG_XEN
++ rodata PT_LOAD FLAGS(5); /* R_E */
++#else
++ rodata PT_LOAD FLAGS(4); /* R__ */
++#endif
+ data PT_LOAD FLAGS(6); /* RW_ */
+-#ifdef CONFIG_X86_64
++ init.begin PT_LOAD FLAGS(6); /* RW_ */
+ #ifdef CONFIG_SMP
+ percpu PT_LOAD FLAGS(6); /* RW_ */
+ #endif
++ text.init PT_LOAD FLAGS(5); /* R_E */
++ text.exit PT_LOAD FLAGS(5); /* R_E */
+ init PT_LOAD FLAGS(7); /* RWE */
+-#endif
+ note PT_NOTE FLAGS(0); /* ___ */
+ }
+
+ SECTIONS
+ {
+ #ifdef CONFIG_X86_32
+- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
+- phys_startup_32 = startup_32 - LOAD_OFFSET;
++ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
+ #else
+- . = __START_KERNEL;
+- phys_startup_64 = startup_64 - LOAD_OFFSET;
++ . = __START_KERNEL;
+ #endif
+
+ /* Text and read-only data */
+- .text : AT(ADDR(.text) - LOAD_OFFSET) {
+- _text = .;
++ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
+ /* bootstrapping code */
++#ifdef CONFIG_X86_32
++ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++#else
++ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++#endif
++ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++ _text = .;
+ HEAD_TEXT
+ #ifdef CONFIG_X86_32
+ . = ALIGN(PAGE_SIZE);
+@@ -108,13 +128,47 @@ SECTIONS
+ IRQENTRY_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+- /* End of text section */
+- _etext = .;
+ } :text = 0x9090
+
+- NOTES :text :note
++ . += __KERNEL_TEXT_OFFSET;
++
++#ifdef CONFIG_X86_32
++ . = ALIGN(PAGE_SIZE);
++ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
++
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
++ MODULES_EXEC_VADDR = .;
++ BYTE(0)
++ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
++ . = ALIGN(HPAGE_SIZE);
++ MODULES_EXEC_END = . - 1;
++#endif
++
++ } :module
++#endif
++
++ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
++ /* End of text section */
++ _etext = . - __KERNEL_TEXT_OFFSET;
++ }
++
++#ifdef CONFIG_X86_32
++ . = ALIGN(PAGE_SIZE);
++ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
++ *(.idt)
++ . = ALIGN(PAGE_SIZE);
++ *(.empty_zero_page)
++ *(.initial_pg_fixmap)
++ *(.initial_pg_pmd)
++ *(.initial_page_table)
++ *(.swapper_pg_dir)
++ } :rodata
++#endif
++
++ . = ALIGN(PAGE_SIZE);
++ NOTES :rodata :note
+
+- EXCEPTION_TABLE(16) :text = 0x9090
++ EXCEPTION_TABLE(16) :rodata
+
+ #if defined(CONFIG_DEBUG_RODATA)
+ /* .text should occupy whole number of pages */
+@@ -126,16 +180,20 @@ SECTIONS
+
+ /* Data */
+ .data : AT(ADDR(.data) - LOAD_OFFSET) {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ . = ALIGN(HPAGE_SIZE);
++#else
++ . = ALIGN(PAGE_SIZE);
++#endif
++
+ /* Start of data section */
+ _sdata = .;
+
+ /* init_task */
+ INIT_TASK_DATA(THREAD_SIZE)
+
+-#ifdef CONFIG_X86_32
+- /* 32 bit has nosave before _edata */
+ NOSAVE_DATA
+-#endif
+
+ PAGE_ALIGNED_DATA(PAGE_SIZE)
+
+@@ -176,12 +234,19 @@ SECTIONS
+ #endif /* CONFIG_X86_64 */
+
+ /* Init code and data - will be freed after init */
+- . = ALIGN(PAGE_SIZE);
+ .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
++ BYTE(0)
++
++#ifdef CONFIG_PAX_KERNEXEC
++ . = ALIGN(HPAGE_SIZE);
++#else
++ . = ALIGN(PAGE_SIZE);
++#endif
++
+ __init_begin = .; /* paired with __init_end */
+- }
++ } :init.begin
+
+-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
++#ifdef CONFIG_SMP
+ /*
+ * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
+ * output PHDR, so the next output section - .init.text - should
+@@ -190,12 +255,27 @@ SECTIONS
+ PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
+ #endif
+
+- INIT_TEXT_SECTION(PAGE_SIZE)
+-#ifdef CONFIG_X86_64
+- :init
+-#endif
++ . = ALIGN(PAGE_SIZE);
++ init_begin = .;
++ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
++ VMLINUX_SYMBOL(_sinittext) = .;
++ INIT_TEXT
++ VMLINUX_SYMBOL(_einittext) = .;
++ . = ALIGN(PAGE_SIZE);
++ } :text.init
+
+- INIT_DATA_SECTION(16)
++ /*
++ * .exit.text is discard at runtime, not link time, to deal with
++ * references from .altinstructions and .eh_frame
++ */
++ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
++ EXIT_TEXT
++ . = ALIGN(16);
++ } :text.exit
++ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
++
++ . = ALIGN(PAGE_SIZE);
++ INIT_DATA_SECTION(16) :init
+
+ /*
+ * Code and data for a variety of lowlevel trampolines, to be
+@@ -269,19 +349,12 @@ SECTIONS
+ }
+
+ . = ALIGN(8);
+- /*
+- * .exit.text is discard at runtime, not link time, to deal with
+- * references from .altinstructions and .eh_frame
+- */
+- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
+- EXIT_TEXT
+- }
+
+ .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
+ EXIT_DATA
+ }
+
+-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
++#ifndef CONFIG_SMP
+ PERCPU_SECTION(INTERNODE_CACHE_BYTES)
+ #endif
+
+@@ -300,16 +373,10 @@ SECTIONS
+ .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+ __smp_locks = .;
+ *(.smp_locks)
+- . = ALIGN(PAGE_SIZE);
+ __smp_locks_end = .;
++ . = ALIGN(PAGE_SIZE);
+ }
+
+-#ifdef CONFIG_X86_64
+- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
+- NOSAVE_DATA
+- }
+-#endif
+-
+ /* BSS */
+ . = ALIGN(PAGE_SIZE);
+ .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
+@@ -325,6 +392,7 @@ SECTIONS
+ __brk_base = .;
+ . += 64 * 1024; /* 64k alignment slop space */
+ *(.brk_reservation) /* areas brk users have reserved */
++ . = ALIGN(HPAGE_SIZE);
+ __brk_limit = .;
+ }
+
+@@ -351,13 +419,12 @@ SECTIONS
+ * for the boot processor.
+ */
+ #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
+-INIT_PER_CPU(gdt_page);
+ INIT_PER_CPU(irq_stack_union);
+
+ /*
+ * Build-time check on the image size:
+ */
+-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
++. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
+ "kernel image bigger than KERNEL_IMAGE_SIZE");
+
+ #ifdef CONFIG_SMP
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/vsyscall_64.c linux-3.4-pax/arch/x86/kernel/vsyscall_64.c
+--- linux-3.4/arch/x86/kernel/vsyscall_64.c 2012-05-21 11:32:57.687927672 +0200
++++ linux-3.4-pax/arch/x86/kernel/vsyscall_64.c 2012-05-21 12:10:09.648048895 +0200
+@@ -54,15 +54,13 @@
+ DEFINE_VVAR(int, vgetcpu_mode);
+ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
+
+-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
++static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
+
+ static int __init vsyscall_setup(char *str)
+ {
+ if (str) {
+ if (!strcmp("emulate", str))
+ vsyscall_mode = EMULATE;
+- else if (!strcmp("native", str))
+- vsyscall_mode = NATIVE;
+ else if (!strcmp("none", str))
+ vsyscall_mode = NONE;
+ else
+@@ -206,7 +204,7 @@ bool emulate_vsyscall(struct pt_regs *re
+
+ tsk = current;
+ if (seccomp_mode(&tsk->seccomp))
+- do_exit(SIGKILL);
++ do_group_exit(SIGKILL);
+
+ /*
+ * With a real vsyscall, page faults cause SIGSEGV. We want to
+@@ -278,8 +276,7 @@ bool emulate_vsyscall(struct pt_regs *re
+ return true;
+
+ sigsegv:
+- force_sig(SIGSEGV, current);
+- return true;
++ do_group_exit(SIGKILL);
+ }
+
+ /*
+@@ -332,10 +329,7 @@ void __init map_vsyscall(void)
+ extern char __vvar_page;
+ unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
+
+- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
+- vsyscall_mode == NATIVE
+- ? PAGE_KERNEL_VSYSCALL
+- : PAGE_KERNEL_VVAR);
++ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
+ BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
+ (unsigned long)VSYSCALL_START);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/x8664_ksyms_64.c linux-3.4-pax/arch/x86/kernel/x8664_ksyms_64.c
+--- linux-3.4/arch/x86/kernel/x8664_ksyms_64.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/kernel/x8664_ksyms_64.c 2012-05-21 12:10:09.652048895 +0200
+@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
+ EXPORT_SYMBOL(copy_user_generic_string);
+ EXPORT_SYMBOL(copy_user_generic_unrolled);
+ EXPORT_SYMBOL(__copy_user_nocache);
+-EXPORT_SYMBOL(_copy_from_user);
+-EXPORT_SYMBOL(_copy_to_user);
+
+ EXPORT_SYMBOL(copy_page);
+ EXPORT_SYMBOL(clear_page);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kernel/xsave.c linux-3.4-pax/arch/x86/kernel/xsave.c
+--- linux-3.4/arch/x86/kernel/xsave.c 2012-05-21 11:32:57.691927673 +0200
++++ linux-3.4-pax/arch/x86/kernel/xsave.c 2012-05-21 12:10:09.652048895 +0200
+@@ -131,7 +131,7 @@ int check_for_xstate(struct i387_fxsave_
+ fx_sw_user->xstate_size > fx_sw_user->extended_size)
+ return -EINVAL;
+
+- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
++ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
+ fx_sw_user->extended_size -
+ FP_XSTATE_MAGIC2_SIZE));
+ if (err)
+@@ -267,7 +267,7 @@ fx_only:
+ * the other extended state.
+ */
+ xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
+- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
++ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
+ }
+
+ /*
+@@ -296,7 +296,7 @@ int restore_i387_xstate(void __user *buf
+ if (use_xsave())
+ err = restore_user_xstate(buf);
+ else
+- err = fxrstor_checking((__force struct i387_fxsave_struct *)
++ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
+ buf);
+ if (unlikely(err)) {
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kvm/cpuid.c linux-3.4-pax/arch/x86/kvm/cpuid.c
+--- linux-3.4/arch/x86/kvm/cpuid.c 2012-05-21 11:32:57.695927673 +0200
++++ linux-3.4-pax/arch/x86/kvm/cpuid.c 2012-05-21 12:10:09.656048895 +0200
+@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm
+ struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries)
+ {
+- int r;
++ int r, i;
+
+ r = -E2BIG;
+ if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+ goto out;
+ r = -EFAULT;
+- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
+- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
++ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
+ goto out;
++ for (i = 0; i < cpuid->nent; ++i) {
++ struct kvm_cpuid_entry2 cpuid_entry;
++ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
++ goto out;
++ vcpu->arch.cpuid_entries[i] = cpuid_entry;
++ }
+ vcpu->arch.cpuid_nent = cpuid->nent;
+ kvm_apic_set_version(vcpu);
+ kvm_x86_ops->cpuid_update(vcpu);
+@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm
+ struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries)
+ {
+- int r;
++ int r, i;
+
+ r = -E2BIG;
+ if (cpuid->nent < vcpu->arch.cpuid_nent)
+ goto out;
+ r = -EFAULT;
+- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
+- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
++ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
+ goto out;
++ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
++ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
++ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
++ goto out;
++ }
+ return 0;
+
+ out:
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kvm/emulate.c linux-3.4-pax/arch/x86/kvm/emulate.c
+--- linux-3.4/arch/x86/kvm/emulate.c 2012-05-21 11:32:57.715927673 +0200
++++ linux-3.4-pax/arch/x86/kvm/emulate.c 2012-05-21 12:10:09.660048896 +0200
+@@ -252,6 +252,7 @@ struct gprefix {
+
+ #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
+ do { \
++ unsigned long _tmp; \
+ __asm__ __volatile__ ( \
+ _PRE_EFLAGS("0", "4", "2") \
+ _op _suffix " %"_x"3,%1; " \
+@@ -266,8 +267,6 @@ struct gprefix {
+ /* Raw emulation: instruction has two explicit operands. */
+ #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
+ do { \
+- unsigned long _tmp; \
+- \
+ switch ((ctxt)->dst.bytes) { \
+ case 2: \
+ ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
+@@ -283,7 +282,6 @@ struct gprefix {
+
+ #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
+ do { \
+- unsigned long _tmp; \
+ switch ((ctxt)->dst.bytes) { \
+ case 1: \
+ ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kvm/lapic.c linux-3.4-pax/arch/x86/kvm/lapic.c
+--- linux-3.4/arch/x86/kvm/lapic.c 2012-05-21 11:32:57.723927673 +0200
++++ linux-3.4-pax/arch/x86/kvm/lapic.c 2012-05-21 12:10:09.664048896 +0200
+@@ -54,7 +54,7 @@
+ #define APIC_BUS_CYCLE_NS 1
+
+ /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
+-#define apic_debug(fmt, arg...)
++#define apic_debug(fmt, arg...) do {} while (0)
+
+ #define APIC_LVT_NUM 6
+ /* 14 is the version for Xeon and Pentium 8.4.8*/
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kvm/paging_tmpl.h linux-3.4-pax/arch/x86/kvm/paging_tmpl.h
+--- linux-3.4/arch/x86/kvm/paging_tmpl.h 2012-05-21 11:32:57.731927674 +0200
++++ linux-3.4-pax/arch/x86/kvm/paging_tmpl.h 2012-05-21 12:10:09.668048896 +0200
+@@ -197,7 +197,7 @@ retry_walk:
+ if (unlikely(kvm_is_error_hva(host_addr)))
+ goto error;
+
+- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
++ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
+ if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
+ goto error;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kvm/svm.c linux-3.4-pax/arch/x86/kvm/svm.c
+--- linux-3.4/arch/x86/kvm/svm.c 2012-05-21 11:32:57.747927675 +0200
++++ linux-3.4-pax/arch/x86/kvm/svm.c 2012-05-21 12:10:09.672048896 +0200
+@@ -3509,7 +3509,11 @@ static void reload_tss(struct kvm_vcpu *
+ int cpu = raw_smp_processor_id();
+
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
++
++ pax_open_kernel();
+ sd->tss_desc->type = 9; /* available 32/64-bit TSS */
++ pax_close_kernel();
++
+ load_TR_desc();
+ }
+
+@@ -3887,6 +3891,10 @@ static void svm_vcpu_run(struct kvm_vcpu
+ #endif
+ #endif
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ reload_tss(vcpu);
+
+ local_irq_disable();
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kvm/vmx.c linux-3.4-pax/arch/x86/kvm/vmx.c
+--- linux-3.4/arch/x86/kvm/vmx.c 2012-05-21 11:32:57.755927675 +0200
++++ linux-3.4-pax/arch/x86/kvm/vmx.c 2012-05-21 12:10:09.680048897 +0200
+@@ -1303,7 +1303,11 @@ static void reload_tss(void)
+ struct desc_struct *descs;
+
+ descs = (void *)gdt->address;
++
++ pax_open_kernel();
+ descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
++ pax_close_kernel();
++
+ load_TR_desc();
+ }
+
+@@ -2625,8 +2629,11 @@ static __init int hardware_setup(void)
+ if (!cpu_has_vmx_flexpriority())
+ flexpriority_enabled = 0;
+
+- if (!cpu_has_vmx_tpr_shadow())
+- kvm_x86_ops->update_cr8_intercept = NULL;
++ if (!cpu_has_vmx_tpr_shadow()) {
++ pax_open_kernel();
++ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
++ pax_close_kernel();
++ }
+
+ if (enable_ept && !cpu_has_vmx_ept_2m_page())
+ kvm_disable_largepages();
+@@ -3642,7 +3649,7 @@ static void vmx_set_constant_host_state(
+ vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
+
+ asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
+- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
++ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
+
+ rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
+ vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
+@@ -6180,6 +6187,12 @@ static void __noclone vmx_vcpu_run(struc
+ "jmp .Lkvm_vmx_return \n\t"
+ ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
+ ".Lkvm_vmx_return: "
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
++ ".Lkvm_vmx_return2: "
++#endif
++
+ /* Save guest registers, load host registers, keep flags */
+ "mov %0, %c[wordsize](%%"R"sp) \n\t"
+ "pop %0 \n\t"
+@@ -6228,6 +6241,11 @@ static void __noclone vmx_vcpu_run(struc
+ #endif
+ [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
+ [wordsize]"i"(sizeof(ulong))
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ ,[cs]"i"(__KERNEL_CS)
++#endif
++
+ : "cc", "memory"
+ , R"ax", R"bx", R"di", R"si"
+ #ifdef CONFIG_X86_64
+@@ -6256,7 +6274,16 @@ static void __noclone vmx_vcpu_run(struc
+ }
+ }
+
+- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
++ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ loadsegment(fs, __KERNEL_PERCPU);
++#endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ vmx->loaded_vmcs->launched = 1;
+
+ vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/kvm/x86.c linux-3.4-pax/arch/x86/kvm/x86.c
+--- linux-3.4/arch/x86/kvm/x86.c 2012-05-21 11:32:57.763927676 +0200
++++ linux-3.4-pax/arch/x86/kvm/x86.c 2012-05-22 15:28:30.095384685 +0200
+@@ -1357,8 +1357,8 @@ static int xen_hvm_config(struct kvm_vcp
+ {
+ struct kvm *kvm = vcpu->kvm;
+ int lm = is_long_mode(vcpu);
+- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
+- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
++ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
++ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
+ u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
+ : kvm->arch.xen_hvm_config.blob_size_32;
+ u32 page_num = data & ~PAGE_MASK;
+@@ -2213,6 +2213,8 @@ long kvm_arch_dev_ioctl(struct file *fil
+ if (n < msr_list.nmsrs)
+ goto out;
+ r = -EFAULT;
++ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
++ goto out;
+ if (copy_to_user(user_msr_list->indices, &msrs_to_save,
+ num_msrs_to_save * sizeof(u32)))
+ goto out;
+@@ -2338,7 +2340,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
+ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+ struct kvm_interrupt *irq)
+ {
+- if (irq->irq < 0 || irq->irq >= 256)
++ if (irq->irq >= 256)
+ return -EINVAL;
+ if (irqchip_in_kernel(vcpu->kvm))
+ return -ENXIO;
+@@ -4860,7 +4862,7 @@ static void kvm_set_mmio_spte_mask(void)
+ kvm_mmu_set_mmio_spte_mask(mask);
+ }
+
+-int kvm_arch_init(void *opaque)
++int kvm_arch_init(const void *opaque)
+ {
+ int r;
+ struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lguest/boot.c linux-3.4-pax/arch/x86/lguest/boot.c
+--- linux-3.4/arch/x86/lguest/boot.c 2012-03-19 10:38:56.640049994 +0100
++++ linux-3.4-pax/arch/x86/lguest/boot.c 2012-05-21 12:10:09.696048898 +0200
+@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vt
+ * Rebooting also tells the Host we're finished, but the RESTART flag tells the
+ * Launcher to reboot us.
+ */
+-static void lguest_restart(char *reason)
++static __noreturn void lguest_restart(char *reason)
+ {
+ hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
++ BUG();
+ }
+
+ /*G:050
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/atomic64_386_32.S linux-3.4-pax/arch/x86/lib/atomic64_386_32.S
+--- linux-3.4/arch/x86/lib/atomic64_386_32.S 2012-05-21 11:32:57.803927678 +0200
++++ linux-3.4-pax/arch/x86/lib/atomic64_386_32.S 2012-05-21 12:10:09.696048898 +0200
+@@ -48,6 +48,10 @@ BEGIN(read)
+ movl (v), %eax
+ movl 4(v), %edx
+ RET_ENDP
++BEGIN(read_unchecked)
++ movl (v), %eax
++ movl 4(v), %edx
++RET_ENDP
+ #undef v
+
+ #define v %esi
+@@ -55,6 +59,10 @@ BEGIN(set)
+ movl %ebx, (v)
+ movl %ecx, 4(v)
+ RET_ENDP
++BEGIN(set_unchecked)
++ movl %ebx, (v)
++ movl %ecx, 4(v)
++RET_ENDP
+ #undef v
+
+ #define v %esi
+@@ -70,6 +78,20 @@ RET_ENDP
+ BEGIN(add)
+ addl %eax, (v)
+ adcl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 0f
++ subl %eax, (v)
++ sbbl %edx, 4(v)
++ int $4
++0:
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(add_unchecked)
++ addl %eax, (v)
++ adcl %edx, 4(v)
+ RET_ENDP
+ #undef v
+
+@@ -77,6 +99,24 @@ RET_ENDP
+ BEGIN(add_return)
+ addl (v), %eax
+ adcl 4(v), %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
++ movl %eax, (v)
++ movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(add_return_unchecked)
++ addl (v), %eax
++ adcl 4(v), %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+ RET_ENDP
+@@ -86,6 +126,20 @@ RET_ENDP
+ BEGIN(sub)
+ subl %eax, (v)
+ sbbl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 0f
++ addl %eax, (v)
++ adcl %edx, 4(v)
++ int $4
++0:
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(sub_unchecked)
++ subl %eax, (v)
++ sbbl %edx, 4(v)
+ RET_ENDP
+ #undef v
+
+@@ -96,6 +150,27 @@ BEGIN(sub_return)
+ sbbl $0, %edx
+ addl (v), %eax
+ adcl 4(v), %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
++ movl %eax, (v)
++ movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(sub_return_unchecked)
++ negl %edx
++ negl %eax
++ sbbl $0, %edx
++ addl (v), %eax
++ adcl 4(v), %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+ RET_ENDP
+@@ -105,6 +180,20 @@ RET_ENDP
+ BEGIN(inc)
+ addl $1, (v)
+ adcl $0, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 0f
++ subl $1, (v)
++ sbbl $0, 4(v)
++ int $4
++0:
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(inc_unchecked)
++ addl $1, (v)
++ adcl $0, 4(v)
+ RET_ENDP
+ #undef v
+
+@@ -114,6 +203,26 @@ BEGIN(inc_return)
+ movl 4(v), %edx
+ addl $1, %eax
+ adcl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
++ movl %eax, (v)
++ movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(inc_return_unchecked)
++ movl (v), %eax
++ movl 4(v), %edx
++ addl $1, %eax
++ adcl $0, %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+ RET_ENDP
+@@ -123,6 +232,20 @@ RET_ENDP
+ BEGIN(dec)
+ subl $1, (v)
+ sbbl $0, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 0f
++ addl $1, (v)
++ adcl $0, 4(v)
++ int $4
++0:
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(dec_unchecked)
++ subl $1, (v)
++ sbbl $0, 4(v)
+ RET_ENDP
+ #undef v
+
+@@ -132,6 +255,26 @@ BEGIN(dec_return)
+ movl 4(v), %edx
+ subl $1, %eax
+ sbbl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
++ movl %eax, (v)
++ movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(dec_return_unchecked)
++ movl (v), %eax
++ movl 4(v), %edx
++ subl $1, %eax
++ sbbl $0, %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+ RET_ENDP
+@@ -143,6 +286,13 @@ BEGIN(add_unless)
+ adcl %edx, %edi
+ addl (v), %eax
+ adcl 4(v), %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
+ cmpl %eax, %ecx
+ je 3f
+ 1:
+@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
+ 1:
+ addl $1, %eax
+ adcl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
+ movl %eax, (v)
+ movl %edx, 4(v)
+ movl $1, %eax
+@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
+ movl 4(v), %edx
+ subl $1, %eax
+ sbbl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 1f)
++#endif
++
+ js 1f
+ movl %eax, (v)
+ movl %edx, 4(v)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/atomic64_cx8_32.S linux-3.4-pax/arch/x86/lib/atomic64_cx8_32.S
+--- linux-3.4/arch/x86/lib/atomic64_cx8_32.S 2012-05-21 11:32:57.803927678 +0200
++++ linux-3.4-pax/arch/x86/lib/atomic64_cx8_32.S 2012-05-21 12:10:09.700048898 +0200
+@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
+ CFI_STARTPROC
+
+ read64 %ecx
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(atomic64_read_cx8)
+
++ENTRY(atomic64_read_unchecked_cx8)
++ CFI_STARTPROC
++
++ read64 %ecx
++ pax_force_retaddr
++ ret
++ CFI_ENDPROC
++ENDPROC(atomic64_read_unchecked_cx8)
++
+ ENTRY(atomic64_set_cx8)
+ CFI_STARTPROC
+
+@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
+ cmpxchg8b (%esi)
+ jne 1b
+
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(atomic64_set_cx8)
+
++ENTRY(atomic64_set_unchecked_cx8)
++ CFI_STARTPROC
++
++1:
++/* we don't need LOCK_PREFIX since aligned 64-bit writes
++ * are atomic on 586 and newer */
++ cmpxchg8b (%esi)
++ jne 1b
++
++ pax_force_retaddr
++ ret
++ CFI_ENDPROC
++ENDPROC(atomic64_set_unchecked_cx8)
++
+ ENTRY(atomic64_xchg_cx8)
+ CFI_STARTPROC
+
+@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
+ cmpxchg8b (%esi)
+ jne 1b
+
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(atomic64_xchg_cx8)
+
+-.macro addsub_return func ins insc
+-ENTRY(atomic64_\func\()_return_cx8)
++.macro addsub_return func ins insc unchecked=""
++ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
+ CFI_STARTPROC
+ SAVE ebp
+ SAVE ebx
+@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
+ movl %edx, %ecx
+ \ins\()l %esi, %ebx
+ \insc\()l %edi, %ecx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++2:
++ _ASM_EXTABLE(2b, 3f)
++#endif
++.endif
++
+ LOCK_PREFIX
+ cmpxchg8b (%ebp)
+ jne 1b
+-
+-10:
+ movl %ebx, %eax
+ movl %ecx, %edx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++3:
++#endif
++.endif
++
+ RESTORE edi
+ RESTORE esi
+ RESTORE ebx
+ RESTORE ebp
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-ENDPROC(atomic64_\func\()_return_cx8)
++ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
+ .endm
+
+ addsub_return add add adc
+ addsub_return sub sub sbb
++addsub_return add add adc _unchecked
++addsub_return sub sub sbb _unchecked
+
+-.macro incdec_return func ins insc
+-ENTRY(atomic64_\func\()_return_cx8)
++.macro incdec_return func ins insc unchecked=""
++ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
+ CFI_STARTPROC
+ SAVE ebx
+
+@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
+ movl %edx, %ecx
+ \ins\()l $1, %ebx
+ \insc\()l $0, %ecx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++2:
++ _ASM_EXTABLE(2b, 3f)
++#endif
++.endif
++
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+
+-10:
+ movl %ebx, %eax
+ movl %ecx, %edx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++3:
++#endif
++.endif
++
+ RESTORE ebx
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-ENDPROC(atomic64_\func\()_return_cx8)
++ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
+ .endm
+
+ incdec_return inc add adc
+ incdec_return dec sub sbb
++incdec_return inc add adc _unchecked
++incdec_return dec sub sbb _unchecked
+
+ ENTRY(atomic64_dec_if_positive_cx8)
+ CFI_STARTPROC
+@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
+ movl %edx, %ecx
+ subl $1, %ebx
+ sbb $0, %ecx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
+ js 2f
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
+ movl %ebx, %eax
+ movl %ecx, %edx
+ RESTORE ebx
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(atomic64_dec_if_positive_cx8)
+@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
+ movl %edx, %ecx
+ addl %ebp, %ebx
+ adcl %edi, %ecx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 3f)
++#endif
++
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
+ CFI_ADJUST_CFA_OFFSET -8
+ RESTORE ebx
+ RESTORE ebp
++ pax_force_retaddr
+ ret
+ 4:
+ cmpl %edx, 4(%esp)
+@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
+ xorl %ecx, %ecx
+ addl $1, %ebx
+ adcl %edx, %ecx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 3f)
++#endif
++
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
+ movl $1, %eax
+ 3:
+ RESTORE ebx
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(atomic64_inc_not_zero_cx8)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/checksum_32.S linux-3.4-pax/arch/x86/lib/checksum_32.S
+--- linux-3.4/arch/x86/lib/checksum_32.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/lib/checksum_32.S 2012-05-21 12:10:09.700048898 +0200
+@@ -28,7 +28,8 @@
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
+ #include <asm/errno.h>
+-
++#include <asm/segment.h>
++
+ /*
+ * computes a partial checksum, e.g. for TCP/UDP fragments
+ */
+@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
+
+ #define ARGBASE 16
+ #define FP 12
+-
+-ENTRY(csum_partial_copy_generic)
++
++ENTRY(csum_partial_copy_generic_to_user)
+ CFI_STARTPROC
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %gs
++ popl_cfi %es
++ jmp csum_partial_copy_generic
++#endif
++
++ENTRY(csum_partial_copy_generic_from_user)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %gs
++ popl_cfi %ds
++#endif
++
++ENTRY(csum_partial_copy_generic)
+ subl $4,%esp
+ CFI_ADJUST_CFA_OFFSET 4
+ pushl_cfi %edi
+@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
+ jmp 4f
+ SRC(1: movw (%esi), %bx )
+ addl $2, %esi
+-DST( movw %bx, (%edi) )
++DST( movw %bx, %es:(%edi) )
+ addl $2, %edi
+ addw %bx, %ax
+ adcl $0, %eax
+@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
+ SRC(1: movl (%esi), %ebx )
+ SRC( movl 4(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, (%edi) )
++DST( movl %ebx, %es:(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 4(%edi) )
++DST( movl %edx, %es:4(%edi) )
+
+ SRC( movl 8(%esi), %ebx )
+ SRC( movl 12(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, 8(%edi) )
++DST( movl %ebx, %es:8(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 12(%edi) )
++DST( movl %edx, %es:12(%edi) )
+
+ SRC( movl 16(%esi), %ebx )
+ SRC( movl 20(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, 16(%edi) )
++DST( movl %ebx, %es:16(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 20(%edi) )
++DST( movl %edx, %es:20(%edi) )
+
+ SRC( movl 24(%esi), %ebx )
+ SRC( movl 28(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, 24(%edi) )
++DST( movl %ebx, %es:24(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 28(%edi) )
++DST( movl %edx, %es:28(%edi) )
+
+ lea 32(%esi), %esi
+ lea 32(%edi), %edi
+@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
+ shrl $2, %edx # This clears CF
+ SRC(3: movl (%esi), %ebx )
+ adcl %ebx, %eax
+-DST( movl %ebx, (%edi) )
++DST( movl %ebx, %es:(%edi) )
+ lea 4(%esi), %esi
+ lea 4(%edi), %edi
+ dec %edx
+@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
+ jb 5f
+ SRC( movw (%esi), %cx )
+ leal 2(%esi), %esi
+-DST( movw %cx, (%edi) )
++DST( movw %cx, %es:(%edi) )
+ leal 2(%edi), %edi
+ je 6f
+ shll $16,%ecx
+ SRC(5: movb (%esi), %cl )
+-DST( movb %cl, (%edi) )
++DST( movb %cl, %es:(%edi) )
+ 6: addl %ecx, %eax
+ adcl $0, %eax
+ 7:
+@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
+
+ 6001:
+ movl ARGBASE+20(%esp), %ebx # src_err_ptr
+- movl $-EFAULT, (%ebx)
++ movl $-EFAULT, %ss:(%ebx)
+
+ # zero the complete destination - computing the rest
+ # is too much work
+@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
+
+ 6002:
+ movl ARGBASE+24(%esp), %ebx # dst_err_ptr
+- movl $-EFAULT,(%ebx)
++ movl $-EFAULT,%ss:(%ebx)
+ jmp 5000b
+
+ .previous
+
++ pushl_cfi %ss
++ popl_cfi %ds
++ pushl_cfi %ss
++ popl_cfi %es
+ popl_cfi %ebx
+ CFI_RESTORE ebx
+ popl_cfi %esi
+@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
+ popl_cfi %ecx # equivalent to addl $4,%esp
+ ret
+ CFI_ENDPROC
+-ENDPROC(csum_partial_copy_generic)
++ENDPROC(csum_partial_copy_generic_to_user)
+
+ #else
+
+ /* Version for PentiumII/PPro */
+
+ #define ROUND1(x) \
++ nop; nop; nop; \
+ SRC(movl x(%esi), %ebx ) ; \
+ addl %ebx, %eax ; \
+- DST(movl %ebx, x(%edi) ) ;
++ DST(movl %ebx, %es:x(%edi)) ;
+
+ #define ROUND(x) \
++ nop; nop; nop; \
+ SRC(movl x(%esi), %ebx ) ; \
+ adcl %ebx, %eax ; \
+- DST(movl %ebx, x(%edi) ) ;
++ DST(movl %ebx, %es:x(%edi)) ;
+
+ #define ARGBASE 12
+-
+-ENTRY(csum_partial_copy_generic)
++
++ENTRY(csum_partial_copy_generic_to_user)
+ CFI_STARTPROC
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %gs
++ popl_cfi %es
++ jmp csum_partial_copy_generic
++#endif
++
++ENTRY(csum_partial_copy_generic_from_user)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %gs
++ popl_cfi %ds
++#endif
++
++ENTRY(csum_partial_copy_generic)
+ pushl_cfi %ebx
+ CFI_REL_OFFSET ebx, 0
+ pushl_cfi %edi
+@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
+ subl %ebx, %edi
+ lea -1(%esi),%edx
+ andl $-32,%edx
+- lea 3f(%ebx,%ebx), %ebx
++ lea 3f(%ebx,%ebx,2), %ebx
+ testl %esi, %esi
+ jmp *%ebx
+ 1: addl $64,%esi
+@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
+ jb 5f
+ SRC( movw (%esi), %dx )
+ leal 2(%esi), %esi
+-DST( movw %dx, (%edi) )
++DST( movw %dx, %es:(%edi) )
+ leal 2(%edi), %edi
+ je 6f
+ shll $16,%edx
+ 5:
+ SRC( movb (%esi), %dl )
+-DST( movb %dl, (%edi) )
++DST( movb %dl, %es:(%edi) )
+ 6: addl %edx, %eax
+ adcl $0, %eax
+ 7:
+ .section .fixup, "ax"
+ 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
+- movl $-EFAULT, (%ebx)
++ movl $-EFAULT, %ss:(%ebx)
+ # zero the complete destination (computing the rest is too much work)
+ movl ARGBASE+8(%esp),%edi # dst
+ movl ARGBASE+12(%esp),%ecx # len
+@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
+ rep; stosb
+ jmp 7b
+ 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
+- movl $-EFAULT, (%ebx)
++ movl $-EFAULT, %ss:(%ebx)
+ jmp 7b
+ .previous
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %ss
++ popl_cfi %ds
++ pushl_cfi %ss
++ popl_cfi %es
++#endif
++
+ popl_cfi %esi
+ CFI_RESTORE esi
+ popl_cfi %edi
+@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
+ CFI_RESTORE ebx
+ ret
+ CFI_ENDPROC
+-ENDPROC(csum_partial_copy_generic)
++ENDPROC(csum_partial_copy_generic_to_user)
+
+ #undef ROUND
+ #undef ROUND1
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/clear_page_64.S linux-3.4-pax/arch/x86/lib/clear_page_64.S
+--- linux-3.4/arch/x86/lib/clear_page_64.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/lib/clear_page_64.S 2012-05-21 12:10:09.700048898 +0200
+@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
+ movl $4096/8,%ecx
+ xorl %eax,%eax
+ rep stosq
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(clear_page_c)
+@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
+ movl $4096,%ecx
+ xorl %eax,%eax
+ rep stosb
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(clear_page_c_e)
+@@ -43,6 +45,7 @@ ENTRY(clear_page)
+ leaq 64(%rdi),%rdi
+ jnz .Lloop
+ nop
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ .Lclear_page_end:
+@@ -58,7 +61,7 @@ ENDPROC(clear_page)
+
+ #include <asm/cpufeature.h>
+
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 1: .byte 0xeb /* jmp <disp8> */
+ .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
+ 2: .byte 0xeb /* jmp <disp8> */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/cmpxchg16b_emu.S linux-3.4-pax/arch/x86/lib/cmpxchg16b_emu.S
+--- linux-3.4/arch/x86/lib/cmpxchg16b_emu.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/lib/cmpxchg16b_emu.S 2012-05-21 12:10:09.704048898 +0200
+@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
+
+ popf
+ mov $1, %al
++ pax_force_retaddr
+ ret
+
+ not_same:
+ popf
+ xor %al,%al
++ pax_force_retaddr
+ ret
+
+ CFI_ENDPROC
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/copy_page_64.S linux-3.4-pax/arch/x86/lib/copy_page_64.S
+--- linux-3.4/arch/x86/lib/copy_page_64.S 2012-05-21 11:32:57.807927678 +0200
++++ linux-3.4-pax/arch/x86/lib/copy_page_64.S 2012-05-21 12:10:09.704048898 +0200
+@@ -9,6 +9,7 @@ copy_page_c:
+ CFI_STARTPROC
+ movl $4096/8,%ecx
+ rep movsq
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(copy_page_c)
+@@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
+
+ ENTRY(copy_page)
+ CFI_STARTPROC
+- subq $2*8,%rsp
+- CFI_ADJUST_CFA_OFFSET 2*8
++ subq $3*8,%rsp
++ CFI_ADJUST_CFA_OFFSET 3*8
+ movq %rbx,(%rsp)
+ CFI_REL_OFFSET rbx, 0
+ movq %r12,1*8(%rsp)
+ CFI_REL_OFFSET r12, 1*8
++ movq %r13,2*8(%rsp)
++ CFI_REL_OFFSET r13, 2*8
+
+ movl $(4096/64)-5,%ecx
+ .p2align 4
+@@ -37,7 +40,7 @@ ENTRY(copy_page)
+ movq 16 (%rsi), %rdx
+ movq 24 (%rsi), %r8
+ movq 32 (%rsi), %r9
+- movq 40 (%rsi), %r10
++ movq 40 (%rsi), %r13
+ movq 48 (%rsi), %r11
+ movq 56 (%rsi), %r12
+
+@@ -48,7 +51,7 @@ ENTRY(copy_page)
+ movq %rdx, 16 (%rdi)
+ movq %r8, 24 (%rdi)
+ movq %r9, 32 (%rdi)
+- movq %r10, 40 (%rdi)
++ movq %r13, 40 (%rdi)
+ movq %r11, 48 (%rdi)
+ movq %r12, 56 (%rdi)
+
+@@ -67,7 +70,7 @@ ENTRY(copy_page)
+ movq 16 (%rsi), %rdx
+ movq 24 (%rsi), %r8
+ movq 32 (%rsi), %r9
+- movq 40 (%rsi), %r10
++ movq 40 (%rsi), %r13
+ movq 48 (%rsi), %r11
+ movq 56 (%rsi), %r12
+
+@@ -76,7 +79,7 @@ ENTRY(copy_page)
+ movq %rdx, 16 (%rdi)
+ movq %r8, 24 (%rdi)
+ movq %r9, 32 (%rdi)
+- movq %r10, 40 (%rdi)
++ movq %r13, 40 (%rdi)
+ movq %r11, 48 (%rdi)
+ movq %r12, 56 (%rdi)
+
+@@ -89,8 +92,11 @@ ENTRY(copy_page)
+ CFI_RESTORE rbx
+ movq 1*8(%rsp),%r12
+ CFI_RESTORE r12
+- addq $2*8,%rsp
+- CFI_ADJUST_CFA_OFFSET -2*8
++ movq 2*8(%rsp),%r13
++ CFI_RESTORE r13
++ addq $3*8,%rsp
++ CFI_ADJUST_CFA_OFFSET -3*8
++ pax_force_retaddr
+ ret
+ .Lcopy_page_end:
+ CFI_ENDPROC
+@@ -101,7 +107,7 @@ ENDPROC(copy_page)
+
+ #include <asm/cpufeature.h>
+
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 1: .byte 0xeb /* jmp <disp8> */
+ .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
+ 2:
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/copy_user_64.S linux-3.4-pax/arch/x86/lib/copy_user_64.S
+--- linux-3.4/arch/x86/lib/copy_user_64.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/lib/copy_user_64.S 2012-05-21 12:10:09.704048898 +0200
+@@ -16,6 +16,7 @@
+ #include <asm/thread_info.h>
+ #include <asm/cpufeature.h>
+ #include <asm/alternative-asm.h>
++#include <asm/pgtable.h>
+
+ /*
+ * By placing feature2 after feature1 in altinstructions section, we logically
+@@ -29,7 +30,7 @@
+ .byte 0xe9 /* 32bit jump */
+ .long \orig-1f /* by default jump to orig */
+ 1:
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 2: .byte 0xe9 /* near jump with 32bit immediate */
+ .long \alt1-1b /* offset */ /* or alternatively to alt1 */
+ 3: .byte 0xe9 /* near jump with 32bit immediate */
+@@ -71,47 +72,20 @@
+ #endif
+ .endm
+
+-/* Standard copy_to_user with segment limit checking */
+-ENTRY(_copy_to_user)
+- CFI_STARTPROC
+- GET_THREAD_INFO(%rax)
+- movq %rdi,%rcx
+- addq %rdx,%rcx
+- jc bad_to_user
+- cmpq TI_addr_limit(%rax),%rcx
+- ja bad_to_user
+- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
+- copy_user_generic_unrolled,copy_user_generic_string, \
+- copy_user_enhanced_fast_string
+- CFI_ENDPROC
+-ENDPROC(_copy_to_user)
+-
+-/* Standard copy_from_user with segment limit checking */
+-ENTRY(_copy_from_user)
+- CFI_STARTPROC
+- GET_THREAD_INFO(%rax)
+- movq %rsi,%rcx
+- addq %rdx,%rcx
+- jc bad_from_user
+- cmpq TI_addr_limit(%rax),%rcx
+- ja bad_from_user
+- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
+- copy_user_generic_unrolled,copy_user_generic_string, \
+- copy_user_enhanced_fast_string
+- CFI_ENDPROC
+-ENDPROC(_copy_from_user)
+-
+ .section .fixup,"ax"
+ /* must zero dest */
+ ENTRY(bad_from_user)
+ bad_from_user:
+ CFI_STARTPROC
++ testl %edx,%edx
++ js bad_to_user
+ movl %edx,%ecx
+ xorl %eax,%eax
+ rep
+ stosb
+ bad_to_user:
+ movl %edx,%eax
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(bad_from_user)
+@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
+ jz 17f
+ 1: movq (%rsi),%r8
+ 2: movq 1*8(%rsi),%r9
+-3: movq 2*8(%rsi),%r10
++3: movq 2*8(%rsi),%rax
+ 4: movq 3*8(%rsi),%r11
+ 5: movq %r8,(%rdi)
+ 6: movq %r9,1*8(%rdi)
+-7: movq %r10,2*8(%rdi)
++7: movq %rax,2*8(%rdi)
+ 8: movq %r11,3*8(%rdi)
+ 9: movq 4*8(%rsi),%r8
+ 10: movq 5*8(%rsi),%r9
+-11: movq 6*8(%rsi),%r10
++11: movq 6*8(%rsi),%rax
+ 12: movq 7*8(%rsi),%r11
+ 13: movq %r8,4*8(%rdi)
+ 14: movq %r9,5*8(%rdi)
+-15: movq %r10,6*8(%rdi)
++15: movq %rax,6*8(%rdi)
+ 16: movq %r11,7*8(%rdi)
+ leaq 64(%rsi),%rsi
+ leaq 64(%rdi),%rdi
+@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
+ decl %ecx
+ jnz 21b
+ 23: xor %eax,%eax
++ pax_force_retaddr
+ ret
+
+ .section .fixup,"ax"
+@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
+ 3: rep
+ movsb
+ 4: xorl %eax,%eax
++ pax_force_retaddr
+ ret
+
+ .section .fixup,"ax"
+@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
+ 1: rep
+ movsb
+ 2: xorl %eax,%eax
++ pax_force_retaddr
+ ret
+
+ .section .fixup,"ax"
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/copy_user_nocache_64.S linux-3.4-pax/arch/x86/lib/copy_user_nocache_64.S
+--- linux-3.4/arch/x86/lib/copy_user_nocache_64.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/lib/copy_user_nocache_64.S 2012-05-21 12:10:09.704048898 +0200
+@@ -8,12 +8,14 @@
+
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
++#include <asm/alternative-asm.h>
+
+ #define FIX_ALIGNMENT 1
+
+ #include <asm/current.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/thread_info.h>
++#include <asm/pgtable.h>
+
+ .macro ALIGN_DESTINATION
+ #ifdef FIX_ALIGNMENT
+@@ -50,6 +52,15 @@
+ */
+ ENTRY(__copy_user_nocache)
+ CFI_STARTPROC
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov $PAX_USER_SHADOW_BASE,%rcx
++ cmp %rcx,%rsi
++ jae 1f
++ add %rcx,%rsi
++1:
++#endif
++
+ cmpl $8,%edx
+ jb 20f /* less then 8 bytes, go to byte copy loop */
+ ALIGN_DESTINATION
+@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
+ jz 17f
+ 1: movq (%rsi),%r8
+ 2: movq 1*8(%rsi),%r9
+-3: movq 2*8(%rsi),%r10
++3: movq 2*8(%rsi),%rax
+ 4: movq 3*8(%rsi),%r11
+ 5: movnti %r8,(%rdi)
+ 6: movnti %r9,1*8(%rdi)
+-7: movnti %r10,2*8(%rdi)
++7: movnti %rax,2*8(%rdi)
+ 8: movnti %r11,3*8(%rdi)
+ 9: movq 4*8(%rsi),%r8
+ 10: movq 5*8(%rsi),%r9
+-11: movq 6*8(%rsi),%r10
++11: movq 6*8(%rsi),%rax
+ 12: movq 7*8(%rsi),%r11
+ 13: movnti %r8,4*8(%rdi)
+ 14: movnti %r9,5*8(%rdi)
+-15: movnti %r10,6*8(%rdi)
++15: movnti %rax,6*8(%rdi)
+ 16: movnti %r11,7*8(%rdi)
+ leaq 64(%rsi),%rsi
+ leaq 64(%rdi),%rdi
+@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
+ jnz 21b
+ 23: xorl %eax,%eax
+ sfence
++ pax_force_retaddr
+ ret
+
+ .section .fixup,"ax"
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/csum-copy_64.S linux-3.4-pax/arch/x86/lib/csum-copy_64.S
+--- linux-3.4/arch/x86/lib/csum-copy_64.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/lib/csum-copy_64.S 2012-05-21 12:10:09.708048898 +0200
+@@ -8,6 +8,7 @@
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
+ #include <asm/errno.h>
++#include <asm/alternative-asm.h>
+
+ /*
+ * Checksum copy with exception handling.
+@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
+ CFI_RESTORE rbp
+ addq $7*8, %rsp
+ CFI_ADJUST_CFA_OFFSET -7*8
++ pax_force_retaddr 0, 1
+ ret
+ CFI_RESTORE_STATE
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/csum-wrappers_64.c linux-3.4-pax/arch/x86/lib/csum-wrappers_64.c
+--- linux-3.4/arch/x86/lib/csum-wrappers_64.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/lib/csum-wrappers_64.c 2012-05-21 12:10:09.708048898 +0200
+@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void _
+ len -= 2;
+ }
+ }
+- isum = csum_partial_copy_generic((__force const void *)src,
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++#endif
++
++ isum = csum_partial_copy_generic((const void __force_kernel *)src,
+ dst, len, isum, errp, NULL);
+ if (unlikely(*errp))
+ goto out_err;
+@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *sr
+ }
+
+ *errp = 0;
+- return csum_partial_copy_generic(src, (void __force *)dst,
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
++#endif
++
++ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
+ len, isum, NULL, errp);
+ }
+ EXPORT_SYMBOL(csum_partial_copy_to_user);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/getuser.S linux-3.4-pax/arch/x86/lib/getuser.S
+--- linux-3.4/arch/x86/lib/getuser.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/lib/getuser.S 2012-05-21 12:10:09.708048898 +0200
+@@ -33,15 +33,38 @@
+ #include <asm/asm-offsets.h>
+ #include <asm/thread_info.h>
+ #include <asm/asm.h>
++#include <asm/segment.h>
++#include <asm/pgtable.h>
++#include <asm/alternative-asm.h>
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg gs;
++#else
++#define __copyuser_seg
++#endif
+
+ .text
+ ENTRY(__get_user_1)
+ CFI_STARTPROC
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
+-1: movzb (%_ASM_AX),%edx
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
++ cmp %_ASM_DX,%_ASM_AX
++ jae 1234f
++ add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
++#endif
++
++1: __copyuser_seg movzb (%_ASM_AX),%edx
+ xor %eax,%eax
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(__get_user_1)
+@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
+ ENTRY(__get_user_2)
+ CFI_STARTPROC
+ add $1,%_ASM_AX
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+ jc bad_get_user
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
+-2: movzwl -1(%_ASM_AX),%edx
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
++ cmp %_ASM_DX,%_ASM_AX
++ jae 1234f
++ add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
++#endif
++
++2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
+ xor %eax,%eax
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(__get_user_2)
+@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
+ ENTRY(__get_user_4)
+ CFI_STARTPROC
+ add $3,%_ASM_AX
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+ jc bad_get_user
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
+-3: mov -3(%_ASM_AX),%edx
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
++ cmp %_ASM_DX,%_ASM_AX
++ jae 1234f
++ add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
++#endif
++
++3: __copyuser_seg mov -3(%_ASM_AX),%edx
+ xor %eax,%eax
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(__get_user_4)
+@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
++ cmp %_ASM_DX,%_ASM_AX
++ jae 1234f
++ add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
+ 4: movq -7(%_ASM_AX),%_ASM_DX
+ xor %eax,%eax
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(__get_user_8)
+@@ -91,6 +152,7 @@ bad_get_user:
+ CFI_STARTPROC
+ xor %edx,%edx
+ mov $(-EFAULT),%_ASM_AX
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ END(bad_get_user)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/insn.c linux-3.4-pax/arch/x86/lib/insn.c
+--- linux-3.4/arch/x86/lib/insn.c 2012-05-21 11:32:57.811927678 +0200
++++ linux-3.4-pax/arch/x86/lib/insn.c 2012-05-21 12:10:09.712048899 +0200
+@@ -21,6 +21,11 @@
+ #include <linux/string.h>
+ #include <asm/inat.h>
+ #include <asm/insn.h>
++#ifdef __KERNEL__
++#include <asm/pgtable_types.h>
++#else
++#define ktla_ktva(addr) addr
++#endif
+
+ /* Verify next sizeof(t) bytes can be on the same instruction */
+ #define validate_next(t, insn, n) \
+@@ -49,8 +54,8 @@
+ void insn_init(struct insn *insn, const void *kaddr, int x86_64)
+ {
+ memset(insn, 0, sizeof(*insn));
+- insn->kaddr = kaddr;
+- insn->next_byte = kaddr;
++ insn->kaddr = ktla_ktva(kaddr);
++ insn->next_byte = ktla_ktva(kaddr);
+ insn->x86_64 = x86_64 ? 1 : 0;
+ insn->opnd_bytes = 4;
+ if (x86_64)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/iomap_copy_64.S linux-3.4-pax/arch/x86/lib/iomap_copy_64.S
+--- linux-3.4/arch/x86/lib/iomap_copy_64.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/lib/iomap_copy_64.S 2012-05-21 12:10:09.712048899 +0200
+@@ -17,6 +17,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
++#include <asm/alternative-asm.h>
+
+ /*
+ * override generic version in lib/iomap_copy.c
+@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
+ CFI_STARTPROC
+ movl %edx,%ecx
+ rep movsd
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(__iowrite32_copy)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/memcpy_64.S linux-3.4-pax/arch/x86/lib/memcpy_64.S
+--- linux-3.4/arch/x86/lib/memcpy_64.S 2012-05-21 11:32:57.815927678 +0200
++++ linux-3.4-pax/arch/x86/lib/memcpy_64.S 2012-05-21 12:10:09.716048899 +0200
+@@ -33,6 +33,7 @@
+ rep movsq
+ movl %edx, %ecx
+ rep movsb
++ pax_force_retaddr
+ ret
+ .Lmemcpy_e:
+ .previous
+@@ -49,6 +50,7 @@
+ movq %rdi, %rax
+ movq %rdx, %rcx
+ rep movsb
++ pax_force_retaddr
+ ret
+ .Lmemcpy_e_e:
+ .previous
+@@ -76,13 +78,13 @@ ENTRY(memcpy)
+ */
+ movq 0*8(%rsi), %r8
+ movq 1*8(%rsi), %r9
+- movq 2*8(%rsi), %r10
++ movq 2*8(%rsi), %rcx
+ movq 3*8(%rsi), %r11
+ leaq 4*8(%rsi), %rsi
+
+ movq %r8, 0*8(%rdi)
+ movq %r9, 1*8(%rdi)
+- movq %r10, 2*8(%rdi)
++ movq %rcx, 2*8(%rdi)
+ movq %r11, 3*8(%rdi)
+ leaq 4*8(%rdi), %rdi
+ jae .Lcopy_forward_loop
+@@ -105,12 +107,12 @@ ENTRY(memcpy)
+ subq $0x20, %rdx
+ movq -1*8(%rsi), %r8
+ movq -2*8(%rsi), %r9
+- movq -3*8(%rsi), %r10
++ movq -3*8(%rsi), %rcx
+ movq -4*8(%rsi), %r11
+ leaq -4*8(%rsi), %rsi
+ movq %r8, -1*8(%rdi)
+ movq %r9, -2*8(%rdi)
+- movq %r10, -3*8(%rdi)
++ movq %rcx, -3*8(%rdi)
+ movq %r11, -4*8(%rdi)
+ leaq -4*8(%rdi), %rdi
+ jae .Lcopy_backward_loop
+@@ -130,12 +132,13 @@ ENTRY(memcpy)
+ */
+ movq 0*8(%rsi), %r8
+ movq 1*8(%rsi), %r9
+- movq -2*8(%rsi, %rdx), %r10
++ movq -2*8(%rsi, %rdx), %rcx
+ movq -1*8(%rsi, %rdx), %r11
+ movq %r8, 0*8(%rdi)
+ movq %r9, 1*8(%rdi)
+- movq %r10, -2*8(%rdi, %rdx)
++ movq %rcx, -2*8(%rdi, %rdx)
+ movq %r11, -1*8(%rdi, %rdx)
++ pax_force_retaddr
+ retq
+ .p2align 4
+ .Lless_16bytes:
+@@ -148,6 +151,7 @@ ENTRY(memcpy)
+ movq -1*8(%rsi, %rdx), %r9
+ movq %r8, 0*8(%rdi)
+ movq %r9, -1*8(%rdi, %rdx)
++ pax_force_retaddr
+ retq
+ .p2align 4
+ .Lless_8bytes:
+@@ -161,6 +165,7 @@ ENTRY(memcpy)
+ movl -4(%rsi, %rdx), %r8d
+ movl %ecx, (%rdi)
+ movl %r8d, -4(%rdi, %rdx)
++ pax_force_retaddr
+ retq
+ .p2align 4
+ .Lless_3bytes:
+@@ -179,6 +184,7 @@ ENTRY(memcpy)
+ movb %cl, (%rdi)
+
+ .Lend:
++ pax_force_retaddr
+ retq
+ CFI_ENDPROC
+ ENDPROC(memcpy)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/memmove_64.S linux-3.4-pax/arch/x86/lib/memmove_64.S
+--- linux-3.4/arch/x86/lib/memmove_64.S 2011-10-24 12:48:26.351091768 +0200
++++ linux-3.4-pax/arch/x86/lib/memmove_64.S 2012-05-21 12:10:09.716048899 +0200
+@@ -61,13 +61,13 @@ ENTRY(memmove)
+ 5:
+ sub $0x20, %rdx
+ movq 0*8(%rsi), %r11
+- movq 1*8(%rsi), %r10
++ movq 1*8(%rsi), %rcx
+ movq 2*8(%rsi), %r9
+ movq 3*8(%rsi), %r8
+ leaq 4*8(%rsi), %rsi
+
+ movq %r11, 0*8(%rdi)
+- movq %r10, 1*8(%rdi)
++ movq %rcx, 1*8(%rdi)
+ movq %r9, 2*8(%rdi)
+ movq %r8, 3*8(%rdi)
+ leaq 4*8(%rdi), %rdi
+@@ -81,10 +81,10 @@ ENTRY(memmove)
+ 4:
+ movq %rdx, %rcx
+ movq -8(%rsi, %rdx), %r11
+- lea -8(%rdi, %rdx), %r10
++ lea -8(%rdi, %rdx), %r9
+ shrq $3, %rcx
+ rep movsq
+- movq %r11, (%r10)
++ movq %r11, (%r9)
+ jmp 13f
+ .Lmemmove_end_forward:
+
+@@ -95,14 +95,14 @@ ENTRY(memmove)
+ 7:
+ movq %rdx, %rcx
+ movq (%rsi), %r11
+- movq %rdi, %r10
++ movq %rdi, %r9
+ leaq -8(%rsi, %rdx), %rsi
+ leaq -8(%rdi, %rdx), %rdi
+ shrq $3, %rcx
+ std
+ rep movsq
+ cld
+- movq %r11, (%r10)
++ movq %r11, (%r9)
+ jmp 13f
+
+ /*
+@@ -127,13 +127,13 @@ ENTRY(memmove)
+ 8:
+ subq $0x20, %rdx
+ movq -1*8(%rsi), %r11
+- movq -2*8(%rsi), %r10
++ movq -2*8(%rsi), %rcx
+ movq -3*8(%rsi), %r9
+ movq -4*8(%rsi), %r8
+ leaq -4*8(%rsi), %rsi
+
+ movq %r11, -1*8(%rdi)
+- movq %r10, -2*8(%rdi)
++ movq %rcx, -2*8(%rdi)
+ movq %r9, -3*8(%rdi)
+ movq %r8, -4*8(%rdi)
+ leaq -4*8(%rdi), %rdi
+@@ -151,11 +151,11 @@ ENTRY(memmove)
+ * Move data from 16 bytes to 31 bytes.
+ */
+ movq 0*8(%rsi), %r11
+- movq 1*8(%rsi), %r10
++ movq 1*8(%rsi), %rcx
+ movq -2*8(%rsi, %rdx), %r9
+ movq -1*8(%rsi, %rdx), %r8
+ movq %r11, 0*8(%rdi)
+- movq %r10, 1*8(%rdi)
++ movq %rcx, 1*8(%rdi)
+ movq %r9, -2*8(%rdi, %rdx)
+ movq %r8, -1*8(%rdi, %rdx)
+ jmp 13f
+@@ -167,9 +167,9 @@ ENTRY(memmove)
+ * Move data from 8 bytes to 15 bytes.
+ */
+ movq 0*8(%rsi), %r11
+- movq -1*8(%rsi, %rdx), %r10
++ movq -1*8(%rsi, %rdx), %r9
+ movq %r11, 0*8(%rdi)
+- movq %r10, -1*8(%rdi, %rdx)
++ movq %r9, -1*8(%rdi, %rdx)
+ jmp 13f
+ 10:
+ cmpq $4, %rdx
+@@ -178,9 +178,9 @@ ENTRY(memmove)
+ * Move data from 4 bytes to 7 bytes.
+ */
+ movl (%rsi), %r11d
+- movl -4(%rsi, %rdx), %r10d
++ movl -4(%rsi, %rdx), %r9d
+ movl %r11d, (%rdi)
+- movl %r10d, -4(%rdi, %rdx)
++ movl %r9d, -4(%rdi, %rdx)
+ jmp 13f
+ 11:
+ cmp $2, %rdx
+@@ -189,9 +189,9 @@ ENTRY(memmove)
+ * Move data from 2 bytes to 3 bytes.
+ */
+ movw (%rsi), %r11w
+- movw -2(%rsi, %rdx), %r10w
++ movw -2(%rsi, %rdx), %r9w
+ movw %r11w, (%rdi)
+- movw %r10w, -2(%rdi, %rdx)
++ movw %r9w, -2(%rdi, %rdx)
+ jmp 13f
+ 12:
+ cmp $1, %rdx
+@@ -202,6 +202,7 @@ ENTRY(memmove)
+ movb (%rsi), %r11b
+ movb %r11b, (%rdi)
+ 13:
++ pax_force_retaddr
+ retq
+ CFI_ENDPROC
+
+@@ -210,6 +211,7 @@ ENTRY(memmove)
+ /* Forward moving data. */
+ movq %rdx, %rcx
+ rep movsb
++ pax_force_retaddr
+ retq
+ .Lmemmove_end_forward_efs:
+ .previous
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/memset_64.S linux-3.4-pax/arch/x86/lib/memset_64.S
+--- linux-3.4/arch/x86/lib/memset_64.S 2012-05-21 11:32:57.819927679 +0200
++++ linux-3.4-pax/arch/x86/lib/memset_64.S 2012-05-21 12:10:09.720048899 +0200
+@@ -30,6 +30,7 @@
+ movl %edx,%ecx
+ rep stosb
+ movq %r9,%rax
++ pax_force_retaddr
+ ret
+ .Lmemset_e:
+ .previous
+@@ -52,6 +53,7 @@
+ movq %rdx,%rcx
+ rep stosb
+ movq %r9,%rax
++ pax_force_retaddr
+ ret
+ .Lmemset_e_e:
+ .previous
+@@ -59,7 +61,7 @@
+ ENTRY(memset)
+ ENTRY(__memset)
+ CFI_STARTPROC
+- movq %rdi,%r10
++ movq %rdi,%r11
+
+ /* expand byte value */
+ movzbl %sil,%ecx
+@@ -117,7 +119,8 @@ ENTRY(__memset)
+ jnz .Lloop_1
+
+ .Lende:
+- movq %r10,%rax
++ movq %r11,%rax
++ pax_force_retaddr
+ ret
+
+ CFI_RESTORE_STATE
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/mmx_32.c linux-3.4-pax/arch/x86/lib/mmx_32.c
+--- linux-3.4/arch/x86/lib/mmx_32.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/lib/mmx_32.c 2012-05-21 12:10:09.720048899 +0200
+@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
+ {
+ void *p;
+ int i;
++ unsigned long cr0;
+
+ if (unlikely(in_interrupt()))
+ return __memcpy(to, from, len);
+@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+- "1: prefetch (%0)\n" /* This set is 28 bytes */
+- " prefetch 64(%0)\n"
+- " prefetch 128(%0)\n"
+- " prefetch 192(%0)\n"
+- " prefetch 256(%0)\n"
++ "1: prefetch (%1)\n" /* This set is 28 bytes */
++ " prefetch 64(%1)\n"
++ " prefetch 128(%1)\n"
++ " prefetch 192(%1)\n"
++ " prefetch 256(%1)\n"
+ "2: \n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++ "3: \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : : "r" (from));
++ : "=&r" (cr0) : "r" (from) : "ax");
+
+ for ( ; i > 5; i--) {
+ __asm__ __volatile__ (
+- "1: prefetch 320(%0)\n"
+- "2: movq (%0), %%mm0\n"
+- " movq 8(%0), %%mm1\n"
+- " movq 16(%0), %%mm2\n"
+- " movq 24(%0), %%mm3\n"
+- " movq %%mm0, (%1)\n"
+- " movq %%mm1, 8(%1)\n"
+- " movq %%mm2, 16(%1)\n"
+- " movq %%mm3, 24(%1)\n"
+- " movq 32(%0), %%mm0\n"
+- " movq 40(%0), %%mm1\n"
+- " movq 48(%0), %%mm2\n"
+- " movq 56(%0), %%mm3\n"
+- " movq %%mm0, 32(%1)\n"
+- " movq %%mm1, 40(%1)\n"
+- " movq %%mm2, 48(%1)\n"
+- " movq %%mm3, 56(%1)\n"
++ "1: prefetch 320(%1)\n"
++ "2: movq (%1), %%mm0\n"
++ " movq 8(%1), %%mm1\n"
++ " movq 16(%1), %%mm2\n"
++ " movq 24(%1), %%mm3\n"
++ " movq %%mm0, (%2)\n"
++ " movq %%mm1, 8(%2)\n"
++ " movq %%mm2, 16(%2)\n"
++ " movq %%mm3, 24(%2)\n"
++ " movq 32(%1), %%mm0\n"
++ " movq 40(%1), %%mm1\n"
++ " movq 48(%1), %%mm2\n"
++ " movq 56(%1), %%mm3\n"
++ " movq %%mm0, 32(%2)\n"
++ " movq %%mm1, 40(%2)\n"
++ " movq %%mm2, 48(%2)\n"
++ " movq %%mm3, 56(%2)\n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++ "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : : "r" (from), "r" (to) : "memory");
++ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+
+ from += 64;
+ to += 64;
+@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
+ static void fast_copy_page(void *to, void *from)
+ {
+ int i;
++ unsigned long cr0;
+
+ kernel_fpu_begin();
+
+@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
+ * but that is for later. -AV
+ */
+ __asm__ __volatile__(
+- "1: prefetch (%0)\n"
+- " prefetch 64(%0)\n"
+- " prefetch 128(%0)\n"
+- " prefetch 192(%0)\n"
+- " prefetch 256(%0)\n"
++ "1: prefetch (%1)\n"
++ " prefetch 64(%1)\n"
++ " prefetch 128(%1)\n"
++ " prefetch 192(%1)\n"
++ " prefetch 256(%1)\n"
+ "2: \n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++ "3: \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+- _ASM_EXTABLE(1b, 3b) : : "r" (from));
++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
+
+ for (i = 0; i < (4096-320)/64; i++) {
+ __asm__ __volatile__ (
+- "1: prefetch 320(%0)\n"
+- "2: movq (%0), %%mm0\n"
+- " movntq %%mm0, (%1)\n"
+- " movq 8(%0), %%mm1\n"
+- " movntq %%mm1, 8(%1)\n"
+- " movq 16(%0), %%mm2\n"
+- " movntq %%mm2, 16(%1)\n"
+- " movq 24(%0), %%mm3\n"
+- " movntq %%mm3, 24(%1)\n"
+- " movq 32(%0), %%mm4\n"
+- " movntq %%mm4, 32(%1)\n"
+- " movq 40(%0), %%mm5\n"
+- " movntq %%mm5, 40(%1)\n"
+- " movq 48(%0), %%mm6\n"
+- " movntq %%mm6, 48(%1)\n"
+- " movq 56(%0), %%mm7\n"
+- " movntq %%mm7, 56(%1)\n"
++ "1: prefetch 320(%1)\n"
++ "2: movq (%1), %%mm0\n"
++ " movntq %%mm0, (%2)\n"
++ " movq 8(%1), %%mm1\n"
++ " movntq %%mm1, 8(%2)\n"
++ " movq 16(%1), %%mm2\n"
++ " movntq %%mm2, 16(%2)\n"
++ " movq 24(%1), %%mm3\n"
++ " movntq %%mm3, 24(%2)\n"
++ " movq 32(%1), %%mm4\n"
++ " movntq %%mm4, 32(%2)\n"
++ " movq 40(%1), %%mm5\n"
++ " movntq %%mm5, 40(%2)\n"
++ " movq 48(%1), %%mm6\n"
++ " movntq %%mm6, 48(%2)\n"
++ " movq 56(%1), %%mm7\n"
++ " movntq %%mm7, 56(%2)\n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++ "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+
+ from += 64;
+ to += 64;
+@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
+ static void fast_copy_page(void *to, void *from)
+ {
+ int i;
++ unsigned long cr0;
+
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+- "1: prefetch (%0)\n"
+- " prefetch 64(%0)\n"
+- " prefetch 128(%0)\n"
+- " prefetch 192(%0)\n"
+- " prefetch 256(%0)\n"
++ "1: prefetch (%1)\n"
++ " prefetch 64(%1)\n"
++ " prefetch 128(%1)\n"
++ " prefetch 192(%1)\n"
++ " prefetch 256(%1)\n"
+ "2: \n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++ "3: \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+- _ASM_EXTABLE(1b, 3b) : : "r" (from));
++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
+
+ for (i = 0; i < 4096/64; i++) {
+ __asm__ __volatile__ (
+- "1: prefetch 320(%0)\n"
+- "2: movq (%0), %%mm0\n"
+- " movq 8(%0), %%mm1\n"
+- " movq 16(%0), %%mm2\n"
+- " movq 24(%0), %%mm3\n"
+- " movq %%mm0, (%1)\n"
+- " movq %%mm1, 8(%1)\n"
+- " movq %%mm2, 16(%1)\n"
+- " movq %%mm3, 24(%1)\n"
+- " movq 32(%0), %%mm0\n"
+- " movq 40(%0), %%mm1\n"
+- " movq 48(%0), %%mm2\n"
+- " movq 56(%0), %%mm3\n"
+- " movq %%mm0, 32(%1)\n"
+- " movq %%mm1, 40(%1)\n"
+- " movq %%mm2, 48(%1)\n"
+- " movq %%mm3, 56(%1)\n"
++ "1: prefetch 320(%1)\n"
++ "2: movq (%1), %%mm0\n"
++ " movq 8(%1), %%mm1\n"
++ " movq 16(%1), %%mm2\n"
++ " movq 24(%1), %%mm3\n"
++ " movq %%mm0, (%2)\n"
++ " movq %%mm1, 8(%2)\n"
++ " movq %%mm2, 16(%2)\n"
++ " movq %%mm3, 24(%2)\n"
++ " movq 32(%1), %%mm0\n"
++ " movq 40(%1), %%mm1\n"
++ " movq 48(%1), %%mm2\n"
++ " movq 56(%1), %%mm3\n"
++ " movq %%mm0, 32(%2)\n"
++ " movq %%mm1, 40(%2)\n"
++ " movq %%mm2, 48(%2)\n"
++ " movq %%mm3, 56(%2)\n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++ "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : : "r" (from), "r" (to) : "memory");
++ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+
+ from += 64;
+ to += 64;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/msr-reg.S linux-3.4-pax/arch/x86/lib/msr-reg.S
+--- linux-3.4/arch/x86/lib/msr-reg.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/lib/msr-reg.S 2012-05-21 12:10:09.720048899 +0200
+@@ -3,6 +3,7 @@
+ #include <asm/dwarf2.h>
+ #include <asm/asm.h>
+ #include <asm/msr.h>
++#include <asm/alternative-asm.h>
+
+ #ifdef CONFIG_X86_64
+ /*
+@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
+ CFI_STARTPROC
+ pushq_cfi %rbx
+ pushq_cfi %rbp
+- movq %rdi, %r10 /* Save pointer */
++ movq %rdi, %r9 /* Save pointer */
+ xorl %r11d, %r11d /* Return value */
+ movl (%rdi), %eax
+ movl 4(%rdi), %ecx
+@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
+ movl 28(%rdi), %edi
+ CFI_REMEMBER_STATE
+ 1: \op
+-2: movl %eax, (%r10)
++2: movl %eax, (%r9)
+ movl %r11d, %eax /* Return value */
+- movl %ecx, 4(%r10)
+- movl %edx, 8(%r10)
+- movl %ebx, 12(%r10)
+- movl %ebp, 20(%r10)
+- movl %esi, 24(%r10)
+- movl %edi, 28(%r10)
++ movl %ecx, 4(%r9)
++ movl %edx, 8(%r9)
++ movl %ebx, 12(%r9)
++ movl %ebp, 20(%r9)
++ movl %esi, 24(%r9)
++ movl %edi, 28(%r9)
+ popq_cfi %rbp
+ popq_cfi %rbx
++ pax_force_retaddr
+ ret
+ 3:
+ CFI_RESTORE_STATE
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/putuser.S linux-3.4-pax/arch/x86/lib/putuser.S
+--- linux-3.4/arch/x86/lib/putuser.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/lib/putuser.S 2012-05-21 12:10:09.724048899 +0200
+@@ -15,7 +15,9 @@
+ #include <asm/thread_info.h>
+ #include <asm/errno.h>
+ #include <asm/asm.h>
+-
++#include <asm/segment.h>
++#include <asm/pgtable.h>
++#include <asm/alternative-asm.h>
+
+ /*
+ * __put_user_X
+@@ -29,52 +31,119 @@
+ * as they get called from within inline assembly.
+ */
+
+-#define ENTER CFI_STARTPROC ; \
+- GET_THREAD_INFO(%_ASM_BX)
+-#define EXIT ret ; \
++#define ENTER CFI_STARTPROC
++#define EXIT pax_force_retaddr; ret ; \
+ CFI_ENDPROC
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define _DEST %_ASM_CX,%_ASM_BX
++#else
++#define _DEST %_ASM_CX
++#endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg gs;
++#else
++#define __copyuser_seg
++#endif
++
+ .text
+ ENTRY(__put_user_1)
+ ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++ GET_THREAD_INFO(%_ASM_BX)
+ cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
+ jae bad_put_user
+-1: movb %al,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
++ cmp %_ASM_BX,%_ASM_CX
++ jb 1234f
++ xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++1: __copyuser_seg movb %al,(_DEST)
+ xor %eax,%eax
+ EXIT
+ ENDPROC(__put_user_1)
+
+ ENTRY(__put_user_2)
+ ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++ GET_THREAD_INFO(%_ASM_BX)
+ mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+ sub $1,%_ASM_BX
+ cmp %_ASM_BX,%_ASM_CX
+ jae bad_put_user
+-2: movw %ax,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
++ cmp %_ASM_BX,%_ASM_CX
++ jb 1234f
++ xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++2: __copyuser_seg movw %ax,(_DEST)
+ xor %eax,%eax
+ EXIT
+ ENDPROC(__put_user_2)
+
+ ENTRY(__put_user_4)
+ ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++ GET_THREAD_INFO(%_ASM_BX)
+ mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+ sub $3,%_ASM_BX
+ cmp %_ASM_BX,%_ASM_CX
+ jae bad_put_user
+-3: movl %eax,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
++ cmp %_ASM_BX,%_ASM_CX
++ jb 1234f
++ xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++3: __copyuser_seg movl %eax,(_DEST)
+ xor %eax,%eax
+ EXIT
+ ENDPROC(__put_user_4)
+
+ ENTRY(__put_user_8)
+ ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++ GET_THREAD_INFO(%_ASM_BX)
+ mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+ sub $7,%_ASM_BX
+ cmp %_ASM_BX,%_ASM_CX
+ jae bad_put_user
+-4: mov %_ASM_AX,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
++ cmp %_ASM_BX,%_ASM_CX
++ jb 1234f
++ xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++4: __copyuser_seg mov %_ASM_AX,(_DEST)
+ #ifdef CONFIG_X86_32
+-5: movl %edx,4(%_ASM_CX)
++5: __copyuser_seg movl %edx,4(_DEST)
+ #endif
+ xor %eax,%eax
+ EXIT
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/rwlock.S linux-3.4-pax/arch/x86/lib/rwlock.S
+--- linux-3.4/arch/x86/lib/rwlock.S 2011-10-24 12:48:26.351091768 +0200
++++ linux-3.4-pax/arch/x86/lib/rwlock.S 2012-05-21 12:10:09.724048899 +0200
+@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
+ FRAME
+ 0: LOCK_PREFIX
+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 1234f
++ LOCK_PREFIX
++ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
++ int $4
++1234:
++ _ASM_EXTABLE(1234b, 1234b)
++#endif
++
+ 1: rep; nop
+ cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
+ jne 1b
+ LOCK_PREFIX
+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 1234f
++ LOCK_PREFIX
++ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
++ int $4
++1234:
++ _ASM_EXTABLE(1234b, 1234b)
++#endif
++
+ jnz 0b
+ ENDFRAME
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ END(__write_lock_failed)
+@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
+ FRAME
+ 0: LOCK_PREFIX
+ READ_LOCK_SIZE(inc) (%__lock_ptr)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 1234f
++ LOCK_PREFIX
++ READ_LOCK_SIZE(dec) (%__lock_ptr)
++ int $4
++1234:
++ _ASM_EXTABLE(1234b, 1234b)
++#endif
++
+ 1: rep; nop
+ READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
+ js 1b
+ LOCK_PREFIX
+ READ_LOCK_SIZE(dec) (%__lock_ptr)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 1234f
++ LOCK_PREFIX
++ READ_LOCK_SIZE(inc) (%__lock_ptr)
++ int $4
++1234:
++ _ASM_EXTABLE(1234b, 1234b)
++#endif
++
+ js 0b
+ ENDFRAME
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ END(__read_lock_failed)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/rwsem.S linux-3.4-pax/arch/x86/lib/rwsem.S
+--- linux-3.4/arch/x86/lib/rwsem.S 2011-10-24 12:48:26.355091768 +0200
++++ linux-3.4-pax/arch/x86/lib/rwsem.S 2012-05-21 12:10:09.724048899 +0200
+@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
+ __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
+ CFI_RESTORE __ASM_REG(dx)
+ restore_common_regs
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(call_rwsem_down_read_failed)
+@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
+ movq %rax,%rdi
+ call rwsem_down_write_failed
+ restore_common_regs
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(call_rwsem_down_write_failed)
+@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
+ movq %rax,%rdi
+ call rwsem_wake
+ restore_common_regs
+-1: ret
++1: pax_force_retaddr
++ ret
+ CFI_ENDPROC
+ ENDPROC(call_rwsem_wake)
+
+@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
+ __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
+ CFI_RESTORE __ASM_REG(dx)
+ restore_common_regs
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(call_rwsem_downgrade_wake)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/thunk_64.S linux-3.4-pax/arch/x86/lib/thunk_64.S
+--- linux-3.4/arch/x86/lib/thunk_64.S 2011-10-24 12:48:26.359091768 +0200
++++ linux-3.4-pax/arch/x86/lib/thunk_64.S 2012-05-21 12:10:09.728048899 +0200
+@@ -8,6 +8,7 @@
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
+ #include <asm/calling.h>
++#include <asm/alternative-asm.h>
+
+ /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
+ .macro THUNK name, func, put_ret_addr_in_rdi=0
+@@ -41,5 +42,6 @@
+ SAVE_ARGS
+ restore:
+ RESTORE_ARGS
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/usercopy_32.c linux-3.4-pax/arch/x86/lib/usercopy_32.c
+--- linux-3.4/arch/x86/lib/usercopy_32.c 2012-05-21 11:32:57.823927679 +0200
++++ linux-3.4-pax/arch/x86/lib/usercopy_32.c 2012-05-21 12:10:09.732048900 +0200
+@@ -41,10 +41,12 @@ do { \
+ int __d0; \
+ might_fault(); \
+ __asm__ __volatile__( \
++ __COPYUSER_SET_ES \
+ "0: rep; stosl\n" \
+ " movl %2,%0\n" \
+ "1: rep; stosb\n" \
+ "2:\n" \
++ __COPYUSER_RESTORE_ES \
+ ".section .fixup,\"ax\"\n" \
+ "3: lea 0(%2,%0,4),%0\n" \
+ " jmp 2b\n" \
+@@ -113,6 +115,7 @@ long strnlen_user(const char __user *s,
+ might_fault();
+
+ __asm__ __volatile__(
++ __COPYUSER_SET_ES
+ " testl %0, %0\n"
+ " jz 3f\n"
+ " andl %0,%%ecx\n"
+@@ -121,6 +124,7 @@ long strnlen_user(const char __user *s,
+ " subl %%ecx,%0\n"
+ " addl %0,%%eax\n"
+ "1:\n"
++ __COPYUSER_RESTORE_ES
+ ".section .fixup,\"ax\"\n"
+ "2: xorl %%eax,%%eax\n"
+ " jmp 1b\n"
+@@ -140,7 +144,7 @@ EXPORT_SYMBOL(strnlen_user);
+
+ #ifdef CONFIG_X86_INTEL_USERCOPY
+ static unsigned long
+-__copy_user_intel(void __user *to, const void *from, unsigned long size)
++__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
+ {
+ int d0, d1;
+ __asm__ __volatile__(
+@@ -152,36 +156,36 @@ __copy_user_intel(void __user *to, const
+ " .align 2,0x90\n"
+ "3: movl 0(%4), %%eax\n"
+ "4: movl 4(%4), %%edx\n"
+- "5: movl %%eax, 0(%3)\n"
+- "6: movl %%edx, 4(%3)\n"
++ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
++ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
+ "7: movl 8(%4), %%eax\n"
+ "8: movl 12(%4),%%edx\n"
+- "9: movl %%eax, 8(%3)\n"
+- "10: movl %%edx, 12(%3)\n"
++ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
++ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
+ "11: movl 16(%4), %%eax\n"
+ "12: movl 20(%4), %%edx\n"
+- "13: movl %%eax, 16(%3)\n"
+- "14: movl %%edx, 20(%3)\n"
++ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
++ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
+ "15: movl 24(%4), %%eax\n"
+ "16: movl 28(%4), %%edx\n"
+- "17: movl %%eax, 24(%3)\n"
+- "18: movl %%edx, 28(%3)\n"
++ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
++ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
+ "19: movl 32(%4), %%eax\n"
+ "20: movl 36(%4), %%edx\n"
+- "21: movl %%eax, 32(%3)\n"
+- "22: movl %%edx, 36(%3)\n"
++ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
++ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
+ "23: movl 40(%4), %%eax\n"
+ "24: movl 44(%4), %%edx\n"
+- "25: movl %%eax, 40(%3)\n"
+- "26: movl %%edx, 44(%3)\n"
++ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
++ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
+ "27: movl 48(%4), %%eax\n"
+ "28: movl 52(%4), %%edx\n"
+- "29: movl %%eax, 48(%3)\n"
+- "30: movl %%edx, 52(%3)\n"
++ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
++ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
+ "31: movl 56(%4), %%eax\n"
+ "32: movl 60(%4), %%edx\n"
+- "33: movl %%eax, 56(%3)\n"
+- "34: movl %%edx, 60(%3)\n"
++ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
++ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
+ " addl $-64, %0\n"
+ " addl $64, %4\n"
+ " addl $64, %3\n"
+@@ -191,10 +195,119 @@ __copy_user_intel(void __user *to, const
+ " shrl $2, %0\n"
+ " andl $3, %%eax\n"
+ " cld\n"
++ __COPYUSER_SET_ES
+ "99: rep; movsl\n"
+ "36: movl %%eax, %0\n"
+ "37: rep; movsb\n"
+ "100:\n"
++ __COPYUSER_RESTORE_ES
++ ".section .fixup,\"ax\"\n"
++ "101: lea 0(%%eax,%0,4),%0\n"
++ " jmp 100b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 1b,100b\n"
++ " .long 2b,100b\n"
++ " .long 3b,100b\n"
++ " .long 4b,100b\n"
++ " .long 5b,100b\n"
++ " .long 6b,100b\n"
++ " .long 7b,100b\n"
++ " .long 8b,100b\n"
++ " .long 9b,100b\n"
++ " .long 10b,100b\n"
++ " .long 11b,100b\n"
++ " .long 12b,100b\n"
++ " .long 13b,100b\n"
++ " .long 14b,100b\n"
++ " .long 15b,100b\n"
++ " .long 16b,100b\n"
++ " .long 17b,100b\n"
++ " .long 18b,100b\n"
++ " .long 19b,100b\n"
++ " .long 20b,100b\n"
++ " .long 21b,100b\n"
++ " .long 22b,100b\n"
++ " .long 23b,100b\n"
++ " .long 24b,100b\n"
++ " .long 25b,100b\n"
++ " .long 26b,100b\n"
++ " .long 27b,100b\n"
++ " .long 28b,100b\n"
++ " .long 29b,100b\n"
++ " .long 30b,100b\n"
++ " .long 31b,100b\n"
++ " .long 32b,100b\n"
++ " .long 33b,100b\n"
++ " .long 34b,100b\n"
++ " .long 35b,100b\n"
++ " .long 36b,100b\n"
++ " .long 37b,100b\n"
++ " .long 99b,101b\n"
++ ".previous"
++ : "=&c"(size), "=&D" (d0), "=&S" (d1)
++ : "1"(to), "2"(from), "0"(size)
++ : "eax", "edx", "memory");
++ return size;
++}
++
++static unsigned long
++__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
++{
++ int d0, d1;
++ __asm__ __volatile__(
++ " .align 2,0x90\n"
++ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
++ " cmpl $67, %0\n"
++ " jbe 3f\n"
++ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
++ " .align 2,0x90\n"
++ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
++ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
++ "5: movl %%eax, 0(%3)\n"
++ "6: movl %%edx, 4(%3)\n"
++ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
++ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
++ "9: movl %%eax, 8(%3)\n"
++ "10: movl %%edx, 12(%3)\n"
++ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
++ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
++ "13: movl %%eax, 16(%3)\n"
++ "14: movl %%edx, 20(%3)\n"
++ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
++ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
++ "17: movl %%eax, 24(%3)\n"
++ "18: movl %%edx, 28(%3)\n"
++ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
++ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
++ "21: movl %%eax, 32(%3)\n"
++ "22: movl %%edx, 36(%3)\n"
++ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
++ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
++ "25: movl %%eax, 40(%3)\n"
++ "26: movl %%edx, 44(%3)\n"
++ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
++ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
++ "29: movl %%eax, 48(%3)\n"
++ "30: movl %%edx, 52(%3)\n"
++ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
++ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
++ "33: movl %%eax, 56(%3)\n"
++ "34: movl %%edx, 60(%3)\n"
++ " addl $-64, %0\n"
++ " addl $64, %4\n"
++ " addl $64, %3\n"
++ " cmpl $63, %0\n"
++ " ja 1b\n"
++ "35: movl %0, %%eax\n"
++ " shrl $2, %0\n"
++ " andl $3, %%eax\n"
++ " cld\n"
++ "99: rep; "__copyuser_seg" movsl\n"
++ "36: movl %%eax, %0\n"
++ "37: rep; "__copyuser_seg" movsb\n"
++ "100:\n"
+ ".section .fixup,\"ax\"\n"
+ "101: lea 0(%%eax,%0,4),%0\n"
+ " jmp 100b\n"
+@@ -247,46 +360,48 @@ __copy_user_intel(void __user *to, const
+ }
+
+ static unsigned long
++__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
++static unsigned long
+ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+ {
+ int d0, d1;
+ __asm__ __volatile__(
+ " .align 2,0x90\n"
+- "0: movl 32(%4), %%eax\n"
++ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+ " jbe 2f\n"
+- "1: movl 64(%4), %%eax\n"
++ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
+ " .align 2,0x90\n"
+- "2: movl 0(%4), %%eax\n"
+- "21: movl 4(%4), %%edx\n"
++ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
++ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
+ " movl %%eax, 0(%3)\n"
+ " movl %%edx, 4(%3)\n"
+- "3: movl 8(%4), %%eax\n"
+- "31: movl 12(%4),%%edx\n"
++ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
++ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
+ " movl %%eax, 8(%3)\n"
+ " movl %%edx, 12(%3)\n"
+- "4: movl 16(%4), %%eax\n"
+- "41: movl 20(%4), %%edx\n"
++ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
++ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
+ " movl %%eax, 16(%3)\n"
+ " movl %%edx, 20(%3)\n"
+- "10: movl 24(%4), %%eax\n"
+- "51: movl 28(%4), %%edx\n"
++ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
++ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
+ " movl %%eax, 24(%3)\n"
+ " movl %%edx, 28(%3)\n"
+- "11: movl 32(%4), %%eax\n"
+- "61: movl 36(%4), %%edx\n"
++ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
++ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
+ " movl %%eax, 32(%3)\n"
+ " movl %%edx, 36(%3)\n"
+- "12: movl 40(%4), %%eax\n"
+- "71: movl 44(%4), %%edx\n"
++ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
++ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
+ " movl %%eax, 40(%3)\n"
+ " movl %%edx, 44(%3)\n"
+- "13: movl 48(%4), %%eax\n"
+- "81: movl 52(%4), %%edx\n"
++ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
++ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
+ " movl %%eax, 48(%3)\n"
+ " movl %%edx, 52(%3)\n"
+- "14: movl 56(%4), %%eax\n"
+- "91: movl 60(%4), %%edx\n"
++ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
++ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
+ " movl %%eax, 56(%3)\n"
+ " movl %%edx, 60(%3)\n"
+ " addl $-64, %0\n"
+@@ -298,9 +413,9 @@ __copy_user_zeroing_intel(void *to, cons
+ " shrl $2, %0\n"
+ " andl $3, %%eax\n"
+ " cld\n"
+- "6: rep; movsl\n"
++ "6: rep; "__copyuser_seg" movsl\n"
+ " movl %%eax,%0\n"
+- "7: rep; movsb\n"
++ "7: rep; "__copyuser_seg" movsb\n"
+ "8:\n"
+ ".section .fixup,\"ax\"\n"
+ "9: lea 0(%%eax,%0,4),%0\n"
+@@ -347,47 +462,49 @@ __copy_user_zeroing_intel(void *to, cons
+ */
+
+ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
++ const void __user *from, unsigned long size) __size_overflow(3);
++static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+ const void __user *from, unsigned long size)
+ {
+ int d0, d1;
+
+ __asm__ __volatile__(
+ " .align 2,0x90\n"
+- "0: movl 32(%4), %%eax\n"
++ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+ " jbe 2f\n"
+- "1: movl 64(%4), %%eax\n"
++ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
+ " .align 2,0x90\n"
+- "2: movl 0(%4), %%eax\n"
+- "21: movl 4(%4), %%edx\n"
++ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
++ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
+ " movnti %%eax, 0(%3)\n"
+ " movnti %%edx, 4(%3)\n"
+- "3: movl 8(%4), %%eax\n"
+- "31: movl 12(%4),%%edx\n"
++ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
++ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
+ " movnti %%eax, 8(%3)\n"
+ " movnti %%edx, 12(%3)\n"
+- "4: movl 16(%4), %%eax\n"
+- "41: movl 20(%4), %%edx\n"
++ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
++ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
+ " movnti %%eax, 16(%3)\n"
+ " movnti %%edx, 20(%3)\n"
+- "10: movl 24(%4), %%eax\n"
+- "51: movl 28(%4), %%edx\n"
++ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
++ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
+ " movnti %%eax, 24(%3)\n"
+ " movnti %%edx, 28(%3)\n"
+- "11: movl 32(%4), %%eax\n"
+- "61: movl 36(%4), %%edx\n"
++ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
++ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
+ " movnti %%eax, 32(%3)\n"
+ " movnti %%edx, 36(%3)\n"
+- "12: movl 40(%4), %%eax\n"
+- "71: movl 44(%4), %%edx\n"
++ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
++ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
+ " movnti %%eax, 40(%3)\n"
+ " movnti %%edx, 44(%3)\n"
+- "13: movl 48(%4), %%eax\n"
+- "81: movl 52(%4), %%edx\n"
++ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
++ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
+ " movnti %%eax, 48(%3)\n"
+ " movnti %%edx, 52(%3)\n"
+- "14: movl 56(%4), %%eax\n"
+- "91: movl 60(%4), %%edx\n"
++ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
++ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
+ " movnti %%eax, 56(%3)\n"
+ " movnti %%edx, 60(%3)\n"
+ " addl $-64, %0\n"
+@@ -400,9 +517,9 @@ static unsigned long __copy_user_zeroing
+ " shrl $2, %0\n"
+ " andl $3, %%eax\n"
+ " cld\n"
+- "6: rep; movsl\n"
++ "6: rep; "__copyuser_seg" movsl\n"
+ " movl %%eax,%0\n"
+- "7: rep; movsb\n"
++ "7: rep; "__copyuser_seg" movsb\n"
+ "8:\n"
+ ".section .fixup,\"ax\"\n"
+ "9: lea 0(%%eax,%0,4),%0\n"
+@@ -444,47 +561,49 @@ static unsigned long __copy_user_zeroing
+ }
+
+ static unsigned long __copy_user_intel_nocache(void *to,
++ const void __user *from, unsigned long size) __size_overflow(3);
++static unsigned long __copy_user_intel_nocache(void *to,
+ const void __user *from, unsigned long size)
+ {
+ int d0, d1;
+
+ __asm__ __volatile__(
+ " .align 2,0x90\n"
+- "0: movl 32(%4), %%eax\n"
++ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+ " jbe 2f\n"
+- "1: movl 64(%4), %%eax\n"
++ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
+ " .align 2,0x90\n"
+- "2: movl 0(%4), %%eax\n"
+- "21: movl 4(%4), %%edx\n"
++ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
++ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
+ " movnti %%eax, 0(%3)\n"
+ " movnti %%edx, 4(%3)\n"
+- "3: movl 8(%4), %%eax\n"
+- "31: movl 12(%4),%%edx\n"
++ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
++ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
+ " movnti %%eax, 8(%3)\n"
+ " movnti %%edx, 12(%3)\n"
+- "4: movl 16(%4), %%eax\n"
+- "41: movl 20(%4), %%edx\n"
++ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
++ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
+ " movnti %%eax, 16(%3)\n"
+ " movnti %%edx, 20(%3)\n"
+- "10: movl 24(%4), %%eax\n"
+- "51: movl 28(%4), %%edx\n"
++ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
++ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
+ " movnti %%eax, 24(%3)\n"
+ " movnti %%edx, 28(%3)\n"
+- "11: movl 32(%4), %%eax\n"
+- "61: movl 36(%4), %%edx\n"
++ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
++ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
+ " movnti %%eax, 32(%3)\n"
+ " movnti %%edx, 36(%3)\n"
+- "12: movl 40(%4), %%eax\n"
+- "71: movl 44(%4), %%edx\n"
++ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
++ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
+ " movnti %%eax, 40(%3)\n"
+ " movnti %%edx, 44(%3)\n"
+- "13: movl 48(%4), %%eax\n"
+- "81: movl 52(%4), %%edx\n"
++ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
++ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
+ " movnti %%eax, 48(%3)\n"
+ " movnti %%edx, 52(%3)\n"
+- "14: movl 56(%4), %%eax\n"
+- "91: movl 60(%4), %%edx\n"
++ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
++ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
+ " movnti %%eax, 56(%3)\n"
+ " movnti %%edx, 60(%3)\n"
+ " addl $-64, %0\n"
+@@ -497,9 +616,9 @@ static unsigned long __copy_user_intel_n
+ " shrl $2, %0\n"
+ " andl $3, %%eax\n"
+ " cld\n"
+- "6: rep; movsl\n"
++ "6: rep; "__copyuser_seg" movsl\n"
+ " movl %%eax,%0\n"
+- "7: rep; movsb\n"
++ "7: rep; "__copyuser_seg" movsb\n"
+ "8:\n"
+ ".section .fixup,\"ax\"\n"
+ "9: lea 0(%%eax,%0,4),%0\n"
+@@ -542,32 +661,36 @@ static unsigned long __copy_user_intel_n
+ */
+ unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
+ unsigned long size);
+-unsigned long __copy_user_intel(void __user *to, const void *from,
++unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
++ unsigned long size);
++unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
+ unsigned long size);
+ unsigned long __copy_user_zeroing_intel_nocache(void *to,
+ const void __user *from, unsigned long size);
+ #endif /* CONFIG_X86_INTEL_USERCOPY */
+
+ /* Generic arbitrary sized copy. */
+-#define __copy_user(to, from, size) \
++#define __copy_user(to, from, size, prefix, set, restore) \
+ do { \
+ int __d0, __d1, __d2; \
+ __asm__ __volatile__( \
++ set \
+ " cmp $7,%0\n" \
+ " jbe 1f\n" \
+ " movl %1,%0\n" \
+ " negl %0\n" \
+ " andl $7,%0\n" \
+ " subl %0,%3\n" \
+- "4: rep; movsb\n" \
++ "4: rep; "prefix"movsb\n" \
+ " movl %3,%0\n" \
+ " shrl $2,%0\n" \
+ " andl $3,%3\n" \
+ " .align 2,0x90\n" \
+- "0: rep; movsl\n" \
++ "0: rep; "prefix"movsl\n" \
+ " movl %3,%0\n" \
+- "1: rep; movsb\n" \
++ "1: rep; "prefix"movsb\n" \
+ "2:\n" \
++ restore \
+ ".section .fixup,\"ax\"\n" \
+ "5: addl %3,%0\n" \
+ " jmp 2b\n" \
+@@ -595,14 +718,14 @@ do { \
+ " negl %0\n" \
+ " andl $7,%0\n" \
+ " subl %0,%3\n" \
+- "4: rep; movsb\n" \
++ "4: rep; "__copyuser_seg"movsb\n" \
+ " movl %3,%0\n" \
+ " shrl $2,%0\n" \
+ " andl $3,%3\n" \
+ " .align 2,0x90\n" \
+- "0: rep; movsl\n" \
++ "0: rep; "__copyuser_seg"movsl\n" \
+ " movl %3,%0\n" \
+- "1: rep; movsb\n" \
++ "1: rep; "__copyuser_seg"movsb\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "5: addl %3,%0\n" \
+@@ -688,9 +811,9 @@ survive:
+ }
+ #endif
+ if (movsl_is_ok(to, from, n))
+- __copy_user(to, from, n);
++ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
+ else
+- n = __copy_user_intel(to, from, n);
++ n = __generic_copy_to_user_intel(to, from, n);
+ return n;
+ }
+ EXPORT_SYMBOL(__copy_to_user_ll);
+@@ -710,10 +833,9 @@ unsigned long __copy_from_user_ll_nozero
+ unsigned long n)
+ {
+ if (movsl_is_ok(to, from, n))
+- __copy_user(to, from, n);
++ __copy_user(to, from, n, __copyuser_seg, "", "");
+ else
+- n = __copy_user_intel((void __user *)to,
+- (const void *)from, n);
++ n = __generic_copy_from_user_intel(to, from, n);
+ return n;
+ }
+ EXPORT_SYMBOL(__copy_from_user_ll_nozero);
+@@ -740,65 +862,50 @@ unsigned long __copy_from_user_ll_nocach
+ if (n > 64 && cpu_has_xmm2)
+ n = __copy_user_intel_nocache(to, from, n);
+ else
+- __copy_user(to, from, n);
++ __copy_user(to, from, n, __copyuser_seg, "", "");
+ #else
+- __copy_user(to, from, n);
++ __copy_user(to, from, n, __copyuser_seg, "", "");
+ #endif
+ return n;
+ }
+ EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
+
+-/**
+- * copy_to_user: - Copy a block of data into user space.
+- * @to: Destination address, in user space.
+- * @from: Source address, in kernel space.
+- * @n: Number of bytes to copy.
+- *
+- * Context: User context only. This function may sleep.
+- *
+- * Copy data from kernel space to user space.
+- *
+- * Returns number of bytes that could not be copied.
+- * On success, this will be zero.
+- */
+-unsigned long
+-copy_to_user(void __user *to, const void *from, unsigned long n)
++void copy_from_user_overflow(void)
+ {
+- if (access_ok(VERIFY_WRITE, to, n))
+- n = __copy_to_user(to, from, n);
+- return n;
++ WARN(1, "Buffer overflow detected!\n");
+ }
+-EXPORT_SYMBOL(copy_to_user);
++EXPORT_SYMBOL(copy_from_user_overflow);
+
+-/**
+- * copy_from_user: - Copy a block of data from user space.
+- * @to: Destination address, in kernel space.
+- * @from: Source address, in user space.
+- * @n: Number of bytes to copy.
+- *
+- * Context: User context only. This function may sleep.
+- *
+- * Copy data from user space to kernel space.
+- *
+- * Returns number of bytes that could not be copied.
+- * On success, this will be zero.
+- *
+- * If some data could not be copied, this function will pad the copied
+- * data to the requested size using zero bytes.
+- */
+-unsigned long
+-_copy_from_user(void *to, const void __user *from, unsigned long n)
++void copy_to_user_overflow(void)
+ {
+- if (access_ok(VERIFY_READ, from, n))
+- n = __copy_from_user(to, from, n);
+- else
+- memset(to, 0, n);
+- return n;
++ WARN(1, "Buffer overflow detected!\n");
+ }
+-EXPORT_SYMBOL(_copy_from_user);
++EXPORT_SYMBOL(copy_to_user_overflow);
+
+-void copy_from_user_overflow(void)
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++void __set_fs(mm_segment_t x)
+ {
+- WARN(1, "Buffer overflow detected!\n");
++ switch (x.seg) {
++ case 0:
++ loadsegment(gs, 0);
++ break;
++ case TASK_SIZE_MAX:
++ loadsegment(gs, __USER_DS);
++ break;
++ case -1UL:
++ loadsegment(gs, __KERNEL_DS);
++ break;
++ default:
++ BUG();
++ }
++ return;
+ }
+-EXPORT_SYMBOL(copy_from_user_overflow);
++EXPORT_SYMBOL(__set_fs);
++
++void set_fs(mm_segment_t x)
++{
++ current_thread_info()->addr_limit = x;
++ __set_fs(x);
++}
++EXPORT_SYMBOL(set_fs);
++#endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/lib/usercopy_64.c linux-3.4-pax/arch/x86/lib/usercopy_64.c
+--- linux-3.4/arch/x86/lib/usercopy_64.c 2012-05-21 11:32:57.827927679 +0200
++++ linux-3.4-pax/arch/x86/lib/usercopy_64.c 2012-05-21 12:10:09.732048900 +0200
+@@ -16,6 +16,12 @@ unsigned long __clear_user(void __user *
+ {
+ long __d0;
+ might_fault();
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
++ addr += PAX_USER_SHADOW_BASE;
++#endif
++
+ /* no memory constraint because it doesn't change any memory gcc knows
+ about */
+ asm volatile(
+@@ -100,12 +106,20 @@ long strlen_user(const char __user *s)
+ }
+ EXPORT_SYMBOL(strlen_user);
+
+-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
++unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
+ {
+- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
+- return copy_user_generic((__force void *)to, (__force void *)from, len);
+- }
+- return len;
++ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
++ to += PAX_USER_SHADOW_BASE;
++ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
++ from += PAX_USER_SHADOW_BASE;
++#endif
++
++ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
++ }
++ return len;
+ }
+ EXPORT_SYMBOL(copy_in_user);
+
+@@ -115,7 +129,7 @@ EXPORT_SYMBOL(copy_in_user);
+ * it is not necessary to optimize tail handling.
+ */
+ unsigned long
+-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
++copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
+ {
+ char c;
+ unsigned zero_len;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/Makefile linux-3.4-pax/arch/x86/Makefile
+--- linux-3.4/arch/x86/Makefile 2012-05-21 11:32:56.719927619 +0200
++++ linux-3.4-pax/arch/x86/Makefile 2012-05-21 12:10:09.736048900 +0200
+@@ -46,6 +46,7 @@ else
+ UTS_MACHINE := x86_64
+ CHECKFLAGS += -D__x86_64__ -m64
+
++ biarch := $(call cc-option,-m64)
+ KBUILD_AFLAGS += -m64
+ KBUILD_CFLAGS += -m64
+
+@@ -221,3 +222,12 @@ define archhelp
+ echo ' FDARGS="..." arguments for the booted kernel'
+ echo ' FDINITRD=file initrd for the booted kernel'
+ endef
++
++define OLD_LD
++
++*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
++*** Please upgrade your binutils to 2.18 or newer
++endef
++
++archprepare:
++ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/extable.c linux-3.4-pax/arch/x86/mm/extable.c
+--- linux-3.4/arch/x86/mm/extable.c 2012-03-19 10:38:56.684049992 +0100
++++ linux-3.4-pax/arch/x86/mm/extable.c 2012-05-21 12:10:09.740048900 +0200
+@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
+ const struct exception_table_entry *fixup;
+
+ #ifdef CONFIG_PNPBIOS
+- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
++ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
+ extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
+ extern u32 pnp_bios_is_utter_crap;
+ pnp_bios_is_utter_crap = 1;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/fault.c linux-3.4-pax/arch/x86/mm/fault.c
+--- linux-3.4/arch/x86/mm/fault.c 2012-05-21 11:32:57.831927679 +0200
++++ linux-3.4-pax/arch/x86/mm/fault.c 2012-05-26 01:07:07.428801082 +0200
+@@ -13,11 +13,18 @@
+ #include <linux/perf_event.h> /* perf_sw_event */
+ #include <linux/hugetlb.h> /* hstate_index_to_shift */
+ #include <linux/prefetch.h> /* prefetchw */
++#include <linux/unistd.h>
++#include <linux/compiler.h>
+
+ #include <asm/traps.h> /* dotraplinkage, ... */
+ #include <asm/pgalloc.h> /* pgd_*(), ... */
+ #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
+ #include <asm/fixmap.h> /* VSYSCALL_START */
++#include <asm/tlbflush.h>
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#include <asm/stacktrace.h>
++#endif
+
+ /*
+ * Page fault error code bits:
+@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_
+ int ret = 0;
+
+ /* kprobe_running() needs smp_processor_id() */
+- if (kprobes_built_in() && !user_mode_vm(regs)) {
++ if (kprobes_built_in() && !user_mode(regs)) {
+ preempt_disable();
+ if (kprobe_running() && kprobe_fault_handler(regs, 14))
+ ret = 1;
+@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
+ return !instr_lo || (instr_lo>>1) == 1;
+ case 0x00:
+ /* Prefetch instruction is 0x0F0D or 0x0F18 */
+- if (probe_kernel_address(instr, opcode))
++ if (user_mode(regs)) {
++ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
++ return 0;
++ } else if (probe_kernel_address(instr, opcode))
+ return 0;
+
+ *prefetch = (instr_lo == 0xF) &&
+@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
+ while (instr < max_instr) {
+ unsigned char opcode;
+
+- if (probe_kernel_address(instr, opcode))
++ if (user_mode(regs)) {
++ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
++ break;
++ } else if (probe_kernel_address(instr, opcode))
+ break;
+
+ instr++;
+@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int s
+ force_sig_info(si_signo, &info, tsk);
+ }
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++static int pax_handle_fetch_fault(struct pt_regs *regs);
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ pgd = pgd_offset(mm, address);
++ if (!pgd_present(*pgd))
++ return NULL;
++ pud = pud_offset(pgd, address);
++ if (!pud_present(*pud))
++ return NULL;
++ pmd = pmd_offset(pud, address);
++ if (!pmd_present(*pmd))
++ return NULL;
++ return pmd;
++}
++#endif
++
+ DEFINE_SPINLOCK(pgd_lock);
+ LIST_HEAD(pgd_list);
+
+@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
+ for (address = VMALLOC_START & PMD_MASK;
+ address >= TASK_SIZE && address < FIXADDR_TOP;
+ address += PMD_SIZE) {
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ unsigned long cpu;
++#else
+ struct page *page;
++#endif
+
+ spin_lock(&pgd_lock);
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
++ pgd_t *pgd = get_cpu_pgd(cpu);
++ pmd_t *ret;
++#else
+ list_for_each_entry(page, &pgd_list, lru) {
++ pgd_t *pgd = page_address(page);
+ spinlock_t *pgt_lock;
+ pmd_t *ret;
+
+@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
+ pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+
+ spin_lock(pgt_lock);
+- ret = vmalloc_sync_one(page_address(page), address);
++#endif
++
++ ret = vmalloc_sync_one(pgd, address);
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ spin_unlock(pgt_lock);
++#endif
+
+ if (!ret)
+ break;
+@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fa
+ * an interrupt in the middle of a task switch..
+ */
+ pgd_paddr = read_cr3();
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
++#endif
++
+ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
+ if (!pmd_k)
+ return -1;
+@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fa
+ * happen within a race in page table update. In the later
+ * case just flush:
+ */
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
++ pgd = pgd_offset_cpu(smp_processor_id(), address);
++#else
+ pgd = pgd_offset(current->active_mm, address);
++#endif
++
+ pgd_ref = pgd_offset_k(address);
+ if (pgd_none(*pgd_ref))
+ return -1;
+@@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *r
+ static int is_errata100(struct pt_regs *regs, unsigned long address)
+ {
+ #ifdef CONFIG_X86_64
+- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
+ return 1;
+ #endif
+ return 0;
+@@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *r
+ }
+
+ static const char nx_warning[] = KERN_CRIT
+-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
++"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
+
+ static void
+ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+@@ -576,15 +646,21 @@ show_fault_oops(struct pt_regs *regs, un
+ if (!oops_may_print())
+ return;
+
+- if (error_code & PF_INSTR) {
++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
+ unsigned int level;
+
+ pte_t *pte = lookup_address(address, &level);
+
+ if (pte && pte_present(*pte) && !pte_exec(*pte))
+- printk(nx_warning, current_uid());
++ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++ if (init_mm.start_code <= address && address < init_mm.end_code)
++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
++ current->comm, task_pid_nr(current), current_uid(), current_euid());
++#endif
++
+ printk(KERN_ALERT "BUG: unable to handle kernel ");
+ if (address < PAGE_SIZE)
+ printk(KERN_CONT "NULL pointer dereference");
+@@ -748,6 +824,21 @@ __bad_area_nosemaphore(struct pt_regs *r
+ }
+ #endif
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (pax_is_fetch_fault(regs, error_code, address)) {
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ switch (pax_handle_fetch_fault(regs)) {
++ case 2:
++ return;
++ }
++#endif
++
++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ if (unlikely(show_unhandled_signals))
+ show_signal_msg(regs, error_code, address, tsk);
+
+@@ -844,7 +935,7 @@ do_sigbus(struct pt_regs *regs, unsigned
+ if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
+ printk(KERN_ERR
+ "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
+- tsk->comm, tsk->pid, address);
++ tsk->comm, task_pid_nr(tsk), address);
+ code = BUS_MCEERR_AR;
+ }
+ #endif
+@@ -900,6 +991,99 @@ static int spurious_fault_check(unsigned
+ return 1;
+ }
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
++{
++ pte_t *pte;
++ pmd_t *pmd;
++ spinlock_t *ptl;
++ unsigned char pte_mask;
++
++ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
++ !(mm->pax_flags & MF_PAX_PAGEEXEC))
++ return 0;
++
++ /* PaX: it's our fault, let's handle it if we can */
++
++ /* PaX: take a look at read faults before acquiring any locks */
++ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
++ /* instruction fetch attempt from a protected page in user mode */
++ up_read(&mm->mmap_sem);
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ switch (pax_handle_fetch_fault(regs)) {
++ case 2:
++ return 1;
++ }
++#endif
++
++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++
++ pmd = pax_get_pmd(mm, address);
++ if (unlikely(!pmd))
++ return 0;
++
++ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
++ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
++ pte_unmap_unlock(pte, ptl);
++ return 0;
++ }
++
++ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
++ /* write attempt to a protected page in user mode */
++ pte_unmap_unlock(pte, ptl);
++ return 0;
++ }
++
++#ifdef CONFIG_SMP
++ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
++#else
++ if (likely(address > get_limit(regs->cs)))
++#endif
++ {
++ set_pte(pte, pte_mkread(*pte));
++ __flush_tlb_one(address);
++ pte_unmap_unlock(pte, ptl);
++ up_read(&mm->mmap_sem);
++ return 1;
++ }
++
++ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
++
++ /*
++ * PaX: fill DTLB with user rights and retry
++ */
++ __asm__ __volatile__ (
++ "orb %2,(%1)\n"
++#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
++/*
++ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
++ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
++ * page fault when examined during a TLB load attempt. this is true not only
++ * for PTEs holding a non-present entry but also present entries that will
++ * raise a page fault (such as those set up by PaX, or the copy-on-write
++ * mechanism). in effect it means that we do *not* need to flush the TLBs
++ * for our target pages since their PTEs are simply not in the TLBs at all.
++
++ * the best thing in omitting it is that we gain around 15-20% speed in the
++ * fast path of the page fault handler and can get rid of tracing since we
++ * can no longer flush unintended entries.
++ */
++ "invlpg (%0)\n"
++#endif
++ __copyuser_seg"testb $0,(%0)\n"
++ "xorb %3,(%1)\n"
++ :
++ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
++ : "memory", "cc");
++ pte_unmap_unlock(pte, ptl);
++ up_read(&mm->mmap_sem);
++ return 1;
++}
++#endif
++
+ /*
+ * Handle a spurious fault caused by a stale TLB entry.
+ *
+@@ -972,6 +1156,9 @@ int show_unhandled_signals = 1;
+ static inline int
+ access_error(unsigned long error_code, struct vm_area_struct *vma)
+ {
++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
++ return 1;
++
+ if (error_code & PF_WRITE) {
+ /* write, present and write, not present: */
+ if (unlikely(!(vma->vm_flags & VM_WRITE)))
+@@ -1005,19 +1192,34 @@ do_page_fault(struct pt_regs *regs, unsi
+ {
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+- unsigned long address;
+ struct mm_struct *mm;
+ int fault;
+ int write = error_code & PF_WRITE;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ (write ? FAULT_FLAG_WRITE : 0);
+
++ /* Get the faulting address: */
++ unsigned long address = read_cr2();
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
++ if (!search_exception_tables(regs->ip)) {
++ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
++ bad_area_nosemaphore(regs, error_code, address);
++ return;
++ }
++ if (address < PAX_USER_SHADOW_BASE) {
++ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
++ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
++ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
++ } else
++ address -= PAX_USER_SHADOW_BASE;
++ }
++#endif
++
+ tsk = current;
+ mm = tsk->mm;
+
+- /* Get the faulting address: */
+- address = read_cr2();
+-
+ /*
+ * Detect and handle instructions that would cause a page fault for
+ * both a tracked kernel page and a userspace page.
+@@ -1077,7 +1279,7 @@ do_page_fault(struct pt_regs *regs, unsi
+ * User-mode registers count as a user access even for any
+ * potential system fault or CPU buglet:
+ */
+- if (user_mode_vm(regs)) {
++ if (user_mode(regs)) {
+ local_irq_enable();
+ error_code |= PF_USER;
+ } else {
+@@ -1132,6 +1334,11 @@ retry:
+ might_sleep();
+ }
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
++ return;
++#endif
++
+ vma = find_vma(mm, address);
+ if (unlikely(!vma)) {
+ bad_area(regs, error_code, address);
+@@ -1143,18 +1350,24 @@ retry:
+ bad_area(regs, error_code, address);
+ return;
+ }
+- if (error_code & PF_USER) {
+- /*
+- * Accessing the stack below %sp is always a bug.
+- * The large cushion allows instructions like enter
+- * and pusha to work. ("enter $65535, $31" pushes
+- * 32 pointers and then decrements %sp by 65535.)
+- */
+- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
+- bad_area(regs, error_code, address);
+- return;
+- }
++ /*
++ * Accessing the stack below %sp is always a bug.
++ * The large cushion allows instructions like enter
++ * and pusha to work. ("enter $65535, $31" pushes
++ * 32 pointers and then decrements %sp by 65535.)
++ */
++ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
++ bad_area(regs, error_code, address);
++ return;
+ }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
++ bad_area(regs, error_code, address);
++ return;
++ }
++#endif
++
+ if (unlikely(expand_stack(vma, address))) {
+ bad_area(regs, error_code, address);
+ return;
+@@ -1209,3 +1422,292 @@ good_area:
+
+ up_read(&mm->mmap_sem);
+ }
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
++{
++ struct mm_struct *mm = current->mm;
++ unsigned long ip = regs->ip;
++
++ if (v8086_mode(regs))
++ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
++ return true;
++ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
++ return true;
++ return false;
++ }
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
++ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
++ return true;
++ return false;
++ }
++#endif
++
++ return false;
++}
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++static int pax_handle_fetch_fault_32(struct pt_regs *regs)
++{
++ int err;
++
++ do { /* PaX: libffi trampoline emulation */
++ unsigned char mov, jmp;
++ unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++ if ((regs->ip + 9) >> 32)
++ break;
++#endif
++
++ err = get_user(mov, (unsigned char __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++
++ if (err)
++ break;
++
++ if (mov == 0xB8 && jmp == 0xE9) {
++ regs->ax = addr1;
++ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #1 */
++ unsigned char mov1, mov2;
++ unsigned short jmp;
++ unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++ if ((regs->ip + 11) >> 32)
++ break;
++#endif
++
++ err = get_user(mov1, (unsigned char __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
++ regs->cx = addr1;
++ regs->ax = addr2;
++ regs->ip = addr2;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #2 */
++ unsigned char mov, jmp;
++ unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++ if ((regs->ip + 9) >> 32)
++ break;
++#endif
++
++ err = get_user(mov, (unsigned char __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++
++ if (err)
++ break;
++
++ if (mov == 0xB9 && jmp == 0xE9) {
++ regs->cx = addr1;
++ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
++ return 2;
++ }
++ } while (0);
++
++ return 1; /* PaX in action */
++}
++
++#ifdef CONFIG_X86_64
++static int pax_handle_fetch_fault_64(struct pt_regs *regs)
++{
++ int err;
++
++ do { /* PaX: libffi trampoline emulation */
++ unsigned short mov1, mov2, jmp1;
++ unsigned char stcclc, jmp2;
++ unsigned long addr1, addr2;
++
++ err = get_user(mov1, (unsigned short __user *)regs->ip);
++ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
++ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++ regs->r11 = addr1;
++ regs->r10 = addr2;
++ if (stcclc == 0xF8)
++ regs->flags &= ~X86_EFLAGS_CF;
++ else
++ regs->flags |= X86_EFLAGS_CF;
++ regs->ip = addr1;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #1 */
++ unsigned short mov1, mov2, jmp1;
++ unsigned char jmp2;
++ unsigned int addr1;
++ unsigned long addr2;
++
++ err = get_user(mov1, (unsigned short __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++ regs->r11 = addr1;
++ regs->r10 = addr2;
++ regs->ip = addr1;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #2 */
++ unsigned short mov1, mov2, jmp1;
++ unsigned char jmp2;
++ unsigned long addr1, addr2;
++
++ err = get_user(mov1, (unsigned short __user *)regs->ip);
++ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++ regs->r11 = addr1;
++ regs->r10 = addr2;
++ regs->ip = addr1;
++ return 2;
++ }
++ } while (0);
++
++ return 1; /* PaX in action */
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->ip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when gcc trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ if (v8086_mode(regs))
++ return 1;
++
++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++ return 1;
++
++#ifdef CONFIG_X86_32
++ return pax_handle_fetch_fault_32(regs);
++#else
++ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
++ return pax_handle_fetch_fault_32(regs);
++ else
++ return pax_handle_fetch_fault_64(regs);
++#endif
++}
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (unsigned char __force_user *)pc+i))
++ printk(KERN_CONT "?? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++
++ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
++ for (i = -1; i < 80 / (long)sizeof(long); i++) {
++ unsigned long c;
++ if (get_user(c, (unsigned long __force_user *)sp+i)) {
++#ifdef CONFIG_X86_32
++ printk(KERN_CONT "???????? ");
++#else
++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
++ printk(KERN_CONT "???????? ???????? ");
++ else
++ printk(KERN_CONT "???????????????? ");
++#endif
++ } else {
++#ifdef CONFIG_X86_64
++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
++ printk(KERN_CONT "%08x ", (unsigned int)c);
++ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
++ } else
++#endif
++ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
++ }
++ }
++ printk("\n");
++}
++#endif
++
++/**
++ * probe_kernel_write(): safely attempt to write to a location
++ * @dst: address to write to
++ * @src: pointer to the data that shall be written
++ * @size: size of the data chunk
++ *
++ * Safely write to address @dst from the buffer at @src. If a kernel fault
++ * happens, handle that and return -EFAULT.
++ */
++long notrace probe_kernel_write(void *dst, const void *src, size_t size)
++{
++ long ret;
++ mm_segment_t old_fs = get_fs();
++
++ set_fs(KERNEL_DS);
++ pagefault_disable();
++ pax_open_kernel();
++ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
++ pax_close_kernel();
++ pagefault_enable();
++ set_fs(old_fs);
++
++ return ret ? -EFAULT : 0;
++}
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/gup.c linux-3.4-pax/arch/x86/mm/gup.c
+--- linux-3.4/arch/x86/mm/gup.c 2012-01-08 19:47:49.779472971 +0100
++++ linux-3.4-pax/arch/x86/mm/gup.c 2012-05-27 13:47:34.453228015 +0200
+@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
++ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+ (void __user *)start, len)))
+ return 0;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/highmem_32.c linux-3.4-pax/arch/x86/mm/highmem_32.c
+--- linux-3.4/arch/x86/mm/highmem_32.c 2012-05-21 11:32:57.847927680 +0200
++++ linux-3.4-pax/arch/x86/mm/highmem_32.c 2012-05-21 12:10:09.748048900 +0200
+@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ BUG_ON(!pte_none(*(kmap_pte-idx)));
++
++ pax_open_kernel();
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
++ pax_close_kernel();
++
+ arch_flush_lazy_mmu_mode();
+
+ return (void *)vaddr;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/hugetlbpage.c linux-3.4-pax/arch/x86/mm/hugetlbpage.c
+--- linux-3.4/arch/x86/mm/hugetlbpage.c 2012-05-21 11:32:57.851927680 +0200
++++ linux-3.4-pax/arch/x86/mm/hugetlbpage.c 2012-05-21 12:10:09.748048900 +0200
+@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
+ struct hstate *h = hstate_file(file);
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+- unsigned long start_addr;
++ unsigned long start_addr, pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
+
+ if (len > mm->cached_hole_size) {
+- start_addr = mm->free_area_cache;
++ start_addr = mm->free_area_cache;
+ } else {
+- start_addr = TASK_UNMAPPED_BASE;
+- mm->cached_hole_size = 0;
++ start_addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
+ }
+
+ full_search:
+@@ -280,26 +287,27 @@ full_search:
+
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+- if (TASK_SIZE - len < addr) {
++ if (pax_task_size - len < addr) {
+ /*
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
+- mm->free_area_cache = addr + len;
+- return addr;
+- }
++ if (check_heap_stack_gap(vma, addr, len))
++ break;
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+ addr = ALIGN(vma->vm_end, huge_page_size(h));
+ }
++
++ mm->free_area_cache = addr + len;
++ return addr;
+ }
+
+ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -310,9 +318,8 @@ static unsigned long hugetlb_get_unmappe
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long base = mm->mmap_base;
+- unsigned long addr = addr0;
++ unsigned long addr;
+ unsigned long largest_hole = mm->cached_hole_size;
+- unsigned long start_addr;
+
+ /* don't allow allocations above current base */
+ if (mm->free_area_cache > base)
+@@ -322,16 +329,15 @@ static unsigned long hugetlb_get_unmappe
+ largest_hole = 0;
+ mm->free_area_cache = base;
+ }
+-try_again:
+- start_addr = mm->free_area_cache;
+
+ /* make sure it can fit in the remaining address space */
+ if (mm->free_area_cache < len)
+ goto fail;
+
+ /* either no address requested or can't fit in requested address hole */
+- addr = (mm->free_area_cache - len) & huge_page_mask(h);
++ addr = mm->free_area_cache - len;
+ do {
++ addr &= huge_page_mask(h);
+ /*
+ * Lookup failure means no vma is above this address,
+ * i.e. return with success:
+@@ -340,10 +346,10 @@ try_again:
+ if (!vma)
+ return addr;
+
+- if (addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* remember the address as a hint for next time */
+- mm->cached_hole_size = largest_hole;
+- return (mm->free_area_cache = addr);
++ mm->cached_hole_size = largest_hole;
++ return (mm->free_area_cache = addr);
+ } else if (mm->free_area_cache == vma->vm_end) {
+ /* pull free_area_cache down to the first hole */
+ mm->free_area_cache = vma->vm_start;
+@@ -352,29 +358,34 @@ try_again:
+
+ /* remember the largest hole we saw so far */
+ if (addr + largest_hole < vma->vm_start)
+- largest_hole = vma->vm_start - addr;
++ largest_hole = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = (vma->vm_start - len) & huge_page_mask(h);
+- } while (len <= vma->vm_start);
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
+
+ fail:
+ /*
+- * if hint left us with no space for the requested
+- * mapping then try again:
+- */
+- if (start_addr != base) {
+- mm->free_area_cache = base;
+- largest_hole = 0;
+- goto try_again;
+- }
+- /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+- mm->free_area_cache = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
++ else
++#endif
++
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+ addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
+ len, pgoff, flags);
+@@ -382,6 +393,7 @@ fail:
+ /*
+ * Restore the topdown base:
+ */
++ mm->mmap_base = base;
+ mm->free_area_cache = base;
+ mm->cached_hole_size = ~0UL;
+
+@@ -395,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
+ struct hstate *h = hstate_file(file);
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
++ unsigned long pax_task_size = TASK_SIZE;
+
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+- if (len > TASK_SIZE)
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
++
++ if (len > pax_task_size)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED) {
+@@ -410,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
+ if (addr) {
+ addr = ALIGN(addr, huge_page_size(h));
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/init_32.c linux-3.4-pax/arch/x86/mm/init_32.c
+--- linux-3.4/arch/x86/mm/init_32.c 2012-05-21 11:32:57.859927681 +0200
++++ linux-3.4-pax/arch/x86/mm/init_32.c 2012-05-21 12:10:09.752048901 +0200
+@@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
+ }
+
+ /*
+- * Creates a middle page table and puts a pointer to it in the
+- * given global directory entry. This only returns the gd entry
+- * in non-PAE compilation mode, since the middle layer is folded.
+- */
+-static pmd_t * __init one_md_table_init(pgd_t *pgd)
+-{
+- pud_t *pud;
+- pmd_t *pmd_table;
+-
+-#ifdef CONFIG_X86_PAE
+- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
+- if (after_bootmem)
+- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
+- else
+- pmd_table = (pmd_t *)alloc_low_page();
+- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
+- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+- pud = pud_offset(pgd, 0);
+- BUG_ON(pmd_table != pmd_offset(pud, 0));
+-
+- return pmd_table;
+- }
+-#endif
+- pud = pud_offset(pgd, 0);
+- pmd_table = pmd_offset(pud, 0);
+-
+- return pmd_table;
+-}
+-
+-/*
+ * Create a page table and place a pointer to it in a middle page
+ * directory entry:
+ */
+@@ -122,13 +92,28 @@ static pte_t * __init one_page_table_ini
+ page_table = (pte_t *)alloc_low_page();
+
+ paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
++#else
+ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
++#endif
+ BUG_ON(page_table != pte_offset_kernel(pmd, 0));
+ }
+
+ return pte_offset_kernel(pmd, 0);
+ }
+
++static pmd_t * __init one_md_table_init(pgd_t *pgd)
++{
++ pud_t *pud;
++ pmd_t *pmd_table;
++
++ pud = pud_offset(pgd, 0);
++ pmd_table = pmd_offset(pud, 0);
++
++ return pmd_table;
++}
++
+ pmd_t * __init populate_extra_pmd(unsigned long vaddr)
+ {
+ int pgd_idx = pgd_index(vaddr);
+@@ -202,6 +187,7 @@ page_table_range_init(unsigned long star
+ int pgd_idx, pmd_idx;
+ unsigned long vaddr;
+ pgd_t *pgd;
++ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte = NULL;
+
+@@ -211,8 +197,13 @@ page_table_range_init(unsigned long star
+ pgd = pgd_base + pgd_idx;
+
+ for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
+- pmd = one_md_table_init(pgd);
+- pmd = pmd + pmd_index(vaddr);
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++
++#ifdef CONFIG_X86_PAE
++ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
++#endif
++
+ for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
+ pmd++, pmd_idx++) {
+ pte = page_table_kmap_check(one_page_table_init(pmd),
+@@ -224,11 +215,20 @@ page_table_range_init(unsigned long star
+ }
+ }
+
+-static inline int is_kernel_text(unsigned long addr)
++static inline int is_kernel_text(unsigned long start, unsigned long end)
+ {
+- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
+- return 1;
+- return 0;
++ if ((start > ktla_ktva((unsigned long)_etext) ||
++ end <= ktla_ktva((unsigned long)_stext)) &&
++ (start > ktla_ktva((unsigned long)_einittext) ||
++ end <= ktla_ktva((unsigned long)_sinittext)) &&
++
++#ifdef CONFIG_ACPI_SLEEP
++ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
++#endif
++
++ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
++ return 0;
++ return 1;
+ }
+
+ /*
+@@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned lo
+ unsigned long last_map_addr = end;
+ unsigned long start_pfn, end_pfn;
+ pgd_t *pgd_base = swapper_pg_dir;
+- int pgd_idx, pmd_idx, pte_ofs;
++ unsigned int pgd_idx, pmd_idx, pte_ofs;
+ unsigned long pfn;
+ pgd_t *pgd;
++ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned pages_2m, pages_4k;
+@@ -280,8 +281,13 @@ repeat:
+ pfn = start_pfn;
+ pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+ pgd = pgd_base + pgd_idx;
+- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
+- pmd = one_md_table_init(pgd);
++ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
++ pud = pud_offset(pgd, 0);
++ pmd = pmd_offset(pud, 0);
++
++#ifdef CONFIG_X86_PAE
++ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
++#endif
+
+ if (pfn >= end_pfn)
+ continue;
+@@ -293,14 +299,13 @@ repeat:
+ #endif
+ for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
+ pmd++, pmd_idx++) {
+- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
++ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
+
+ /*
+ * Map with big pages if possible, otherwise
+ * create normal page tables:
+ */
+ if (use_pse) {
+- unsigned int addr2;
+ pgprot_t prot = PAGE_KERNEL_LARGE;
+ /*
+ * first pass will use the same initial
+@@ -310,11 +315,7 @@ repeat:
+ __pgprot(PTE_IDENT_ATTR |
+ _PAGE_PSE);
+
+- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
+- PAGE_OFFSET + PAGE_SIZE-1;
+-
+- if (is_kernel_text(addr) ||
+- is_kernel_text(addr2))
++ if (is_kernel_text(address, address + PMD_SIZE))
+ prot = PAGE_KERNEL_LARGE_EXEC;
+
+ pages_2m++;
+@@ -331,7 +332,7 @@ repeat:
+ pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+ pte += pte_ofs;
+ for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
+- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
++ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
+ pgprot_t prot = PAGE_KERNEL;
+ /*
+ * first pass will use the same initial
+@@ -339,7 +340,7 @@ repeat:
+ */
+ pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
+
+- if (is_kernel_text(addr))
++ if (is_kernel_text(address, address + PAGE_SIZE))
+ prot = PAGE_KERNEL_EXEC;
+
+ pages_4k++;
+@@ -465,7 +466,7 @@ void __init native_pagetable_setup_start
+
+ pud = pud_offset(pgd, va);
+ pmd = pmd_offset(pud, va);
+- if (!pmd_present(*pmd))
++ if (!pmd_present(*pmd) || pmd_huge(*pmd))
+ break;
+
+ pte = pte_offset_kernel(pmd, va);
+@@ -517,12 +518,10 @@ void __init early_ioremap_page_table_ran
+
+ static void __init pagetable_init(void)
+ {
+- pgd_t *pgd_base = swapper_pg_dir;
+-
+- permanent_kmaps_init(pgd_base);
++ permanent_kmaps_init(swapper_pg_dir);
+ }
+
+-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
+ EXPORT_SYMBOL_GPL(__supported_pte_mask);
+
+ /* user-defined highmem size */
+@@ -734,6 +733,12 @@ void __init mem_init(void)
+
+ pci_iommu_alloc();
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
++ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++ KERNEL_PGD_PTRS);
++#endif
++
+ #ifdef CONFIG_FLATMEM
+ BUG_ON(!mem_map);
+ #endif
+@@ -760,7 +765,7 @@ void __init mem_init(void)
+ reservedpages++;
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
++ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
+@@ -801,10 +806,10 @@ void __init mem_init(void)
+ ((unsigned long)&__init_end -
+ (unsigned long)&__init_begin) >> 10,
+
+- (unsigned long)&_etext, (unsigned long)&_edata,
+- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
++ (unsigned long)&_sdata, (unsigned long)&_edata,
++ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
+
+- (unsigned long)&_text, (unsigned long)&_etext,
++ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
+ ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+
+ /*
+@@ -882,6 +887,7 @@ void set_kernel_text_rw(void)
+ if (!kernel_set_to_readonly)
+ return;
+
++ start = ktla_ktva(start);
+ pr_debug("Set kernel text: %lx - %lx for read write\n",
+ start, start+size);
+
+@@ -896,6 +902,7 @@ void set_kernel_text_ro(void)
+ if (!kernel_set_to_readonly)
+ return;
+
++ start = ktla_ktva(start);
+ pr_debug("Set kernel text: %lx - %lx for read only\n",
+ start, start+size);
+
+@@ -924,6 +931,7 @@ void mark_rodata_ro(void)
+ unsigned long start = PFN_ALIGN(_text);
+ unsigned long size = PFN_ALIGN(_etext) - start;
+
++ start = ktla_ktva(start);
+ set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
+ printk(KERN_INFO "Write protecting the kernel text: %luk\n",
+ size >> 10);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/init_64.c linux-3.4-pax/arch/x86/mm/init_64.c
+--- linux-3.4/arch/x86/mm/init_64.c 2012-05-21 11:32:57.863927680 +0200
++++ linux-3.4-pax/arch/x86/mm/init_64.c 2012-05-21 12:10:09.752048901 +0200
+@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpa
+ * around without checking the pgd every time.
+ */
+
+-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
+ EXPORT_SYMBOL_GPL(__supported_pte_mask);
+
+ int force_personality32;
+@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long star
+
+ for (address = start; address <= end; address += PGDIR_SIZE) {
+ const pgd_t *pgd_ref = pgd_offset_k(address);
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ unsigned long cpu;
++#else
+ struct page *page;
++#endif
+
+ if (pgd_none(*pgd_ref))
+ continue;
+
+ spin_lock(&pgd_lock);
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
++ pgd_t *pgd = pgd_offset_cpu(cpu, address);
++#else
+ list_for_each_entry(page, &pgd_list, lru) {
+ pgd_t *pgd;
+ spinlock_t *pgt_lock;
+@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long star
+ /* the pgt_lock only for Xen */
+ pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+ spin_lock(pgt_lock);
++#endif
+
+ if (pgd_none(*pgd))
+ set_pgd(pgd, *pgd_ref);
+@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long star
+ BUG_ON(pgd_page_vaddr(*pgd)
+ != pgd_page_vaddr(*pgd_ref));
+
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ spin_unlock(pgt_lock);
++#endif
++
+ }
+ spin_unlock(&pgd_lock);
+ }
+@@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsig
+ {
+ if (pgd_none(*pgd)) {
+ pud_t *pud = (pud_t *)spp_getpage();
+- pgd_populate(&init_mm, pgd, pud);
++ pgd_populate_kernel(&init_mm, pgd, pud);
+ if (pud != pud_offset(pgd, 0))
+ printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
+ pud, pud_offset(pgd, 0));
+@@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsig
+ {
+ if (pud_none(*pud)) {
+ pmd_t *pmd = (pmd_t *) spp_getpage();
+- pud_populate(&init_mm, pud, pmd);
++ pud_populate_kernel(&init_mm, pud, pmd);
+ if (pmd != pmd_offset(pud, 0))
+ printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
+ pmd, pmd_offset(pud, 0));
+@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
+ pmd = fill_pmd(pud, vaddr);
+ pte = fill_pte(pmd, vaddr);
+
++ pax_open_kernel();
+ set_pte(pte, new_pte);
++ pax_close_kernel();
+
+ /*
+ * It's enough to flush this one mapping.
+@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(
+ pgd = pgd_offset_k((unsigned long)__va(phys));
+ if (pgd_none(*pgd)) {
+ pud = (pud_t *) spp_getpage();
+- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
+- _PAGE_USER));
++ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
+ }
+ pud = pud_offset(pgd, (unsigned long)__va(phys));
+ if (pud_none(*pud)) {
+ pmd = (pmd_t *) spp_getpage();
+- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
+- _PAGE_USER));
++ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
+ }
+ pmd = pmd_offset(pud, phys);
+ BUG_ON(!pmd_none(*pmd));
+@@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsign
+ if (pfn >= pgt_buf_top)
+ panic("alloc_low_page: ran out of memory");
+
+- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
++ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
+ clear_page(adr);
+ *phys = pfn * PAGE_SIZE;
+ return adr;
+@@ -345,7 +359,7 @@ static __ref void *map_low_page(void *vi
+
+ phys = __pa(virt);
+ left = phys & (PAGE_SIZE - 1);
+- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
++ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
+ adr = (void *)(((unsigned long)adr) | left);
+
+ return adr;
+@@ -545,7 +559,7 @@ phys_pud_init(pud_t *pud_page, unsigned
+ unmap_low_page(pmd);
+
+ spin_lock(&init_mm.page_table_lock);
+- pud_populate(&init_mm, pud, __va(pmd_phys));
++ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
+ spin_unlock(&init_mm.page_table_lock);
+ }
+ __flush_tlb_all();
+@@ -591,7 +605,7 @@ kernel_physical_mapping_init(unsigned lo
+ unmap_low_page(pud);
+
+ spin_lock(&init_mm.page_table_lock);
+- pgd_populate(&init_mm, pgd, __va(pud_phys));
++ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
+ spin_unlock(&init_mm.page_table_lock);
+ pgd_changed = true;
+ }
+@@ -683,6 +697,12 @@ void __init mem_init(void)
+
+ pci_iommu_alloc();
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
++ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++ KERNEL_PGD_PTRS);
++#endif
++
+ /* clear_bss() already clear the empty_zero_page */
+
+ reservedpages = 0;
+@@ -843,8 +863,8 @@ int kern_addr_valid(unsigned long addr)
+ static struct vm_area_struct gate_vma = {
+ .vm_start = VSYSCALL_START,
+ .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
+- .vm_page_prot = PAGE_READONLY_EXEC,
+- .vm_flags = VM_READ | VM_EXEC
++ .vm_page_prot = PAGE_READONLY,
++ .vm_flags = VM_READ
+ };
+
+ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+@@ -878,7 +898,7 @@ int in_gate_area_no_mm(unsigned long add
+
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
+- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
++ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
+ return "[vdso]";
+ if (vma == &gate_vma)
+ return "[vsyscall]";
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/init.c linux-3.4-pax/arch/x86/mm/init.c
+--- linux-3.4/arch/x86/mm/init.c 2012-05-21 11:32:57.855927681 +0200
++++ linux-3.4-pax/arch/x86/mm/init.c 2012-05-21 12:10:09.756048901 +0200
+@@ -16,6 +16,7 @@
+ #include <asm/tlb.h>
+ #include <asm/proto.h>
+ #include <asm/dma.h> /* for MAX_DMA_PFN */
++#include <asm/desc.h>
+
+ unsigned long __initdata pgt_buf_start;
+ unsigned long __meminitdata pgt_buf_end;
+@@ -32,7 +33,7 @@ int direct_gbpages
+ static void __init find_early_table_space(unsigned long end, int use_pse,
+ int use_gbpages)
+ {
+- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
++ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
+ phys_addr_t base;
+
+ puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
+@@ -313,7 +314,13 @@ unsigned long __init_refok init_memory_m
+ */
+ int devmem_is_allowed(unsigned long pagenr)
+ {
+- if (pagenr <= 256)
++ if (!pagenr)
++ return 1;
++#ifdef CONFIG_VM86
++ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
++ return 1;
++#endif
++ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
+ return 1;
+ if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+ return 0;
+@@ -373,6 +380,86 @@ void free_init_pages(char *what, unsigne
+
+ void free_initmem(void)
+ {
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_32
++ /* PaX: limit KERNEL_CS to actual size */
++ unsigned long addr, limit;
++ struct desc_struct d;
++ int cpu;
++
++ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
++ limit = (limit - 1UL) >> PAGE_SHIFT;
++
++ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
++ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
++ }
++
++ /* PaX: make KERNEL_CS read-only */
++ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
++ if (!paravirt_enabled())
++ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
++/*
++ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ }
++*/
++#ifdef CONFIG_X86_PAE
++ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
++/*
++ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++ }
++*/
++#endif
++
++#ifdef CONFIG_MODULES
++ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
++#endif
++
++#else
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ unsigned long addr, end;
++
++ /* PaX: make kernel code/rodata read-only, rest non-executable */
++ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ if (!pmd_present(*pmd))
++ continue;
++ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ else
++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++ }
++
++ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
++ end = addr + KERNEL_IMAGE_SIZE;
++ for (; addr < end; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ if (!pmd_present(*pmd))
++ continue;
++ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ }
++#endif
++
++ flush_tlb_all();
++#endif
++
+ free_init_pages("unused kernel memory",
+ (unsigned long)(&__init_begin),
+ (unsigned long)(&__init_end));
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/iomap_32.c linux-3.4-pax/arch/x86/mm/iomap_32.c
+--- linux-3.4/arch/x86/mm/iomap_32.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/mm/iomap_32.c 2012-05-21 12:10:09.756048901 +0200
+@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
+ type = kmap_atomic_idx_push();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++
++ pax_open_kernel();
+ set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
++ pax_close_kernel();
++
+ arch_flush_lazy_mmu_mode();
+
+ return (void *)vaddr;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/ioremap.c linux-3.4-pax/arch/x86/mm/ioremap.c
+--- linux-3.4/arch/x86/mm/ioremap.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/mm/ioremap.c 2012-05-21 12:10:09.760048901 +0200
+@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
+ for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
+ int is_ram = page_is_ram(pfn);
+
+- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
++ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
+ return NULL;
+ WARN_ON_ONCE(is_ram);
+ }
+@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long ph
+
+ /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
+ if (page_is_ram(start >> PAGE_SHIFT))
++#ifdef CONFIG_HIGHMEM
++ if ((start >> PAGE_SHIFT) < max_low_pfn)
++#endif
+ return __va(phys);
+
+ addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
+@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_se
+ early_param("early_ioremap_debug", early_ioremap_debug_setup);
+
+ static __initdata int after_paging_init;
+-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
++static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
+
+ static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+ {
+@@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
+ slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
+
+ pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+- memset(bm_pte, 0, sizeof(bm_pte));
+- pmd_populate_kernel(&init_mm, pmd, bm_pte);
++ pmd_populate_user(&init_mm, pmd, bm_pte);
+
+ /*
+ * The boot-ioremap range spans multiple pmds, for which
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.4-pax/arch/x86/mm/kmemcheck/kmemcheck.c
+--- linux-3.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/mm/kmemcheck/kmemcheck.c 2012-05-21 12:10:09.760048901 +0200
+@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
+ * memory (e.g. tracked pages)? For now, we need this to avoid
+ * invoking kmemcheck for PnP BIOS calls.
+ */
+- if (regs->flags & X86_VM_MASK)
++ if (v8086_mode(regs))
+ return false;
+- if (regs->cs != __KERNEL_CS)
++ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
+ return false;
+
+ pte = kmemcheck_pte_lookup(address);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/mmap.c linux-3.4-pax/arch/x86/mm/mmap.c
+--- linux-3.4/arch/x86/mm/mmap.c 2012-03-19 10:38:56.692049991 +0100
++++ linux-3.4-pax/arch/x86/mm/mmap.c 2012-05-21 12:10:09.764048901 +0200
+@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size
+ * Leave an at least ~128 MB hole with possible stack randomization.
+ */
+ #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
+-#define MAX_GAP (TASK_SIZE/6*5)
++#define MAX_GAP (pax_task_size/6*5)
+
+ static int mmap_is_legacy(void)
+ {
+@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
+ return rnd << PAGE_SHIFT;
+ }
+
+-static unsigned long mmap_base(void)
++static unsigned long mmap_base(struct mm_struct *mm)
+ {
+ unsigned long gap = rlimit(RLIMIT_STACK);
++ unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
+
+ if (gap < MIN_GAP)
+ gap = MIN_GAP;
+ else if (gap > MAX_GAP)
+ gap = MAX_GAP;
+
+- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
++ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
+ }
+
+ /*
+ * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
+ * does, but not when emulating X86_32
+ */
+-static unsigned long mmap_legacy_base(void)
++static unsigned long mmap_legacy_base(struct mm_struct *mm)
+ {
+- if (mmap_is_ia32())
++ if (mmap_is_ia32()) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ return SEGMEXEC_TASK_UNMAPPED_BASE;
++ else
++#endif
++
+ return TASK_UNMAPPED_BASE;
+- else
++ } else
+ return TASK_UNMAPPED_BASE + mmap_rnd();
+ }
+
+@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(vo
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ if (mmap_is_legacy()) {
+- mm->mmap_base = mmap_legacy_base();
++ mm->mmap_base = mmap_legacy_base(mm);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+- mm->mmap_base = mmap_base();
++ mm->mmap_base = mmap_base(mm);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/mmio-mod.c linux-3.4-pax/arch/x86/mm/mmio-mod.c
+--- linux-3.4/arch/x86/mm/mmio-mod.c 2012-03-19 10:38:56.692049991 +0100
++++ linux-3.4-pax/arch/x86/mm/mmio-mod.c 2012-05-21 12:10:09.764048901 +0200
+@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, s
+ break;
+ default:
+ {
+- unsigned char *ip = (unsigned char *)instptr;
++ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
+ my_trace->opcode = MMIO_UNKNOWN_OP;
+ my_trace->width = 0;
+ my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
+@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p,
+ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
+ void __iomem *addr)
+ {
+- static atomic_t next_id;
++ static atomic_unchecked_t next_id;
+ struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
+ /* These are page-unaligned. */
+ struct mmiotrace_map map = {
+@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_
+ .private = trace
+ },
+ .phys = offset,
+- .id = atomic_inc_return(&next_id)
++ .id = atomic_inc_return_unchecked(&next_id)
+ };
+ map.map_id = trace->id;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/pageattr.c linux-3.4-pax/arch/x86/mm/pageattr.c
+--- linux-3.4/arch/x86/mm/pageattr.c 2012-03-19 10:38:56.696049991 +0100
++++ linux-3.4-pax/arch/x86/mm/pageattr.c 2012-05-21 12:10:09.768048902 +0200
+@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
+ */
+ #ifdef CONFIG_PCI_BIOS
+ if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
+- pgprot_val(forbidden) |= _PAGE_NX;
++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
+ #endif
+
+ /*
+@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
+ * Does not cover __inittext since that is gone later on. On
+ * 64bit we do not enforce !NX on the low mapping
+ */
+- if (within(address, (unsigned long)_text, (unsigned long)_etext))
+- pgprot_val(forbidden) |= _PAGE_NX;
++ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
+
++#ifdef CONFIG_DEBUG_RODATA
+ /*
+ * The .rodata section needs to be read-only. Using the pfn
+ * catches all aliases.
+@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
+ if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
+ __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
+ pgprot_val(forbidden) |= _PAGE_RW;
++#endif
+
+ #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
+ /*
+@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
+ }
+ #endif
+
++#ifdef CONFIG_PAX_KERNEXEC
++ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
++ pgprot_val(forbidden) |= _PAGE_RW;
++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
++ }
++#endif
++
+ prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
+
+ return prot;
+@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
+ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
+ {
+ /* change init_mm */
++ pax_open_kernel();
+ set_pte_atomic(kpte, pte);
++
+ #ifdef CONFIG_X86_32
+ if (!SHARED_KERNEL_PMD) {
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ unsigned long cpu;
++#else
+ struct page *page;
++#endif
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
++ pgd_t *pgd = get_cpu_pgd(cpu);
++#else
+ list_for_each_entry(page, &pgd_list, lru) {
+- pgd_t *pgd;
++ pgd_t *pgd = (pgd_t *)page_address(page);
++#endif
++
+ pud_t *pud;
+ pmd_t *pmd;
+
+- pgd = (pgd_t *)page_address(page) + pgd_index(address);
++ pgd += pgd_index(address);
+ pud = pud_offset(pgd, address);
+ pmd = pmd_offset(pud, address);
+ set_pte_atomic((pte_t *)pmd, pte);
+ }
+ }
+ #endif
++ pax_close_kernel();
+ }
+
+ static int
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/pageattr-test.c linux-3.4-pax/arch/x86/mm/pageattr-test.c
+--- linux-3.4/arch/x86/mm/pageattr-test.c 2011-10-24 12:48:26.371091767 +0200
++++ linux-3.4-pax/arch/x86/mm/pageattr-test.c 2012-05-21 12:10:09.772048902 +0200
+@@ -36,7 +36,7 @@ enum {
+
+ static int pte_testbit(pte_t pte)
+ {
+- return pte_flags(pte) & _PAGE_UNUSED1;
++ return pte_flags(pte) & _PAGE_CPA_TEST;
+ }
+
+ struct split_state {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/pat.c linux-3.4-pax/arch/x86/mm/pat.c
+--- linux-3.4/arch/x86/mm/pat.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/mm/pat.c 2012-05-21 12:10:09.772048902 +0200
+@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
+
+ if (!entry) {
+ printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
+- current->comm, current->pid, start, end);
++ current->comm, task_pid_nr(current), start, end);
+ return -EINVAL;
+ }
+
+@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
+ while (cursor < to) {
+ if (!devmem_is_allowed(pfn)) {
+ printk(KERN_INFO
+- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
+- current->comm, from, to);
++ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
++ current->comm, from, to, cursor);
+ return 0;
+ }
+ cursor += PAGE_SIZE;
+@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
+ printk(KERN_INFO
+ "%s:%d ioremap_change_attr failed %s "
+ "for %Lx-%Lx\n",
+- current->comm, current->pid,
++ current->comm, task_pid_nr(current),
+ cattr_name(flags),
+ base, (unsigned long long)(base + size));
+ return -EINVAL;
+@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
+ if (want_flags != flags) {
+ printk(KERN_WARNING
+ "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
+- current->comm, current->pid,
++ current->comm, task_pid_nr(current),
+ cattr_name(want_flags),
+ (unsigned long long)paddr,
+ (unsigned long long)(paddr + size),
+@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
+ free_memtype(paddr, paddr + size);
+ printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
+ " for %Lx-%Lx, got %s\n",
+- current->comm, current->pid,
++ current->comm, task_pid_nr(current),
+ cattr_name(want_flags),
+ (unsigned long long)paddr,
+ (unsigned long long)(paddr + size),
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/pf_in.c linux-3.4-pax/arch/x86/mm/pf_in.c
+--- linux-3.4/arch/x86/mm/pf_in.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/mm/pf_in.c 2012-05-21 12:10:09.776048902 +0200
+@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
+ int i;
+ enum reason_type rv = OTHERS;
+
+- p = (unsigned char *)ins_addr;
++ p = (unsigned char *)ktla_ktva(ins_addr);
+ p += skip_prefix(p, &prf);
+ p += get_opcode(p, &opcode);
+
+@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
+ struct prefix_bits prf;
+ int i;
+
+- p = (unsigned char *)ins_addr;
++ p = (unsigned char *)ktla_ktva(ins_addr);
+ p += skip_prefix(p, &prf);
+ p += get_opcode(p, &opcode);
+
+@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
+ struct prefix_bits prf;
+ int i;
+
+- p = (unsigned char *)ins_addr;
++ p = (unsigned char *)ktla_ktva(ins_addr);
+ p += skip_prefix(p, &prf);
+ p += get_opcode(p, &opcode);
+
+@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
+ struct prefix_bits prf;
+ int i;
+
+- p = (unsigned char *)ins_addr;
++ p = (unsigned char *)ktla_ktva(ins_addr);
+ p += skip_prefix(p, &prf);
+ p += get_opcode(p, &opcode);
+ for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
+@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
+ struct prefix_bits prf;
+ int i;
+
+- p = (unsigned char *)ins_addr;
++ p = (unsigned char *)ktla_ktva(ins_addr);
+ p += skip_prefix(p, &prf);
+ p += get_opcode(p, &opcode);
+ for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/pgtable_32.c linux-3.4-pax/arch/x86/mm/pgtable_32.c
+--- linux-3.4/arch/x86/mm/pgtable_32.c 2012-05-21 11:32:57.883927683 +0200
++++ linux-3.4-pax/arch/x86/mm/pgtable_32.c 2012-05-21 12:10:09.776048902 +0200
+@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr,
+ return;
+ }
+ pte = pte_offset_kernel(pmd, vaddr);
++
++ pax_open_kernel();
+ if (pte_val(pteval))
+ set_pte_at(&init_mm, vaddr, pte, pteval);
+ else
+ pte_clear(&init_mm, vaddr, pte);
++ pax_close_kernel();
+
+ /*
+ * It's enough to flush this one mapping.
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/pgtable.c linux-3.4-pax/arch/x86/mm/pgtable.c
+--- linux-3.4/arch/x86/mm/pgtable.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/mm/pgtable.c 2012-05-28 00:31:51.235163437 +0200
+@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *p
+ list_del(&page->lru);
+ }
+
+-#define UNSHARED_PTRS_PER_PGD \
+- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
+
++void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
++{
++ unsigned int count = USER_PGD_PTRS;
++
++ while (count--)
++ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
++}
++#endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
++{
++ unsigned int count = USER_PGD_PTRS;
++
++ while (count--) {
++ pgd_t pgd;
++
++#ifdef CONFIG_X86_64
++ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
++#else
++ pgd = *src++;
++#endif
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
++#endif
++
++ *dst++ = pgd;
++ }
++
++}
++#endif
++
++#ifdef CONFIG_X86_64
++#define pxd_t pud_t
++#define pyd_t pgd_t
++#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
++#define pxd_free(mm, pud) pud_free((mm), (pud))
++#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
++#define pyd_offset(mm, address) pgd_offset((mm), (address))
++#define PYD_SIZE PGDIR_SIZE
++#else
++#define pxd_t pmd_t
++#define pyd_t pud_t
++#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
++#define pxd_free(mm, pud) pmd_free((mm), (pud))
++#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
++#define pyd_offset(mm, address) pud_offset((mm), (address))
++#define PYD_SIZE PUD_SIZE
++#endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
++static inline void pgd_dtor(pgd_t *pgd) {}
++#else
+ static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
+ {
+ BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
+@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
+ pgd_list_del(pgd);
+ spin_unlock(&pgd_lock);
+ }
++#endif
+
+ /*
+ * List of all pgd's needed for non-PAE so it can invalidate entries
+@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
+ * -- wli
+ */
+
+-#ifdef CONFIG_X86_PAE
++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
+ /*
+ * In PAE mode, we need to do a cr3 reload (=tlb flush) when
+ * updating the top-level pagetable entries to guarantee the
+@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
+ * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
+ * and initialize the kernel pmds here.
+ */
+-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
++#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
+
+ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
+ {
+@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm,
+ */
+ flush_tlb_mm(mm);
+ }
++#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
++#define PREALLOCATED_PXDS USER_PGD_PTRS
+ #else /* !CONFIG_X86_PAE */
+
+ /* No need to prepopulate any pagetable entries in non-PAE modes. */
+-#define PREALLOCATED_PMDS 0
++#define PREALLOCATED_PXDS 0
+
+ #endif /* CONFIG_X86_PAE */
+
+-static void free_pmds(pmd_t *pmds[])
++static void free_pxds(pxd_t *pxds[])
+ {
+ int i;
+
+- for(i = 0; i < PREALLOCATED_PMDS; i++)
+- if (pmds[i])
+- free_page((unsigned long)pmds[i]);
++ for(i = 0; i < PREALLOCATED_PXDS; i++)
++ if (pxds[i])
++ free_page((unsigned long)pxds[i]);
+ }
+
+-static int preallocate_pmds(pmd_t *pmds[])
++static int preallocate_pxds(pxd_t *pxds[])
+ {
+ int i;
+ bool failed = false;
+
+- for(i = 0; i < PREALLOCATED_PMDS; i++) {
+- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
+- if (pmd == NULL)
++ for(i = 0; i < PREALLOCATED_PXDS; i++) {
++ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
++ if (pxd == NULL)
+ failed = true;
+- pmds[i] = pmd;
++ pxds[i] = pxd;
+ }
+
+ if (failed) {
+- free_pmds(pmds);
++ free_pxds(pxds);
+ return -ENOMEM;
+ }
+
+@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[
+ * preallocate which never got a corresponding vma will need to be
+ * freed manually.
+ */
+-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
++static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
+ {
+ int i;
+
+- for(i = 0; i < PREALLOCATED_PMDS; i++) {
++ for(i = 0; i < PREALLOCATED_PXDS; i++) {
+ pgd_t pgd = pgdp[i];
+
+ if (pgd_val(pgd) != 0) {
+- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
++ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
+
+- pgdp[i] = native_make_pgd(0);
++ set_pgd(pgdp + i, native_make_pgd(0));
+
+- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
+- pmd_free(mm, pmd);
++ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
++ pxd_free(mm, pxd);
+ }
+ }
+ }
+
+-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
++static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
+ {
+- pud_t *pud;
++ pyd_t *pyd;
+ unsigned long addr;
+ int i;
+
+- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
++ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
+ return;
+
+- pud = pud_offset(pgd, 0);
+-
+- for (addr = i = 0; i < PREALLOCATED_PMDS;
+- i++, pud++, addr += PUD_SIZE) {
+- pmd_t *pmd = pmds[i];
++#ifdef CONFIG_X86_64
++ pyd = pyd_offset(mm, 0L);
++#else
++ pyd = pyd_offset(pgd, 0L);
++#endif
++
++ for (addr = i = 0; i < PREALLOCATED_PXDS;
++ i++, pyd++, addr += PYD_SIZE) {
++ pxd_t *pxd = pxds[i];
+
+ if (i >= KERNEL_PGD_BOUNDARY)
+- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
+- sizeof(pmd_t) * PTRS_PER_PMD);
++ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
++ sizeof(pxd_t) * PTRS_PER_PMD);
+
+- pud_populate(mm, pud, pmd);
++ pyd_populate(mm, pyd, pxd);
+ }
+ }
+
+ pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+ pgd_t *pgd;
+- pmd_t *pmds[PREALLOCATED_PMDS];
++ pxd_t *pxds[PREALLOCATED_PXDS];
+
+ pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
+
+@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+
+ mm->pgd = pgd;
+
+- if (preallocate_pmds(pmds) != 0)
++ if (preallocate_pxds(pxds) != 0)
+ goto out_free_pgd;
+
+ if (paravirt_pgd_alloc(mm) != 0)
+- goto out_free_pmds;
++ goto out_free_pxds;
+
+ /*
+ * Make sure that pre-populating the pmds is atomic with
+@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+ spin_lock(&pgd_lock);
+
+ pgd_ctor(mm, pgd);
+- pgd_prepopulate_pmd(mm, pgd, pmds);
++ pgd_prepopulate_pxd(mm, pgd, pxds);
+
+ spin_unlock(&pgd_lock);
+
+ return pgd;
+
+-out_free_pmds:
+- free_pmds(pmds);
++out_free_pxds:
++ free_pxds(pxds);
+ out_free_pgd:
+ free_page((unsigned long)pgd);
+ out:
+@@ -295,7 +356,7 @@ out:
+
+ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ {
+- pgd_mop_up_pmds(mm, pgd);
++ pgd_mop_up_pxds(mm, pgd);
+ pgd_dtor(pgd);
+ paravirt_pgd_free(mm, pgd);
+ free_page((unsigned long)pgd);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/setup_nx.c linux-3.4-pax/arch/x86/mm/setup_nx.c
+--- linux-3.4/arch/x86/mm/setup_nx.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/mm/setup_nx.c 2012-05-21 12:10:09.780048902 +0200
+@@ -5,8 +5,10 @@
+ #include <asm/pgtable.h>
+ #include <asm/proto.h>
+
++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ static int disable_nx __cpuinitdata;
+
++#ifndef CONFIG_PAX_PAGEEXEC
+ /*
+ * noexec = on|off
+ *
+@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
+ return 0;
+ }
+ early_param("noexec", noexec_setup);
++#endif
++
++#endif
+
+ void __cpuinit x86_configure_nx(void)
+ {
++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ if (cpu_has_nx && !disable_nx)
+ __supported_pte_mask |= _PAGE_NX;
+ else
++#endif
+ __supported_pte_mask &= ~_PAGE_NX;
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/mm/tlb.c linux-3.4-pax/arch/x86/mm/tlb.c
+--- linux-3.4/arch/x86/mm/tlb.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/mm/tlb.c 2012-05-21 12:10:09.780048902 +0200
+@@ -65,7 +65,11 @@ void leave_mm(int cpu)
+ BUG();
+ cpumask_clear_cpu(cpu,
+ mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ load_cr3(swapper_pg_dir);
++#endif
++
+ }
+ EXPORT_SYMBOL_GPL(leave_mm);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/net/bpf_jit_comp.c linux-3.4-pax/arch/x86/net/bpf_jit_comp.c
+--- linux-3.4/arch/x86/net/bpf_jit_comp.c 2012-05-21 11:32:57.887927682 +0200
++++ linux-3.4-pax/arch/x86/net/bpf_jit_comp.c 2012-05-21 12:10:09.784048902 +0200
+@@ -120,6 +120,11 @@ static inline void bpf_flush_icache(void
+ set_fs(old_fs);
+ }
+
++struct bpf_jit_work {
++ struct work_struct work;
++ void *image;
++};
++
+ #define CHOOSE_LOAD_FUNC(K, func) \
+ ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
+
+@@ -146,6 +151,10 @@ void bpf_jit_compile(struct sk_filter *f
+ if (addrs == NULL)
+ return;
+
++ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
++ if (!fp->work)
++ goto out;
++
+ /* Before first pass, make a rough estimation of addrs[]
+ * each bpf instruction is translated to less than 64 bytes
+ */
+@@ -589,17 +598,18 @@ cond_branch: f_offset = addrs[i + filt
+ break;
+ default:
+ /* hmm, too complex filter, give up with jit compiler */
+- goto out;
++ goto error;
+ }
+ ilen = prog - temp;
+ if (image) {
+ if (unlikely(proglen + ilen > oldproglen)) {
+ pr_err("bpb_jit_compile fatal error\n");
+- kfree(addrs);
+- module_free(NULL, image);
+- return;
++ module_free_exec(NULL, image);
++ goto error;
+ }
++ pax_open_kernel();
+ memcpy(image + proglen, temp, ilen);
++ pax_close_kernel();
+ }
+ proglen += ilen;
+ addrs[i] = proglen;
+@@ -620,11 +630,9 @@ cond_branch: f_offset = addrs[i + filt
+ break;
+ }
+ if (proglen == oldproglen) {
+- image = module_alloc(max_t(unsigned int,
+- proglen,
+- sizeof(struct work_struct)));
++ image = module_alloc_exec(proglen);
+ if (!image)
+- goto out;
++ goto error;
+ }
+ oldproglen = proglen;
+ }
+@@ -640,7 +648,10 @@ cond_branch: f_offset = addrs[i + filt
+ bpf_flush_icache(image, image + proglen);
+
+ fp->bpf_func = (void *)image;
+- }
++ } else
++error:
++ kfree(fp->work);
++
+ out:
+ kfree(addrs);
+ return;
+@@ -648,18 +659,20 @@ out:
+
+ static void jit_free_defer(struct work_struct *arg)
+ {
+- module_free(NULL, arg);
++ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
++ kfree(arg);
+ }
+
+ /* run from softirq, we must use a work_struct to call
+- * module_free() from process context
++ * module_free_exec() from process context
+ */
+ void bpf_jit_free(struct sk_filter *fp)
+ {
+ if (fp->bpf_func != sk_run_filter) {
+- struct work_struct *work = (struct work_struct *)fp->bpf_func;
++ struct work_struct *work = &fp->work->work;
+
+ INIT_WORK(work, jit_free_defer);
++ fp->work->image = fp->bpf_func;
+ schedule_work(work);
+ }
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/net/bpf_jit.S linux-3.4-pax/arch/x86/net/bpf_jit.S
+--- linux-3.4/arch/x86/net/bpf_jit.S 2012-05-21 11:32:57.883927683 +0200
++++ linux-3.4-pax/arch/x86/net/bpf_jit.S 2012-05-21 12:10:09.788048903 +0200
+@@ -9,6 +9,7 @@
+ */
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
++#include <asm/alternative-asm.h>
+
+ /*
+ * Calling convention :
+@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
+ jle bpf_slow_path_word
+ mov (SKBDATA,%rsi),%eax
+ bswap %eax /* ntohl() */
++ pax_force_retaddr
+ ret
+
+ sk_load_half:
+@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
+ jle bpf_slow_path_half
+ movzwl (SKBDATA,%rsi),%eax
+ rol $8,%ax # ntohs()
++ pax_force_retaddr
+ ret
+
+ sk_load_byte:
+@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
+ cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
+ jle bpf_slow_path_byte
+ movzbl (SKBDATA,%rsi),%eax
++ pax_force_retaddr
+ ret
+
+ /**
+@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
+ movzbl (SKBDATA,%rsi),%ebx
+ and $15,%bl
+ shl $2,%bl
++ pax_force_retaddr
+ ret
+
+ /* rsi contains offset and can be scratched */
+@@ -109,6 +114,7 @@ bpf_slow_path_word:
+ js bpf_error
+ mov -12(%rbp),%eax
+ bswap %eax
++ pax_force_retaddr
+ ret
+
+ bpf_slow_path_half:
+@@ -117,12 +123,14 @@ bpf_slow_path_half:
+ mov -12(%rbp),%ax
+ rol $8,%ax
+ movzwl %ax,%eax
++ pax_force_retaddr
+ ret
+
+ bpf_slow_path_byte:
+ bpf_slow_path_common(1)
+ js bpf_error
+ movzbl -12(%rbp),%eax
++ pax_force_retaddr
+ ret
+
+ bpf_slow_path_byte_msh:
+@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
+ and $15,%al
+ shl $2,%al
+ xchg %eax,%ebx
++ pax_force_retaddr
+ ret
+
+ #define sk_negative_common(SIZE) \
+@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
+ sk_negative_common(4)
+ mov (%rax), %eax
+ bswap %eax
++ pax_force_retaddr
+ ret
+
+ bpf_slow_path_half_neg:
+@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
+ mov (%rax),%ax
+ rol $8,%ax
+ movzwl %ax,%eax
++ pax_force_retaddr
+ ret
+
+ bpf_slow_path_byte_neg:
+@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
+ .globl sk_load_byte_negative_offset
+ sk_negative_common(1)
+ movzbl (%rax), %eax
++ pax_force_retaddr
+ ret
+
+ bpf_slow_path_byte_msh_neg:
+@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
+ and $15,%al
+ shl $2,%al
+ xchg %eax,%ebx
++ pax_force_retaddr
+ ret
+
+ bpf_error:
+@@ -197,4 +210,5 @@ bpf_error:
+ xor %eax,%eax
+ mov -8(%rbp),%rbx
+ leaveq
++ pax_force_retaddr
+ ret
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/oprofile/backtrace.c linux-3.4-pax/arch/x86/oprofile/backtrace.c
+--- linux-3.4/arch/x86/oprofile/backtrace.c 2012-05-21 11:32:58.227927701 +0200
++++ linux-3.4-pax/arch/x86/oprofile/backtrace.c 2012-05-21 12:10:09.788048903 +0200
+@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_fram
+ struct stack_frame_ia32 *fp;
+ unsigned long bytes;
+
+- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
++ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
+ if (bytes != sizeof(bufhead))
+ return NULL;
+
+- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
++ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
+
+ oprofile_add_trace(bufhead[0].return_address);
+
+@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_bac
+ struct stack_frame bufhead[2];
+ unsigned long bytes;
+
+- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
++ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
+ if (bytes != sizeof(bufhead))
+ return NULL;
+
+@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const reg
+ {
+ struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
+
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ unsigned long stack = kernel_stack_pointer(regs);
+ if (depth)
+ dump_trace(NULL, regs, (unsigned long *)stack, 0,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/pci/mrst.c linux-3.4-pax/arch/x86/pci/mrst.c
+--- linux-3.4/arch/x86/pci/mrst.c 2012-05-21 11:32:58.243927702 +0200
++++ linux-3.4-pax/arch/x86/pci/mrst.c 2012-05-21 12:10:09.788048903 +0200
+@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
+ printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
+ pci_mmcfg_late_init();
+ pcibios_enable_irq = mrst_pci_irq_enable;
+- pci_root_ops = pci_mrst_ops;
++ pax_open_kernel();
++ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
++ pax_close_kernel();
+ pci_soc_mode = 1;
+ /* Continue with standard init */
+ return 1;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/pci/pcbios.c linux-3.4-pax/arch/x86/pci/pcbios.c
+--- linux-3.4/arch/x86/pci/pcbios.c 2012-03-19 10:38:56.712049990 +0100
++++ linux-3.4-pax/arch/x86/pci/pcbios.c 2012-05-21 12:10:09.792048903 +0200
+@@ -79,50 +79,93 @@ union bios32 {
+ static struct {
+ unsigned long address;
+ unsigned short segment;
+-} bios32_indirect = { 0, __KERNEL_CS };
++} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
+
+ /*
+ * Returns the entry point for the given service, NULL on error
+ */
+
+-static unsigned long bios32_service(unsigned long service)
++static unsigned long __devinit bios32_service(unsigned long service)
+ {
+ unsigned char return_code; /* %al */
+ unsigned long address; /* %ebx */
+ unsigned long length; /* %ecx */
+ unsigned long entry; /* %edx */
+ unsigned long flags;
++ struct desc_struct d, *gdt;
+
+ local_irq_save(flags);
+- __asm__("lcall *(%%edi); cld"
++
++ gdt = get_cpu_gdt_table(smp_processor_id());
++
++ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
++ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
++
++ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
+ : "=a" (return_code),
+ "=b" (address),
+ "=c" (length),
+ "=d" (entry)
+ : "0" (service),
+ "1" (0),
+- "D" (&bios32_indirect));
++ "D" (&bios32_indirect),
++ "r"(__PCIBIOS_DS)
++ : "memory");
++
++ pax_open_kernel();
++ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
++ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
++ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
++ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
++ pax_close_kernel();
++
+ local_irq_restore(flags);
+
+ switch (return_code) {
+- case 0:
+- return address + entry;
+- case 0x80: /* Not present */
+- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
+- return 0;
+- default: /* Shouldn't happen */
+- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
+- service, return_code);
++ case 0: {
++ int cpu;
++ unsigned char flags;
++
++ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
++ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
++ printk(KERN_WARNING "bios32_service: not valid\n");
+ return 0;
++ }
++ address = address + PAGE_OFFSET;
++ length += 16UL; /* some BIOSs underreport this... */
++ flags = 4;
++ if (length >= 64*1024*1024) {
++ length >>= PAGE_SHIFT;
++ flags |= 8;
++ }
++
++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
++ gdt = get_cpu_gdt_table(cpu);
++ pack_descriptor(&d, address, length, 0x9b, flags);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
++ pack_descriptor(&d, address, length, 0x93, flags);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
++ }
++ return entry;
++ }
++ case 0x80: /* Not present */
++ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
++ return 0;
++ default: /* Shouldn't happen */
++ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
++ service, return_code);
++ return 0;
+ }
+ }
+
+ static struct {
+ unsigned long address;
+ unsigned short segment;
+-} pci_indirect = { 0, __KERNEL_CS };
++} pci_indirect __read_only = { 0, __PCIBIOS_CS };
+
+-static int pci_bios_present;
++static int pci_bios_present __read_only;
+
+ static int __devinit check_pcibios(void)
+ {
+@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
+ unsigned long flags, pcibios_entry;
+
+ if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
+- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
++ pci_indirect.address = pcibios_entry;
+
+ local_irq_save(flags);
+- __asm__(
+- "lcall *(%%edi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%edi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
+ "=b" (ebx),
+ "=c" (ecx)
+ : "1" (PCIBIOS_PCI_BIOS_PRESENT),
+- "D" (&pci_indirect)
++ "D" (&pci_indirect),
++ "r" (__PCIBIOS_DS)
+ : "memory");
+ local_irq_restore(flags);
+
+@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int se
+
+ switch (len) {
+ case 1:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int se
+ : "1" (PCIBIOS_READ_CONFIG_BYTE),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ /*
+ * Zero-extend the result beyond 8 bits, do not trust the
+ * BIOS having done it:
+@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int se
+ *value &= 0xff;
+ break;
+ case 2:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int se
+ : "1" (PCIBIOS_READ_CONFIG_WORD),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ /*
+ * Zero-extend the result beyond 16 bits, do not trust the
+ * BIOS having done it:
+@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int se
+ *value &= 0xffff;
+ break;
+ case 4:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int se
+ : "1" (PCIBIOS_READ_CONFIG_DWORD),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ }
+
+@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int s
+
+ switch (len) {
+ case 1:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int s
+ "c" (value),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ case 2:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int s
+ "c" (value),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ case 4:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int s
+ "c" (value),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ }
+
+@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_i
+
+ DBG("PCI: Fetching IRQ routing table... ");
+ __asm__("push %%es\n\t"
++ "movw %w8, %%ds\n\t"
+ "push %%ds\n\t"
+ "pop %%es\n\t"
+- "lcall *(%%esi); cld\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
+ "pop %%es\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_i
+ "1" (0),
+ "D" ((long) &opt),
+ "S" (&pci_indirect),
+- "m" (opt)
++ "m" (opt),
++ "r" (__PCIBIOS_DS)
+ : "memory");
+ DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
+ if (ret & 0xff00)
+@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_d
+ {
+ int ret;
+
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w5, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_d
+ : "0" (PCIBIOS_SET_PCI_HW_INT),
+ "b" ((dev->bus->number << 8) | dev->devfn),
+ "c" ((irq << 8) | (pin + 10)),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ return !(ret & 0xff00);
+ }
+ EXPORT_SYMBOL(pcibios_set_irq_routing);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/platform/efi/efi_32.c linux-3.4-pax/arch/x86/platform/efi/efi_32.c
+--- linux-3.4/arch/x86/platform/efi/efi_32.c 2012-01-08 19:47:49.851472967 +0100
++++ linux-3.4-pax/arch/x86/platform/efi/efi_32.c 2012-05-21 12:10:09.792048903 +0200
+@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
+ {
+ struct desc_ptr gdt_descr;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ struct desc_struct d;
++#endif
++
+ local_irq_save(efi_rt_eflags);
+
+ load_cr3(initial_page_table);
+ __flush_tlb_all();
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
++ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
++#endif
++
+ gdt_descr.address = __pa(get_cpu_gdt_table(0));
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
+ {
+ struct desc_ptr gdt_descr;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ struct desc_struct d;
++
++ memset(&d, 0, sizeof d);
++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
++#endif
++
+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/platform/efi/efi_stub_32.S linux-3.4-pax/arch/x86/platform/efi/efi_stub_32.S
+--- linux-3.4/arch/x86/platform/efi/efi_stub_32.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/platform/efi/efi_stub_32.S 2012-05-21 12:10:09.796048903 +0200
+@@ -6,7 +6,9 @@
+ */
+
+ #include <linux/linkage.h>
++#include <linux/init.h>
+ #include <asm/page_types.h>
++#include <asm/segment.h>
+
+ /*
+ * efi_call_phys(void *, ...) is a function with variable parameters.
+@@ -20,7 +22,7 @@
+ * service functions will comply with gcc calling convention, too.
+ */
+
+-.text
++__INIT
+ ENTRY(efi_call_phys)
+ /*
+ * 0. The function can only be called in Linux kernel. So CS has been
+@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
+ * The mapping of lower virtual memory has been created in prelog and
+ * epilog.
+ */
+- movl $1f, %edx
+- subl $__PAGE_OFFSET, %edx
+- jmp *%edx
++ movl $(__KERNEXEC_EFI_DS), %edx
++ mov %edx, %ds
++ mov %edx, %es
++ mov %edx, %ss
++ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
+ 1:
+
+ /*
+@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
+ * parameter 2, ..., param n. To make things easy, we save the return
+ * address of efi_call_phys in a global variable.
+ */
+- popl %edx
+- movl %edx, saved_return_addr
+- /* get the function pointer into ECX*/
+- popl %ecx
+- movl %ecx, efi_rt_function_ptr
+- movl $2f, %edx
+- subl $__PAGE_OFFSET, %edx
+- pushl %edx
++ popl (saved_return_addr)
++ popl (efi_rt_function_ptr)
+
+ /*
+ * 3. Clear PG bit in %CR0.
+@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
+ /*
+ * 5. Call the physical function.
+ */
+- jmp *%ecx
++ call *(efi_rt_function_ptr-__PAGE_OFFSET)
+
+-2:
+ /*
+ * 6. After EFI runtime service returns, control will return to
+ * following instruction. We'd better readjust stack pointer first.
+@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
+ movl %cr0, %edx
+ orl $0x80000000, %edx
+ movl %edx, %cr0
+- jmp 1f
+-1:
++
+ /*
+ * 8. Now restore the virtual mode from flat mode by
+ * adding EIP with PAGE_OFFSET.
+ */
+- movl $1f, %edx
+- jmp *%edx
++ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
+ 1:
++ movl $(__KERNEL_DS), %edx
++ mov %edx, %ds
++ mov %edx, %es
++ mov %edx, %ss
+
+ /*
+ * 9. Balance the stack. And because EAX contain the return value,
+ * we'd better not clobber it.
+ */
+- leal efi_rt_function_ptr, %edx
+- movl (%edx), %ecx
+- pushl %ecx
++ pushl (efi_rt_function_ptr)
+
+ /*
+- * 10. Push the saved return address onto the stack and return.
++ * 10. Return to the saved return address.
+ */
+- leal saved_return_addr, %edx
+- movl (%edx), %ecx
+- pushl %ecx
+- ret
++ jmpl *(saved_return_addr)
+ ENDPROC(efi_call_phys)
+ .previous
+
+-.data
++__INITDATA
+ saved_return_addr:
+ .long 0
+ efi_rt_function_ptr:
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/platform/efi/efi_stub_64.S linux-3.4-pax/arch/x86/platform/efi/efi_stub_64.S
+--- linux-3.4/arch/x86/platform/efi/efi_stub_64.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/platform/efi/efi_stub_64.S 2012-05-21 12:10:09.796048903 +0200
+@@ -7,6 +7,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ #define SAVE_XMM \
+ mov %rsp, %rax; \
+@@ -40,6 +41,7 @@ ENTRY(efi_call0)
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call0)
+
+@@ -50,6 +52,7 @@ ENTRY(efi_call1)
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call1)
+
+@@ -60,6 +63,7 @@ ENTRY(efi_call2)
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call2)
+
+@@ -71,6 +75,7 @@ ENTRY(efi_call3)
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call3)
+
+@@ -83,6 +88,7 @@ ENTRY(efi_call4)
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call4)
+
+@@ -96,6 +102,7 @@ ENTRY(efi_call5)
+ call *%rdi
+ addq $48, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call5)
+
+@@ -112,5 +119,6 @@ ENTRY(efi_call6)
+ call *%rdi
+ addq $48, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call6)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/platform/mrst/mrst.c linux-3.4-pax/arch/x86/platform/mrst/mrst.c
+--- linux-3.4/arch/x86/platform/mrst/mrst.c 2012-05-21 11:32:58.275927703 +0200
++++ linux-3.4-pax/arch/x86/platform/mrst/mrst.c 2012-05-21 12:10:09.800048903 +0200
+@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_arra
+ EXPORT_SYMBOL_GPL(sfi_mrtc_array);
+ int sfi_mrtc_num;
+
+-static void mrst_power_off(void)
++static __noreturn void mrst_power_off(void)
+ {
++ BUG();
+ }
+
+-static void mrst_reboot(void)
++static __noreturn void mrst_reboot(void)
+ {
+ intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
++ BUG();
+ }
+
+ /* parse all the mtimer info to a static mtimer array */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/power/cpu.c linux-3.4-pax/arch/x86/power/cpu.c
+--- linux-3.4/arch/x86/power/cpu.c 2012-05-21 11:32:58.343927707 +0200
++++ linux-3.4-pax/arch/x86/power/cpu.c 2012-05-21 12:10:09.804048904 +0200
+@@ -132,7 +132,7 @@ static void do_fpu_end(void)
+ static void fix_processor_context(void)
+ {
+ int cpu = smp_processor_id();
+- struct tss_struct *t = &per_cpu(init_tss, cpu);
++ struct tss_struct *t = init_tss + cpu;
+
+ set_tss_desc(cpu, t); /*
+ * This just modifies memory; should not be
+@@ -142,7 +142,9 @@ static void fix_processor_context(void)
+ */
+
+ #ifdef CONFIG_X86_64
++ pax_open_kernel();
+ get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
++ pax_close_kernel();
+
+ syscall_init(); /* This sets MSR_*STAR and related */
+ #endif
+Binary files linux-3.4/arch/x86/tools/insn_sanity and linux-3.4-pax/arch/x86/tools/insn_sanity differ
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/tools/relocs.c linux-3.4-pax/arch/x86/tools/relocs.c
+--- linux-3.4/arch/x86/tools/relocs.c 2012-05-21 11:32:58.379927709 +0200
++++ linux-3.4-pax/arch/x86/tools/relocs.c 2012-05-21 15:05:18.584619663 +0200
+@@ -12,10 +12,13 @@
+ #include <regex.h>
+ #include <tools/le_byteshift.h>
+
++#include "../../../include/generated/autoconf.h"
++
+ static void die(char *fmt, ...);
+
+ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+ static Elf32_Ehdr ehdr;
++static Elf32_Phdr *phdr;
+ static unsigned long reloc_count, reloc_idx;
+ static unsigned long *relocs;
+ static unsigned long reloc16_count, reloc16_idx;
+@@ -311,9 +314,39 @@ static void read_ehdr(FILE *fp)
+ }
+ }
+
++static void read_phdrs(FILE *fp)
++{
++ unsigned int i;
++
++ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
++ if (!phdr) {
++ die("Unable to allocate %d program headers\n",
++ ehdr.e_phnum);
++ }
++ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
++ die("Seek to %d failed: %s\n",
++ ehdr.e_phoff, strerror(errno));
++ }
++ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
++ die("Cannot read ELF program headers: %s\n",
++ strerror(errno));
++ }
++ for(i = 0; i < ehdr.e_phnum; i++) {
++ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
++ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
++ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
++ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
++ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
++ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
++ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
++ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
++ }
++
++}
++
+ static void read_shdrs(FILE *fp)
+ {
+- int i;
++ unsigned int i;
+ Elf32_Shdr shdr;
+
+ secs = calloc(ehdr.e_shnum, sizeof(struct section));
+@@ -348,7 +381,7 @@ static void read_shdrs(FILE *fp)
+
+ static void read_strtabs(FILE *fp)
+ {
+- int i;
++ unsigned int i;
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_STRTAB) {
+@@ -373,7 +406,7 @@ static void read_strtabs(FILE *fp)
+
+ static void read_symtabs(FILE *fp)
+ {
+- int i,j;
++ unsigned int i,j;
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_SYMTAB) {
+@@ -406,7 +439,9 @@ static void read_symtabs(FILE *fp)
+
+ static void read_relocs(FILE *fp)
+ {
+- int i,j;
++ unsigned int i,j;
++ uint32_t base;
++
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_REL) {
+@@ -426,9 +461,22 @@ static void read_relocs(FILE *fp)
+ die("Cannot read symbol table: %s\n",
+ strerror(errno));
+ }
++ base = 0;
++
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
++ for (j = 0; j < ehdr.e_phnum; j++) {
++ if (phdr[j].p_type != PT_LOAD )
++ continue;
++ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
++ continue;
++ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
++ break;
++ }
++#endif
++
+ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
+ Elf32_Rel *rel = &sec->reltab[j];
+- rel->r_offset = elf32_to_cpu(rel->r_offset);
++ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
+ rel->r_info = elf32_to_cpu(rel->r_info);
+ }
+ }
+@@ -437,13 +485,13 @@ static void read_relocs(FILE *fp)
+
+ static void print_absolute_symbols(void)
+ {
+- int i;
++ unsigned int i;
+ printf("Absolute symbols\n");
+ printf(" Num: Value Size Type Bind Visibility Name\n");
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ char *sym_strtab;
+- int j;
++ unsigned int j;
+
+ if (sec->shdr.sh_type != SHT_SYMTAB) {
+ continue;
+@@ -470,7 +518,7 @@ static void print_absolute_symbols(void)
+
+ static void print_absolute_relocs(void)
+ {
+- int i, printed = 0;
++ unsigned int i, printed = 0;
+
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+@@ -539,7 +587,7 @@ static void print_absolute_relocs(void)
+ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
+ int use_real_mode)
+ {
+- int i;
++ unsigned int i;
+ /* Walk through the relocations */
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ char *sym_strtab;
+@@ -569,6 +617,22 @@ static void walk_relocs(void (*visit)(El
+ sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
+ r_type = ELF32_R_TYPE(rel->r_info);
+
++ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
++ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
++ continue;
++
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
++ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
++ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
++ continue;
++#endif
++
+ shn_abs = sym->st_shndx == SHN_ABS;
+
+ switch (r_type) {
+@@ -662,7 +726,7 @@ static int write32(unsigned int v, FILE
+
+ static void emit_relocs(int as_text, int use_real_mode)
+ {
+- int i;
++ unsigned int i;
+ /* Count how many relocations I have and allocate space for them. */
+ reloc_count = 0;
+ walk_relocs(count_reloc, use_real_mode);
+@@ -789,6 +853,7 @@ int main(int argc, char **argv)
+ fname, strerror(errno));
+ }
+ read_ehdr(fp);
++ read_phdrs(fp);
+ read_shdrs(fp);
+ read_strtabs(fp);
+ read_symtabs(fp);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/vdso/Makefile linux-3.4-pax/arch/x86/vdso/Makefile
+--- linux-3.4/arch/x86/vdso/Makefile 2012-05-21 11:32:58.439927712 +0200
++++ linux-3.4-pax/arch/x86/vdso/Makefile 2012-05-21 12:10:09.808048904 +0200
+@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
+ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
+ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
+
+-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
++VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ GCOV_PROFILE := n
+
+ #
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/vdso/vdso32-setup.c linux-3.4-pax/arch/x86/vdso/vdso32-setup.c
+--- linux-3.4/arch/x86/vdso/vdso32-setup.c 2012-05-21 11:32:58.443927712 +0200
++++ linux-3.4-pax/arch/x86/vdso/vdso32-setup.c 2012-05-21 12:10:09.808048904 +0200
+@@ -25,6 +25,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/vdso.h>
+ #include <asm/proto.h>
++#include <asm/mman.h>
+
+ enum {
+ VDSO_DISABLED = 0,
+@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
+ void enable_sep_cpu(void)
+ {
+ int cpu = get_cpu();
+- struct tss_struct *tss = &per_cpu(init_tss, cpu);
++ struct tss_struct *tss = init_tss + cpu;
+
+ if (!boot_cpu_has(X86_FEATURE_SEP)) {
+ put_cpu();
+@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
+ gate_vma.vm_start = FIXADDR_USER_START;
+ gate_vma.vm_end = FIXADDR_USER_END;
+ gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+- gate_vma.vm_page_prot = __P101;
++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+
+ return 0;
+ }
+@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct l
+ if (compat)
+ addr = VDSO_HIGH_BASE;
+ else {
+- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
++ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+ }
+
+- current->mm->context.vdso = (void *)addr;
++ current->mm->context.vdso = addr;
+
+ if (compat_uses_vma || !compat) {
+ /*
+@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct l
+ }
+
+ current_thread_info()->sysenter_return =
+- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
++ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
+
+ up_fail:
+ if (ret)
+- current->mm->context.vdso = NULL;
++ current->mm->context.vdso = 0;
+
+ up_write(&mm->mmap_sem);
+
+@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
+
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
+- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
++ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
+ return "[vdso]";
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
++ return "[vdso]";
++#endif
++
+ return NULL;
+ }
+
+@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(stru
+ * Check to see if the corresponding task was created in compat vdso
+ * mode.
+ */
+- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
++ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
+ return &gate_vma;
+ return NULL;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/vdso/vma.c linux-3.4-pax/arch/x86/vdso/vma.c
+--- linux-3.4/arch/x86/vdso/vma.c 2012-05-21 11:32:58.447927713 +0200
++++ linux-3.4-pax/arch/x86/vdso/vma.c 2012-05-21 12:10:09.812048904 +0200
+@@ -16,8 +16,6 @@
+ #include <asm/vdso.h>
+ #include <asm/page.h>
+
+-unsigned int __read_mostly vdso_enabled = 1;
+-
+ extern char vdso_start[], vdso_end[];
+ extern unsigned short vdso_sync_cpuid;
+
+@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned
+ * unaligned here as a result of stack start randomization.
+ */
+ addr = PAGE_ALIGN(addr);
+- addr = align_addr(addr, NULL, ALIGN_VDSO);
+
+ return addr;
+ }
+@@ -154,30 +151,31 @@ static int setup_additional_pages(struct
+ unsigned size)
+ {
+ struct mm_struct *mm = current->mm;
+- unsigned long addr;
++ unsigned long addr = 0;
+ int ret;
+
+- if (!vdso_enabled)
+- return 0;
+-
+ down_write(&mm->mmap_sem);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ addr = vdso_addr(mm->start_stack, size);
++ addr = align_addr(addr, NULL, ALIGN_VDSO);
+ addr = get_unmapped_area(NULL, addr, size, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+
+- current->mm->context.vdso = (void *)addr;
++ mm->context.vdso = addr;
+
+ ret = install_special_mapping(mm, addr, size,
+ VM_READ|VM_EXEC|
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ pages);
+- if (ret) {
+- current->mm->context.vdso = NULL;
+- goto up_fail;
+- }
++ if (ret)
++ mm->context.vdso = 0;
+
+ up_fail:
+ up_write(&mm->mmap_sem);
+@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct li
+ vdsox32_size);
+ }
+ #endif
+-
+-static __init int vdso_setup(char *s)
+-{
+- vdso_enabled = simple_strtoul(s, NULL, 0);
+- return 0;
+-}
+-__setup("vdso=", vdso_setup);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/xen/enlighten.c linux-3.4-pax/arch/x86/xen/enlighten.c
+--- linux-3.4/arch/x86/xen/enlighten.c 2012-05-21 11:32:58.451927713 +0200
++++ linux-3.4-pax/arch/x86/xen/enlighten.c 2012-05-21 12:10:09.816048904 +0200
+@@ -95,8 +95,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
+
+ struct shared_info xen_dummy_shared_info;
+
+-void *xen_initial_gdt;
+-
+ RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
+ __read_mostly int xen_have_vector_callback;
+ EXPORT_SYMBOL_GPL(xen_have_vector_callback);
+@@ -1154,30 +1152,30 @@ static const struct pv_apic_ops xen_apic
+ #endif
+ };
+
+-static void xen_reboot(int reason)
++static __noreturn void xen_reboot(int reason)
+ {
+ struct sched_shutdown r = { .reason = reason };
+
+- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
+- BUG();
++ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
++ BUG();
+ }
+
+-static void xen_restart(char *msg)
++static __noreturn void xen_restart(char *msg)
+ {
+ xen_reboot(SHUTDOWN_reboot);
+ }
+
+-static void xen_emergency_restart(void)
++static __noreturn void xen_emergency_restart(void)
+ {
+ xen_reboot(SHUTDOWN_reboot);
+ }
+
+-static void xen_machine_halt(void)
++static __noreturn void xen_machine_halt(void)
+ {
+ xen_reboot(SHUTDOWN_poweroff);
+ }
+
+-static void xen_machine_power_off(void)
++static __noreturn void xen_machine_power_off(void)
+ {
+ if (pm_power_off)
+ pm_power_off();
+@@ -1280,7 +1278,17 @@ asmlinkage void __init xen_start_kernel(
+ __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
+ /* Work out if we support NX */
+- x86_configure_nx();
++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
++ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
++ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
++ unsigned l, h;
++
++ __supported_pte_mask |= _PAGE_NX;
++ rdmsr(MSR_EFER, l, h);
++ l |= EFER_NX;
++ wrmsr(MSR_EFER, l, h);
++ }
++#endif
+
+ xen_setup_features();
+
+@@ -1311,13 +1319,6 @@ asmlinkage void __init xen_start_kernel(
+
+ machine_ops = xen_machine_ops;
+
+- /*
+- * The only reliable way to retain the initial address of the
+- * percpu gdt_page is to remember it here, so we can go and
+- * mark it RW later, when the initial percpu area is freed.
+- */
+- xen_initial_gdt = &per_cpu(gdt_page, 0);
+-
+ xen_smp_init();
+
+ #ifdef CONFIG_ACPI_NUMA
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/xen/mmu.c linux-3.4-pax/arch/x86/xen/mmu.c
+--- linux-3.4/arch/x86/xen/mmu.c 2012-05-21 11:32:58.459927713 +0200
++++ linux-3.4-pax/arch/x86/xen/mmu.c 2012-05-21 12:10:09.820048904 +0200
+@@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetabl
+ convert_pfn_mfn(init_level4_pgt);
+ convert_pfn_mfn(level3_ident_pgt);
+ convert_pfn_mfn(level3_kernel_pgt);
++ convert_pfn_mfn(level3_vmalloc_start_pgt);
++ convert_pfn_mfn(level3_vmalloc_end_pgt);
++ convert_pfn_mfn(level3_vmemmap_pgt);
+
+ l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
+ l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
+@@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetabl
+ set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
++ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
++ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
++ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
++ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+
+@@ -1964,6 +1971,7 @@ static void __init xen_post_allocator_in
+ pv_mmu_ops.set_pud = xen_set_pud;
+ #if PAGETABLE_LEVELS == 4
+ pv_mmu_ops.set_pgd = xen_set_pgd;
++ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
+ #endif
+
+ /* This will work as long as patching hasn't happened yet
+@@ -2045,6 +2053,7 @@ static const struct pv_mmu_ops xen_mmu_o
+ .pud_val = PV_CALLEE_SAVE(xen_pud_val),
+ .make_pud = PV_CALLEE_SAVE(xen_make_pud),
+ .set_pgd = xen_set_pgd_hyper,
++ .set_pgd_batched = xen_set_pgd_hyper,
+
+ .alloc_pud = xen_alloc_pmd_init,
+ .release_pud = xen_release_pmd_init,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/xen/smp.c linux-3.4-pax/arch/x86/xen/smp.c
+--- linux-3.4/arch/x86/xen/smp.c 2012-05-21 11:32:58.471927714 +0200
++++ linux-3.4-pax/arch/x86/xen/smp.c 2012-05-21 12:10:09.824048905 +0200
+@@ -215,11 +215,6 @@ static void __init xen_smp_prepare_boot_
+ {
+ BUG_ON(smp_processor_id() != 0);
+ native_smp_prepare_boot_cpu();
+-
+- /* We've switched to the "real" per-cpu gdt, so make sure the
+- old memory can be recycled */
+- make_lowmem_page_readwrite(xen_initial_gdt);
+-
+ xen_filter_cpu_maps();
+ xen_setup_vcpu_info_placement();
+ }
+@@ -296,12 +291,12 @@ cpu_initialize_context(unsigned int cpu,
+ gdt = get_cpu_gdt_table(cpu);
+
+ ctxt->flags = VGCF_IN_KERNEL;
+- ctxt->user_regs.ds = __USER_DS;
+- ctxt->user_regs.es = __USER_DS;
++ ctxt->user_regs.ds = __KERNEL_DS;
++ ctxt->user_regs.es = __KERNEL_DS;
+ ctxt->user_regs.ss = __KERNEL_DS;
+ #ifdef CONFIG_X86_32
+ ctxt->user_regs.fs = __KERNEL_PERCPU;
+- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
++ savesegment(gs, ctxt->user_regs.gs);
+ #else
+ ctxt->gs_base_kernel = per_cpu_offset(cpu);
+ #endif
+@@ -352,13 +347,12 @@ static int __cpuinit xen_cpu_up(unsigned
+ int rc;
+
+ per_cpu(current_task, cpu) = idle;
++ per_cpu(current_tinfo, cpu) = &idle->tinfo;
+ #ifdef CONFIG_X86_32
+ irq_ctx_init(cpu);
+ #else
+ clear_tsk_thread_flag(idle, TIF_FORK);
+- per_cpu(kernel_stack, cpu) =
+- (unsigned long)task_stack_page(idle) -
+- KERNEL_STACK_OFFSET + THREAD_SIZE;
++ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
+ #endif
+ xen_setup_runstate_info(cpu);
+ xen_setup_timer(cpu);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/xen/xen-asm_32.S linux-3.4-pax/arch/x86/xen/xen-asm_32.S
+--- linux-3.4/arch/x86/xen/xen-asm_32.S 2011-10-24 12:48:26.419091764 +0200
++++ linux-3.4-pax/arch/x86/xen/xen-asm_32.S 2012-05-21 12:10:09.824048905 +0200
+@@ -83,14 +83,14 @@ ENTRY(xen_iret)
+ ESP_OFFSET=4 # bytes pushed onto stack
+
+ /*
+- * Store vcpu_info pointer for easy access. Do it this way to
+- * avoid having to reload %fs
++ * Store vcpu_info pointer for easy access.
+ */
+ #ifdef CONFIG_SMP
+- GET_THREAD_INFO(%eax)
+- movl TI_cpu(%eax), %eax
+- movl __per_cpu_offset(,%eax,4), %eax
+- mov xen_vcpu(%eax), %eax
++ push %fs
++ mov $(__KERNEL_PERCPU), %eax
++ mov %eax, %fs
++ mov PER_CPU_VAR(xen_vcpu), %eax
++ pop %fs
+ #else
+ movl xen_vcpu, %eax
+ #endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/xen/xen-head.S linux-3.4-pax/arch/x86/xen/xen-head.S
+--- linux-3.4/arch/x86/xen/xen-head.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/arch/x86/xen/xen-head.S 2012-05-21 12:10:09.824048905 +0200
+@@ -19,6 +19,17 @@ ENTRY(startup_xen)
+ #ifdef CONFIG_X86_32
+ mov %esi,xen_start_info
+ mov $init_thread_union+THREAD_SIZE,%esp
++#ifdef CONFIG_SMP
++ movl $cpu_gdt_table,%edi
++ movl $__per_cpu_load,%eax
++ movw %ax,__KERNEL_PERCPU + 2(%edi)
++ rorl $16,%eax
++ movb %al,__KERNEL_PERCPU + 4(%edi)
++ movb %ah,__KERNEL_PERCPU + 7(%edi)
++ movl $__per_cpu_end - 1,%eax
++ subl $__per_cpu_start,%eax
++ movw %ax,__KERNEL_PERCPU + 0(%edi)
++#endif
+ #else
+ mov %rsi,xen_start_info
+ mov $init_thread_union+THREAD_SIZE,%rsp
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/arch/x86/xen/xen-ops.h linux-3.4-pax/arch/x86/xen/xen-ops.h
+--- linux-3.4/arch/x86/xen/xen-ops.h 2011-10-24 12:48:26.419091764 +0200
++++ linux-3.4-pax/arch/x86/xen/xen-ops.h 2012-05-21 12:10:09.828048905 +0200
+@@ -10,8 +10,6 @@
+ extern const char xen_hypervisor_callback[];
+ extern const char xen_failsafe_callback[];
+
+-extern void *xen_initial_gdt;
+-
+ struct trap_info;
+ void xen_copy_trap_info(struct trap_info *traps);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/block/blk-iopoll.c linux-3.4-pax/block/blk-iopoll.c
+--- linux-3.4/block/blk-iopoll.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/block/blk-iopoll.c 2012-05-21 12:10:09.828048905 +0200
+@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
+ }
+ EXPORT_SYMBOL(blk_iopoll_complete);
+
+-static void blk_iopoll_softirq(struct softirq_action *h)
++static void blk_iopoll_softirq(void)
+ {
+ struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
+ int rearm = 0, budget = blk_iopoll_budget;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/block/blk-map.c linux-3.4-pax/block/blk-map.c
+--- linux-3.4/block/blk-map.c 2012-01-08 19:47:50.103472954 +0100
++++ linux-3.4-pax/block/blk-map.c 2012-05-21 12:10:09.832048905 +0200
+@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue
+ if (!len || !kbuf)
+ return -EINVAL;
+
+- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
++ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
+ if (do_copy)
+ bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
+ else
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/block/blk-softirq.c linux-3.4-pax/block/blk-softirq.c
+--- linux-3.4/block/blk-softirq.c 2012-05-21 11:32:58.571927719 +0200
++++ linux-3.4-pax/block/blk-softirq.c 2012-05-21 12:10:09.832048905 +0200
+@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head,
+ * Softirq action handler - move entries to local list and loop over them
+ * while passing them to the queue registered handler.
+ */
+-static void blk_done_softirq(struct softirq_action *h)
++static void blk_done_softirq(void)
+ {
+ struct list_head *cpu_list, local_list;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/block/bsg.c linux-3.4-pax/block/bsg.c
+--- linux-3.4/block/bsg.c 2012-03-19 10:38:56.976049975 +0100
++++ linux-3.4-pax/block/bsg.c 2012-05-21 12:10:09.836048905 +0200
+@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
+ struct sg_io_v4 *hdr, struct bsg_device *bd,
+ fmode_t has_write_perm)
+ {
++ unsigned char tmpcmd[sizeof(rq->__cmd)];
++ unsigned char *cmdptr;
++
+ if (hdr->request_len > BLK_MAX_CDB) {
+ rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
+ if (!rq->cmd)
+ return -ENOMEM;
+- }
++ cmdptr = rq->cmd;
++ } else
++ cmdptr = tmpcmd;
+
+- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
++ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
+ hdr->request_len))
+ return -EFAULT;
+
++ if (cmdptr != rq->cmd)
++ memcpy(rq->cmd, cmdptr, hdr->request_len);
++
+ if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
+ if (blk_verify_command(rq->cmd, has_write_perm))
+ return -EPERM;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/block/compat_ioctl.c linux-3.4-pax/block/compat_ioctl.c
+--- linux-3.4/block/compat_ioctl.c 2012-03-19 10:38:56.980049975 +0100
++++ linux-3.4-pax/block/compat_ioctl.c 2012-05-21 12:10:09.840048905 +0200
+@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_
+ err |= __get_user(f->spec1, &uf->spec1);
+ err |= __get_user(f->fmt_gap, &uf->fmt_gap);
+ err |= __get_user(name, &uf->name);
+- f->name = compat_ptr(name);
++ f->name = (void __force_kernel *)compat_ptr(name);
+ if (err) {
+ err = -EFAULT;
+ goto out;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/block/partitions/efi.c linux-3.4-pax/block/partitions/efi.c
+--- linux-3.4/block/partitions/efi.c 2012-03-19 10:38:56.992049975 +0100
++++ linux-3.4-pax/block/partitions/efi.c 2012-05-21 12:10:09.840048905 +0200
+@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries
+ if (!gpt)
+ return NULL;
+
+- count = le32_to_cpu(gpt->num_partition_entries) *
+- le32_to_cpu(gpt->sizeof_partition_entry);
+- if (!count)
++ if (!le32_to_cpu(gpt->num_partition_entries))
+ return NULL;
+- pte = kzalloc(count, GFP_KERNEL);
++ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
+ if (!pte)
+ return NULL;
+
++ count = le32_to_cpu(gpt->num_partition_entries) *
++ le32_to_cpu(gpt->sizeof_partition_entry);
+ if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
+ (u8 *) pte,
+ count) < count) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/block/scsi_ioctl.c linux-3.4-pax/block/scsi_ioctl.c
+--- linux-3.4/block/scsi_ioctl.c 2012-03-19 10:38:57.004049975 +0100
++++ linux-3.4-pax/block/scsi_ioctl.c 2012-05-21 12:10:09.844048906 +0200
+@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
+ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
+ struct sg_io_hdr *hdr, fmode_t mode)
+ {
+- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
++ unsigned char tmpcmd[sizeof(rq->__cmd)];
++ unsigned char *cmdptr;
++
++ if (rq->cmd != rq->__cmd)
++ cmdptr = rq->cmd;
++ else
++ cmdptr = tmpcmd;
++
++ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
+ return -EFAULT;
++
++ if (cmdptr != rq->cmd)
++ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
++
+ if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
+ return -EPERM;
+
+@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *
+ int err;
+ unsigned int in_len, out_len, bytes, opcode, cmdlen;
+ char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
++ unsigned char tmpcmd[sizeof(rq->__cmd)];
++ unsigned char *cmdptr;
+
+ if (!sic)
+ return -EINVAL;
+@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *
+ */
+ err = -EFAULT;
+ rq->cmd_len = cmdlen;
+- if (copy_from_user(rq->cmd, sic->data, cmdlen))
++
++ if (rq->cmd != rq->__cmd)
++ cmdptr = rq->cmd;
++ else
++ cmdptr = tmpcmd;
++
++ if (copy_from_user(cmdptr, sic->data, cmdlen))
+ goto error;
+
++ if (rq->cmd != cmdptr)
++ memcpy(rq->cmd, cmdptr, cmdlen);
++
+ if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
+ goto error;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/crypto/cryptd.c linux-3.4-pax/crypto/cryptd.c
+--- linux-3.4/crypto/cryptd.c 2012-01-08 19:47:50.187472949 +0100
++++ linux-3.4-pax/crypto/cryptd.c 2012-05-21 12:10:09.856048906 +0200
+@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
+
+ struct cryptd_blkcipher_request_ctx {
+ crypto_completion_t complete;
+-};
++} __no_const;
+
+ struct cryptd_hash_ctx {
+ struct crypto_shash *child;
+@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
+
+ struct cryptd_aead_request_ctx {
+ crypto_completion_t complete;
+-};
++} __no_const;
+
+ static void cryptd_queue_worker(struct work_struct *work);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/Documentation/dontdiff linux-3.4-pax/Documentation/dontdiff
+--- linux-3.4/Documentation/dontdiff 2012-05-21 11:32:28.607926092 +0200
++++ linux-3.4-pax/Documentation/dontdiff 2012-05-28 20:59:17.758479030 +0200
+@@ -2,9 +2,11 @@
+ *.aux
+ *.bin
+ *.bz2
++*.c.[012]*.*
+ *.cis
+ *.cpio
+ *.csp
++*.dbg
+ *.dsp
+ *.dvi
+ *.elf
+@@ -14,6 +16,7 @@
+ *.gcov
+ *.gen.S
+ *.gif
++*.gmo
+ *.grep
+ *.grp
+ *.gz
+@@ -48,9 +51,11 @@
+ *.tab.h
+ *.tex
+ *.ver
++*.vim
+ *.xml
+ *.xz
+ *_MODULES
++*_reg_safe.h
+ *_vga16.c
+ *~
+ \#*#
+@@ -69,6 +74,7 @@ Image
+ Module.markers
+ Module.symvers
+ PENDING
++PERF*
+ SCCS
+ System.map*
+ TAGS
+@@ -92,19 +98,24 @@ bounds.h
+ bsetup
+ btfixupprep
+ build
++builtin-policy.h
+ bvmlinux
+ bzImage*
+ capability_names.h
+ capflags.c
+ classlist.h*
++clut_vga16.c
++common-cmds.h
+ comp*.log
+ compile.h*
+ conf
+ config
+ config-*
+ config_data.h*
++config.c
+ config.mak
+ config.mak.autogen
++config.tmp
+ conmakehash
+ consolemap_deftbl.c*
+ cpustr.h
+@@ -115,9 +126,11 @@ devlist.h*
+ dnotify_test
+ docproc
+ dslm
++dtc-lexer.lex.c
+ elf2ecoff
+ elfconfig.h*
+ evergreen_reg_safe.h
++exception_policy.conf
+ fixdep
+ flask.h
+ fore200e_mkfirm
+@@ -125,12 +138,15 @@ fore200e_pca_fw.c*
+ gconf
+ gconf.glade.h
+ gen-devlist
++gen-kdb_cmds.c
+ gen_crc32table
+ gen_init_cpio
+ generated
+ genheaders
+ genksyms
+ *_gray256.c
++hash
++hid-example
+ hpet_example
+ hugepage-mmap
+ hugepage-shm
+@@ -145,7 +161,7 @@ int32.c
+ int4.c
+ int8.c
+ kallsyms
+-kconfig
++kern_constants.h
+ keywords.c
+ ksym.c*
+ ksym.h*
+@@ -153,7 +169,7 @@ kxgettext
+ lkc_defs.h
+ lex.c
+ lex.*.c
+-linux
++lib1funcs.S
+ logo_*.c
+ logo_*_clut224.c
+ logo_*_mono.c
+@@ -164,14 +180,15 @@ machtypes.h
+ map
+ map_hugetlb
+ maui_boot.h
+-media
+ mconf
++mdp
+ miboot*
+ mk_elfconfig
+ mkboot
+ mkbugboot
+ mkcpustr
+ mkdep
++mkpiggy
+ mkprep
+ mkregtable
+ mktables
+@@ -188,6 +205,7 @@ oui.c*
+ page-types
+ parse.c
+ parse.h
++parse-events*
+ patches*
+ pca200e.bin
+ pca200e_ecd.bin2
+@@ -197,6 +215,7 @@ perf-archive
+ piggyback
+ piggy.gzip
+ piggy.S
++pmu-*
+ pnmtologo
+ ppc_defs.h*
+ pss_boot.h
+@@ -207,6 +226,7 @@ r300_reg_safe.h
+ r420_reg_safe.h
+ r600_reg_safe.h
+ recordmcount
++regdb.c
+ relocs
+ rlim_names.h
+ rn50_reg_safe.h
+@@ -217,6 +237,7 @@ setup
+ setup.bin
+ setup.elf
+ sImage
++slabinfo
+ sm_tbl*
+ split-include
+ syscalltab.h
+@@ -227,6 +248,7 @@ tftpboot.img
+ timeconst.h
+ times.h*
+ trix_boot.h
++user_constants.h
+ utsrelease.h*
+ vdso-syms.lds
+ vdso.lds
+@@ -238,13 +260,17 @@ vdso32.lds
+ vdso32.so.dbg
+ vdso64.lds
+ vdso64.so.dbg
++vdsox32.lds
++vdsox32-syms.lds
+ version.h*
+ vmImage
+ vmlinux
+ vmlinux-*
+ vmlinux.aout
+ vmlinux.bin.all
++vmlinux.bin.bz2
+ vmlinux.lds
++vmlinux.relocs
+ vmlinuz
+ voffset.h
+ vsyscall.lds
+@@ -252,9 +278,11 @@ vsyscall_32.lds
+ wanxlfw.inc
+ uImage
+ unifdef
++utsrelease.h
+ wakeup.bin
+ wakeup.elf
+ wakeup.lds
+ zImage*
+ zconf.hash.c
++zconf.lex.c
+ zoffset.h
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/Documentation/kernel-parameters.txt linux-3.4-pax/Documentation/kernel-parameters.txt
+--- linux-3.4/Documentation/kernel-parameters.txt 2012-05-21 11:32:29.039926115 +0200
++++ linux-3.4-pax/Documentation/kernel-parameters.txt 2012-05-21 12:10:09.860048907 +0200
+@@ -2021,6 +2021,13 @@ bytes respectively. Such letter suffixes
+ the specified number of seconds. This is to be used if
+ your oopses keep scrolling off the screen.
+
++ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
++ virtualization environments that don't cope well with the
++ expand down segment used by UDEREF on X86-32 or the frequent
++ page table updates on X86-64.
++
++ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
++
+ pcbit= [HW,ISDN]
+
+ pcd. [PARIDE]
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/acpi/apei/cper.c linux-3.4-pax/drivers/acpi/apei/cper.c
+--- linux-3.4/drivers/acpi/apei/cper.c 2012-05-21 11:32:58.767927731 +0200
++++ linux-3.4-pax/drivers/acpi/apei/cper.c 2012-05-21 12:10:09.864048907 +0200
+@@ -38,12 +38,12 @@
+ */
+ u64 cper_next_record_id(void)
+ {
+- static atomic64_t seq;
++ static atomic64_unchecked_t seq;
+
+- if (!atomic64_read(&seq))
+- atomic64_set(&seq, ((u64)get_seconds()) << 32);
++ if (!atomic64_read_unchecked(&seq))
++ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
+
+- return atomic64_inc_return(&seq);
++ return atomic64_inc_return_unchecked(&seq);
+ }
+ EXPORT_SYMBOL_GPL(cper_next_record_id);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/acpi/ec_sys.c linux-3.4-pax/drivers/acpi/ec_sys.c
+--- linux-3.4/drivers/acpi/ec_sys.c 2012-05-21 11:32:58.799927732 +0200
++++ linux-3.4-pax/drivers/acpi/ec_sys.c 2012-05-21 12:10:09.868048907 +0200
+@@ -12,6 +12,7 @@
+ #include <linux/acpi.h>
+ #include <linux/debugfs.h>
+ #include <linux/module.h>
++#include <linux/uaccess.h>
+ #include "internal.h"
+
+ MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
+@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct fi
+ * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
+ */
+ unsigned int size = EC_SPACE_SIZE;
+- u8 *data = (u8 *) buf;
++ u8 data;
+ loff_t init_off = *off;
+ int err = 0;
+
+@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct fi
+ size = count;
+
+ while (size) {
+- err = ec_read(*off, &data[*off - init_off]);
++ err = ec_read(*off, &data);
+ if (err)
+ return err;
++ if (put_user(data, &buf[*off - init_off]))
++ return -EFAULT;
+ *off += 1;
+ size--;
+ }
+@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct f
+
+ unsigned int size = count;
+ loff_t init_off = *off;
+- u8 *data = (u8 *) buf;
+ int err = 0;
+
+ if (*off >= EC_SPACE_SIZE)
+@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct f
+ }
+
+ while (size) {
+- u8 byte_write = data[*off - init_off];
++ u8 byte_write;
++ if (get_user(byte_write, &buf[*off - init_off]))
++ return -EFAULT;
+ err = ec_write(*off, byte_write);
+ if (err)
+ return err;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/acpi/proc.c linux-3.4-pax/drivers/acpi/proc.c
+--- linux-3.4/drivers/acpi/proc.c 2012-01-08 19:47:50.571472929 +0100
++++ linux-3.4-pax/drivers/acpi/proc.c 2012-05-21 12:10:09.872048907 +0200
+@@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct f
+ size_t count, loff_t * ppos)
+ {
+ struct list_head *node, *next;
+- char strbuf[5];
+- char str[5] = "";
+- unsigned int len = count;
++ char strbuf[5] = {0};
+
+- if (len > 4)
+- len = 4;
+- if (len < 0)
++ if (count > 4)
++ count = 4;
++ if (copy_from_user(strbuf, buffer, count))
+ return -EFAULT;
+-
+- if (copy_from_user(strbuf, buffer, len))
+- return -EFAULT;
+- strbuf[len] = '\0';
+- sscanf(strbuf, "%s", str);
++ strbuf[count] = '\0';
+
+ mutex_lock(&acpi_device_lock);
+ list_for_each_safe(node, next, &acpi_wakeup_device_list) {
+@@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct f
+ if (!dev->wakeup.flags.valid)
+ continue;
+
+- if (!strncmp(dev->pnp.bus_id, str, 4)) {
++ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
+ if (device_can_wakeup(&dev->dev)) {
+ bool enable = !device_may_wakeup(&dev->dev);
+ device_set_wakeup_enable(&dev->dev, enable);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/acpi/processor_driver.c linux-3.4-pax/drivers/acpi/processor_driver.c
+--- linux-3.4/drivers/acpi/processor_driver.c 2012-05-21 11:32:58.811927732 +0200
++++ linux-3.4-pax/drivers/acpi/processor_driver.c 2012-05-21 12:10:09.872048907 +0200
+@@ -556,7 +556,7 @@ static int __cpuinit acpi_processor_add(
+ return 0;
+ #endif
+
+- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
++ BUG_ON(pr->id >= nr_cpu_ids);
+
+ /*
+ * Buggy BIOS check
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ata/libata-core.c linux-3.4-pax/drivers/ata/libata-core.c
+--- linux-3.4/drivers/ata/libata-core.c 2012-05-21 11:32:58.871927736 +0200
++++ linux-3.4-pax/drivers/ata/libata-core.c 2012-05-21 12:10:09.884048908 +0200
+@@ -4736,7 +4736,7 @@ void ata_qc_free(struct ata_queued_cmd *
+ struct ata_port *ap;
+ unsigned int tag;
+
+- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
++ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+ ap = qc->ap;
+
+ qc->flags = 0;
+@@ -4752,7 +4752,7 @@ void __ata_qc_complete(struct ata_queued
+ struct ata_port *ap;
+ struct ata_link *link;
+
+- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
++ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+ WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
+ ap = qc->ap;
+ link = qc->dev->link;
+@@ -5816,6 +5816,7 @@ static void ata_finalize_port_ops(struct
+ return;
+
+ spin_lock(&lock);
++ pax_open_kernel();
+
+ for (cur = ops->inherits; cur; cur = cur->inherits) {
+ void **inherit = (void **)cur;
+@@ -5829,8 +5830,9 @@ static void ata_finalize_port_ops(struct
+ if (IS_ERR(*pp))
+ *pp = NULL;
+
+- ops->inherits = NULL;
++ *(struct ata_port_operations **)&ops->inherits = NULL;
+
++ pax_close_kernel();
+ spin_unlock(&lock);
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ata/pata_arasan_cf.c linux-3.4-pax/drivers/ata/pata_arasan_cf.c
+--- linux-3.4/drivers/ata/pata_arasan_cf.c 2012-05-21 11:32:58.887927737 +0200
++++ linux-3.4-pax/drivers/ata/pata_arasan_cf.c 2012-05-21 12:10:09.888048908 +0200
+@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
+ /* Handle platform specific quirks */
+ if (pdata->quirk) {
+ if (pdata->quirk & CF_BROKEN_PIO) {
+- ap->ops->set_piomode = NULL;
++ pax_open_kernel();
++ *(void **)&ap->ops->set_piomode = NULL;
++ pax_close_kernel();
+ ap->pio_mask = 0;
+ }
+ if (pdata->quirk & CF_BROKEN_MWDMA)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/atm/adummy.c linux-3.4-pax/drivers/atm/adummy.c
+--- linux-3.4/drivers/atm/adummy.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/atm/adummy.c 2012-05-21 12:10:09.888048908 +0200
+@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ return 0;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/atm/ambassador.c linux-3.4-pax/drivers/atm/ambassador.c
+--- linux-3.4/drivers/atm/ambassador.c 2011-10-24 12:48:27.047091731 +0200
++++ linux-3.4-pax/drivers/atm/ambassador.c 2012-05-21 12:10:09.892048908 +0200
+@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
+ PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
+
+ // VC layer stats
+- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+
+ // free the descriptor
+ kfree (tx_descr);
+@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
+ dump_skb ("<<<", vc, skb);
+
+ // VC layer stats
+- atomic_inc(&atm_vcc->stats->rx);
++ atomic_inc_unchecked(&atm_vcc->stats->rx);
+ __net_timestamp(skb);
+ // end of our responsibility
+ atm_vcc->push (atm_vcc, skb);
+@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
+ } else {
+ PRINTK (KERN_INFO, "dropped over-size frame");
+ // should we count this?
+- atomic_inc(&atm_vcc->stats->rx_drop);
++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+ }
+
+ } else {
+@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * at
+ }
+
+ if (check_area (skb->data, skb->len)) {
+- atomic_inc(&atm_vcc->stats->tx_err);
++ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
+ return -ENOMEM; // ?
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/atm/atmtcp.c linux-3.4-pax/drivers/atm/atmtcp.c
+--- linux-3.4/drivers/atm/atmtcp.c 2011-10-24 12:48:27.067091730 +0200
++++ linux-3.4-pax/drivers/atm/atmtcp.c 2012-05-21 12:10:09.896048909 +0200
+@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+ if (dev_data) return 0;
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -ENOLINK;
+ }
+ size = skb->len+sizeof(struct atmtcp_hdr);
+@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
+ if (!new_skb) {
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -ENOBUFS;
+ }
+ hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
+@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+ out_vcc->push(out_vcc,new_skb);
+- atomic_inc(&vcc->stats->tx);
+- atomic_inc(&out_vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->tx);
++ atomic_inc_unchecked(&out_vcc->stats->rx);
+ return 0;
+ }
+
+@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
+ out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
+ read_unlock(&vcc_sklist_lock);
+ if (!out_vcc) {
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ goto done;
+ }
+ skb_pull(skb,sizeof(struct atmtcp_hdr));
+@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
+ __net_timestamp(new_skb);
+ skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
+ out_vcc->push(out_vcc,new_skb);
+- atomic_inc(&vcc->stats->tx);
+- atomic_inc(&out_vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->tx);
++ atomic_inc_unchecked(&out_vcc->stats->rx);
+ done:
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/atm/eni.c linux-3.4-pax/drivers/atm/eni.c
+--- linux-3.4/drivers/atm/eni.c 2012-05-21 11:32:58.927927739 +0200
++++ linux-3.4-pax/drivers/atm/eni.c 2012-05-21 12:10:09.900048909 +0200
+@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
+ DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
+ vcc->dev->number);
+ length = 0;
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ else {
+ length = ATM_CELL_SIZE-1; /* no HEC */
+@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
+ size);
+ }
+ eff = length = 0;
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ else {
+ size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
+@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
+ "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
+ vcc->dev->number,vcc->vci,length,size << 2,descr);
+ length = eff = 0;
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ }
+ skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
+@@ -767,7 +767,7 @@ rx_dequeued++;
+ vcc->push(vcc,skb);
+ pushed++;
+ }
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ wake_up(&eni_dev->rx_wait);
+ }
+@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
+ PCI_DMA_TODEVICE);
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb_irq(skb);
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ wake_up(&eni_dev->tx_wait);
+ dma_complete++;
+ }
+@@ -1567,7 +1567,7 @@ tx_complete++;
+ /*--------------------------------- entries ---------------------------------*/
+
+
+-static const char *media_name[] __devinitdata = {
++static const char *media_name[] __devinitconst = {
+ "MMF", "SMF", "MMF", "03?", /* 0- 3 */
+ "UTP", "05?", "06?", "07?", /* 4- 7 */
+ "TAXI","09?", "10?", "11?", /* 8-11 */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/atm/firestream.c linux-3.4-pax/drivers/atm/firestream.c
+--- linux-3.4/drivers/atm/firestream.c 2012-05-21 11:32:58.951927741 +0200
++++ linux-3.4-pax/drivers/atm/firestream.c 2012-05-21 12:10:09.900048909 +0200
+@@ -749,7 +749,7 @@ static void process_txdone_queue (struct
+ }
+ }
+
+- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+
+ fs_dprintk (FS_DEBUG_TXMEM, "i");
+ fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
+@@ -816,7 +816,7 @@ static void process_incoming (struct fs_
+ #endif
+ skb_put (skb, qe->p1 & 0xffff);
+ ATM_SKB(skb)->vcc = atm_vcc;
+- atomic_inc(&atm_vcc->stats->rx);
++ atomic_inc_unchecked(&atm_vcc->stats->rx);
+ __net_timestamp(skb);
+ fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
+ atm_vcc->push (atm_vcc, skb);
+@@ -837,12 +837,12 @@ static void process_incoming (struct fs_
+ kfree (pe);
+ }
+ if (atm_vcc)
+- atomic_inc(&atm_vcc->stats->rx_drop);
++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+ break;
+ case 0x1f: /* Reassembly abort: no buffers. */
+ /* Silently increment error counter. */
+ if (atm_vcc)
+- atomic_inc(&atm_vcc->stats->rx_drop);
++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+ break;
+ default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
+ printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/atm/fore200e.c linux-3.4-pax/drivers/atm/fore200e.c
+--- linux-3.4/drivers/atm/fore200e.c 2011-10-24 12:48:27.079091729 +0200
++++ linux-3.4-pax/drivers/atm/fore200e.c 2012-05-21 12:10:09.904048909 +0200
+@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
+ #endif
+ /* check error condition */
+ if (*entry->status & STATUS_ERROR)
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ else
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ }
+ }
+
+@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
+ if (skb == NULL) {
+ DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
+
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ return -ENOMEM;
+ }
+
+@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
+
+ dev_kfree_skb_any(skb);
+
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ return -ENOMEM;
+ }
+
+ ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
+
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
+
+@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
+ DPRINTK(2, "damaged PDU on %d.%d.%d\n",
+ fore200e->atm_dev->number,
+ entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ }
+
+@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
+ goto retry_here;
+ }
+
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+
+ fore200e->tx_sat++;
+ DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/atm/he.c linux-3.4-pax/drivers/atm/he.c
+--- linux-3.4/drivers/atm/he.c 2012-03-19 10:38:57.928049927 +0100
++++ linux-3.4-pax/drivers/atm/he.c 2012-05-21 12:10:09.912048909 +0200
+@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
+
+ if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
+ hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ goto return_host_buffers;
+ }
+
+@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
+ RBRQ_LEN_ERR(he_dev->rbrq_head)
+ ? "LEN_ERR" : "",
+ vcc->vpi, vcc->vci);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto return_host_buffers;
+ }
+
+@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
+ vcc->push(vcc, skb);
+ spin_lock(&he_dev->global_lock);
+
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ return_host_buffers:
+ ++pdus_assembled;
+@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
+ tpd->vcc->pop(tpd->vcc, tpd->skb);
+ else
+ dev_kfree_skb_any(tpd->skb);
+- atomic_inc(&tpd->vcc->stats->tx_err);
++ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
+ }
+ pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
+ return;
+@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -EINVAL;
+ }
+
+@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -EINVAL;
+ }
+ #endif
+@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ spin_unlock_irqrestore(&he_dev->global_lock, flags);
+ return -ENOMEM;
+ }
+@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ spin_unlock_irqrestore(&he_dev->global_lock, flags);
+ return -ENOMEM;
+ }
+@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
+ __enqueue_tpd(he_dev, tpd, cid);
+ spin_unlock_irqrestore(&he_dev->global_lock, flags);
+
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ return 0;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/atm/horizon.c linux-3.4-pax/drivers/atm/horizon.c
+--- linux-3.4/drivers/atm/horizon.c 2012-05-21 11:32:58.955927740 +0200
++++ linux-3.4-pax/drivers/atm/horizon.c 2012-05-21 12:10:09.916048910 +0200
+@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
+ {
+ struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
+ // VC layer stats
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ __net_timestamp(skb);
+ // end of our responsibility
+ vcc->push (vcc, skb);
+@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
+ dev->tx_iovec = NULL;
+
+ // VC layer stats
+- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+
+ // free the skb
+ hrz_kfree_skb (skb);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/atm/idt77252.c linux-3.4-pax/drivers/atm/idt77252.c
+--- linux-3.4/drivers/atm/idt77252.c 2012-01-08 19:47:51.235472893 +0100
++++ linux-3.4-pax/drivers/atm/idt77252.c 2012-05-21 12:10:09.920048910 +0200
+@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, str
+ else
+ dev_kfree_skb(skb);
+
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ }
+
+ atomic_dec(&scq->used);
+@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, st
+ if ((sb = dev_alloc_skb(64)) == NULL) {
+ printk("%s: Can't allocate buffers for aal0.\n",
+ card->name);
+- atomic_add(i, &vcc->stats->rx_drop);
++ atomic_add_unchecked(i, &vcc->stats->rx_drop);
+ break;
+ }
+ if (!atm_charge(vcc, sb->truesize)) {
+ RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
+ card->name);
+- atomic_add(i - 1, &vcc->stats->rx_drop);
++ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
+ dev_kfree_skb(sb);
+ break;
+ }
+@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, st
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ cell += ATM_CELL_PAYLOAD;
+ }
+@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, st
+ "(CDC: %08x)\n",
+ card->name, len, rpp->len, readl(SAR_REG_CDC));
+ recycle_rx_pool_skb(card, rpp);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+ if (stat & SAR_RSQE_CRC) {
+ RXPRINTK("%s: AAL5 CRC error.\n", card->name);
+ recycle_rx_pool_skb(card, rpp);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+ if (skb_queue_len(&rpp->queue) > 1) {
+@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, st
+ RXPRINTK("%s: Can't alloc RX skb.\n",
+ card->name);
+ recycle_rx_pool_skb(card, rpp);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+ if (!atm_charge(vcc, skb->truesize)) {
+@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, st
+ __net_timestamp(skb);
+
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ return;
+ }
+@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, st
+ __net_timestamp(skb);
+
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ if (skb->truesize > SAR_FB_SIZE_3)
+ add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
+@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car
+ if (vcc->qos.aal != ATM_AAL0) {
+ RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
+ card->name, vpi, vci);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ goto drop;
+ }
+
+ if ((sb = dev_alloc_skb(64)) == NULL) {
+ printk("%s: Can't allocate buffers for AAL0.\n",
+ card->name);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto drop;
+ }
+
+@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ drop:
+ skb_pull(queue, 64);
+@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
+
+ if (vc == NULL) {
+ printk("%s: NULL connection in send().\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+ if (!test_bit(VCF_TX, &vc->flags)) {
+ printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
+ break;
+ default:
+ printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ if (skb_shinfo(skb)->nr_frags != 0) {
+ printk("%s: No scatter-gather yet.\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
+
+ err = queue_skb(card, vc, skb, oam);
+ if (err) {
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return err;
+ }
+@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
+ skb = dev_alloc_skb(64);
+ if (!skb) {
+ printk("%s: Out of memory in send_oam().\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -ENOMEM;
+ }
+ atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/atm/iphase.c linux-3.4-pax/drivers/atm/iphase.c
+--- linux-3.4/drivers/atm/iphase.c 2012-05-21 11:32:58.987927742 +0200
++++ linux-3.4-pax/drivers/atm/iphase.c 2012-05-21 12:10:09.924048910 +0200
+@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
+ status = (u_short) (buf_desc_ptr->desc_mode);
+ if (status & (RX_CER | RX_PTE | RX_OFL))
+ {
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ IF_ERR(printk("IA: bad packet, dropping it");)
+ if (status & RX_CER) {
+ IF_ERR(printk(" cause: packet CRC error\n");)
+@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
+ len = dma_addr - buf_addr;
+ if (len > iadev->rx_buf_sz) {
+ printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out_free_desc;
+ }
+
+@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *
+ ia_vcc = INPH_IA_VCC(vcc);
+ if (ia_vcc == NULL)
+ {
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ atm_return(vcc, skb->truesize);
+ dev_kfree_skb_any(skb);
+ goto INCR_DLE;
+@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *
+ if ((length > iadev->rx_buf_sz) || (length >
+ (skb->len - sizeof(struct cpcs_trailer))))
+ {
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
+ length, skb->len);)
+ atm_return(vcc, skb->truesize);
+@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *
+
+ IF_RX(printk("rx_dle_intr: skb push");)
+ vcc->push(vcc,skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ iadev->rx_pkt_cnt++;
+ }
+ INCR_DLE:
+@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev,
+ {
+ struct k_sonet_stats *stats;
+ stats = &PRIV(_ia_dev[board])->sonet_stats;
+- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
+- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
+- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
+- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
+- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
+- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
+- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
+- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
+- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
++ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
++ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
++ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
++ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
++ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
++ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
++ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
++ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
++ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
+ }
+ ia_cmds.status = 0;
+ break;
+@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
+ if ((desc == 0) || (desc > iadev->num_tx_desc))
+ {
+ IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ if (vcc->pop)
+ vcc->pop(vcc, skb);
+ else
+@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
+ ATM_DESC(skb) = vcc->vci;
+ skb_queue_tail(&iadev->tx_dma_q, skb);
+
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ iadev->tx_pkt_cnt++;
+ /* Increment transaction counter */
+ writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
+
+ #if 0
+ /* add flow control logic */
+- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
++ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
+ if (iavcc->vc_desc_cnt > 10) {
+ vcc->tx_quota = vcc->tx_quota * 3 / 4;
+ printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/atm/lanai.c linux-3.4-pax/drivers/atm/lanai.c
+--- linux-3.4/drivers/atm/lanai.c 2012-05-21 11:32:59.003927743 +0200
++++ linux-3.4-pax/drivers/atm/lanai.c 2012-05-21 12:10:09.928048910 +0200
+@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
+ vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
+ lanai_endtx(lanai, lvcc);
+ lanai_free_skb(lvcc->tx.atmvcc, skb);
+- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
++ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
+ }
+
+ /* Try to fill the buffer - don't call unless there is backlog */
+@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
+ ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
+ __net_timestamp(skb);
+ lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
+ out:
+ lvcc->rx.buf.ptr = end;
+ cardvcc_write(lvcc, endptr, vcc_rxreadptr);
+@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_d
+ DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
+ "vcc %d\n", lanai->number, (unsigned int) s, vci);
+ lanai->stats.service_rxnotaal5++;
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ return 0;
+ }
+ if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
+@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_d
+ int bytes;
+ read_unlock(&vcc_sklist_lock);
+ DPRINTK("got trashed rx pdu on vci %d\n", vci);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ lvcc->stats.x.aal5.service_trash++;
+ bytes = (SERVICE_GET_END(s) * 16) -
+ (((unsigned long) lvcc->rx.buf.ptr) -
+@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_d
+ }
+ if (s & SERVICE_STREAM) {
+ read_unlock(&vcc_sklist_lock);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ lvcc->stats.x.aal5.service_stream++;
+ printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
+ "PDU on VCI %d!\n", lanai->number, vci);
+@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_d
+ return 0;
+ }
+ DPRINTK("got rx crc error on vci %d\n", vci);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ lvcc->stats.x.aal5.service_rxcrc++;
+ lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
+ cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/atm/nicstar.c linux-3.4-pax/drivers/atm/nicstar.c
+--- linux-3.4/drivers/atm/nicstar.c 2011-10-24 12:48:27.111091728 +0200
++++ linux-3.4-pax/drivers/atm/nicstar.c 2012-05-21 12:10:09.932048910 +0200
+@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
+ if ((vc = (vc_map *) vcc->dev_data) == NULL) {
+ printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
+ card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
+ if (!vc->tx) {
+ printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
+ card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
+ if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
+ printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
+ card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ if (skb_shinfo(skb)->nr_frags != 0) {
+ printk("nicstar%d: No scatter-gather yet.\n", card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
+ }
+
+ if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EIO;
+ }
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ return 0;
+ }
+@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
+ printk
+ ("nicstar%d: Can't allocate buffers for aal0.\n",
+ card->index);
+- atomic_add(i, &vcc->stats->rx_drop);
++ atomic_add_unchecked(i, &vcc->stats->rx_drop);
+ break;
+ }
+ if (!atm_charge(vcc, sb->truesize)) {
+ RXPRINTK
+ ("nicstar%d: atm_charge() dropped aal0 packets.\n",
+ card->index);
+- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
++ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
+ dev_kfree_skb_any(sb);
+ break;
+ }
+@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ cell += ATM_CELL_PAYLOAD;
+ }
+
+@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
+ if (iovb == NULL) {
+ printk("nicstar%d: Out of iovec buffers.\n",
+ card->index);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ recycle_rx_buf(card, skb);
+ return;
+ }
+@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
+ small or large buffer itself. */
+ } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
+ printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+ NS_MAX_IOVECS);
+ NS_PRV_IOVCNT(iovb) = 0;
+@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
+ ("nicstar%d: Expected a small buffer, and this is not one.\n",
+ card->index);
+ which_list(card, skb);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_rx_buf(card, skb);
+ vc->rx_iov = NULL;
+ recycle_iov_buf(card, iovb);
+@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
+ ("nicstar%d: Expected a large buffer, and this is not one.\n",
+ card->index);
+ which_list(card, skb);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+ NS_PRV_IOVCNT(iovb));
+ vc->rx_iov = NULL;
+@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
+ printk(" - PDU size mismatch.\n");
+ else
+ printk(".\n");
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+ NS_PRV_IOVCNT(iovb));
+ vc->rx_iov = NULL;
+@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
+ /* skb points to a small buffer */
+ if (!atm_charge(vcc, skb->truesize)) {
+ push_rxbufs(card, skb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ } else {
+ skb_put(skb, len);
+ dequeue_sm_buf(card, skb);
+@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
+ ATM_SKB(skb)->vcc = vcc;
+ __net_timestamp(skb);
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
+ struct sk_buff *sb;
+@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
+ if (len <= NS_SMBUFSIZE) {
+ if (!atm_charge(vcc, sb->truesize)) {
+ push_rxbufs(card, sb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ } else {
+ skb_put(sb, len);
+ dequeue_sm_buf(card, sb);
+@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+
+ push_rxbufs(card, skb);
+@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
+
+ if (!atm_charge(vcc, skb->truesize)) {
+ push_rxbufs(card, skb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ } else {
+ dequeue_lg_buf(card, skb);
+ #ifdef NS_USE_DESTRUCTORS
+@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
+ ATM_SKB(skb)->vcc = vcc;
+ __net_timestamp(skb);
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+
+ push_rxbufs(card, sb);
+@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
+ printk
+ ("nicstar%d: Out of huge buffers.\n",
+ card->index);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ recycle_iovec_rx_bufs(card,
+ (struct iovec *)
+ iovb->data,
+@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
+ card->hbpool.count++;
+ } else
+ dev_kfree_skb_any(hb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ } else {
+ /* Copy the small buffer to the huge buffer */
+ sb = (struct sk_buff *)iov->iov_base;
+@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
+ #endif /* NS_USE_DESTRUCTORS */
+ __net_timestamp(hb);
+ vcc->push(vcc, hb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/atm/solos-pci.c linux-3.4-pax/drivers/atm/solos-pci.c
+--- linux-3.4/drivers/atm/solos-pci.c 2012-03-19 10:38:57.932049926 +0100
++++ linux-3.4-pax/drivers/atm/solos-pci.c 2012-05-21 12:10:09.936048911 +0200
+@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
+ }
+ atm_charge(vcc, skb->truesize);
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ break;
+
+ case PKT_STATUS:
+@@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_car
+ vcc = SKB_CB(oldskb)->vcc;
+
+ if (vcc) {
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ solos_pop(vcc, oldskb);
+ } else
+ dev_kfree_skb_irq(oldskb);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/atm/suni.c linux-3.4-pax/drivers/atm/suni.c
+--- linux-3.4/drivers/atm/suni.c 2012-05-21 11:32:59.007927743 +0200
++++ linux-3.4-pax/drivers/atm/suni.c 2012-05-21 12:10:09.940048911 +0200
+@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
+
+
+ #define ADD_LIMITED(s,v) \
+- atomic_add((v),&stats->s); \
+- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
++ atomic_add_unchecked((v),&stats->s); \
++ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
+
+
+ static void suni_hz(unsigned long from_timer)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/atm/uPD98402.c linux-3.4-pax/drivers/atm/uPD98402.c
+--- linux-3.4/drivers/atm/uPD98402.c 2011-10-24 12:48:27.123091727 +0200
++++ linux-3.4-pax/drivers/atm/uPD98402.c 2012-05-21 12:10:09.940048911 +0200
+@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
+ struct sonet_stats tmp;
+ int error = 0;
+
+- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
++ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
+ sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
+ if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
+ if (zero && !error) {
+@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
+
+
+ #define ADD_LIMITED(s,v) \
+- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
+- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
+- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
++ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
++ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
+
+
+ static void stat_event(struct atm_dev *dev)
+@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
+ if (reason & uPD98402_INT_PFM) stat_event(dev);
+ if (reason & uPD98402_INT_PCO) {
+ (void) GET(PCOCR); /* clear interrupt cause */
+- atomic_add(GET(HECCT),
++ atomic_add_unchecked(GET(HECCT),
+ &PRIV(dev)->sonet_stats.uncorr_hcs);
+ }
+ if ((reason & uPD98402_INT_RFO) &&
+@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
+ PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
+ uPD98402_INT_LOS),PIMR); /* enable them */
+ (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
+- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
+- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
+- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
+ return 0;
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/atm/zatm.c linux-3.4-pax/drivers/atm/zatm.c
+--- linux-3.4/drivers/atm/zatm.c 2012-05-21 11:32:59.007927743 +0200
++++ linux-3.4-pax/drivers/atm/zatm.c 2012-05-21 12:10:09.944048911 +0200
+@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
+ }
+ if (!size) {
+ dev_kfree_skb_irq(skb);
+- if (vcc) atomic_inc(&vcc->stats->rx_err);
++ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
+ continue;
+ }
+ if (!atm_charge(vcc,skb->truesize)) {
+@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
+ skb->len = size;
+ ATM_SKB(skb)->vcc = vcc;
+ vcc->push(vcc,skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ zout(pos & 0xffff,MTA(mbx));
+ #if 0 /* probably a stupid idea */
+@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
+ skb_queue_head(&zatm_vcc->backlog,skb);
+ break;
+ }
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ wake_up(&zatm_vcc->tx_wait);
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/base/devtmpfs.c linux-3.4-pax/drivers/base/devtmpfs.c
+--- linux-3.4/drivers/base/devtmpfs.c 2012-03-19 10:38:57.960049925 +0100
++++ linux-3.4-pax/drivers/base/devtmpfs.c 2012-05-21 12:10:09.948048911 +0200
+@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
+ if (!thread)
+ return 0;
+
+- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
++ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
+ if (err)
+ printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
+ else
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/base/power/wakeup.c linux-3.4-pax/drivers/base/power/wakeup.c
+--- linux-3.4/drivers/base/power/wakeup.c 2012-05-21 11:32:59.075927747 +0200
++++ linux-3.4-pax/drivers/base/power/wakeup.c 2012-05-21 12:10:09.948048911 +0200
+@@ -30,14 +30,14 @@ bool events_check_enabled;
+ * They need to be modified together atomically, so it's better to use one
+ * atomic variable to hold them both.
+ */
+-static atomic_t combined_event_count = ATOMIC_INIT(0);
++static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
+
+ #define IN_PROGRESS_BITS (sizeof(int) * 4)
+ #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
+
+ static void split_counters(unsigned int *cnt, unsigned int *inpr)
+ {
+- unsigned int comb = atomic_read(&combined_event_count);
++ unsigned int comb = atomic_read_unchecked(&combined_event_count);
+
+ *cnt = (comb >> IN_PROGRESS_BITS);
+ *inpr = comb & MAX_IN_PROGRESS;
+@@ -379,7 +379,7 @@ static void wakeup_source_activate(struc
+ ws->last_time = ktime_get();
+
+ /* Increment the counter of events in progress. */
+- atomic_inc(&combined_event_count);
++ atomic_inc_unchecked(&combined_event_count);
+ }
+
+ /**
+@@ -475,7 +475,7 @@ static void wakeup_source_deactivate(str
+ * Increment the counter of registered wakeup events and decrement the
+ * couter of wakeup events in progress simultaneously.
+ */
+- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
++ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
+ }
+
+ /**
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/block/cciss.c linux-3.4-pax/drivers/block/cciss.c
+--- linux-3.4/drivers/block/cciss.c 2012-03-19 10:38:58.040049919 +0100
++++ linux-3.4-pax/drivers/block/cciss.c 2012-05-21 12:10:09.956048912 +0200
+@@ -3007,7 +3007,7 @@ static void start_io(ctlr_info_t *h)
+ while (!list_empty(&h->reqQ)) {
+ c = list_entry(h->reqQ.next, CommandList_struct, list);
+ /* can't do anything if fifo is full */
+- if ((h->access.fifo_full(h))) {
++ if ((h->access->fifo_full(h))) {
+ dev_warn(&h->pdev->dev, "fifo full\n");
+ break;
+ }
+@@ -3017,7 +3017,7 @@ static void start_io(ctlr_info_t *h)
+ h->Qdepth--;
+
+ /* Tell the controller execute command */
+- h->access.submit_command(h, c);
++ h->access->submit_command(h, c);
+
+ /* Put job onto the completed Q */
+ addQ(&h->cmpQ, c);
+@@ -3443,17 +3443,17 @@ startio:
+
+ static inline unsigned long get_next_completion(ctlr_info_t *h)
+ {
+- return h->access.command_completed(h);
++ return h->access->command_completed(h);
+ }
+
+ static inline int interrupt_pending(ctlr_info_t *h)
+ {
+- return h->access.intr_pending(h);
++ return h->access->intr_pending(h);
+ }
+
+ static inline long interrupt_not_for_us(ctlr_info_t *h)
+ {
+- return ((h->access.intr_pending(h) == 0) ||
++ return ((h->access->intr_pending(h) == 0) ||
+ (h->interrupts_enabled == 0));
+ }
+
+@@ -3486,7 +3486,7 @@ static inline u32 next_command(ctlr_info
+ u32 a;
+
+ if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+- return h->access.command_completed(h);
++ return h->access->command_completed(h);
+
+ if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+ a = *(h->reply_pool_head); /* Next cmd in ring buffer */
+@@ -4044,7 +4044,7 @@ static void __devinit cciss_put_controll
+ trans_support & CFGTBL_Trans_use_short_tags);
+
+ /* Change the access methods to the performant access methods */
+- h->access = SA5_performant_access;
++ h->access = &SA5_performant_access;
+ h->transMethod = CFGTBL_Trans_Performant;
+
+ return;
+@@ -4316,7 +4316,7 @@ static int __devinit cciss_pci_init(ctlr
+ if (prod_index < 0)
+ return -ENODEV;
+ h->product_name = products[prod_index].product_name;
+- h->access = *(products[prod_index].access);
++ h->access = products[prod_index].access;
+
+ if (cciss_board_disabled(h)) {
+ dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
+@@ -5041,7 +5041,7 @@ reinit_after_soft_reset:
+ }
+
+ /* make sure the board interrupts are off */
+- h->access.set_intr_mask(h, CCISS_INTR_OFF);
++ h->access->set_intr_mask(h, CCISS_INTR_OFF);
+ rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
+ if (rc)
+ goto clean2;
+@@ -5093,7 +5093,7 @@ reinit_after_soft_reset:
+ * fake ones to scoop up any residual completions.
+ */
+ spin_lock_irqsave(&h->lock, flags);
+- h->access.set_intr_mask(h, CCISS_INTR_OFF);
++ h->access->set_intr_mask(h, CCISS_INTR_OFF);
+ spin_unlock_irqrestore(&h->lock, flags);
+ free_irq(h->intr[h->intr_mode], h);
+ rc = cciss_request_irq(h, cciss_msix_discard_completions,
+@@ -5113,9 +5113,9 @@ reinit_after_soft_reset:
+ dev_info(&h->pdev->dev, "Board READY.\n");
+ dev_info(&h->pdev->dev,
+ "Waiting for stale completions to drain.\n");
+- h->access.set_intr_mask(h, CCISS_INTR_ON);
++ h->access->set_intr_mask(h, CCISS_INTR_ON);
+ msleep(10000);
+- h->access.set_intr_mask(h, CCISS_INTR_OFF);
++ h->access->set_intr_mask(h, CCISS_INTR_OFF);
+
+ rc = controller_reset_failed(h->cfgtable);
+ if (rc)
+@@ -5138,7 +5138,7 @@ reinit_after_soft_reset:
+ cciss_scsi_setup(h);
+
+ /* Turn the interrupts on so we can service requests */
+- h->access.set_intr_mask(h, CCISS_INTR_ON);
++ h->access->set_intr_mask(h, CCISS_INTR_ON);
+
+ /* Get the firmware version */
+ inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
+@@ -5211,7 +5211,7 @@ static void cciss_shutdown(struct pci_de
+ kfree(flush_buf);
+ if (return_code != IO_OK)
+ dev_warn(&h->pdev->dev, "Error flushing cache\n");
+- h->access.set_intr_mask(h, CCISS_INTR_OFF);
++ h->access->set_intr_mask(h, CCISS_INTR_OFF);
+ free_irq(h->intr[h->intr_mode], h);
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/block/cciss.h linux-3.4-pax/drivers/block/cciss.h
+--- linux-3.4/drivers/block/cciss.h 2012-01-08 19:47:51.559472876 +0100
++++ linux-3.4-pax/drivers/block/cciss.h 2012-05-21 12:10:09.956048912 +0200
+@@ -101,7 +101,7 @@ struct ctlr_info
+ /* information about each logical volume */
+ drive_info_struct *drv[CISS_MAX_LUN];
+
+- struct access_method access;
++ struct access_method *access;
+
+ /* queue and queue Info */
+ struct list_head reqQ;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/block/cpqarray.c linux-3.4-pax/drivers/block/cpqarray.c
+--- linux-3.4/drivers/block/cpqarray.c 2012-01-08 19:47:51.579472875 +0100
++++ linux-3.4-pax/drivers/block/cpqarray.c 2012-05-21 12:10:09.960048912 +0200
+@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
+ if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
+ goto Enomem4;
+ }
+- hba[i]->access.set_intr_mask(hba[i], 0);
++ hba[i]->access->set_intr_mask(hba[i], 0);
+ if (request_irq(hba[i]->intr, do_ida_intr,
+ IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
+ {
+@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
+ add_timer(&hba[i]->timer);
+
+ /* Enable IRQ now that spinlock and rate limit timer are set up */
+- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
++ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
+
+ for(j=0; j<NWD; j++) {
+ struct gendisk *disk = ida_gendisk[i][j];
+@@ -694,7 +694,7 @@ DBGINFO(
+ for(i=0; i<NR_PRODUCTS; i++) {
+ if (board_id == products[i].board_id) {
+ c->product_name = products[i].product_name;
+- c->access = *(products[i].access);
++ c->access = products[i].access;
+ break;
+ }
+ }
+@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
+ hba[ctlr]->intr = intr;
+ sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
+ hba[ctlr]->product_name = products[j].product_name;
+- hba[ctlr]->access = *(products[j].access);
++ hba[ctlr]->access = products[j].access;
+ hba[ctlr]->ctlr = ctlr;
+ hba[ctlr]->board_id = board_id;
+ hba[ctlr]->pci_dev = NULL; /* not PCI */
+@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
+
+ while((c = h->reqQ) != NULL) {
+ /* Can't do anything if we're busy */
+- if (h->access.fifo_full(h) == 0)
++ if (h->access->fifo_full(h) == 0)
+ return;
+
+ /* Get the first entry from the request Q */
+@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
+ h->Qdepth--;
+
+ /* Tell the controller to do our bidding */
+- h->access.submit_command(h, c);
++ h->access->submit_command(h, c);
+
+ /* Get onto the completion Q */
+ addQ(&h->cmpQ, c);
+@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq,
+ unsigned long flags;
+ __u32 a,a1;
+
+- istat = h->access.intr_pending(h);
++ istat = h->access->intr_pending(h);
+ /* Is this interrupt for us? */
+ if (istat == 0)
+ return IRQ_NONE;
+@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq,
+ */
+ spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
+ if (istat & FIFO_NOT_EMPTY) {
+- while((a = h->access.command_completed(h))) {
++ while((a = h->access->command_completed(h))) {
+ a1 = a; a &= ~3;
+ if ((c = h->cmpQ) == NULL)
+ {
+@@ -1449,11 +1449,11 @@ static int sendcmd(
+ /*
+ * Disable interrupt
+ */
+- info_p->access.set_intr_mask(info_p, 0);
++ info_p->access->set_intr_mask(info_p, 0);
+ /* Make sure there is room in the command FIFO */
+ /* Actually it should be completely empty at this time. */
+ for (i = 200000; i > 0; i--) {
+- temp = info_p->access.fifo_full(info_p);
++ temp = info_p->access->fifo_full(info_p);
+ if (temp != 0) {
+ break;
+ }
+@@ -1466,7 +1466,7 @@ DBG(
+ /*
+ * Send the cmd
+ */
+- info_p->access.submit_command(info_p, c);
++ info_p->access->submit_command(info_p, c);
+ complete = pollcomplete(ctlr);
+
+ pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
+@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t
+ * we check the new geometry. Then turn interrupts back on when
+ * we're done.
+ */
+- host->access.set_intr_mask(host, 0);
++ host->access->set_intr_mask(host, 0);
+ getgeometry(ctlr);
+- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
++ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
+
+ for(i=0; i<NWD; i++) {
+ struct gendisk *disk = ida_gendisk[ctlr][i];
+@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
+ /* Wait (up to 2 seconds) for a command to complete */
+
+ for (i = 200000; i > 0; i--) {
+- done = hba[ctlr]->access.command_completed(hba[ctlr]);
++ done = hba[ctlr]->access->command_completed(hba[ctlr]);
+ if (done == 0) {
+ udelay(10); /* a short fixed delay */
+ } else
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/block/cpqarray.h linux-3.4-pax/drivers/block/cpqarray.h
+--- linux-3.4/drivers/block/cpqarray.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/block/cpqarray.h 2012-05-21 12:10:09.964048912 +0200
+@@ -99,7 +99,7 @@ struct ctlr_info {
+ drv_info_t drv[NWD];
+ struct proc_dir_entry *proc;
+
+- struct access_method access;
++ struct access_method *access;
+
+ cmdlist_t *reqQ;
+ cmdlist_t *cmpQ;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/block/drbd/drbd_int.h linux-3.4-pax/drivers/block/drbd/drbd_int.h
+--- linux-3.4/drivers/block/drbd/drbd_int.h 2012-03-19 10:38:58.040049919 +0100
++++ linux-3.4-pax/drivers/block/drbd/drbd_int.h 2012-05-21 12:10:09.968048912 +0200
+@@ -736,7 +736,7 @@ struct drbd_request;
+ struct drbd_epoch {
+ struct list_head list;
+ unsigned int barrier_nr;
+- atomic_t epoch_size; /* increased on every request added. */
++ atomic_unchecked_t epoch_size; /* increased on every request added. */
+ atomic_t active; /* increased on every req. added, and dec on every finished. */
+ unsigned long flags;
+ };
+@@ -1108,7 +1108,7 @@ struct drbd_conf {
+ void *int_dig_in;
+ void *int_dig_vv;
+ wait_queue_head_t seq_wait;
+- atomic_t packet_seq;
++ atomic_unchecked_t packet_seq;
+ unsigned int peer_seq;
+ spinlock_t peer_seq_lock;
+ unsigned int minor;
+@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct
+
+ static inline void drbd_tcp_cork(struct socket *sock)
+ {
+- int __user val = 1;
++ int val = 1;
+ (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
+- (char __user *)&val, sizeof(val));
++ (char __force_user *)&val, sizeof(val));
+ }
+
+ static inline void drbd_tcp_uncork(struct socket *sock)
+ {
+- int __user val = 0;
++ int val = 0;
+ (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
+- (char __user *)&val, sizeof(val));
++ (char __force_user *)&val, sizeof(val));
+ }
+
+ static inline void drbd_tcp_nodelay(struct socket *sock)
+ {
+- int __user val = 1;
++ int val = 1;
+ (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
+- (char __user *)&val, sizeof(val));
++ (char __force_user *)&val, sizeof(val));
+ }
+
+ static inline void drbd_tcp_quickack(struct socket *sock)
+ {
+- int __user val = 2;
++ int val = 2;
+ (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
+- (char __user *)&val, sizeof(val));
++ (char __force_user *)&val, sizeof(val));
+ }
+
+ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/block/drbd/drbd_main.c linux-3.4-pax/drivers/block/drbd/drbd_main.c
+--- linux-3.4/drivers/block/drbd/drbd_main.c 2012-03-19 10:38:58.044049920 +0100
++++ linux-3.4-pax/drivers/block/drbd/drbd_main.c 2012-05-21 12:10:09.972048913 +0200
+@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
+ p.sector = sector;
+ p.block_id = block_id;
+ p.blksize = blksize;
+- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
++ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
+
+ if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
+ return false;
+@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
+ p.sector = cpu_to_be64(req->sector);
+ p.block_id = (unsigned long)req;
+ p.seq_num = cpu_to_be32(req->seq_num =
+- atomic_add_return(1, &mdev->packet_seq));
++ atomic_add_return_unchecked(1, &mdev->packet_seq));
+
+ dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
+
+@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
+ atomic_set(&mdev->unacked_cnt, 0);
+ atomic_set(&mdev->local_cnt, 0);
+ atomic_set(&mdev->net_cnt, 0);
+- atomic_set(&mdev->packet_seq, 0);
++ atomic_set_unchecked(&mdev->packet_seq, 0);
+ atomic_set(&mdev->pp_in_use, 0);
+ atomic_set(&mdev->pp_in_use_by_net, 0);
+ atomic_set(&mdev->rs_sect_in, 0);
+@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
+ mdev->receiver.t_state);
+
+ /* no need to lock it, I'm the only thread alive */
+- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
+- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
++ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
++ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
+ mdev->al_writ_cnt =
+ mdev->bm_writ_cnt =
+ mdev->read_cnt =
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/block/drbd/drbd_nl.c linux-3.4-pax/drivers/block/drbd/drbd_nl.c
+--- linux-3.4/drivers/block/drbd/drbd_nl.c 2012-05-21 11:32:59.243927756 +0200
++++ linux-3.4-pax/drivers/block/drbd/drbd_nl.c 2012-05-21 12:10:09.976048913 +0200
+@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
+ module_put(THIS_MODULE);
+ }
+
+-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
++static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
+
+ static unsigned short *
+ __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
+@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
+ cn_reply->id.idx = CN_IDX_DRBD;
+ cn_reply->id.val = CN_VAL_DRBD;
+
+- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
++ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
+ cn_reply->ack = 0; /* not used here. */
+ cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
+ (int)((char *)tl - (char *)reply->tag_list);
+@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
+ cn_reply->id.idx = CN_IDX_DRBD;
+ cn_reply->id.val = CN_VAL_DRBD;
+
+- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
++ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
+ cn_reply->ack = 0; /* not used here. */
+ cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
+ (int)((char *)tl - (char *)reply->tag_list);
+@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
+ cn_reply->id.idx = CN_IDX_DRBD;
+ cn_reply->id.val = CN_VAL_DRBD;
+
+- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
++ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
+ cn_reply->ack = 0; // not used here.
+ cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
+ (int)((char*)tl - (char*)reply->tag_list);
+@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
+ cn_reply->id.idx = CN_IDX_DRBD;
+ cn_reply->id.val = CN_VAL_DRBD;
+
+- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
++ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
+ cn_reply->ack = 0; /* not used here. */
+ cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
+ (int)((char *)tl - (char *)reply->tag_list);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/block/drbd/drbd_receiver.c linux-3.4-pax/drivers/block/drbd/drbd_receiver.c
+--- linux-3.4/drivers/block/drbd/drbd_receiver.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/block/drbd/drbd_receiver.c 2012-05-21 12:10:09.984048913 +0200
+@@ -894,7 +894,7 @@ retry:
+ sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
+ sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+
+- atomic_set(&mdev->packet_seq, 0);
++ atomic_set_unchecked(&mdev->packet_seq, 0);
+ mdev->peer_seq = 0;
+
+ drbd_thread_start(&mdev->asender);
+@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
+ do {
+ next_epoch = NULL;
+
+- epoch_size = atomic_read(&epoch->epoch_size);
++ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
+
+ switch (ev & ~EV_CLEANUP) {
+ case EV_PUT:
+@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
+ rv = FE_DESTROYED;
+ } else {
+ epoch->flags = 0;
+- atomic_set(&epoch->epoch_size, 0);
++ atomic_set_unchecked(&epoch->epoch_size, 0);
+ /* atomic_set(&epoch->active, 0); is already zero */
+ if (rv == FE_STILL_LIVE)
+ rv = FE_RECYCLED;
+@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
+ drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
+ drbd_flush(mdev);
+
+- if (atomic_read(&mdev->current_epoch->epoch_size)) {
++ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
+ epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+ if (epoch)
+ break;
+ }
+
+ epoch = mdev->current_epoch;
+- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
++ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
+
+ D_ASSERT(atomic_read(&epoch->active) == 0);
+ D_ASSERT(epoch->flags == 0);
+@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
+ }
+
+ epoch->flags = 0;
+- atomic_set(&epoch->epoch_size, 0);
++ atomic_set_unchecked(&epoch->epoch_size, 0);
+ atomic_set(&epoch->active, 0);
+
+ spin_lock(&mdev->epoch_lock);
+- if (atomic_read(&mdev->current_epoch->epoch_size)) {
++ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
+ list_add(&epoch->list, &mdev->current_epoch->list);
+ mdev->current_epoch = epoch;
+ mdev->epochs++;
+@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
+ spin_unlock(&mdev->peer_seq_lock);
+
+ drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
+- atomic_inc(&mdev->current_epoch->epoch_size);
++ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
+ return drbd_drain_block(mdev, data_size);
+ }
+
+@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
+
+ spin_lock(&mdev->epoch_lock);
+ e->epoch = mdev->current_epoch;
+- atomic_inc(&e->epoch->epoch_size);
++ atomic_inc_unchecked(&e->epoch->epoch_size);
+ atomic_inc(&e->epoch->active);
+ spin_unlock(&mdev->epoch_lock);
+
+@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
+ D_ASSERT(list_empty(&mdev->done_ee));
+
+ /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
+- atomic_set(&mdev->current_epoch->epoch_size, 0);
++ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
+ D_ASSERT(list_empty(&mdev->current_epoch->list));
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/block/loop.c linux-3.4-pax/drivers/block/loop.c
+--- linux-3.4/drivers/block/loop.c 2012-05-21 11:32:59.283927758 +0200
++++ linux-3.4-pax/drivers/block/loop.c 2012-05-21 12:10:09.988048914 +0200
+@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct fil
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(get_ds());
+- bw = file->f_op->write(file, buf, len, &pos);
++ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
+ set_fs(old_fs);
+ if (likely(bw == len))
+ return 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/char/agp/frontend.c linux-3.4-pax/drivers/char/agp/frontend.c
+--- linux-3.4/drivers/char/agp/frontend.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/char/agp/frontend.c 2012-05-21 12:10:09.988048914 +0200
+@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
+ if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
+ return -EFAULT;
+
+- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
++ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
+ return -EFAULT;
+
+ client = agp_find_client_by_pid(reserve.pid);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/char/hpet.c linux-3.4-pax/drivers/char/hpet.c
+--- linux-3.4/drivers/char/hpet.c 2012-05-21 11:32:59.607927776 +0200
++++ linux-3.4-pax/drivers/char/hpet.c 2012-05-21 12:10:09.992048914 +0200
+@@ -571,7 +571,7 @@ static inline unsigned long hpet_time_di
+ }
+
+ static int
+-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
++hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
+ struct hpet_info *info)
+ {
+ struct hpet_timer __iomem *timer;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/char/ipmi/ipmi_msghandler.c linux-3.4-pax/drivers/char/ipmi/ipmi_msghandler.c
+--- linux-3.4/drivers/char/ipmi/ipmi_msghandler.c 2012-05-21 11:32:59.651927778 +0200
++++ linux-3.4-pax/drivers/char/ipmi/ipmi_msghandler.c 2012-05-21 12:10:09.996048914 +0200
+@@ -420,7 +420,7 @@ struct ipmi_smi {
+ struct proc_dir_entry *proc_dir;
+ char proc_dir_name[10];
+
+- atomic_t stats[IPMI_NUM_STATS];
++ atomic_unchecked_t stats[IPMI_NUM_STATS];
+
+ /*
+ * run_to_completion duplicate of smb_info, smi_info
+@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
+
+
+ #define ipmi_inc_stat(intf, stat) \
+- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
++ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
+ #define ipmi_get_stat(intf, stat) \
+- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
++ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
+
+ static int is_lan_addr(struct ipmi_addr *addr)
+ {
+@@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
+ INIT_LIST_HEAD(&intf->cmd_rcvrs);
+ init_waitqueue_head(&intf->waitq);
+ for (i = 0; i < IPMI_NUM_STATS; i++)
+- atomic_set(&intf->stats[i], 0);
++ atomic_set_unchecked(&intf->stats[i], 0);
+
+ intf->proc_dir = NULL;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/char/ipmi/ipmi_si_intf.c linux-3.4-pax/drivers/char/ipmi/ipmi_si_intf.c
+--- linux-3.4/drivers/char/ipmi/ipmi_si_intf.c 2012-05-21 11:32:59.675927779 +0200
++++ linux-3.4-pax/drivers/char/ipmi/ipmi_si_intf.c 2012-05-21 12:10:10.000048914 +0200
+@@ -275,7 +275,7 @@ struct smi_info {
+ unsigned char slave_addr;
+
+ /* Counters and things for the proc filesystem. */
+- atomic_t stats[SI_NUM_STATS];
++ atomic_unchecked_t stats[SI_NUM_STATS];
+
+ struct task_struct *thread;
+
+@@ -284,9 +284,9 @@ struct smi_info {
+ };
+
+ #define smi_inc_stat(smi, stat) \
+- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
++ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
+ #define smi_get_stat(smi, stat) \
+- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
++ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
+
+ #define SI_MAX_PARMS 4
+
+@@ -3209,7 +3209,7 @@ static int try_smi_init(struct smi_info
+ atomic_set(&new_smi->req_events, 0);
+ new_smi->run_to_completion = 0;
+ for (i = 0; i < SI_NUM_STATS; i++)
+- atomic_set(&new_smi->stats[i], 0);
++ atomic_set_unchecked(&new_smi->stats[i], 0);
+
+ new_smi->interrupt_disabled = 1;
+ atomic_set(&new_smi->stop_operation, 0);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/char/mbcs.c linux-3.4-pax/drivers/char/mbcs.c
+--- linux-3.4/drivers/char/mbcs.c 2012-05-21 11:32:59.699927781 +0200
++++ linux-3.4-pax/drivers/char/mbcs.c 2012-05-21 12:10:10.004048914 +0200
+@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *de
+ return 0;
+ }
+
+-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
++static const struct cx_device_id __devinitconst mbcs_id_table[] = {
+ {
+ .part_num = MBCS_PART_NUM,
+ .mfg_num = MBCS_MFG_NUM,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/char/mem.c linux-3.4-pax/drivers/char/mem.c
+--- linux-3.4/drivers/char/mem.c 2012-03-19 10:38:58.124049914 +0100
++++ linux-3.4-pax/drivers/char/mem.c 2012-05-21 12:10:10.008048915 +0200
+@@ -118,6 +118,7 @@ static ssize_t read_mem(struct file *fil
+
+ while (count > 0) {
+ unsigned long remaining;
++ char *temp;
+
+ sz = size_inside_page(p, count);
+
+@@ -133,7 +134,23 @@ static ssize_t read_mem(struct file *fil
+ if (!ptr)
+ return -EFAULT;
+
+- remaining = copy_to_user(buf, ptr, sz);
++#ifdef CONFIG_PAX_USERCOPY
++ temp = kmalloc(sz, GFP_KERNEL);
++ if (!temp) {
++ unxlate_dev_mem_ptr(p, ptr);
++ return -ENOMEM;
++ }
++ memcpy(temp, ptr, sz);
++#else
++ temp = ptr;
++#endif
++
++ remaining = copy_to_user(buf, temp, sz);
++
++#ifdef CONFIG_PAX_USERCOPY
++ kfree(temp);
++#endif
++
+ unxlate_dev_mem_ptr(p, ptr);
+ if (remaining)
+ return -EFAULT;
+@@ -396,9 +413,8 @@ static ssize_t read_kmem(struct file *fi
+ size_t count, loff_t *ppos)
+ {
+ unsigned long p = *ppos;
+- ssize_t low_count, read, sz;
++ ssize_t low_count, read, sz, err = 0;
+ char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
+- int err = 0;
+
+ read = 0;
+ if (p < (unsigned long) high_memory) {
+@@ -420,6 +436,8 @@ static ssize_t read_kmem(struct file *fi
+ }
+ #endif
+ while (low_count > 0) {
++ char *temp;
++
+ sz = size_inside_page(p, low_count);
+
+ /*
+@@ -429,7 +447,22 @@ static ssize_t read_kmem(struct file *fi
+ */
+ kbuf = xlate_dev_kmem_ptr((char *)p);
+
+- if (copy_to_user(buf, kbuf, sz))
++#ifdef CONFIG_PAX_USERCOPY
++ temp = kmalloc(sz, GFP_KERNEL);
++ if (!temp)
++ return -ENOMEM;
++ memcpy(temp, kbuf, sz);
++#else
++ temp = kbuf;
++#endif
++
++ err = copy_to_user(buf, temp, sz);
++
++#ifdef CONFIG_PAX_USERCOPY
++ kfree(temp);
++#endif
++
++ if (err)
+ return -EFAULT;
+ buf += sz;
+ p += sz;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/char/nvram.c linux-3.4-pax/drivers/char/nvram.c
+--- linux-3.4/drivers/char/nvram.c 2012-05-21 11:32:59.723927782 +0200
++++ linux-3.4-pax/drivers/char/nvram.c 2012-05-21 12:10:10.008048915 +0200
+@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *f
+
+ spin_unlock_irq(&rtc_lock);
+
+- if (copy_to_user(buf, contents, tmp - contents))
++ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
+ return -EFAULT;
+
+ *ppos = i;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/char/random.c linux-3.4-pax/drivers/char/random.c
+--- linux-3.4/drivers/char/random.c 2012-05-21 11:32:59.759927784 +0200
++++ linux-3.4-pax/drivers/char/random.c 2012-05-21 12:10:10.012048915 +0200
+@@ -913,7 +913,7 @@ static ssize_t extract_entropy_user(stru
+
+ extract_buf(r, tmp);
+ i = min_t(int, nbytes, EXTRACT_SIZE);
+- if (copy_to_user(buf, tmp, i)) {
++ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
+ ret = -EFAULT;
+ break;
+ }
+@@ -1238,7 +1238,7 @@ EXPORT_SYMBOL(generate_random_uuid);
+ #include <linux/sysctl.h>
+
+ static int min_read_thresh = 8, min_write_thresh;
+-static int max_read_thresh = INPUT_POOL_WORDS * 32;
++static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
+ static int max_write_thresh = INPUT_POOL_WORDS * 32;
+ static char sysctl_bootid[16];
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/char/sonypi.c linux-3.4-pax/drivers/char/sonypi.c
+--- linux-3.4/drivers/char/sonypi.c 2012-05-21 11:32:59.783927785 +0200
++++ linux-3.4-pax/drivers/char/sonypi.c 2012-05-21 12:10:10.016048915 +0200
+@@ -54,6 +54,7 @@
+
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
++#include <asm/local.h>
+
+ #include <linux/sonypi.h>
+
+@@ -490,7 +491,7 @@ static struct sonypi_device {
+ spinlock_t fifo_lock;
+ wait_queue_head_t fifo_proc_list;
+ struct fasync_struct *fifo_async;
+- int open_count;
++ local_t open_count;
+ int model;
+ struct input_dev *input_jog_dev;
+ struct input_dev *input_key_dev;
+@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, st
+ static int sonypi_misc_release(struct inode *inode, struct file *file)
+ {
+ mutex_lock(&sonypi_device.lock);
+- sonypi_device.open_count--;
++ local_dec(&sonypi_device.open_count);
+ mutex_unlock(&sonypi_device.lock);
+ return 0;
+ }
+@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode
+ {
+ mutex_lock(&sonypi_device.lock);
+ /* Flush input queue on first open */
+- if (!sonypi_device.open_count)
++ if (!local_read(&sonypi_device.open_count))
+ kfifo_reset(&sonypi_device.fifo);
+- sonypi_device.open_count++;
++ local_inc(&sonypi_device.open_count);
+ mutex_unlock(&sonypi_device.lock);
+
+ return 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/char/tpm/tpm_bios.c linux-3.4-pax/drivers/char/tpm/tpm_bios.c
+--- linux-3.4/drivers/char/tpm/tpm_bios.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/char/tpm/tpm_bios.c 2012-05-21 12:10:10.016048915 +0200
+@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
+ event = addr;
+
+ if ((event->event_type == 0 && event->event_size == 0) ||
+- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
++ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
+ return NULL;
+
+ return addr;
+@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
+ return NULL;
+
+ if ((event->event_type == 0 && event->event_size == 0) ||
+- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
++ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
+ return NULL;
+
+ (*pos)++;
+@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
+ int i;
+
+ for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
+- seq_putc(m, data[i]);
++ if (!seq_putc(m, data[i]))
++ return -EFAULT;
+
+ return 0;
+ }
+@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log
+ log->bios_event_log_end = log->bios_event_log + len;
+
+ virt = acpi_os_map_memory(start, len);
++ if (!virt) {
++ kfree(log->bios_event_log);
++ log->bios_event_log = NULL;
++ return -EFAULT;
++ }
+
+- memcpy(log->bios_event_log, virt, len);
++ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
+
+ acpi_os_unmap_memory(virt, len);
+ return 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/char/tpm/tpm.c linux-3.4-pax/drivers/char/tpm/tpm.c
+--- linux-3.4/drivers/char/tpm/tpm.c 2012-05-21 11:32:59.795927786 +0200
++++ linux-3.4-pax/drivers/char/tpm/tpm.c 2012-05-21 12:10:10.020048915 +0200
+@@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_c
+ chip->vendor.req_complete_val)
+ goto out_recv;
+
+- if ((status == chip->vendor.req_canceled)) {
++ if (status == chip->vendor.req_canceled) {
+ dev_err(chip->dev, "Operation Canceled\n");
+ rc = -ECANCELED;
+ goto out;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/char/virtio_console.c linux-3.4-pax/drivers/char/virtio_console.c
+--- linux-3.4/drivers/char/virtio_console.c 2012-05-21 11:32:59.831927788 +0200
++++ linux-3.4-pax/drivers/char/virtio_console.c 2012-05-21 12:10:10.024048915 +0200
+@@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port
+ if (to_user) {
+ ssize_t ret;
+
+- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
++ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
+ if (ret)
+ return -EFAULT;
+ } else {
+@@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct fil
+ if (!port_has_data(port) && !port->host_connected)
+ return 0;
+
+- return fill_readbuf(port, ubuf, count, true);
++ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
+ }
+
+ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/edac/edac_pci_sysfs.c linux-3.4-pax/drivers/edac/edac_pci_sysfs.c
+--- linux-3.4/drivers/edac/edac_pci_sysfs.c 2012-03-19 10:38:58.356049902 +0100
++++ linux-3.4-pax/drivers/edac/edac_pci_sysfs.c 2012-05-21 12:10:10.024048915 +0200
+@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
+ static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
+ static int edac_pci_poll_msec = 1000; /* one second workq period */
+
+-static atomic_t pci_parity_count = ATOMIC_INIT(0);
+-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
++static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
++static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
+
+ static struct kobject *edac_pci_top_main_kobj;
+ static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
+@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
+ edac_printk(KERN_CRIT, EDAC_PCI,
+ "Signaled System Error on %s\n",
+ pci_name(dev));
+- atomic_inc(&pci_nonparity_count);
++ atomic_inc_unchecked(&pci_nonparity_count);
+ }
+
+ if (status & (PCI_STATUS_PARITY)) {
+@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
+ "Master Data Parity Error on %s\n",
+ pci_name(dev));
+
+- atomic_inc(&pci_parity_count);
++ atomic_inc_unchecked(&pci_parity_count);
+ }
+
+ if (status & (PCI_STATUS_DETECTED_PARITY)) {
+@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
+ "Detected Parity Error on %s\n",
+ pci_name(dev));
+
+- atomic_inc(&pci_parity_count);
++ atomic_inc_unchecked(&pci_parity_count);
+ }
+ }
+
+@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
+ edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+ "Signaled System Error on %s\n",
+ pci_name(dev));
+- atomic_inc(&pci_nonparity_count);
++ atomic_inc_unchecked(&pci_nonparity_count);
+ }
+
+ if (status & (PCI_STATUS_PARITY)) {
+@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
+ "Master Data Parity Error on "
+ "%s\n", pci_name(dev));
+
+- atomic_inc(&pci_parity_count);
++ atomic_inc_unchecked(&pci_parity_count);
+ }
+
+ if (status & (PCI_STATUS_DETECTED_PARITY)) {
+@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
+ "Detected Parity Error on %s\n",
+ pci_name(dev));
+
+- atomic_inc(&pci_parity_count);
++ atomic_inc_unchecked(&pci_parity_count);
+ }
+ }
+ }
+@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
+ if (!check_pci_errors)
+ return;
+
+- before_count = atomic_read(&pci_parity_count);
++ before_count = atomic_read_unchecked(&pci_parity_count);
+
+ /* scan all PCI devices looking for a Parity Error on devices and
+ * bridges.
+@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
+ /* Only if operator has selected panic on PCI Error */
+ if (edac_pci_get_panic_on_pe()) {
+ /* If the count is different 'after' from 'before' */
+- if (before_count != atomic_read(&pci_parity_count))
++ if (before_count != atomic_read_unchecked(&pci_parity_count))
+ panic("EDAC: PCI Parity Error");
+ }
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/edac/mce_amd.h linux-3.4-pax/drivers/edac/mce_amd.h
+--- linux-3.4/drivers/edac/mce_amd.h 2012-05-21 11:33:00.747927838 +0200
++++ linux-3.4-pax/drivers/edac/mce_amd.h 2012-05-30 02:52:20.986925858 +0200
+@@ -82,7 +82,7 @@ extern const char * const ii_msgs[];
+ struct amd_decoder_ops {
+ bool (*dc_mce)(u16, u8);
+ bool (*ic_mce)(u16, u8);
+-};
++} __no_const;
+
+ void amd_report_gart_errors(bool);
+ void amd_register_ecc_decoder(void (*f)(int, struct mce *));
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/firewire/core-card.c linux-3.4-pax/drivers/firewire/core-card.c
+--- linux-3.4/drivers/firewire/core-card.c 2012-05-21 11:33:00.787927840 +0200
++++ linux-3.4-pax/drivers/firewire/core-card.c 2012-05-21 12:10:10.028048916 +0200
+@@ -679,7 +679,7 @@ void fw_card_release(struct kref *kref)
+
+ void fw_core_remove_card(struct fw_card *card)
+ {
+- struct fw_card_driver dummy_driver = dummy_driver_template;
++ fw_card_driver_no_const dummy_driver = dummy_driver_template;
+
+ card->driver->update_phy_reg(card, 4,
+ PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/firewire/core-cdev.c linux-3.4-pax/drivers/firewire/core-cdev.c
+--- linux-3.4/drivers/firewire/core-cdev.c 2012-05-21 11:33:00.799927840 +0200
++++ linux-3.4-pax/drivers/firewire/core-cdev.c 2012-05-21 12:10:10.032048916 +0200
+@@ -1341,8 +1341,7 @@ static int init_iso_resource(struct clie
+ int ret;
+
+ if ((request->channels == 0 && request->bandwidth == 0) ||
+- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
+- request->bandwidth < 0)
++ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
+ return -EINVAL;
+
+ r = kmalloc(sizeof(*r), GFP_KERNEL);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/firewire/core.h linux-3.4-pax/drivers/firewire/core.h
+--- linux-3.4/drivers/firewire/core.h 2012-05-21 11:33:00.811927841 +0200
++++ linux-3.4-pax/drivers/firewire/core.h 2012-05-21 12:10:10.032048916 +0200
+@@ -110,6 +110,7 @@ struct fw_card_driver {
+
+ int (*stop_iso)(struct fw_iso_context *ctx);
+ };
++typedef struct fw_card_driver __no_const fw_card_driver_no_const;
+
+ void fw_card_initialize(struct fw_card *card,
+ const struct fw_card_driver *driver, struct device *device);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/firewire/core-transaction.c linux-3.4-pax/drivers/firewire/core-transaction.c
+--- linux-3.4/drivers/firewire/core-transaction.c 2012-05-21 11:33:00.807927841 +0200
++++ linux-3.4-pax/drivers/firewire/core-transaction.c 2012-05-21 12:10:10.036048916 +0200
+@@ -37,6 +37,7 @@
+ #include <linux/timer.h>
+ #include <linux/types.h>
+ #include <linux/workqueue.h>
++#include <linux/sched.h>
+
+ #include <asm/byteorder.h>
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/firmware/dmi_scan.c linux-3.4-pax/drivers/firmware/dmi_scan.c
+--- linux-3.4/drivers/firmware/dmi_scan.c 2012-01-08 19:47:52.159472844 +0100
++++ linux-3.4-pax/drivers/firmware/dmi_scan.c 2012-05-21 12:10:10.036048916 +0200
+@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
+ }
+ }
+ else {
+- /*
+- * no iounmap() for that ioremap(); it would be a no-op, but
+- * it's so early in setup that sucker gets confused into doing
+- * what it shouldn't if we actually call it.
+- */
+ p = dmi_ioremap(0xF0000, 0x10000);
+ if (p == NULL)
+ goto error;
+@@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct
+ if (buf == NULL)
+ return -1;
+
+- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
++ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
+
+ iounmap(buf);
+ return 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpio/gpio-vr41xx.c linux-3.4-pax/drivers/gpio/gpio-vr41xx.c
+--- linux-3.4/drivers/gpio/gpio-vr41xx.c 2012-03-19 10:38:58.656049887 +0100
++++ linux-3.4-pax/drivers/gpio/gpio-vr41xx.c 2012-05-21 12:10:10.040048916 +0200
+@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
+ printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
+ maskl, pendl, maskh, pendh);
+
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+
+ return -EINVAL;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/drm_crtc_helper.c linux-3.4-pax/drivers/gpu/drm/drm_crtc_helper.c
+--- linux-3.4/drivers/gpu/drm/drm_crtc_helper.c 2012-05-21 11:33:00.923927847 +0200
++++ linux-3.4-pax/drivers/gpu/drm/drm_crtc_helper.c 2012-05-21 12:10:10.044048917 +0200
+@@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct d
+ struct drm_crtc *tmp;
+ int crtc_mask = 1;
+
+- WARN(!crtc, "checking null crtc?\n");
++ BUG_ON(!crtc);
+
+ dev = crtc->dev;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/drm_drv.c linux-3.4-pax/drivers/gpu/drm/drm_drv.c
+--- linux-3.4/drivers/gpu/drm/drm_drv.c 2012-05-21 11:33:00.927927847 +0200
++++ linux-3.4-pax/drivers/gpu/drm/drm_drv.c 2012-05-21 12:10:10.044048917 +0200
+@@ -316,7 +316,7 @@ module_exit(drm_core_exit);
+ /**
+ * Copy and IOCTL return string to user space
+ */
+-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
++static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
+ {
+ int len;
+
+@@ -399,7 +399,7 @@ long drm_ioctl(struct file *filp,
+ return -ENODEV;
+
+ atomic_inc(&dev->ioctl_count);
+- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
+ ++file_priv->ioctl_count;
+
+ DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/drm_fops.c linux-3.4-pax/drivers/gpu/drm/drm_fops.c
+--- linux-3.4/drivers/gpu/drm/drm_fops.c 2012-05-21 11:33:00.935927848 +0200
++++ linux-3.4-pax/drivers/gpu/drm/drm_fops.c 2012-05-21 12:10:10.048048917 +0200
+@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device *
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+- atomic_set(&dev->counts[i], 0);
++ atomic_set_unchecked(&dev->counts[i], 0);
+
+ dev->sigdata.lock = NULL;
+
+@@ -138,8 +138,8 @@ int drm_open(struct inode *inode, struct
+
+ retcode = drm_open_helper(inode, filp, dev);
+ if (!retcode) {
+- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+- if (!dev->open_count++)
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
++ if (local_inc_return(&dev->open_count) == 1)
+ retcode = drm_setup(dev);
+ }
+ if (!retcode) {
+@@ -482,7 +482,7 @@ int drm_release(struct inode *inode, str
+
+ mutex_lock(&drm_global_mutex);
+
+- DRM_DEBUG("open_count = %d\n", dev->open_count);
++ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
+
+ if (dev->driver->preclose)
+ dev->driver->preclose(dev, file_priv);
+@@ -491,10 +491,10 @@ int drm_release(struct inode *inode, str
+ * Begin inline drm_release
+ */
+
+- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
++ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->device),
+- dev->open_count);
++ local_read(&dev->open_count));
+
+ /* Release any auth tokens that might point to this file_priv,
+ (do that under the drm_global_mutex) */
+@@ -584,8 +584,8 @@ int drm_release(struct inode *inode, str
+ * End inline drm_release
+ */
+
+- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
+- if (!--dev->open_count) {
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
++ if (local_dec_and_test(&dev->open_count)) {
+ if (atomic_read(&dev->ioctl_count)) {
+ DRM_ERROR("Device busy: %d\n",
+ atomic_read(&dev->ioctl_count));
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/drm_global.c linux-3.4-pax/drivers/gpu/drm/drm_global.c
+--- linux-3.4/drivers/gpu/drm/drm_global.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/gpu/drm/drm_global.c 2012-05-21 12:10:10.048048917 +0200
+@@ -36,7 +36,7 @@
+ struct drm_global_item {
+ struct mutex mutex;
+ void *object;
+- int refcount;
++ atomic_t refcount;
+ };
+
+ static struct drm_global_item glob[DRM_GLOBAL_NUM];
+@@ -49,7 +49,7 @@ void drm_global_init(void)
+ struct drm_global_item *item = &glob[i];
+ mutex_init(&item->mutex);
+ item->object = NULL;
+- item->refcount = 0;
++ atomic_set(&item->refcount, 0);
+ }
+ }
+
+@@ -59,7 +59,7 @@ void drm_global_release(void)
+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+ struct drm_global_item *item = &glob[i];
+ BUG_ON(item->object != NULL);
+- BUG_ON(item->refcount != 0);
++ BUG_ON(atomic_read(&item->refcount) != 0);
+ }
+ }
+
+@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
+ void *object;
+
+ mutex_lock(&item->mutex);
+- if (item->refcount == 0) {
++ if (atomic_read(&item->refcount) == 0) {
+ item->object = kzalloc(ref->size, GFP_KERNEL);
+ if (unlikely(item->object == NULL)) {
+ ret = -ENOMEM;
+@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
+ goto out_err;
+
+ }
+- ++item->refcount;
++ atomic_inc(&item->refcount);
+ ref->object = item->object;
+ object = item->object;
+ mutex_unlock(&item->mutex);
+@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
+ struct drm_global_item *item = &glob[ref->global_type];
+
+ mutex_lock(&item->mutex);
+- BUG_ON(item->refcount == 0);
++ BUG_ON(atomic_read(&item->refcount) == 0);
+ BUG_ON(ref->object != item->object);
+- if (--item->refcount == 0) {
++ if (atomic_dec_and_test(&item->refcount)) {
+ ref->release(ref);
+ item->object = NULL;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/drm_info.c linux-3.4-pax/drivers/gpu/drm/drm_info.c
+--- linux-3.4/drivers/gpu/drm/drm_info.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/gpu/drm/drm_info.c 2012-05-21 12:10:10.052048917 +0200
+@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
+ struct drm_local_map *map;
+ struct drm_map_list *r_list;
+
+- /* Hardcoded from _DRM_FRAME_BUFFER,
+- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
+- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
+- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
++ static const char * const types[] = {
++ [_DRM_FRAME_BUFFER] = "FB",
++ [_DRM_REGISTERS] = "REG",
++ [_DRM_SHM] = "SHM",
++ [_DRM_AGP] = "AGP",
++ [_DRM_SCATTER_GATHER] = "SG",
++ [_DRM_CONSISTENT] = "PCI",
++ [_DRM_GEM] = "GEM" };
+ const char *type;
+ int i;
+
+@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
+ map = r_list->map;
+ if (!map)
+ continue;
+- if (map->type < 0 || map->type > 5)
++ if (map->type >= ARRAY_SIZE(types))
+ type = "??";
+ else
+ type = types[map->type];
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/drm_ioc32.c linux-3.4-pax/drivers/gpu/drm/drm_ioc32.c
+--- linux-3.4/drivers/gpu/drm/drm_ioc32.c 2012-03-19 10:38:58.692049884 +0100
++++ linux-3.4-pax/drivers/gpu/drm/drm_ioc32.c 2012-05-21 12:10:10.052048917 +0200
+@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct fi
+ request = compat_alloc_user_space(nbytes);
+ if (!access_ok(VERIFY_WRITE, request, nbytes))
+ return -EFAULT;
+- list = (struct drm_buf_desc *) (request + 1);
++ list = (struct drm_buf_desc __user *) (request + 1);
+
+ if (__put_user(count, &request->count)
+ || __put_user(list, &request->list))
+@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct fil
+ request = compat_alloc_user_space(nbytes);
+ if (!access_ok(VERIFY_WRITE, request, nbytes))
+ return -EFAULT;
+- list = (struct drm_buf_pub *) (request + 1);
++ list = (struct drm_buf_pub __user *) (request + 1);
+
+ if (__put_user(count, &request->count)
+ || __put_user(list, &request->list))
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/drm_ioctl.c linux-3.4-pax/drivers/gpu/drm/drm_ioctl.c
+--- linux-3.4/drivers/gpu/drm/drm_ioctl.c 2012-05-21 11:33:00.939927848 +0200
++++ linux-3.4-pax/drivers/gpu/drm/drm_ioctl.c 2012-05-21 12:10:10.056048917 +0200
+@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev,
+ stats->data[i].value =
+ (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
+ else
+- stats->data[i].value = atomic_read(&dev->counts[i]);
++ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
+ stats->data[i].type = dev->types[i];
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/drm_lock.c linux-3.4-pax/drivers/gpu/drm/drm_lock.c
+--- linux-3.4/drivers/gpu/drm/drm_lock.c 2012-03-19 10:38:58.696049884 +0100
++++ linux-3.4-pax/drivers/gpu/drm/drm_lock.c 2012-05-21 12:10:10.060048917 +0200
+@@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, voi
+ if (drm_lock_take(&master->lock, lock->context)) {
+ master->lock.file_priv = file_priv;
+ master->lock.lock_time = jiffies;
+- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
+ break; /* Got lock */
+ }
+
+@@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, v
+ return -EINVAL;
+ }
+
+- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
+
+ if (drm_lock_free(&master->lock, lock->context)) {
+ /* FIXME: Should really bail out here. */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/drm_stub.c linux-3.4-pax/drivers/gpu/drm/drm_stub.c
+--- linux-3.4/drivers/gpu/drm/drm_stub.c 2012-05-21 11:33:00.955927849 +0200
++++ linux-3.4-pax/drivers/gpu/drm/drm_stub.c 2012-05-21 12:10:10.060048917 +0200
+@@ -512,7 +512,7 @@ void drm_unplug_dev(struct drm_device *d
+
+ drm_device_set_unplugged(dev);
+
+- if (dev->open_count == 0) {
++ if (local_read(&dev->open_count) == 0) {
+ drm_put_dev(dev);
+ }
+ mutex_unlock(&drm_global_mutex);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/i810/i810_dma.c linux-3.4-pax/drivers/gpu/drm/i810/i810_dma.c
+--- linux-3.4/drivers/gpu/drm/i810/i810_dma.c 2012-05-21 11:33:01.211927863 +0200
++++ linux-3.4-pax/drivers/gpu/drm/i810/i810_dma.c 2012-05-21 12:10:10.064048918 +0200
+@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_de
+ dma->buflist[vertex->idx],
+ vertex->discard, vertex->used);
+
+- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
+- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
++ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
+ sarea_priv->last_enqueue = dev_priv->counter - 1;
+ sarea_priv->last_dispatch = (int)hw_status[5];
+
+@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device
+ i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
+ mc->last_render);
+
+- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
+- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
++ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
+ sarea_priv->last_enqueue = dev_priv->counter - 1;
+ sarea_priv->last_dispatch = (int)hw_status[5];
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/i810/i810_drv.h linux-3.4-pax/drivers/gpu/drm/i810/i810_drv.h
+--- linux-3.4/drivers/gpu/drm/i810/i810_drv.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/gpu/drm/i810/i810_drv.h 2012-05-21 12:10:10.064048918 +0200
+@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
+ int page_flipping;
+
+ wait_queue_head_t irq_queue;
+- atomic_t irq_received;
+- atomic_t irq_emitted;
++ atomic_unchecked_t irq_received;
++ atomic_unchecked_t irq_emitted;
+
+ int front_offset;
+ } drm_i810_private_t;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/i915/i915_debugfs.c linux-3.4-pax/drivers/gpu/drm/i915/i915_debugfs.c
+--- linux-3.4/drivers/gpu/drm/i915/i915_debugfs.c 2012-05-21 11:33:01.223927863 +0200
++++ linux-3.4-pax/drivers/gpu/drm/i915/i915_debugfs.c 2012-05-21 12:10:10.068048918 +0200
+@@ -500,7 +500,7 @@ static int i915_interrupt_info(struct se
+ I915_READ(GTIMR));
+ }
+ seq_printf(m, "Interrupts received: %d\n",
+- atomic_read(&dev_priv->irq_received));
++ atomic_read_unchecked(&dev_priv->irq_received));
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
+@@ -1313,7 +1313,7 @@ static int i915_opregion(struct seq_file
+ return ret;
+
+ if (opregion->header)
+- seq_write(m, opregion->header, OPREGION_SIZE);
++ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
+
+ mutex_unlock(&dev->struct_mutex);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/i915/i915_dma.c linux-3.4-pax/drivers/gpu/drm/i915/i915_dma.c
+--- linux-3.4/drivers/gpu/drm/i915/i915_dma.c 2012-05-21 11:33:01.323927869 +0200
++++ linux-3.4-pax/drivers/gpu/drm/i915/i915_dma.c 2012-05-21 12:10:10.072048918 +0200
+@@ -1178,7 +1178,7 @@ static bool i915_switcheroo_can_switch(s
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+- can_switch = (dev->open_count == 0);
++ can_switch = (local_read(&dev->open_count) == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/i915/i915_drv.h linux-3.4-pax/drivers/gpu/drm/i915/i915_drv.h
+--- linux-3.4/drivers/gpu/drm/i915/i915_drv.h 2012-05-21 11:33:01.331927869 +0200
++++ linux-3.4-pax/drivers/gpu/drm/i915/i915_drv.h 2012-05-21 12:10:10.076048918 +0200
+@@ -240,7 +240,7 @@ struct drm_i915_display_funcs {
+ /* render clock increase/decrease */
+ /* display clock increase/decrease */
+ /* pll clock increase/decrease */
+-};
++} __no_const;
+
+ struct intel_device_info {
+ u8 gen;
+@@ -350,7 +350,7 @@ typedef struct drm_i915_private {
+ int current_page;
+ int page_flipping;
+
+- atomic_t irq_received;
++ atomic_unchecked_t irq_received;
+
+ /* protects the irq masks */
+ spinlock_t irq_lock;
+@@ -937,7 +937,7 @@ struct drm_i915_gem_object {
+ * will be page flipped away on the next vblank. When it
+ * reaches 0, dev_priv->pending_flip_queue will be woken up.
+ */
+- atomic_t pending_flip;
++ atomic_unchecked_t pending_flip;
+ };
+
+ #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+@@ -1359,7 +1359,7 @@ extern int intel_setup_gmbus(struct drm_
+ extern void intel_teardown_gmbus(struct drm_device *dev);
+ extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+ extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
++static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+ {
+ return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.4-pax/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+--- linux-3.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2012-05-21 11:33:01.339927870 +0200
++++ linux-3.4-pax/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2012-05-21 12:10:10.076048918 +0200
+@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct
+ i915_gem_clflush_object(obj);
+
+ if (obj->base.pending_write_domain)
+- cd->flips |= atomic_read(&obj->pending_flip);
++ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
+
+ /* The actual obj->write_domain will be updated with
+ * pending_write_domain after we emit the accumulated flush for all
+@@ -933,9 +933,9 @@ i915_gem_check_execbuffer(struct drm_i91
+
+ static int
+ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+- int count)
++ unsigned int count)
+ {
+- int i;
++ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/i915/i915_irq.c linux-3.4-pax/drivers/gpu/drm/i915/i915_irq.c
+--- linux-3.4/drivers/gpu/drm/i915/i915_irq.c 2012-05-21 11:33:01.343927869 +0200
++++ linux-3.4-pax/drivers/gpu/drm/i915/i915_irq.c 2012-05-21 12:10:10.080048919 +0200
+@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler
+ u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
+ struct drm_i915_master_private *master_priv;
+
+- atomic_inc(&dev_priv->irq_received);
++ atomic_inc_unchecked(&dev_priv->irq_received);
+
+ /* disable master interrupt before clearing iir */
+ de_ier = I915_READ(DEIER);
+@@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(
+ struct drm_i915_master_private *master_priv;
+ u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
+
+- atomic_inc(&dev_priv->irq_received);
++ atomic_inc_unchecked(&dev_priv->irq_received);
+
+ if (IS_GEN6(dev))
+ bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
+@@ -1295,7 +1295,7 @@ static irqreturn_t i915_driver_irq_handl
+ int ret = IRQ_NONE, pipe;
+ bool blc_event = false;
+
+- atomic_inc(&dev_priv->irq_received);
++ atomic_inc_unchecked(&dev_priv->irq_received);
+
+ iir = I915_READ(IIR);
+
+@@ -1806,7 +1806,7 @@ static void ironlake_irq_preinstall(stru
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+- atomic_set(&dev_priv->irq_received, 0);
++ atomic_set_unchecked(&dev_priv->irq_received, 0);
+
+ INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+@@ -1983,7 +1983,7 @@ static void i915_driver_irq_preinstall(s
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+- atomic_set(&dev_priv->irq_received, 0);
++ atomic_set_unchecked(&dev_priv->irq_received, 0);
+
+ INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/i915/intel_display.c linux-3.4-pax/drivers/gpu/drm/i915/intel_display.c
+--- linux-3.4/drivers/gpu/drm/i915/intel_display.c 2012-05-21 11:33:01.359927872 +0200
++++ linux-3.4-pax/drivers/gpu/drm/i915/intel_display.c 2012-05-21 12:10:10.088048919 +0200
+@@ -2254,7 +2254,7 @@ intel_finish_fb(struct drm_framebuffer *
+
+ wait_event(dev_priv->pending_flip_queue,
+ atomic_read(&dev_priv->mm.wedged) ||
+- atomic_read(&obj->pending_flip) == 0);
++ atomic_read_unchecked(&obj->pending_flip) == 0);
+
+ /* Big Hammer, we also need to ensure that any pending
+ * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+@@ -2919,7 +2919,7 @@ static void intel_crtc_wait_for_pending_
+ obj = to_intel_framebuffer(crtc->fb)->obj;
+ dev_priv = crtc->dev->dev_private;
+ wait_event(dev_priv->pending_flip_queue,
+- atomic_read(&obj->pending_flip) == 0);
++ atomic_read_unchecked(&obj->pending_flip) == 0);
+ }
+
+ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+@@ -7286,7 +7286,7 @@ static void do_intel_finish_page_flip(st
+
+ atomic_clear_mask(1 << intel_crtc->plane,
+ &obj->pending_flip.counter);
+- if (atomic_read(&obj->pending_flip) == 0)
++ if (atomic_read_unchecked(&obj->pending_flip) == 0)
+ wake_up(&dev_priv->pending_flip_queue);
+
+ schedule_work(&work->work);
+@@ -7582,7 +7582,7 @@ static int intel_crtc_page_flip(struct d
+ /* Block clients from rendering to the new back buffer until
+ * the flip occurs and the object is no longer visible.
+ */
+- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
++ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
+ if (ret)
+@@ -7596,7 +7596,7 @@ static int intel_crtc_page_flip(struct d
+ return 0;
+
+ cleanup_pending:
+- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
++ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/mga/mga_drv.h linux-3.4-pax/drivers/gpu/drm/mga/mga_drv.h
+--- linux-3.4/drivers/gpu/drm/mga/mga_drv.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/gpu/drm/mga/mga_drv.h 2012-05-21 12:10:10.092048919 +0200
+@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
+ u32 clear_cmd;
+ u32 maccess;
+
+- atomic_t vbl_received; /**< Number of vblanks received. */
++ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
+ wait_queue_head_t fence_queue;
+- atomic_t last_fence_retired;
++ atomic_unchecked_t last_fence_retired;
+ u32 next_fence_to_post;
+
+ unsigned int fb_cpp;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/mga/mga_irq.c linux-3.4-pax/drivers/gpu/drm/mga/mga_irq.c
+--- linux-3.4/drivers/gpu/drm/mga/mga_irq.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/gpu/drm/mga/mga_irq.c 2012-05-21 12:10:10.092048919 +0200
+@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
+ if (crtc != 0)
+ return 0;
+
+- return atomic_read(&dev_priv->vbl_received);
++ return atomic_read_unchecked(&dev_priv->vbl_received);
+ }
+
+
+@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
+ /* VBLANK interrupt */
+ if (status & MGA_VLINEPEN) {
+ MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
+- atomic_inc(&dev_priv->vbl_received);
++ atomic_inc_unchecked(&dev_priv->vbl_received);
+ drm_handle_vblank(dev, 0);
+ handled = 1;
+ }
+@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
+ if ((prim_start & ~0x03) != (prim_end & ~0x03))
+ MGA_WRITE(MGA_PRIMEND, prim_end);
+
+- atomic_inc(&dev_priv->last_fence_retired);
++ atomic_inc_unchecked(&dev_priv->last_fence_retired);
+ DRM_WAKEUP(&dev_priv->fence_queue);
+ handled = 1;
+ }
+@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
+ * using fences.
+ */
+ DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
+- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
++ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
+ - *sequence) <= (1 << 23)));
+
+ *sequence = cur_fence;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.4-pax/drivers/gpu/drm/nouveau/nouveau_bios.c
+--- linux-3.4/drivers/gpu/drm/nouveau/nouveau_bios.c 2012-05-21 11:33:01.439927874 +0200
++++ linux-3.4-pax/drivers/gpu/drm/nouveau/nouveau_bios.c 2012-05-21 12:10:10.096048919 +0200
+@@ -5329,7 +5329,7 @@ parse_bit_U_tbl_entry(struct drm_device
+ struct bit_table {
+ const char id;
+ int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
+-};
++} __no_const;
+
+ #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.4-pax/drivers/gpu/drm/nouveau/nouveau_drv.h
+--- linux-3.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2012-05-21 11:33:01.491927878 +0200
++++ linux-3.4-pax/drivers/gpu/drm/nouveau/nouveau_drv.h 2012-05-21 12:10:10.100048920 +0200
+@@ -240,7 +240,7 @@ struct nouveau_channel {
+ struct list_head pending;
+ uint32_t sequence;
+ uint32_t sequence_ack;
+- atomic_t last_sequence_irq;
++ atomic_unchecked_t last_sequence_irq;
+ struct nouveau_vma vma;
+ } fence;
+
+@@ -321,7 +321,7 @@ struct nouveau_exec_engine {
+ u32 handle, u16 class);
+ void (*set_tile_region)(struct drm_device *dev, int i);
+ void (*tlb_flush)(struct drm_device *, int engine);
+-};
++} __no_const;
+
+ struct nouveau_instmem_engine {
+ void *priv;
+@@ -343,13 +343,13 @@ struct nouveau_instmem_engine {
+ struct nouveau_mc_engine {
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+-};
++} __no_const;
+
+ struct nouveau_timer_engine {
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+ uint64_t (*read)(struct drm_device *dev);
+-};
++} __no_const;
+
+ struct nouveau_fb_engine {
+ int num_tiles;
+@@ -590,7 +590,7 @@ struct nouveau_vram_engine {
+ void (*put)(struct drm_device *, struct nouveau_mem **);
+
+ bool (*flags_valid)(struct drm_device *, u32 tile_flags);
+-};
++} __no_const;
+
+ struct nouveau_engine {
+ struct nouveau_instmem_engine instmem;
+@@ -739,7 +739,7 @@ struct drm_nouveau_private {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+- atomic_t validate_sequence;
++ atomic_unchecked_t validate_sequence;
+ } ttm;
+
+ struct {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.4-pax/drivers/gpu/drm/nouveau/nouveau_fence.c
+--- linux-3.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2012-05-21 11:33:01.515927879 +0200
++++ linux-3.4-pax/drivers/gpu/drm/nouveau/nouveau_fence.c 2012-05-21 12:10:10.104048920 +0200
+@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
+ if (USE_REFCNT(dev))
+ sequence = nvchan_rd32(chan, 0x48);
+ else
+- sequence = atomic_read(&chan->fence.last_sequence_irq);
++ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
+
+ if (chan->fence.sequence_ack == sequence)
+ goto out;
+@@ -538,7 +538,7 @@ nouveau_fence_channel_init(struct nouvea
+ return ret;
+ }
+
+- atomic_set(&chan->fence.last_sequence_irq, 0);
++ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
+ return 0;
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.4-pax/drivers/gpu/drm/nouveau/nouveau_gem.c
+--- linux-3.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2012-05-21 11:33:01.519927880 +0200
++++ linux-3.4-pax/drivers/gpu/drm/nouveau/nouveau_gem.c 2012-05-21 12:10:10.108048920 +0200
+@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *ch
+ int trycnt = 0;
+ int ret, i;
+
+- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
++ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
+ retry:
+ if (++trycnt > 100000) {
+ NV_ERROR(dev, "%s failed and gave up.\n", __func__);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.4-pax/drivers/gpu/drm/nouveau/nouveau_state.c
+--- linux-3.4/drivers/gpu/drm/nouveau/nouveau_state.c 2012-05-21 11:33:01.547927882 +0200
++++ linux-3.4-pax/drivers/gpu/drm/nouveau/nouveau_state.c 2012-05-21 12:10:10.108048920 +0200
+@@ -588,7 +588,7 @@ static bool nouveau_switcheroo_can_switc
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+- can_switch = (dev->open_count == 0);
++ can_switch = (local_read(&dev->open_count) == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.4-pax/drivers/gpu/drm/nouveau/nv04_graph.c
+--- linux-3.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-10-24 12:48:28.303091661 +0200
++++ linux-3.4-pax/drivers/gpu/drm/nouveau/nv04_graph.c 2012-05-21 12:10:10.112048920 +0200
+@@ -554,7 +554,7 @@ static int
+ nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+ {
+- atomic_set(&chan->fence.last_sequence_irq, data);
++ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
+ return 0;
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/nouveau/nv50_sor.c linux-3.4-pax/drivers/gpu/drm/nouveau/nv50_sor.c
+--- linux-3.4/drivers/gpu/drm/nouveau/nv50_sor.c 2012-05-21 11:33:01.603927884 +0200
++++ linux-3.4-pax/drivers/gpu/drm/nouveau/nv50_sor.c 2012-05-30 02:58:32.250905776 +0200
+@@ -304,7 +304,7 @@ nv50_sor_dpms(struct drm_encoder *encode
+ }
+
+ if (nv_encoder->dcb->type == OUTPUT_DP) {
+- struct dp_train_func func = {
++ static struct dp_train_func func = {
+ .link_set = nv50_sor_dp_link_set,
+ .train_set = nv50_sor_dp_train_set,
+ .train_adj = nv50_sor_dp_train_adj
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/nouveau/nvd0_display.c linux-3.4-pax/drivers/gpu/drm/nouveau/nvd0_display.c
+--- linux-3.4/drivers/gpu/drm/nouveau/nvd0_display.c 2012-05-21 11:33:01.679927888 +0200
++++ linux-3.4-pax/drivers/gpu/drm/nouveau/nvd0_display.c 2012-05-30 02:58:15.574906597 +0200
+@@ -1366,7 +1366,7 @@ nvd0_sor_dpms(struct drm_encoder *encode
+ nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
+
+ if (nv_encoder->dcb->type == OUTPUT_DP) {
+- struct dp_train_func func = {
++ static struct dp_train_func func = {
+ .link_set = nvd0_sor_dp_link_set,
+ .train_set = nvd0_sor_dp_train_set,
+ .train_adj = nvd0_sor_dp_train_adj
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/r128/r128_cce.c linux-3.4-pax/drivers/gpu/drm/r128/r128_cce.c
+--- linux-3.4/drivers/gpu/drm/r128/r128_cce.c 2012-01-08 19:47:53.635472765 +0100
++++ linux-3.4-pax/drivers/gpu/drm/r128/r128_cce.c 2012-05-21 12:10:10.116048920 +0200
+@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_d
+
+ /* GH: Simple idle check.
+ */
+- atomic_set(&dev_priv->idle_count, 0);
++ atomic_set_unchecked(&dev_priv->idle_count, 0);
+
+ /* We don't support anything other than bus-mastering ring mode,
+ * but the ring can be in either AGP or PCI space for the ring
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/r128/r128_drv.h linux-3.4-pax/drivers/gpu/drm/r128/r128_drv.h
+--- linux-3.4/drivers/gpu/drm/r128/r128_drv.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/gpu/drm/r128/r128_drv.h 2012-05-21 12:10:10.116048920 +0200
+@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
+ int is_pci;
+ unsigned long cce_buffers_offset;
+
+- atomic_t idle_count;
++ atomic_unchecked_t idle_count;
+
+ int page_flipping;
+ int current_page;
+ u32 crtc_offset;
+ u32 crtc_offset_cntl;
+
+- atomic_t vbl_received;
++ atomic_unchecked_t vbl_received;
+
+ u32 color_fmt;
+ unsigned int front_offset;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/r128/r128_irq.c linux-3.4-pax/drivers/gpu/drm/r128/r128_irq.c
+--- linux-3.4/drivers/gpu/drm/r128/r128_irq.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/gpu/drm/r128/r128_irq.c 2012-05-21 12:10:10.120048921 +0200
+@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
+ if (crtc != 0)
+ return 0;
+
+- return atomic_read(&dev_priv->vbl_received);
++ return atomic_read_unchecked(&dev_priv->vbl_received);
+ }
+
+ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
+@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
+ /* VBLANK interrupt */
+ if (status & R128_CRTC_VBLANK_INT) {
+ R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
+- atomic_inc(&dev_priv->vbl_received);
++ atomic_inc_unchecked(&dev_priv->vbl_received);
+ drm_handle_vblank(dev, 0);
+ return IRQ_HANDLED;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/r128/r128_state.c linux-3.4-pax/drivers/gpu/drm/r128/r128_state.c
+--- linux-3.4/drivers/gpu/drm/r128/r128_state.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/gpu/drm/r128/r128_state.c 2012-05-21 12:10:10.124048921 +0200
+@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
+
+ static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
+ {
+- if (atomic_read(&dev_priv->idle_count) == 0)
++ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
+ r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
+ else
+- atomic_set(&dev_priv->idle_count, 0);
++ atomic_set_unchecked(&dev_priv->idle_count, 0);
+ }
+
+ #endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/radeon/mkregtable.c linux-3.4-pax/drivers/gpu/drm/radeon/mkregtable.c
+--- linux-3.4/drivers/gpu/drm/radeon/mkregtable.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/gpu/drm/radeon/mkregtable.c 2012-05-21 12:10:10.124048921 +0200
+@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
+ regex_t mask_rex;
+ regmatch_t match[4];
+ char buf[1024];
+- size_t end;
++ long end;
+ int len;
+ int done = 0;
+ int r;
+ unsigned o;
+ struct offset *offset;
+ char last_reg_s[10];
+- int last_reg;
++ unsigned long last_reg;
+
+ if (regcomp
+ (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/radeon/radeon_device.c linux-3.4-pax/drivers/gpu/drm/radeon/radeon_device.c
+--- linux-3.4/drivers/gpu/drm/radeon/radeon_device.c 2012-05-21 11:33:01.939927902 +0200
++++ linux-3.4-pax/drivers/gpu/drm/radeon/radeon_device.c 2012-05-21 12:10:10.128048921 +0200
+@@ -691,7 +691,7 @@ static bool radeon_switcheroo_can_switch
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+- can_switch = (dev->open_count == 0);
++ can_switch = (local_read(&dev->open_count) == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/radeon/radeon_drv.h linux-3.4-pax/drivers/gpu/drm/radeon/radeon_drv.h
+--- linux-3.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/gpu/drm/radeon/radeon_drv.h 2012-05-21 12:10:10.132048921 +0200
+@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
+
+ /* SW interrupt */
+ wait_queue_head_t swi_queue;
+- atomic_t swi_emitted;
++ atomic_unchecked_t swi_emitted;
+ int vblank_crtc;
+ uint32_t irq_enable_reg;
+ uint32_t r500_disp_irq_reg;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/radeon/radeon_fence.c linux-3.4-pax/drivers/gpu/drm/radeon/radeon_fence.c
+--- linux-3.4/drivers/gpu/drm/radeon/radeon_fence.c 2012-03-19 10:38:59.044049866 +0100
++++ linux-3.4-pax/drivers/gpu/drm/radeon/radeon_fence.c 2012-05-21 12:10:10.136048922 +0200
+@@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_devi
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ return 0;
+ }
+- fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
++ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
+ if (!rdev->ring[fence->ring].ready)
+ /* FIXME: cp is not running assume everythings is done right
+ * away
+@@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struc
+ }
+ rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+ rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
+- radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
++ radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
+ rdev->fence_drv[ring].initialized = true;
+ DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
+ ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
+@@ -418,7 +418,7 @@ static void radeon_fence_driver_init_rin
+ rdev->fence_drv[ring].scratch_reg = -1;
+ rdev->fence_drv[ring].cpu_addr = NULL;
+ rdev->fence_drv[ring].gpu_addr = 0;
+- atomic_set(&rdev->fence_drv[ring].seq, 0);
++ atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
+ INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
+ INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
+ INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/radeon/radeon.h linux-3.4-pax/drivers/gpu/drm/radeon/radeon.h
+--- linux-3.4/drivers/gpu/drm/radeon/radeon.h 2012-05-21 11:33:01.859927898 +0200
++++ linux-3.4-pax/drivers/gpu/drm/radeon/radeon.h 2012-05-29 21:12:29.048013910 +0200
+@@ -253,7 +253,7 @@ struct radeon_fence_driver {
+ uint32_t scratch_reg;
+ uint64_t gpu_addr;
+ volatile uint32_t *cpu_addr;
+- atomic_t seq;
++ atomic_unchecked_t seq;
+ uint32_t last_seq;
+ unsigned long last_jiffies;
+ unsigned long last_timeout;
+@@ -753,7 +753,7 @@ struct r600_blit_cp_primitives {
+ int x2, int y2);
+ void (*draw_auto)(struct radeon_device *rdev);
+ void (*set_default_state)(struct radeon_device *rdev);
+-};
++} __no_const;
+
+ struct r600_blit {
+ struct mutex mutex;
+@@ -1246,7 +1246,7 @@ struct radeon_asic {
+ u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+ void (*post_page_flip)(struct radeon_device *rdev, int crtc);
+ } pflip;
+-};
++} __no_const;
+
+ /*
+ * Asic structures
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.4-pax/drivers/gpu/drm/radeon/radeon_ioc32.c
+--- linux-3.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/gpu/drm/radeon/radeon_ioc32.c 2012-05-21 12:10:10.140048922 +0200
+@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.param, &request->param)
+- || __put_user((void __user *)(unsigned long)req32.value,
++ || __put_user((unsigned long)req32.value,
+ &request->value))
+ return -EFAULT;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/radeon/radeon_irq.c linux-3.4-pax/drivers/gpu/drm/radeon/radeon_irq.c
+--- linux-3.4/drivers/gpu/drm/radeon/radeon_irq.c 2012-01-08 19:47:53.815472755 +0100
++++ linux-3.4-pax/drivers/gpu/drm/radeon/radeon_irq.c 2012-05-21 12:10:10.144048922 +0200
+@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
+ unsigned int ret;
+ RING_LOCALS;
+
+- atomic_inc(&dev_priv->swi_emitted);
+- ret = atomic_read(&dev_priv->swi_emitted);
++ atomic_inc_unchecked(&dev_priv->swi_emitted);
++ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
+
+ BEGIN_RING(4);
+ OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
+@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
+ drm_radeon_private_t *dev_priv =
+ (drm_radeon_private_t *) dev->dev_private;
+
+- atomic_set(&dev_priv->swi_emitted, 0);
++ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
+ DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
+
+ dev->max_vblank_count = 0x001fffff;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/radeon/radeon_state.c linux-3.4-pax/drivers/gpu/drm/radeon/radeon_state.c
+--- linux-3.4/drivers/gpu/drm/radeon/radeon_state.c 2012-01-08 19:47:53.839472754 +0100
++++ linux-3.4-pax/drivers/gpu/drm/radeon/radeon_state.c 2012-05-21 12:10:10.148048922 +0200
+@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+ sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
+
+- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
++ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
+ sarea_priv->nbox * sizeof(depth_boxes[0])))
+ return -EFAULT;
+
+@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
+ {
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_getparam_t *param = data;
+- int value;
++ int value = 0;
+
+ DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.4-pax/drivers/gpu/drm/radeon/radeon_ttm.c
+--- linux-3.4/drivers/gpu/drm/radeon/radeon_ttm.c 2012-05-21 11:33:02.043927908 +0200
++++ linux-3.4-pax/drivers/gpu/drm/radeon/radeon_ttm.c 2012-05-21 12:10:10.148048922 +0200
+@@ -843,8 +843,10 @@ int radeon_mmap(struct file *filp, struc
+ }
+ if (unlikely(ttm_vm_ops == NULL)) {
+ ttm_vm_ops = vma->vm_ops;
+- radeon_ttm_vm_ops = *ttm_vm_ops;
+- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
++ pax_open_kernel();
++ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
++ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
++ pax_close_kernel();
+ }
+ vma->vm_ops = &radeon_ttm_vm_ops;
+ return 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/radeon/rs690.c linux-3.4-pax/drivers/gpu/drm/radeon/rs690.c
+--- linux-3.4/drivers/gpu/drm/radeon/rs690.c 2012-05-21 11:33:02.075927910 +0200
++++ linux-3.4-pax/drivers/gpu/drm/radeon/rs690.c 2012-05-21 12:10:10.152048922 +0200
+@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
+ if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
+ rdev->pm.sideport_bandwidth.full)
+ rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
+- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
++ read_delay_latency.full = dfixed_const(800 * 1000);
+ read_delay_latency.full = dfixed_div(read_delay_latency,
+ rdev->pm.igp_sideport_mclk);
++ a.full = dfixed_const(370);
++ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
+ } else {
+ if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
+ rdev->pm.k8_bandwidth.full)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.4-pax/drivers/gpu/drm/ttm/ttm_page_alloc.c
+--- linux-3.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2012-05-21 11:33:02.119927912 +0200
++++ linux-3.4-pax/drivers/gpu/drm/ttm/ttm_page_alloc.c 2012-05-21 12:10:10.156048923 +0200
+@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages
+ static int ttm_pool_mm_shrink(struct shrinker *shrink,
+ struct shrink_control *sc)
+ {
+- static atomic_t start_pool = ATOMIC_INIT(0);
++ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
+ unsigned i;
+- unsigned pool_offset = atomic_add_return(1, &start_pool);
++ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
+ struct ttm_page_pool *pool;
+ int shrink_pages = sc->nr_to_scan;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/via/via_drv.h linux-3.4-pax/drivers/gpu/drm/via/via_drv.h
+--- linux-3.4/drivers/gpu/drm/via/via_drv.h 2012-03-19 10:38:59.120049862 +0100
++++ linux-3.4-pax/drivers/gpu/drm/via/via_drv.h 2012-05-21 12:10:10.156048923 +0200
+@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
+ typedef uint32_t maskarray_t[5];
+
+ typedef struct drm_via_irq {
+- atomic_t irq_received;
++ atomic_unchecked_t irq_received;
+ uint32_t pending_mask;
+ uint32_t enable_mask;
+ wait_queue_head_t irq_queue;
+@@ -75,7 +75,7 @@ typedef struct drm_via_private {
+ struct timeval last_vblank;
+ int last_vblank_valid;
+ unsigned usec_per_vblank;
+- atomic_t vbl_received;
++ atomic_unchecked_t vbl_received;
+ drm_via_state_t hc_state;
+ char pci_buf[VIA_PCI_BUF_SIZE];
+ const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/via/via_irq.c linux-3.4-pax/drivers/gpu/drm/via/via_irq.c
+--- linux-3.4/drivers/gpu/drm/via/via_irq.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/gpu/drm/via/via_irq.c 2012-05-21 12:10:10.160048923 +0200
+@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
+ if (crtc != 0)
+ return 0;
+
+- return atomic_read(&dev_priv->vbl_received);
++ return atomic_read_unchecked(&dev_priv->vbl_received);
+ }
+
+ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
+@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
+
+ status = VIA_READ(VIA_REG_INTERRUPT);
+ if (status & VIA_IRQ_VBLANK_PENDING) {
+- atomic_inc(&dev_priv->vbl_received);
+- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
++ atomic_inc_unchecked(&dev_priv->vbl_received);
++ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
+ do_gettimeofday(&cur_vblank);
+ if (dev_priv->last_vblank_valid) {
+ dev_priv->usec_per_vblank =
+@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
+ dev_priv->last_vblank = cur_vblank;
+ dev_priv->last_vblank_valid = 1;
+ }
+- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
++ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
+ DRM_DEBUG("US per vblank is: %u\n",
+ dev_priv->usec_per_vblank);
+ }
+@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
+
+ for (i = 0; i < dev_priv->num_irqs; ++i) {
+ if (status & cur_irq->pending_mask) {
+- atomic_inc(&cur_irq->irq_received);
++ atomic_inc_unchecked(&cur_irq->irq_received);
+ DRM_WAKEUP(&cur_irq->irq_queue);
+ handled = 1;
+ if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
+@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
+ DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+ ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
+ masks[irq][4]));
+- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
++ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
+ } else {
+ DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+ (((cur_irq_sequence =
+- atomic_read(&cur_irq->irq_received)) -
++ atomic_read_unchecked(&cur_irq->irq_received)) -
+ *sequence) <= (1 << 23)));
+ }
+ *sequence = cur_irq_sequence;
+@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
+ }
+
+ for (i = 0; i < dev_priv->num_irqs; ++i) {
+- atomic_set(&cur_irq->irq_received, 0);
++ atomic_set_unchecked(&cur_irq->irq_received, 0);
+ cur_irq->enable_mask = dev_priv->irq_masks[i][0];
+ cur_irq->pending_mask = dev_priv->irq_masks[i][1];
+ DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
+@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
+ switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
+ case VIA_IRQ_RELATIVE:
+ irqwait->request.sequence +=
+- atomic_read(&cur_irq->irq_received);
++ atomic_read_unchecked(&cur_irq->irq_received);
+ irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+ case VIA_IRQ_ABSOLUTE:
+ break;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.4-pax/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+--- linux-3.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2012-05-21 11:33:02.155927914 +0200
++++ linux-3.4-pax/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2012-05-21 12:10:10.160048923 +0200
+@@ -263,7 +263,7 @@ struct vmw_private {
+ * Fencing and IRQs.
+ */
+
+- atomic_t marker_seq;
++ atomic_unchecked_t marker_seq;
+ wait_queue_head_t fence_queue;
+ wait_queue_head_t fifo_queue;
+ int fence_queue_waiters; /* Protected by hw_mutex */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.4-pax/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+--- linux-3.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2012-01-08 19:47:53.931472749 +0100
++++ linux-3.4-pax/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2012-05-21 12:10:10.164048923 +0200
+@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
+ (unsigned int) min,
+ (unsigned int) fifo->capabilities);
+
+- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
++ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+ iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
+ vmw_marker_queue_init(&fifo->marker_queue);
+ return vmw_fifo_send_fence(dev_priv, &dummy);
+@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_privat
+ if (reserveable)
+ iowrite32(bytes, fifo_mem +
+ SVGA_FIFO_RESERVED);
+- return fifo_mem + (next_cmd >> 2);
++ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
+ } else {
+ need_bounce = true;
+ }
+@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_priva
+
+ fm = vmw_fifo_reserve(dev_priv, bytes);
+ if (unlikely(fm == NULL)) {
+- *seqno = atomic_read(&dev_priv->marker_seq);
++ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
+ ret = -ENOMEM;
+ (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
+ false, 3*HZ);
+@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_priva
+ }
+
+ do {
+- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
++ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
+ } while (*seqno == 0);
+
+ if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.4-pax/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+--- linux-3.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2012-01-08 19:47:53.943472749 +0100
++++ linux-3.4-pax/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2012-05-21 12:10:10.164048923 +0200
+@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private
+ * emitted. Then the fence is stale and signaled.
+ */
+
+- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
++ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
+ > VMW_FENCE_WRAP);
+
+ return ret;
+@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private
+
+ if (fifo_idle)
+ down_read(&fifo_state->rwsem);
+- signal_seq = atomic_read(&dev_priv->marker_seq);
++ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
+ ret = 0;
+
+ for (;;) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c linux-3.4-pax/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+--- linux-3.4/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 2012-01-08 19:47:53.951472748 +0100
++++ linux-3.4-pax/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 2012-05-21 12:10:10.168048923 +0200
+@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
+ while (!vmw_lag_lt(queue, us)) {
+ spin_lock(&queue->lock);
+ if (list_empty(&queue->head))
+- seqno = atomic_read(&dev_priv->marker_seq);
++ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
+ else {
+ marker = list_first_entry(&queue->head,
+ struct vmw_marker, head);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/hid/hid-core.c linux-3.4-pax/drivers/hid/hid-core.c
+--- linux-3.4/drivers/hid/hid-core.c 2012-05-21 11:33:02.183927916 +0200
++++ linux-3.4-pax/drivers/hid/hid-core.c 2012-05-21 12:10:10.172048924 +0200
+@@ -2063,7 +2063,7 @@ static bool hid_ignore(struct hid_device
+
+ int hid_add_device(struct hid_device *hdev)
+ {
+- static atomic_t id = ATOMIC_INIT(0);
++ static atomic_unchecked_t id = ATOMIC_INIT(0);
+ int ret;
+
+ if (WARN_ON(hdev->status & HID_STAT_ADDED))
+@@ -2078,7 +2078,7 @@ int hid_add_device(struct hid_device *hd
+ /* XXX hack, any other cleaner solution after the driver core
+ * is converted to allow more than 20 bytes as the device name? */
+ dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
+- hdev->vendor, hdev->product, atomic_inc_return(&id));
++ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
+
+ hid_debug_register(hdev, dev_name(&hdev->dev));
+ ret = device_add(&hdev->dev);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/hid/usbhid/hiddev.c linux-3.4-pax/drivers/hid/usbhid/hiddev.c
+--- linux-3.4/drivers/hid/usbhid/hiddev.c 2012-03-19 10:38:59.172049859 +0100
++++ linux-3.4-pax/drivers/hid/usbhid/hiddev.c 2012-05-21 12:10:10.176048924 +0200
+@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
+ break;
+
+ case HIDIOCAPPLICATION:
+- if (arg < 0 || arg >= hid->maxapplication)
++ if (arg >= hid->maxapplication)
+ break;
+
+ for (i = 0; i < hid->maxcollection; i++)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/hv/channel.c linux-3.4-pax/drivers/hv/channel.c
+--- linux-3.4/drivers/hv/channel.c 2012-01-08 19:47:57.223472573 +0100
++++ linux-3.4-pax/drivers/hv/channel.c 2012-05-21 12:10:10.176048924 +0200
+@@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_c
+ int ret = 0;
+ int t;
+
+- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
+- atomic_inc(&vmbus_connection.next_gpadl_handle);
++ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
++ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
+
+ ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
+ if (ret)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/hv/hv.c linux-3.4-pax/drivers/hv/hv.c
+--- linux-3.4/drivers/hv/hv.c 2012-05-21 11:33:02.323927922 +0200
++++ linux-3.4-pax/drivers/hv/hv.c 2012-05-21 12:10:10.180048924 +0200
+@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
+ u64 output_address = (output) ? virt_to_phys(output) : 0;
+ u32 output_address_hi = output_address >> 32;
+ u32 output_address_lo = output_address & 0xFFFFFFFF;
+- void *hypercall_page = hv_context.hypercall_page;
++ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
+
+ __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
+ "=a"(hv_status_lo) : "d" (control_hi),
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/hv/hyperv_vmbus.h linux-3.4-pax/drivers/hv/hyperv_vmbus.h
+--- linux-3.4/drivers/hv/hyperv_vmbus.h 2012-05-21 11:33:02.351927925 +0200
++++ linux-3.4-pax/drivers/hv/hyperv_vmbus.h 2012-05-21 12:10:10.180048924 +0200
+@@ -555,7 +555,7 @@ enum vmbus_connect_state {
+ struct vmbus_connection {
+ enum vmbus_connect_state conn_state;
+
+- atomic_t next_gpadl_handle;
++ atomic_unchecked_t next_gpadl_handle;
+
+ /*
+ * Represents channel interrupts. Each bit position represents a
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/hv/vmbus_drv.c linux-3.4-pax/drivers/hv/vmbus_drv.c
+--- linux-3.4/drivers/hv/vmbus_drv.c 2012-03-19 10:38:59.184049858 +0100
++++ linux-3.4-pax/drivers/hv/vmbus_drv.c 2012-05-21 12:10:10.184048924 +0200
+@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_devi
+ {
+ int ret = 0;
+
+- static atomic_t device_num = ATOMIC_INIT(0);
++ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
+
+ dev_set_name(&child_device_obj->device, "vmbus_0_%d",
+- atomic_inc_return(&device_num));
++ atomic_inc_return_unchecked(&device_num));
+
+ child_device_obj->device.bus = &hv_bus;
+ child_device_obj->device.parent = &hv_acpi_dev->dev;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/hwmon/acpi_power_meter.c linux-3.4-pax/drivers/hwmon/acpi_power_meter.c
+--- linux-3.4/drivers/hwmon/acpi_power_meter.c 2012-05-21 11:33:02.367927926 +0200
++++ linux-3.4-pax/drivers/hwmon/acpi_power_meter.c 2012-05-21 12:10:10.184048924 +0200
+@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
+ return res;
+
+ temp /= 1000;
+- if (temp < 0)
+- return -EINVAL;
+
+ mutex_lock(&resource->lock);
+ resource->trip[attr->index - 7] = temp;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/hwmon/sht15.c linux-3.4-pax/drivers/hwmon/sht15.c
+--- linux-3.4/drivers/hwmon/sht15.c 2012-05-21 11:33:03.363927980 +0200
++++ linux-3.4-pax/drivers/hwmon/sht15.c 2012-05-21 12:10:10.188048924 +0200
+@@ -166,7 +166,7 @@ struct sht15_data {
+ int supply_uV;
+ bool supply_uV_valid;
+ struct work_struct update_supply_work;
+- atomic_t interrupt_handled;
++ atomic_unchecked_t interrupt_handled;
+ };
+
+ /**
+@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
+ return ret;
+
+ gpio_direction_input(data->pdata->gpio_data);
+- atomic_set(&data->interrupt_handled, 0);
++ atomic_set_unchecked(&data->interrupt_handled, 0);
+
+ enable_irq(gpio_to_irq(data->pdata->gpio_data));
+ if (gpio_get_value(data->pdata->gpio_data) == 0) {
+ disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+ /* Only relevant if the interrupt hasn't occurred. */
+- if (!atomic_read(&data->interrupt_handled))
++ if (!atomic_read_unchecked(&data->interrupt_handled))
+ schedule_work(&data->read_work);
+ }
+ ret = wait_event_timeout(data->wait_queue,
+@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
+
+ /* First disable the interrupt */
+ disable_irq_nosync(irq);
+- atomic_inc(&data->interrupt_handled);
++ atomic_inc_unchecked(&data->interrupt_handled);
+ /* Then schedule a reading work struct */
+ if (data->state != SHT15_READING_NOTHING)
+ schedule_work(&data->read_work);
+@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
+ * If not, then start the interrupt again - care here as could
+ * have gone low in meantime so verify it hasn't!
+ */
+- atomic_set(&data->interrupt_handled, 0);
++ atomic_set_unchecked(&data->interrupt_handled, 0);
+ enable_irq(gpio_to_irq(data->pdata->gpio_data));
+ /* If still not occurred or another handler was scheduled */
+ if (gpio_get_value(data->pdata->gpio_data)
+- || atomic_read(&data->interrupt_handled))
++ || atomic_read_unchecked(&data->interrupt_handled))
+ return;
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.4-pax/drivers/i2c/busses/i2c-amd756-s4882.c
+--- linux-3.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/i2c/busses/i2c-amd756-s4882.c 2012-05-21 12:10:10.192048925 +0200
+@@ -43,7 +43,7 @@
+ extern struct i2c_adapter amd756_smbus;
+
+ static struct i2c_adapter *s4882_adapter;
+-static struct i2c_algorithm *s4882_algo;
++static i2c_algorithm_no_const *s4882_algo;
+
+ /* Wrapper access functions for multiplexed SMBus */
+ static DEFINE_MUTEX(amd756_lock);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.4-pax/drivers/i2c/busses/i2c-nforce2-s4985.c
+--- linux-3.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/i2c/busses/i2c-nforce2-s4985.c 2012-05-21 12:10:10.192048925 +0200
+@@ -41,7 +41,7 @@
+ extern struct i2c_adapter *nforce2_smbus;
+
+ static struct i2c_adapter *s4985_adapter;
+-static struct i2c_algorithm *s4985_algo;
++static i2c_algorithm_no_const *s4985_algo;
+
+ /* Wrapper access functions for multiplexed SMBus */
+ static DEFINE_MUTEX(nforce2_lock);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/i2c/i2c-mux.c linux-3.4-pax/drivers/i2c/i2c-mux.c
+--- linux-3.4/drivers/i2c/i2c-mux.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/i2c/i2c-mux.c 2012-05-21 12:10:10.192048925 +0200
+@@ -28,7 +28,7 @@
+ /* multiplexer per channel data */
+ struct i2c_mux_priv {
+ struct i2c_adapter adap;
+- struct i2c_algorithm algo;
++ i2c_algorithm_no_const algo;
+
+ struct i2c_adapter *parent;
+ void *mux_dev; /* the mux chip/device */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/aec62xx.c linux-3.4-pax/drivers/ide/aec62xx.c
+--- linux-3.4/drivers/ide/aec62xx.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/aec62xx.c 2012-05-21 12:10:10.196048925 +0200
+@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_
+ .cable_detect = atp86x_cable_detect,
+ };
+
+-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
++static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
+ { /* 0: AEC6210 */
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_aec62xx,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/alim15x3.c linux-3.4-pax/drivers/ide/alim15x3.c
+--- linux-3.4/drivers/ide/alim15x3.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/alim15x3.c 2012-05-21 12:10:10.200048925 +0200
+@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_
+ .dma_sff_read_status = ide_dma_sff_read_status,
+ };
+
+-static const struct ide_port_info ali15x3_chipset __devinitdata = {
++static const struct ide_port_info ali15x3_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_ali15x3,
+ .init_hwif = init_hwif_ali15x3,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/amd74xx.c linux-3.4-pax/drivers/ide/amd74xx.c
+--- linux-3.4/drivers/ide/amd74xx.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/amd74xx.c 2012-05-21 12:10:10.200048925 +0200
+@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_por
+ .udma_mask = udma, \
+ }
+
+-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
++static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
+ /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
+ /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
+ /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/atiixp.c linux-3.4-pax/drivers/ide/atiixp.c
+--- linux-3.4/drivers/ide/atiixp.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/atiixp.c 2012-05-21 12:10:10.200048925 +0200
+@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_
+ .cable_detect = atiixp_cable_detect,
+ };
+
+-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
++static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
+ { /* 0: IXP200/300/400/700 */
+ .name = DRV_NAME,
+ .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/cmd64x.c linux-3.4-pax/drivers/ide/cmd64x.c
+--- linux-3.4/drivers/ide/cmd64x.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/cmd64x.c 2012-05-21 12:10:10.204048925 +0200
+@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_r
+ .dma_sff_read_status = ide_dma_sff_read_status,
+ };
+
+-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
++static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
+ { /* 0: CMD643 */
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_cmd64x,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/cs5520.c linux-3.4-pax/drivers/ide/cs5520.c
+--- linux-3.4/drivers/ide/cs5520.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/cs5520.c 2012-05-21 12:10:10.204048925 +0200
+@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_
+ .set_dma_mode = cs5520_set_dma_mode,
+ };
+
+-static const struct ide_port_info cyrix_chipset __devinitdata = {
++static const struct ide_port_info cyrix_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
+ .port_ops = &cs5520_port_ops,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/cs5530.c linux-3.4-pax/drivers/ide/cs5530.c
+--- linux-3.4/drivers/ide/cs5530.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/cs5530.c 2012-05-21 12:10:10.208048925 +0200
+@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_
+ .udma_filter = cs5530_udma_filter,
+ };
+
+-static const struct ide_port_info cs5530_chipset __devinitdata = {
++static const struct ide_port_info cs5530_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_cs5530,
+ .init_hwif = init_hwif_cs5530,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/cs5535.c linux-3.4-pax/drivers/ide/cs5535.c
+--- linux-3.4/drivers/ide/cs5535.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/cs5535.c 2012-05-21 12:10:10.208048925 +0200
+@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_
+ .cable_detect = cs5535_cable_detect,
+ };
+
+-static const struct ide_port_info cs5535_chipset __devinitdata = {
++static const struct ide_port_info cs5535_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .port_ops = &cs5535_port_ops,
+ .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/cy82c693.c linux-3.4-pax/drivers/ide/cy82c693.c
+--- linux-3.4/drivers/ide/cy82c693.c 2012-01-08 19:47:58.783472490 +0100
++++ linux-3.4-pax/drivers/ide/cy82c693.c 2012-05-21 12:10:10.212048926 +0200
+@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c69
+ .set_dma_mode = cy82c693_set_dma_mode,
+ };
+
+-static const struct ide_port_info cy82c693_chipset __devinitdata = {
++static const struct ide_port_info cy82c693_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_iops = init_iops_cy82c693,
+ .port_ops = &cy82c693_port_ops,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/hpt366.c linux-3.4-pax/drivers/ide/hpt366.c
+--- linux-3.4/drivers/ide/hpt366.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/hpt366.c 2012-05-21 12:10:10.216048926 +0200
+@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings
+ }
+ };
+
+-static const struct hpt_info hpt36x __devinitdata = {
++static const struct hpt_info hpt36x __devinitconst = {
+ .chip_name = "HPT36x",
+ .chip_type = HPT36x,
+ .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
+@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __de
+ .timings = &hpt36x_timings
+ };
+
+-static const struct hpt_info hpt370 __devinitdata = {
++static const struct hpt_info hpt370 __devinitconst = {
+ .chip_name = "HPT370",
+ .chip_type = HPT370,
+ .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
+@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __de
+ .timings = &hpt37x_timings
+ };
+
+-static const struct hpt_info hpt370a __devinitdata = {
++static const struct hpt_info hpt370a __devinitconst = {
+ .chip_name = "HPT370A",
+ .chip_type = HPT370A,
+ .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
+@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __d
+ .timings = &hpt37x_timings
+ };
+
+-static const struct hpt_info hpt374 __devinitdata = {
++static const struct hpt_info hpt374 __devinitconst = {
+ .chip_name = "HPT374",
+ .chip_type = HPT374,
+ .udma_mask = ATA_UDMA5,
+@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __de
+ .timings = &hpt37x_timings
+ };
+
+-static const struct hpt_info hpt372 __devinitdata = {
++static const struct hpt_info hpt372 __devinitconst = {
+ .chip_name = "HPT372",
+ .chip_type = HPT372,
+ .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __de
+ .timings = &hpt37x_timings
+ };
+
+-static const struct hpt_info hpt372a __devinitdata = {
++static const struct hpt_info hpt372a __devinitconst = {
+ .chip_name = "HPT372A",
+ .chip_type = HPT372A,
+ .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __d
+ .timings = &hpt37x_timings
+ };
+
+-static const struct hpt_info hpt302 __devinitdata = {
++static const struct hpt_info hpt302 __devinitconst = {
+ .chip_name = "HPT302",
+ .chip_type = HPT302,
+ .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __de
+ .timings = &hpt37x_timings
+ };
+
+-static const struct hpt_info hpt371 __devinitdata = {
++static const struct hpt_info hpt371 __devinitconst = {
+ .chip_name = "HPT371",
+ .chip_type = HPT371,
+ .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __de
+ .timings = &hpt37x_timings
+ };
+
+-static const struct hpt_info hpt372n __devinitdata = {
++static const struct hpt_info hpt372n __devinitconst = {
+ .chip_name = "HPT372N",
+ .chip_type = HPT372N,
+ .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __d
+ .timings = &hpt37x_timings
+ };
+
+-static const struct hpt_info hpt302n __devinitdata = {
++static const struct hpt_info hpt302n __devinitconst = {
+ .chip_name = "HPT302N",
+ .chip_type = HPT302N,
+ .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __d
+ .timings = &hpt37x_timings
+ };
+
+-static const struct hpt_info hpt371n __devinitdata = {
++static const struct hpt_info hpt371n __devinitconst = {
+ .chip_name = "HPT371N",
+ .chip_type = HPT371N,
+ .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_d
+ .dma_sff_read_status = ide_dma_sff_read_status,
+ };
+
+-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
++static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
+ { /* 0: HPT36x */
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_hpt366,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/ide-cd.c linux-3.4-pax/drivers/ide/ide-cd.c
+--- linux-3.4/drivers/ide/ide-cd.c 2012-01-08 19:47:58.803472489 +0100
++++ linux-3.4-pax/drivers/ide/ide-cd.c 2012-05-21 12:10:10.216048926 +0200
+@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_
+ alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+ if ((unsigned long)buf & alignment
+ || blk_rq_bytes(rq) & q->dma_pad_mask
+- || object_is_on_stack(buf))
++ || object_starts_on_stack(buf))
+ drive->dma = 0;
+ }
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/ide-pci-generic.c linux-3.4-pax/drivers/ide/ide-pci-generic.c
+--- linux-3.4/drivers/ide/ide-pci-generic.c 2012-03-19 10:38:59.420049846 +0100
++++ linux-3.4-pax/drivers/ide/ide-pci-generic.c 2012-05-21 12:10:10.220048926 +0200
+@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell
+ .udma_mask = ATA_UDMA6, \
+ }
+
+-static const struct ide_port_info generic_chipsets[] __devinitdata = {
++static const struct ide_port_info generic_chipsets[] __devinitconst = {
+ /* 0: Unknown */
+ DECLARE_GENERIC_PCI_DEV(0),
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/it8172.c linux-3.4-pax/drivers/ide/it8172.c
+--- linux-3.4/drivers/ide/it8172.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/it8172.c 2012-05-21 12:10:10.220048926 +0200
+@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_
+ .set_dma_mode = it8172_set_dma_mode,
+ };
+
+-static const struct ide_port_info it8172_port_info __devinitdata = {
++static const struct ide_port_info it8172_port_info __devinitconst = {
+ .name = DRV_NAME,
+ .port_ops = &it8172_port_ops,
+ .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/it8213.c linux-3.4-pax/drivers/ide/it8213.c
+--- linux-3.4/drivers/ide/it8213.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/it8213.c 2012-05-21 12:10:10.224048926 +0200
+@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_
+ .cable_detect = it8213_cable_detect,
+ };
+
+-static const struct ide_port_info it8213_chipset __devinitdata = {
++static const struct ide_port_info it8213_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .enablebits = { {0x41, 0x80, 0x80} },
+ .port_ops = &it8213_port_ops,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/it821x.c linux-3.4-pax/drivers/ide/it821x.c
+--- linux-3.4/drivers/ide/it821x.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/it821x.c 2012-05-21 12:10:10.224048926 +0200
+@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_
+ .cable_detect = it821x_cable_detect,
+ };
+
+-static const struct ide_port_info it821x_chipset __devinitdata = {
++static const struct ide_port_info it821x_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_it821x,
+ .init_hwif = init_hwif_it821x,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/jmicron.c linux-3.4-pax/drivers/ide/jmicron.c
+--- linux-3.4/drivers/ide/jmicron.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/jmicron.c 2012-05-21 12:10:10.228048927 +0200
+@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron
+ .cable_detect = jmicron_cable_detect,
+ };
+
+-static const struct ide_port_info jmicron_chipset __devinitdata = {
++static const struct ide_port_info jmicron_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
+ .port_ops = &jmicron_port_ops,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/ns87415.c linux-3.4-pax/drivers/ide/ns87415.c
+--- linux-3.4/drivers/ide/ns87415.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/ns87415.c 2012-05-21 12:10:10.228048927 +0200
+@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_
+ .dma_sff_read_status = superio_dma_sff_read_status,
+ };
+
+-static const struct ide_port_info ns87415_chipset __devinitdata = {
++static const struct ide_port_info ns87415_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_hwif = init_hwif_ns87415,
+ .tp_ops = &ns87415_tp_ops,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/opti621.c linux-3.4-pax/drivers/ide/opti621.c
+--- linux-3.4/drivers/ide/opti621.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/opti621.c 2012-05-21 12:10:10.232048927 +0200
+@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621
+ .set_pio_mode = opti621_set_pio_mode,
+ };
+
+-static const struct ide_port_info opti621_chipset __devinitdata = {
++static const struct ide_port_info opti621_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
+ .port_ops = &opti621_port_ops,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/pdc202xx_new.c linux-3.4-pax/drivers/ide/pdc202xx_new.c
+--- linux-3.4/drivers/ide/pdc202xx_new.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/pdc202xx_new.c 2012-05-21 12:10:10.232048927 +0200
+@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_
+ .udma_mask = udma, \
+ }
+
+-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
++static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
+ /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
+ /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
+ };
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/pdc202xx_old.c linux-3.4-pax/drivers/ide/pdc202xx_old.c
+--- linux-3.4/drivers/ide/pdc202xx_old.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/pdc202xx_old.c 2012-05-21 12:10:10.236048927 +0200
+@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x
+ .max_sectors = sectors, \
+ }
+
+-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
++static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
+ { /* 0: PDC20246 */
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_pdc202xx,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/piix.c linux-3.4-pax/drivers/ide/piix.c
+--- linux-3.4/drivers/ide/piix.c 2012-01-08 19:47:58.851472487 +0100
++++ linux-3.4-pax/drivers/ide/piix.c 2012-05-21 12:10:10.240048927 +0200
+@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_por
+ .udma_mask = udma, \
+ }
+
+-static const struct ide_port_info piix_pci_info[] __devinitdata = {
++static const struct ide_port_info piix_pci_info[] __devinitconst = {
+ /* 0: MPIIX */
+ { /*
+ * MPIIX actually has only a single IDE channel mapped to
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/rz1000.c linux-3.4-pax/drivers/ide/rz1000.c
+--- linux-3.4/drivers/ide/rz1000.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/rz1000.c 2012-05-21 12:10:10.240048927 +0200
+@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_read
+ }
+ }
+
+-static const struct ide_port_info rz1000_chipset __devinitdata = {
++static const struct ide_port_info rz1000_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .host_flags = IDE_HFLAG_NO_DMA,
+ };
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/sc1200.c linux-3.4-pax/drivers/ide/sc1200.c
+--- linux-3.4/drivers/ide/sc1200.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/sc1200.c 2012-05-21 12:10:10.240048927 +0200
+@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_d
+ .dma_sff_read_status = ide_dma_sff_read_status,
+ };
+
+-static const struct ide_port_info sc1200_chipset __devinitdata = {
++static const struct ide_port_info sc1200_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .port_ops = &sc1200_port_ops,
+ .dma_ops = &sc1200_dma_ops,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/scc_pata.c linux-3.4-pax/drivers/ide/scc_pata.c
+--- linux-3.4/drivers/ide/scc_pata.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/scc_pata.c 2012-05-21 12:10:10.244048927 +0200
+@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_
+ .dma_sff_read_status = scc_dma_sff_read_status,
+ };
+
+-static const struct ide_port_info scc_chipset __devinitdata = {
++static const struct ide_port_info scc_chipset __devinitconst = {
+ .name = "sccIDE",
+ .init_iops = init_iops_scc,
+ .init_dma = scc_init_dma,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/serverworks.c linux-3.4-pax/drivers/ide/serverworks.c
+--- linux-3.4/drivers/ide/serverworks.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/serverworks.c 2012-05-21 12:10:10.248048928 +0200
+@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_p
+ .cable_detect = svwks_cable_detect,
+ };
+
+-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
++static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
+ { /* 0: OSB4 */
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_svwks,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/siimage.c linux-3.4-pax/drivers/ide/siimage.c
+--- linux-3.4/drivers/ide/siimage.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/siimage.c 2012-05-21 12:10:10.248048928 +0200
+@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_
+ .udma_mask = ATA_UDMA6, \
+ }
+
+-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
++static const struct ide_port_info siimage_chipsets[] __devinitconst = {
+ /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
+ /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
+ };
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/sis5513.c linux-3.4-pax/drivers/ide/sis5513.c
+--- linux-3.4/drivers/ide/sis5513.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/sis5513.c 2012-05-21 12:10:10.252048928 +0200
+@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata
+ .cable_detect = sis_cable_detect,
+ };
+
+-static const struct ide_port_info sis5513_chipset __devinitdata = {
++static const struct ide_port_info sis5513_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_sis5513,
+ .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/sl82c105.c linux-3.4-pax/drivers/ide/sl82c105.c
+--- linux-3.4/drivers/ide/sl82c105.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/sl82c105.c 2012-05-21 12:10:10.252048928 +0200
+@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105
+ .dma_sff_read_status = ide_dma_sff_read_status,
+ };
+
+-static const struct ide_port_info sl82c105_chipset __devinitdata = {
++static const struct ide_port_info sl82c105_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_sl82c105,
+ .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/slc90e66.c linux-3.4-pax/drivers/ide/slc90e66.c
+--- linux-3.4/drivers/ide/slc90e66.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/slc90e66.c 2012-05-21 12:10:10.256048928 +0200
+@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e6
+ .cable_detect = slc90e66_cable_detect,
+ };
+
+-static const struct ide_port_info slc90e66_chipset __devinitdata = {
++static const struct ide_port_info slc90e66_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
+ .port_ops = &slc90e66_port_ops,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/tc86c001.c linux-3.4-pax/drivers/ide/tc86c001.c
+--- linux-3.4/drivers/ide/tc86c001.c 2012-01-08 19:47:58.859472486 +0100
++++ linux-3.4-pax/drivers/ide/tc86c001.c 2012-05-21 12:10:10.256048928 +0200
+@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001
+ .dma_sff_read_status = ide_dma_sff_read_status,
+ };
+
+-static const struct ide_port_info tc86c001_chipset __devinitdata = {
++static const struct ide_port_info tc86c001_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_hwif = init_hwif_tc86c001,
+ .port_ops = &tc86c001_port_ops,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/triflex.c linux-3.4-pax/drivers/ide/triflex.c
+--- linux-3.4/drivers/ide/triflex.c 2012-01-08 19:47:58.863472486 +0100
++++ linux-3.4-pax/drivers/ide/triflex.c 2012-05-21 12:10:10.256048928 +0200
+@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex
+ .set_dma_mode = triflex_set_mode,
+ };
+
+-static const struct ide_port_info triflex_device __devinitdata = {
++static const struct ide_port_info triflex_device __devinitconst = {
+ .name = DRV_NAME,
+ .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
+ .port_ops = &triflex_port_ops,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/trm290.c linux-3.4-pax/drivers/ide/trm290.c
+--- linux-3.4/drivers/ide/trm290.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/trm290.c 2012-05-21 12:10:10.260048928 +0200
+@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops
+ .dma_check = trm290_dma_check,
+ };
+
+-static const struct ide_port_info trm290_chipset __devinitdata = {
++static const struct ide_port_info trm290_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_hwif = init_hwif_trm290,
+ .tp_ops = &trm290_tp_ops,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ide/via82cxxx.c linux-3.4-pax/drivers/ide/via82cxxx.c
+--- linux-3.4/drivers/ide/via82cxxx.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/ide/via82cxxx.c 2012-05-21 12:10:10.264048929 +0200
+@@ -403,7 +403,7 @@ static const struct ide_port_ops via_por
+ .cable_detect = via82cxxx_cable_detect,
+ };
+
+-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
++static const struct ide_port_info via82cxxx_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_via82cxxx,
+ .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/ieee802154/fakehard.c linux-3.4-pax/drivers/ieee802154/fakehard.c
+--- linux-3.4/drivers/ieee802154/fakehard.c 2012-03-19 10:38:59.424049845 +0100
++++ linux-3.4-pax/drivers/ieee802154/fakehard.c 2012-05-21 12:10:10.264048929 +0200
+@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_prob
+ phy->transmit_power = 0xbf;
+
+ dev->netdev_ops = &fake_ops;
+- dev->ml_priv = &fake_mlme;
++ dev->ml_priv = (void *)&fake_mlme;
+
+ priv = netdev_priv(dev);
+ priv->phy = phy;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/infiniband/core/cm.c linux-3.4-pax/drivers/infiniband/core/cm.c
+--- linux-3.4/drivers/infiniband/core/cm.c 2012-03-19 10:38:59.436049845 +0100
++++ linux-3.4-pax/drivers/infiniband/core/cm.c 2012-05-21 12:10:10.268048929 +0200
+@@ -114,7 +114,7 @@ static char const counter_group_names[CM
+
+ struct cm_counter_group {
+ struct kobject obj;
+- atomic_long_t counter[CM_ATTR_COUNT];
++ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
+ };
+
+ struct cm_counter_attribute {
+@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm
+ struct ib_mad_send_buf *msg = NULL;
+ int ret;
+
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_REQ_COUNTER]);
+
+ /* Quick state check to discard duplicate REQs. */
+@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm
+ if (!cm_id_priv)
+ return;
+
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_REP_COUNTER]);
+ ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
+ if (ret)
+@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work
+ if (cm_id_priv->id.state != IB_CM_REP_SENT &&
+ cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
+ spin_unlock_irq(&cm_id_priv->lock);
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_RTU_COUNTER]);
+ goto out;
+ }
+@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_wor
+ cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
+ dreq_msg->local_comm_id);
+ if (!cm_id_priv) {
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_DREQ_COUNTER]);
+ cm_issue_drep(work->port, work->mad_recv_wc);
+ return -EINVAL;
+@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_wor
+ case IB_CM_MRA_REP_RCVD:
+ break;
+ case IB_CM_TIMEWAIT:
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_DREQ_COUNTER]);
+ if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
+ goto unlock;
+@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_wor
+ cm_free_msg(msg);
+ goto deref;
+ case IB_CM_DREQ_RCVD:
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_DREQ_COUNTER]);
+ goto unlock;
+ default:
+@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work
+ ib_modify_mad(cm_id_priv->av.port->mad_agent,
+ cm_id_priv->msg, timeout)) {
+ if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
+- atomic_long_inc(&work->port->
++ atomic_long_inc_unchecked(&work->port->
+ counter_group[CM_RECV_DUPLICATES].
+ counter[CM_MRA_COUNTER]);
+ goto out;
+@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work
+ break;
+ case IB_CM_MRA_REQ_RCVD:
+ case IB_CM_MRA_REP_RCVD:
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_MRA_COUNTER]);
+ /* fall through */
+ default:
+@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work
+ case IB_CM_LAP_IDLE:
+ break;
+ case IB_CM_MRA_LAP_SENT:
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_LAP_COUNTER]);
+ if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
+ goto unlock;
+@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work
+ cm_free_msg(msg);
+ goto deref;
+ case IB_CM_LAP_RCVD:
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_LAP_COUNTER]);
+ goto unlock;
+ default:
+@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm
+ cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
+ if (cur_cm_id_priv) {
+ spin_unlock_irq(&cm.lock);
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_SIDR_REQ_COUNTER]);
+ goto out; /* Duplicate message. */
+ }
+@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_ma
+ if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
+ msg->retries = 1;
+
+- atomic_long_add(1 + msg->retries,
++ atomic_long_add_unchecked(1 + msg->retries,
+ &port->counter_group[CM_XMIT].counter[attr_index]);
+ if (msg->retries)
+- atomic_long_add(msg->retries,
++ atomic_long_add_unchecked(msg->retries,
+ &port->counter_group[CM_XMIT_RETRIES].
+ counter[attr_index]);
+
+@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_ma
+ }
+
+ attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
+- atomic_long_inc(&port->counter_group[CM_RECV].
++ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
+ counter[attr_id - CM_ATTR_ID_OFFSET]);
+
+ work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
+@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct ko
+ cm_attr = container_of(attr, struct cm_counter_attribute, attr);
+
+ return sprintf(buf, "%ld\n",
+- atomic_long_read(&group->counter[cm_attr->index]));
++ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
+ }
+
+ static const struct sysfs_ops cm_counter_ops = {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/infiniband/core/fmr_pool.c linux-3.4-pax/drivers/infiniband/core/fmr_pool.c
+--- linux-3.4/drivers/infiniband/core/fmr_pool.c 2012-01-08 19:47:58.879472485 +0100
++++ linux-3.4-pax/drivers/infiniband/core/fmr_pool.c 2012-05-21 12:10:10.272048929 +0200
+@@ -98,8 +98,8 @@ struct ib_fmr_pool {
+
+ struct task_struct *thread;
+
+- atomic_t req_ser;
+- atomic_t flush_ser;
++ atomic_unchecked_t req_ser;
++ atomic_unchecked_t flush_ser;
+
+ wait_queue_head_t force_wait;
+ };
+@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *p
+ struct ib_fmr_pool *pool = pool_ptr;
+
+ do {
+- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
++ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
+ ib_fmr_batch_release(pool);
+
+- atomic_inc(&pool->flush_ser);
++ atomic_inc_unchecked(&pool->flush_ser);
+ wake_up_interruptible(&pool->force_wait);
+
+ if (pool->flush_function)
+@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *p
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
++ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
+ !kthread_should_stop())
+ schedule();
+ __set_current_state(TASK_RUNNING);
+@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
+ pool->dirty_watermark = params->dirty_watermark;
+ pool->dirty_len = 0;
+ spin_lock_init(&pool->pool_lock);
+- atomic_set(&pool->req_ser, 0);
+- atomic_set(&pool->flush_ser, 0);
++ atomic_set_unchecked(&pool->req_ser, 0);
++ atomic_set_unchecked(&pool->flush_ser, 0);
+ init_waitqueue_head(&pool->force_wait);
+
+ pool->thread = kthread_run(ib_fmr_cleanup_thread,
+@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
+ }
+ spin_unlock_irq(&pool->pool_lock);
+
+- serial = atomic_inc_return(&pool->req_ser);
++ serial = atomic_inc_return_unchecked(&pool->req_ser);
+ wake_up_process(pool->thread);
+
+ if (wait_event_interruptible(pool->force_wait,
+- atomic_read(&pool->flush_ser) - serial >= 0))
++ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
+ return -EINTR;
+
+ return 0;
+@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
+ } else {
+ list_add_tail(&fmr->list, &pool->dirty_list);
+ if (++pool->dirty_len >= pool->dirty_watermark) {
+- atomic_inc(&pool->req_ser);
++ atomic_inc_unchecked(&pool->req_ser);
+ wake_up_process(pool->thread);
+ }
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/infiniband/hw/cxgb4/mem.c linux-3.4-pax/drivers/infiniband/hw/cxgb4/mem.c
+--- linux-3.4/drivers/infiniband/hw/cxgb4/mem.c 2011-10-24 12:48:28.759091640 +0200
++++ linux-3.4-pax/drivers/infiniband/hw/cxgb4/mem.c 2012-05-21 12:10:10.276048929 +0200
+@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
+ int err;
+ struct fw_ri_tpte tpt;
+ u32 stag_idx;
+- static atomic_t key;
++ static atomic_unchecked_t key;
+
+ if (c4iw_fatal_error(rdev))
+ return -EIO;
+@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
+ &rdev->resource.tpt_fifo_lock);
+ if (!stag_idx)
+ return -ENOMEM;
+- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
++ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
+ }
+ PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
+ __func__, stag_state, type, pdid, stag_idx);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.4-pax/drivers/infiniband/hw/ipath/ipath_rc.c
+--- linux-3.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/infiniband/hw/ipath/ipath_rc.c 2012-05-21 12:10:10.280048929 +0200
+@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ struct ib_atomic_eth *ateth;
+ struct ipath_ack_entry *e;
+ u64 vaddr;
+- atomic64_t *maddr;
++ atomic64_unchecked_t *maddr;
+ u64 sdata;
+ u32 rkey;
+ u8 next;
+@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto nack_acc_unlck;
+ /* Perform atomic OP and save result. */
+- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
++ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
+ sdata = be64_to_cpu(ateth->swap_data);
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+ e->atomic_data = (opcode == OP(FETCH_ADD)) ?
+- (u64) atomic64_add_return(sdata, maddr) - sdata :
++ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
+ (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+ be64_to_cpu(ateth->compare_data),
+ sdata);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.4-pax/drivers/infiniband/hw/ipath/ipath_ruc.c
+--- linux-3.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/infiniband/hw/ipath/ipath_ruc.c 2012-05-21 12:10:10.284048930 +0200
+@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
+ unsigned long flags;
+ struct ib_wc wc;
+ u64 sdata;
+- atomic64_t *maddr;
++ atomic64_unchecked_t *maddr;
+ enum ib_wc_status send_status;
+
+ /*
+@@ -382,11 +382,11 @@ again:
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto acc_err;
+ /* Perform atomic OP and save result. */
+- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
++ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
+ sdata = wqe->wr.wr.atomic.compare_add;
+ *(u64 *) sqp->s_sge.sge.vaddr =
+ (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
+- (u64) atomic64_add_return(sdata, maddr) - sdata :
++ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
+ (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+ sdata, wqe->wr.wr.atomic.swap);
+ goto send_comp;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/infiniband/hw/nes/nes.c linux-3.4-pax/drivers/infiniband/hw/nes/nes.c
+--- linux-3.4/drivers/infiniband/hw/nes/nes.c 2012-03-19 10:38:59.464049843 +0100
++++ linux-3.4-pax/drivers/infiniband/hw/nes/nes.c 2012-05-21 12:10:10.284048930 +0200
+@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
+ LIST_HEAD(nes_adapter_list);
+ static LIST_HEAD(nes_dev_list);
+
+-atomic_t qps_destroyed;
++atomic_unchecked_t qps_destroyed;
+
+ static unsigned int ee_flsh_adapter;
+ static unsigned int sysfs_nonidx_addr;
+@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(str
+ struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+
+- atomic_inc(&qps_destroyed);
++ atomic_inc_unchecked(&qps_destroyed);
+
+ /* Free the control structures */
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/infiniband/hw/nes/nes_cm.c linux-3.4-pax/drivers/infiniband/hw/nes/nes_cm.c
+--- linux-3.4/drivers/infiniband/hw/nes/nes_cm.c 2012-05-21 11:33:03.839928006 +0200
++++ linux-3.4-pax/drivers/infiniband/hw/nes/nes_cm.c 2012-05-21 12:10:10.288048930 +0200
+@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
+ u32 cm_packets_retrans;
+ u32 cm_packets_created;
+ u32 cm_packets_received;
+-atomic_t cm_listens_created;
+-atomic_t cm_listens_destroyed;
++atomic_unchecked_t cm_listens_created;
++atomic_unchecked_t cm_listens_destroyed;
+ u32 cm_backlog_drops;
+-atomic_t cm_loopbacks;
+-atomic_t cm_nodes_created;
+-atomic_t cm_nodes_destroyed;
+-atomic_t cm_accel_dropped_pkts;
+-atomic_t cm_resets_recvd;
++atomic_unchecked_t cm_loopbacks;
++atomic_unchecked_t cm_nodes_created;
++atomic_unchecked_t cm_nodes_destroyed;
++atomic_unchecked_t cm_accel_dropped_pkts;
++atomic_unchecked_t cm_resets_recvd;
+
+ static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
+ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
+@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
+
+ static struct nes_cm_core *g_cm_core;
+
+-atomic_t cm_connects;
+-atomic_t cm_accepts;
+-atomic_t cm_disconnects;
+-atomic_t cm_closes;
+-atomic_t cm_connecteds;
+-atomic_t cm_connect_reqs;
+-atomic_t cm_rejects;
++atomic_unchecked_t cm_connects;
++atomic_unchecked_t cm_accepts;
++atomic_unchecked_t cm_disconnects;
++atomic_unchecked_t cm_closes;
++atomic_unchecked_t cm_connecteds;
++atomic_unchecked_t cm_connect_reqs;
++atomic_unchecked_t cm_rejects;
+
+ int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
+ {
+@@ -1279,7 +1279,7 @@ static int mini_cm_dec_refcnt_listen(str
+ kfree(listener);
+ listener = NULL;
+ ret = 0;
+- atomic_inc(&cm_listens_destroyed);
++ atomic_inc_unchecked(&cm_listens_destroyed);
+ } else {
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ }
+@@ -1482,7 +1482,7 @@ static struct nes_cm_node *make_cm_node(
+ cm_node->rem_mac);
+
+ add_hte_node(cm_core, cm_node);
+- atomic_inc(&cm_nodes_created);
++ atomic_inc_unchecked(&cm_nodes_created);
+
+ return cm_node;
+ }
+@@ -1540,7 +1540,7 @@ static int rem_ref_cm_node(struct nes_cm
+ }
+
+ atomic_dec(&cm_core->node_cnt);
+- atomic_inc(&cm_nodes_destroyed);
++ atomic_inc_unchecked(&cm_nodes_destroyed);
+ nesqp = cm_node->nesqp;
+ if (nesqp) {
+ nesqp->cm_node = NULL;
+@@ -1604,7 +1604,7 @@ static int process_options(struct nes_cm
+
+ static void drop_packet(struct sk_buff *skb)
+ {
+- atomic_inc(&cm_accel_dropped_pkts);
++ atomic_inc_unchecked(&cm_accel_dropped_pkts);
+ dev_kfree_skb_any(skb);
+ }
+
+@@ -1667,7 +1667,7 @@ static void handle_rst_pkt(struct nes_cm
+ {
+
+ int reset = 0; /* whether to send reset in case of err.. */
+- atomic_inc(&cm_resets_recvd);
++ atomic_inc_unchecked(&cm_resets_recvd);
+ nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
+ " refcnt=%d\n", cm_node, cm_node->state,
+ atomic_read(&cm_node->ref_count));
+@@ -2308,7 +2308,7 @@ static struct nes_cm_node *mini_cm_conne
+ rem_ref_cm_node(cm_node->cm_core, cm_node);
+ return NULL;
+ }
+- atomic_inc(&cm_loopbacks);
++ atomic_inc_unchecked(&cm_loopbacks);
+ loopbackremotenode->loopbackpartner = cm_node;
+ loopbackremotenode->tcp_cntxt.rcv_wscale =
+ NES_CM_DEFAULT_RCV_WND_SCALE;
+@@ -2583,7 +2583,7 @@ static int mini_cm_recv_pkt(struct nes_c
+ nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
+ else {
+ rem_ref_cm_node(cm_core, cm_node);
+- atomic_inc(&cm_accel_dropped_pkts);
++ atomic_inc_unchecked(&cm_accel_dropped_pkts);
+ dev_kfree_skb_any(skb);
+ }
+ break;
+@@ -2890,7 +2890,7 @@ static int nes_cm_disconn_true(struct ne
+
+ if ((cm_id) && (cm_id->event_handler)) {
+ if (issue_disconn) {
+- atomic_inc(&cm_disconnects);
++ atomic_inc_unchecked(&cm_disconnects);
+ cm_event.event = IW_CM_EVENT_DISCONNECT;
+ cm_event.status = disconn_status;
+ cm_event.local_addr = cm_id->local_addr;
+@@ -2912,7 +2912,7 @@ static int nes_cm_disconn_true(struct ne
+ }
+
+ if (issue_close) {
+- atomic_inc(&cm_closes);
++ atomic_inc_unchecked(&cm_closes);
+ nes_disconnect(nesqp, 1);
+
+ cm_id->provider_data = nesqp;
+@@ -3048,7 +3048,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
+
+ nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
+ nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
+- atomic_inc(&cm_accepts);
++ atomic_inc_unchecked(&cm_accepts);
+
+ nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
+ netdev_refcnt_read(nesvnic->netdev));
+@@ -3250,7 +3250,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
+ struct nes_cm_core *cm_core;
+ u8 *start_buff;
+
+- atomic_inc(&cm_rejects);
++ atomic_inc_unchecked(&cm_rejects);
+ cm_node = (struct nes_cm_node *)cm_id->provider_data;
+ loopback = cm_node->loopbackpartner;
+ cm_core = cm_node->cm_core;
+@@ -3310,7 +3310,7 @@ int nes_connect(struct iw_cm_id *cm_id,
+ ntohl(cm_id->local_addr.sin_addr.s_addr),
+ ntohs(cm_id->local_addr.sin_port));
+
+- atomic_inc(&cm_connects);
++ atomic_inc_unchecked(&cm_connects);
+ nesqp->active_conn = 1;
+
+ /* cache the cm_id in the qp */
+@@ -3416,7 +3416,7 @@ int nes_create_listen(struct iw_cm_id *c
+ g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
+ return err;
+ }
+- atomic_inc(&cm_listens_created);
++ atomic_inc_unchecked(&cm_listens_created);
+ }
+
+ cm_id->add_ref(cm_id);
+@@ -3517,7 +3517,7 @@ static void cm_event_connected(struct ne
+
+ if (nesqp->destroyed)
+ return;
+- atomic_inc(&cm_connecteds);
++ atomic_inc_unchecked(&cm_connecteds);
+ nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
+ " local port 0x%04X. jiffies = %lu.\n",
+ nesqp->hwqp.qp_id,
+@@ -3704,7 +3704,7 @@ static void cm_event_reset(struct nes_cm
+
+ cm_id->add_ref(cm_id);
+ ret = cm_id->event_handler(cm_id, &cm_event);
+- atomic_inc(&cm_closes);
++ atomic_inc_unchecked(&cm_closes);
+ cm_event.event = IW_CM_EVENT_CLOSE;
+ cm_event.status = 0;
+ cm_event.provider_data = cm_id->provider_data;
+@@ -3740,7 +3740,7 @@ static void cm_event_mpa_req(struct nes_
+ return;
+ cm_id = cm_node->cm_id;
+
+- atomic_inc(&cm_connect_reqs);
++ atomic_inc_unchecked(&cm_connect_reqs);
+ nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
+ cm_node, cm_id, jiffies);
+
+@@ -3780,7 +3780,7 @@ static void cm_event_mpa_reject(struct n
+ return;
+ cm_id = cm_node->cm_id;
+
+- atomic_inc(&cm_connect_reqs);
++ atomic_inc_unchecked(&cm_connect_reqs);
+ nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
+ cm_node, cm_id, jiffies);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/infiniband/hw/nes/nes.h linux-3.4-pax/drivers/infiniband/hw/nes/nes.h
+--- linux-3.4/drivers/infiniband/hw/nes/nes.h 2012-03-19 10:38:59.464049843 +0100
++++ linux-3.4-pax/drivers/infiniband/hw/nes/nes.h 2012-05-21 12:10:10.292048930 +0200
+@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
+ extern unsigned int wqm_quanta;
+ extern struct list_head nes_adapter_list;
+
+-extern atomic_t cm_connects;
+-extern atomic_t cm_accepts;
+-extern atomic_t cm_disconnects;
+-extern atomic_t cm_closes;
+-extern atomic_t cm_connecteds;
+-extern atomic_t cm_connect_reqs;
+-extern atomic_t cm_rejects;
+-extern atomic_t mod_qp_timouts;
+-extern atomic_t qps_created;
+-extern atomic_t qps_destroyed;
+-extern atomic_t sw_qps_destroyed;
++extern atomic_unchecked_t cm_connects;
++extern atomic_unchecked_t cm_accepts;
++extern atomic_unchecked_t cm_disconnects;
++extern atomic_unchecked_t cm_closes;
++extern atomic_unchecked_t cm_connecteds;
++extern atomic_unchecked_t cm_connect_reqs;
++extern atomic_unchecked_t cm_rejects;
++extern atomic_unchecked_t mod_qp_timouts;
++extern atomic_unchecked_t qps_created;
++extern atomic_unchecked_t qps_destroyed;
++extern atomic_unchecked_t sw_qps_destroyed;
+ extern u32 mh_detected;
+ extern u32 mh_pauses_sent;
+ extern u32 cm_packets_sent;
+@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
+ extern u32 cm_packets_received;
+ extern u32 cm_packets_dropped;
+ extern u32 cm_packets_retrans;
+-extern atomic_t cm_listens_created;
+-extern atomic_t cm_listens_destroyed;
++extern atomic_unchecked_t cm_listens_created;
++extern atomic_unchecked_t cm_listens_destroyed;
+ extern u32 cm_backlog_drops;
+-extern atomic_t cm_loopbacks;
+-extern atomic_t cm_nodes_created;
+-extern atomic_t cm_nodes_destroyed;
+-extern atomic_t cm_accel_dropped_pkts;
+-extern atomic_t cm_resets_recvd;
+-extern atomic_t pau_qps_created;
+-extern atomic_t pau_qps_destroyed;
++extern atomic_unchecked_t cm_loopbacks;
++extern atomic_unchecked_t cm_nodes_created;
++extern atomic_unchecked_t cm_nodes_destroyed;
++extern atomic_unchecked_t cm_accel_dropped_pkts;
++extern atomic_unchecked_t cm_resets_recvd;
++extern atomic_unchecked_t pau_qps_created;
++extern atomic_unchecked_t pau_qps_destroyed;
+
+ extern u32 int_mod_timer_init;
+ extern u32 int_mod_cq_depth_256;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/infiniband/hw/nes/nes_mgt.c linux-3.4-pax/drivers/infiniband/hw/nes/nes_mgt.c
+--- linux-3.4/drivers/infiniband/hw/nes/nes_mgt.c 2012-03-19 10:38:59.472049843 +0100
++++ linux-3.4-pax/drivers/infiniband/hw/nes/nes_mgt.c 2012-05-21 12:10:10.296048930 +0200
+@@ -40,8 +40,8 @@
+ #include "nes.h"
+ #include "nes_mgt.h"
+
+-atomic_t pau_qps_created;
+-atomic_t pau_qps_destroyed;
++atomic_unchecked_t pau_qps_created;
++atomic_unchecked_t pau_qps_destroyed;
+
+ static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
+ {
+@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_devic
+ {
+ struct sk_buff *skb;
+ unsigned long flags;
+- atomic_inc(&pau_qps_destroyed);
++ atomic_inc_unchecked(&pau_qps_destroyed);
+
+ /* Free packets that have not yet been forwarded */
+ /* Lock is acquired by skb_dequeue when removing the skb */
+@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct ne
+ cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
+ skb_queue_head_init(&nesqp->pau_list);
+ spin_lock_init(&nesqp->pau_lock);
+- atomic_inc(&pau_qps_created);
++ atomic_inc_unchecked(&pau_qps_created);
+ nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/infiniband/hw/nes/nes_nic.c linux-3.4-pax/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-3.4/drivers/infiniband/hw/nes/nes_nic.c 2012-03-19 10:38:59.476049843 +0100
++++ linux-3.4-pax/drivers/infiniband/hw/nes/nes_nic.c 2012-05-21 12:10:10.300048930 +0200
+@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats
+ target_stat_values[++index] = mh_detected;
+ target_stat_values[++index] = mh_pauses_sent;
+ target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
+- target_stat_values[++index] = atomic_read(&cm_connects);
+- target_stat_values[++index] = atomic_read(&cm_accepts);
+- target_stat_values[++index] = atomic_read(&cm_disconnects);
+- target_stat_values[++index] = atomic_read(&cm_connecteds);
+- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
+- target_stat_values[++index] = atomic_read(&cm_rejects);
+- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
+- target_stat_values[++index] = atomic_read(&qps_created);
+- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
+- target_stat_values[++index] = atomic_read(&qps_destroyed);
+- target_stat_values[++index] = atomic_read(&cm_closes);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
++ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
++ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
++ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
++ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
+ target_stat_values[++index] = cm_packets_sent;
+ target_stat_values[++index] = cm_packets_bounced;
+ target_stat_values[++index] = cm_packets_created;
+ target_stat_values[++index] = cm_packets_received;
+ target_stat_values[++index] = cm_packets_dropped;
+ target_stat_values[++index] = cm_packets_retrans;
+- target_stat_values[++index] = atomic_read(&cm_listens_created);
+- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
+ target_stat_values[++index] = cm_backlog_drops;
+- target_stat_values[++index] = atomic_read(&cm_loopbacks);
+- target_stat_values[++index] = atomic_read(&cm_nodes_created);
+- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
+- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
+- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
+ target_stat_values[++index] = nesadapter->free_4kpbl;
+ target_stat_values[++index] = nesadapter->free_256pbl;
+ target_stat_values[++index] = int_mod_timer_init;
+ target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+ target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+ target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
+- target_stat_values[++index] = atomic_read(&pau_qps_created);
+- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
++ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
++ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
+ }
+
+ /**
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/infiniband/hw/nes/nes_verbs.c linux-3.4-pax/drivers/infiniband/hw/nes/nes_verbs.c
+--- linux-3.4/drivers/infiniband/hw/nes/nes_verbs.c 2012-05-21 11:33:03.851928005 +0200
++++ linux-3.4-pax/drivers/infiniband/hw/nes/nes_verbs.c 2012-05-21 12:10:10.304048931 +0200
+@@ -46,9 +46,9 @@
+
+ #include <rdma/ib_umem.h>
+
+-atomic_t mod_qp_timouts;
+-atomic_t qps_created;
+-atomic_t sw_qps_destroyed;
++atomic_unchecked_t mod_qp_timouts;
++atomic_unchecked_t qps_created;
++atomic_unchecked_t sw_qps_destroyed;
+
+ static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
+
+@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struc
+ if (init_attr->create_flags)
+ return ERR_PTR(-EINVAL);
+
+- atomic_inc(&qps_created);
++ atomic_inc_unchecked(&qps_created);
+ switch (init_attr->qp_type) {
+ case IB_QPT_RC:
+ if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
+@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *
+ struct iw_cm_event cm_event;
+ int ret = 0;
+
+- atomic_inc(&sw_qps_destroyed);
++ atomic_inc_unchecked(&sw_qps_destroyed);
+ nesqp->destroyed = 1;
+
+ /* Blow away the connection if it exists. */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/infiniband/hw/qib/qib.h linux-3.4-pax/drivers/infiniband/hw/qib/qib.h
+--- linux-3.4/drivers/infiniband/hw/qib/qib.h 2012-05-21 11:33:03.871928007 +0200
++++ linux-3.4-pax/drivers/infiniband/hw/qib/qib.h 2012-05-21 12:10:10.312048931 +0200
+@@ -51,6 +51,7 @@
+ #include <linux/completion.h>
+ #include <linux/kref.h>
+ #include <linux/sched.h>
++#include <linux/slab.h>
+
+ #include "qib_common.h"
+ #include "qib_verbs.h"
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/input/gameport/gameport.c linux-3.4-pax/drivers/input/gameport/gameport.c
+--- linux-3.4/drivers/input/gameport/gameport.c 2012-05-21 11:33:03.967928012 +0200
++++ linux-3.4-pax/drivers/input/gameport/gameport.c 2012-05-21 12:10:10.312048931 +0200
+@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
+ */
+ static void gameport_init_port(struct gameport *gameport)
+ {
+- static atomic_t gameport_no = ATOMIC_INIT(0);
++ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
+
+ __module_get(THIS_MODULE);
+
+ mutex_init(&gameport->drv_mutex);
+ device_initialize(&gameport->dev);
+ dev_set_name(&gameport->dev, "gameport%lu",
+- (unsigned long)atomic_inc_return(&gameport_no) - 1);
++ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
+ gameport->dev.bus = &gameport_bus;
+ gameport->dev.release = gameport_release_port;
+ if (gameport->parent)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/input/input.c linux-3.4-pax/drivers/input/input.c
+--- linux-3.4/drivers/input/input.c 2012-05-21 11:33:03.971928013 +0200
++++ linux-3.4-pax/drivers/input/input.c 2012-05-21 12:10:10.316048931 +0200
+@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
+ */
+ int input_register_device(struct input_dev *dev)
+ {
+- static atomic_t input_no = ATOMIC_INIT(0);
++ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
+ struct input_handler *handler;
+ const char *path;
+ int error;
+@@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
+ dev->setkeycode = input_default_setkeycode;
+
+ dev_set_name(&dev->dev, "input%ld",
+- (unsigned long) atomic_inc_return(&input_no) - 1);
++ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
+
+ error = device_add(&dev->dev);
+ if (error)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/input/joystick/sidewinder.c linux-3.4-pax/drivers/input/joystick/sidewinder.c
+--- linux-3.4/drivers/input/joystick/sidewinder.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/input/joystick/sidewinder.c 2012-05-21 12:10:10.320048932 +0200
+@@ -30,6 +30,7 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
++#include <linux/sched.h>
+ #include <linux/init.h>
+ #include <linux/input.h>
+ #include <linux/gameport.h>
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/input/joystick/xpad.c linux-3.4-pax/drivers/input/joystick/xpad.c
+--- linux-3.4/drivers/input/joystick/xpad.c 2012-03-19 10:38:59.520049840 +0100
++++ linux-3.4-pax/drivers/input/joystick/xpad.c 2012-05-21 12:10:10.320048932 +0200
+@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_clas
+
+ static int xpad_led_probe(struct usb_xpad *xpad)
+ {
+- static atomic_t led_seq = ATOMIC_INIT(0);
++ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
+ long led_no;
+ struct xpad_led *led;
+ struct led_classdev *led_cdev;
+@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpa
+ if (!led)
+ return -ENOMEM;
+
+- led_no = (long)atomic_inc_return(&led_seq) - 1;
++ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
+
+ snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
+ led->xpad = xpad;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/input/mousedev.c linux-3.4-pax/drivers/input/mousedev.c
+--- linux-3.4/drivers/input/mousedev.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/input/mousedev.c 2012-05-21 12:10:10.324048932 +0200
+@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
+
+ spin_unlock_irq(&client->packet_lock);
+
+- if (copy_to_user(buffer, data, count))
++ if (count > sizeof(data) || copy_to_user(buffer, data, count))
+ return -EFAULT;
+
+ return count;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/input/serio/serio.c linux-3.4-pax/drivers/input/serio/serio.c
+--- linux-3.4/drivers/input/serio/serio.c 2012-05-21 11:33:04.263928029 +0200
++++ linux-3.4-pax/drivers/input/serio/serio.c 2012-05-21 12:10:10.328048932 +0200
+@@ -496,7 +496,7 @@ static void serio_release_port(struct de
+ */
+ static void serio_init_port(struct serio *serio)
+ {
+- static atomic_t serio_no = ATOMIC_INIT(0);
++ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
+
+ __module_get(THIS_MODULE);
+
+@@ -507,7 +507,7 @@ static void serio_init_port(struct serio
+ mutex_init(&serio->drv_mutex);
+ device_initialize(&serio->dev);
+ dev_set_name(&serio->dev, "serio%ld",
+- (long)atomic_inc_return(&serio_no) - 1);
++ (long)atomic_inc_return_unchecked(&serio_no) - 1);
+ serio->dev.bus = &serio_bus;
+ serio->dev.release = serio_release_port;
+ serio->dev.groups = serio_device_attr_groups;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/isdn/capi/capi.c linux-3.4-pax/drivers/isdn/capi/capi.c
+--- linux-3.4/drivers/isdn/capi/capi.c 2012-05-21 11:33:04.575928045 +0200
++++ linux-3.4-pax/drivers/isdn/capi/capi.c 2012-05-21 12:10:10.332048932 +0200
+@@ -83,8 +83,8 @@ struct capiminor {
+
+ struct capi20_appl *ap;
+ u32 ncci;
+- atomic_t datahandle;
+- atomic_t msgid;
++ atomic_unchecked_t datahandle;
++ atomic_unchecked_t msgid;
+
+ struct tty_port port;
+ int ttyinstop;
+@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
+ capimsg_setu16(s, 2, mp->ap->applid);
+ capimsg_setu8 (s, 4, CAPI_DATA_B3);
+ capimsg_setu8 (s, 5, CAPI_RESP);
+- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
++ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
+ capimsg_setu32(s, 8, mp->ncci);
+ capimsg_setu16(s, 12, datahandle);
+ }
+@@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
+ mp->outbytes -= len;
+ spin_unlock_bh(&mp->outlock);
+
+- datahandle = atomic_inc_return(&mp->datahandle);
++ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
+ skb_push(skb, CAPI_DATA_B3_REQ_LEN);
+ memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
+ capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
+ capimsg_setu16(skb->data, 2, mp->ap->applid);
+ capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
+ capimsg_setu8 (skb->data, 5, CAPI_REQ);
+- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
++ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
+ capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
+ capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
+ capimsg_setu16(skb->data, 16, len); /* Data length */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/isdn/hardware/avm/b1.c linux-3.4-pax/drivers/isdn/hardware/avm/b1.c
+--- linux-3.4/drivers/isdn/hardware/avm/b1.c 2012-05-21 11:33:04.707928053 +0200
++++ linux-3.4-pax/drivers/isdn/hardware/avm/b1.c 2012-05-21 12:10:10.332048932 +0200
+@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
+ }
+ if (left) {
+ if (t4file->user) {
+- if (copy_from_user(buf, dp, left))
++ if (left > sizeof buf || copy_from_user(buf, dp, left))
+ return -EFAULT;
+ } else {
+ memcpy(buf, dp, left);
+@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
+ }
+ if (left) {
+ if (config->user) {
+- if (copy_from_user(buf, dp, left))
++ if (left > sizeof buf || copy_from_user(buf, dp, left))
+ return -EFAULT;
+ } else {
+ memcpy(buf, dp, left);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/isdn/hardware/eicon/divasync.h linux-3.4-pax/drivers/isdn/hardware/eicon/divasync.h
+--- linux-3.4/drivers/isdn/hardware/eicon/divasync.h 2012-05-21 11:33:04.839928061 +0200
++++ linux-3.4-pax/drivers/isdn/hardware/eicon/divasync.h 2012-05-21 12:10:10.336048932 +0200
+@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
+ } diva_didd_add_adapter_t;
+ typedef struct _diva_didd_remove_adapter {
+ IDI_CALL p_request;
+-} diva_didd_remove_adapter_t;
++} __no_const diva_didd_remove_adapter_t;
+ typedef struct _diva_didd_read_adapter_array {
+ void *buffer;
+ dword length;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.4-pax/drivers/isdn/hardware/eicon/xdi_adapter.h
+--- linux-3.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2012-05-21 11:33:05.051928071 +0200
++++ linux-3.4-pax/drivers/isdn/hardware/eicon/xdi_adapter.h 2012-05-21 12:10:10.336048932 +0200
+@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
+ typedef struct _diva_os_idi_adapter_interface {
+ diva_init_card_proc_t cleanup_adapter_proc;
+ diva_cmd_card_proc_t cmd_proc;
+-} diva_os_idi_adapter_interface_t;
++} __no_const diva_os_idi_adapter_interface_t;
+
+ typedef struct _diva_os_xdi_adapter {
+ struct list_head link;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/isdn/icn/icn.c linux-3.4-pax/drivers/isdn/icn/icn.c
+--- linux-3.4/drivers/isdn/icn/icn.c 2012-05-21 11:33:05.887928117 +0200
++++ linux-3.4-pax/drivers/isdn/icn/icn.c 2012-05-21 12:10:10.340048933 +0200
+@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len,
+ if (count > len)
+ count = len;
+ if (user) {
+- if (copy_from_user(msg, buf, count))
++ if (count > sizeof msg || copy_from_user(msg, buf, count))
+ return -EFAULT;
+ } else
+ memcpy(msg, buf, count);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/lguest/core.c linux-3.4-pax/drivers/lguest/core.c
+--- linux-3.4/drivers/lguest/core.c 2012-01-08 19:48:00.403472404 +0100
++++ linux-3.4-pax/drivers/lguest/core.c 2012-05-21 12:10:10.344048933 +0200
+@@ -92,9 +92,17 @@ static __init int map_switcher(void)
+ * it's worked so far. The end address needs +1 because __get_vm_area
+ * allocates an extra guard page, so we need space for that.
+ */
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
++ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
++ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
++#else
+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
+ VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
++#endif
++
+ if (!switcher_vma) {
+ err = -ENOMEM;
+ printk("lguest: could not map switcher pages high\n");
+@@ -119,7 +127,7 @@ static __init int map_switcher(void)
+ * Now the Switcher is mapped at the right address, we can't fail!
+ * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
+ */
+- memcpy(switcher_vma->addr, start_switcher_text,
++ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
+ end_switcher_text - start_switcher_text);
+
+ printk(KERN_INFO "lguest: mapped switcher at %p\n",
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/lguest/x86/core.c linux-3.4-pax/drivers/lguest/x86/core.c
+--- linux-3.4/drivers/lguest/x86/core.c 2012-03-19 10:39:00.124049786 +0100
++++ linux-3.4-pax/drivers/lguest/x86/core.c 2012-05-21 12:10:10.348048933 +0200
+@@ -59,7 +59,7 @@ static struct {
+ /* Offset from where switcher.S was compiled to where we've copied it */
+ static unsigned long switcher_offset(void)
+ {
+- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
++ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
+ }
+
+ /* This cpu's struct lguest_pages. */
+@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
+ * These copies are pretty cheap, so we do them unconditionally: */
+ /* Save the current Host top-level page directory.
+ */
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ pages->state.host_cr3 = read_cr3();
++#else
+ pages->state.host_cr3 = __pa(current->mm->pgd);
++#endif
++
+ /*
+ * Set up the Guest's page tables to see this CPU's pages (and no
+ * other CPU's pages).
+@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
+ * compiled-in switcher code and the high-mapped copy we just made.
+ */
+ for (i = 0; i < IDT_ENTRIES; i++)
+- default_idt_entries[i] += switcher_offset();
++ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
+
+ /*
+ * Set up the Switcher's per-cpu areas.
+@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
+ * it will be undisturbed when we switch. To change %cs and jump we
+ * need this structure to feed to Intel's "lcall" instruction.
+ */
+- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
++ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
+ lguest_entry.segment = LGUEST_CS;
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/lguest/x86/switcher_32.S linux-3.4-pax/drivers/lguest/x86/switcher_32.S
+--- linux-3.4/drivers/lguest/x86/switcher_32.S 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/lguest/x86/switcher_32.S 2012-05-21 12:10:10.348048933 +0200
+@@ -87,6 +87,7 @@
+ #include <asm/page.h>
+ #include <asm/segment.h>
+ #include <asm/lguest.h>
++#include <asm/processor-flags.h>
+
+ // We mark the start of the code to copy
+ // It's placed in .text tho it's never run here
+@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
+ // Changes type when we load it: damn Intel!
+ // For after we switch over our page tables
+ // That entry will be read-only: we'd crash.
++
++#ifdef CONFIG_PAX_KERNEXEC
++ mov %cr0, %edx
++ xor $X86_CR0_WP, %edx
++ mov %edx, %cr0
++#endif
++
+ movl $(GDT_ENTRY_TSS*8), %edx
+ ltr %dx
+
+@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
+ // Let's clear it again for our return.
+ // The GDT descriptor of the Host
+ // Points to the table after two "size" bytes
+- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
++ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
+ // Clear "used" from type field (byte 5, bit 2)
+- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
++ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
++
++#ifdef CONFIG_PAX_KERNEXEC
++ mov %cr0, %eax
++ xor $X86_CR0_WP, %eax
++ mov %eax, %cr0
++#endif
+
+ // Once our page table's switched, the Guest is live!
+ // The Host fades as we run this final step.
+@@ -295,13 +309,12 @@ deliver_to_host:
+ // I consulted gcc, and it gave
+ // These instructions, which I gladly credit:
+ leal (%edx,%ebx,8), %eax
+- movzwl (%eax),%edx
+- movl 4(%eax), %eax
+- xorw %ax, %ax
+- orl %eax, %edx
++ movl 4(%eax), %edx
++ movw (%eax), %dx
+ // Now the address of the handler's in %edx
+ // We call it now: its "iret" drops us home.
+- jmp *%edx
++ ljmp $__KERNEL_CS, $1f
++1: jmp *%edx
+
+ // Every interrupt can come to us here
+ // But we must truly tell each apart.
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/macintosh/macio_asic.c linux-3.4-pax/drivers/macintosh/macio_asic.c
+--- linux-3.4/drivers/macintosh/macio_asic.c 2012-05-21 11:33:06.179928134 +0200
++++ linux-3.4-pax/drivers/macintosh/macio_asic.c 2012-05-21 12:10:10.352048933 +0200
+@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(s
+ * MacIO is matched against any Apple ID, it's probe() function
+ * will then decide wether it applies or not
+ */
+-static const struct pci_device_id __devinitdata pci_ids [] = { {
++static const struct pci_device_id __devinitconst pci_ids [] = { {
+ .vendor = PCI_VENDOR_ID_APPLE,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/md/bitmap.c linux-3.4-pax/drivers/md/bitmap.c
+--- linux-3.4/drivers/md/bitmap.c 2012-05-21 11:33:06.259928136 +0200
++++ linux-3.4-pax/drivers/md/bitmap.c 2012-05-21 12:10:10.356048934 +0200
+@@ -1823,7 +1823,7 @@ void bitmap_status(struct seq_file *seq,
+ chunk_kb ? "KB" : "B");
+ if (bitmap->file) {
+ seq_printf(seq, ", file: ");
+- seq_path(seq, &bitmap->file->f_path, " \t\n");
++ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
+ }
+
+ seq_printf(seq, "\n");
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/md/dm.c linux-3.4-pax/drivers/md/dm.c
+--- linux-3.4/drivers/md/dm.c 2012-05-21 11:33:06.327928141 +0200
++++ linux-3.4-pax/drivers/md/dm.c 2012-05-21 12:10:10.360048934 +0200
+@@ -176,9 +176,9 @@ struct mapped_device {
+ /*
+ * Event handling.
+ */
+- atomic_t event_nr;
++ atomic_unchecked_t event_nr;
+ wait_queue_head_t eventq;
+- atomic_t uevent_seq;
++ atomic_unchecked_t uevent_seq;
+ struct list_head uevent_list;
+ spinlock_t uevent_lock; /* Protect access to uevent_list */
+
+@@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(i
+ rwlock_init(&md->map_lock);
+ atomic_set(&md->holders, 1);
+ atomic_set(&md->open_count, 0);
+- atomic_set(&md->event_nr, 0);
+- atomic_set(&md->uevent_seq, 0);
++ atomic_set_unchecked(&md->event_nr, 0);
++ atomic_set_unchecked(&md->uevent_seq, 0);
+ INIT_LIST_HEAD(&md->uevent_list);
+ spin_lock_init(&md->uevent_lock);
+
+@@ -1980,7 +1980,7 @@ static void event_callback(void *context
+
+ dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
+
+- atomic_inc(&md->event_nr);
++ atomic_inc_unchecked(&md->event_nr);
+ wake_up(&md->eventq);
+ }
+
+@@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_devi
+
+ uint32_t dm_next_uevent_seq(struct mapped_device *md)
+ {
+- return atomic_add_return(1, &md->uevent_seq);
++ return atomic_add_return_unchecked(1, &md->uevent_seq);
+ }
+
+ uint32_t dm_get_event_nr(struct mapped_device *md)
+ {
+- return atomic_read(&md->event_nr);
++ return atomic_read_unchecked(&md->event_nr);
+ }
+
+ int dm_wait_event(struct mapped_device *md, int event_nr)
+ {
+ return wait_event_interruptible(md->eventq,
+- (event_nr != atomic_read(&md->event_nr)));
++ (event_nr != atomic_read_unchecked(&md->event_nr)));
+ }
+
+ void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/md/dm-ioctl.c linux-3.4-pax/drivers/md/dm-ioctl.c
+--- linux-3.4/drivers/md/dm-ioctl.c 2012-05-21 11:33:06.271928139 +0200
++++ linux-3.4-pax/drivers/md/dm-ioctl.c 2012-05-21 12:10:10.360048934 +0200
+@@ -1590,7 +1590,7 @@ static int validate_params(uint cmd, str
+ cmd == DM_LIST_VERSIONS_CMD)
+ return 0;
+
+- if ((cmd == DM_DEV_CREATE_CMD)) {
++ if (cmd == DM_DEV_CREATE_CMD) {
+ if (!*param->name) {
+ DMWARN("name not supplied when creating device");
+ return -EINVAL;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/md/dm-raid1.c linux-3.4-pax/drivers/md/dm-raid1.c
+--- linux-3.4/drivers/md/dm-raid1.c 2012-05-21 11:33:06.303928139 +0200
++++ linux-3.4-pax/drivers/md/dm-raid1.c 2012-05-22 15:28:30.107384684 +0200
+@@ -40,7 +40,7 @@ enum dm_raid1_error {
+
+ struct mirror {
+ struct mirror_set *ms;
+- atomic_t error_count;
++ atomic_unchecked_t error_count;
+ unsigned long error_type;
+ struct dm_dev *dev;
+ sector_t offset;
+@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
+ struct mirror *m;
+
+ for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
+- if (!atomic_read(&m->error_count))
++ if (!atomic_read_unchecked(&m->error_count))
+ return m;
+
+ return NULL;
+@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
+ * simple way to tell if a device has encountered
+ * errors.
+ */
+- atomic_inc(&m->error_count);
++ atomic_inc_unchecked(&m->error_count);
+
+ if (test_and_set_bit(error_type, &m->error_type))
+ return;
+@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
+ struct mirror *m = get_default_mirror(ms);
+
+ do {
+- if (likely(!atomic_read(&m->error_count)))
++ if (likely(!atomic_read_unchecked(&m->error_count)))
+ return m;
+
+ if (m-- == ms->mirror)
+@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
+ {
+ struct mirror *default_mirror = get_default_mirror(m->ms);
+
+- return !atomic_read(&default_mirror->error_count);
++ return !atomic_read_unchecked(&default_mirror->error_count);
+ }
+
+ static int mirror_available(struct mirror_set *ms, struct bio *bio)
+@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
+ */
+ if (likely(region_in_sync(ms, region, 1)))
+ m = choose_mirror(ms, bio->bi_sector);
+- else if (m && atomic_read(&m->error_count))
++ else if (m && atomic_read_unchecked(&m->error_count))
+ m = NULL;
+
+ if (likely(m))
+@@ -938,7 +938,7 @@ static int get_mirror(struct mirror_set
+ }
+
+ ms->mirror[mirror].ms = ms;
+- atomic_set(&(ms->mirror[mirror].error_count), 0);
++ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
+ ms->mirror[mirror].error_type = 0;
+ ms->mirror[mirror].offset = offset;
+
+@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_targ
+ */
+ static char device_status_char(struct mirror *m)
+ {
+- if (!atomic_read(&(m->error_count)))
++ if (!atomic_read_unchecked(&(m->error_count)))
+ return 'A';
+
+ return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/md/dm-stripe.c linux-3.4-pax/drivers/md/dm-stripe.c
+--- linux-3.4/drivers/md/dm-stripe.c 2012-05-21 11:33:06.315928140 +0200
++++ linux-3.4-pax/drivers/md/dm-stripe.c 2012-05-22 15:28:30.107384684 +0200
+@@ -20,7 +20,7 @@ struct stripe {
+ struct dm_dev *dev;
+ sector_t physical_start;
+
+- atomic_t error_count;
++ atomic_unchecked_t error_count;
+ };
+
+ struct stripe_c {
+@@ -193,7 +193,7 @@ static int stripe_ctr(struct dm_target *
+ kfree(sc);
+ return r;
+ }
+- atomic_set(&(sc->stripe[i].error_count), 0);
++ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
+ }
+
+ ti->private = sc;
+@@ -315,7 +315,7 @@ static int stripe_status(struct dm_targe
+ DMEMIT("%d ", sc->stripes);
+ for (i = 0; i < sc->stripes; i++) {
+ DMEMIT("%s ", sc->stripe[i].dev->name);
+- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
++ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
+ 'D' : 'A';
+ }
+ buffer[i] = '\0';
+@@ -362,8 +362,8 @@ static int stripe_end_io(struct dm_targe
+ */
+ for (i = 0; i < sc->stripes; i++)
+ if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
+- atomic_inc(&(sc->stripe[i].error_count));
+- if (atomic_read(&(sc->stripe[i].error_count)) <
++ atomic_inc_unchecked(&(sc->stripe[i].error_count));
++ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
+ DM_IO_ERROR_THRESHOLD)
+ schedule_work(&sc->trigger_event);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/md/dm-table.c linux-3.4-pax/drivers/md/dm-table.c
+--- linux-3.4/drivers/md/dm-table.c 2012-05-21 11:33:06.315928140 +0200
++++ linux-3.4-pax/drivers/md/dm-table.c 2012-05-21 12:10:10.368048933 +0200
+@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
+ if (!dev_size)
+ return 0;
+
+- if ((start >= dev_size) || (start + len > dev_size)) {
++ if ((start >= dev_size) || (len > dev_size - start)) {
+ DMWARN("%s: %s too small for target: "
+ "start=%llu, len=%llu, dev_size=%llu",
+ dm_device_name(ti->table->md), bdevname(bdev, b),
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/md/dm-thin-metadata.c linux-3.4-pax/drivers/md/dm-thin-metadata.c
+--- linux-3.4/drivers/md/dm-thin-metadata.c 2012-05-21 11:33:06.319928140 +0200
++++ linux-3.4-pax/drivers/md/dm-thin-metadata.c 2012-05-21 12:10:10.372048935 +0200
+@@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metad
+
+ pmd->info.tm = tm;
+ pmd->info.levels = 2;
+- pmd->info.value_type.context = pmd->data_sm;
++ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
+ pmd->info.value_type.size = sizeof(__le64);
+ pmd->info.value_type.inc = data_block_inc;
+ pmd->info.value_type.dec = data_block_dec;
+@@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metad
+
+ pmd->bl_info.tm = tm;
+ pmd->bl_info.levels = 1;
+- pmd->bl_info.value_type.context = pmd->data_sm;
++ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
+ pmd->bl_info.value_type.size = sizeof(__le64);
+ pmd->bl_info.value_type.inc = data_block_inc;
+ pmd->bl_info.value_type.dec = data_block_dec;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/md/md.c linux-3.4-pax/drivers/md/md.c
+--- linux-3.4/drivers/md/md.c 2012-05-21 11:33:06.335928141 +0200
++++ linux-3.4-pax/drivers/md/md.c 2012-05-21 12:10:10.380048936 +0200
+@@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
+ * start build, activate spare
+ */
+ static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
+-static atomic_t md_event_count;
++static atomic_unchecked_t md_event_count;
+ void md_new_event(struct mddev *mddev)
+ {
+- atomic_inc(&md_event_count);
++ atomic_inc_unchecked(&md_event_count);
+ wake_up(&md_event_waiters);
+ }
+ EXPORT_SYMBOL_GPL(md_new_event);
+@@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
+ */
+ static void md_new_event_inintr(struct mddev *mddev)
+ {
+- atomic_inc(&md_event_count);
++ atomic_inc_unchecked(&md_event_count);
+ wake_up(&md_event_waiters);
+ }
+
+@@ -1526,7 +1526,7 @@ static int super_1_load(struct md_rdev *
+
+ rdev->preferred_minor = 0xffff;
+ rdev->data_offset = le64_to_cpu(sb->data_offset);
+- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
++ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
+
+ rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
+ bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
+@@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *m
+ else
+ sb->resync_offset = cpu_to_le64(0);
+
+- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
++ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
+
+ sb->raid_disks = cpu_to_le32(mddev->raid_disks);
+ sb->size = cpu_to_le64(mddev->dev_sectors);
+@@ -2691,7 +2691,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
+ static ssize_t
+ errors_show(struct md_rdev *rdev, char *page)
+ {
+- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
++ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
+ }
+
+ static ssize_t
+@@ -2700,7 +2700,7 @@ errors_store(struct md_rdev *rdev, const
+ char *e;
+ unsigned long n = simple_strtoul(buf, &e, 10);
+ if (*buf && (*e == 0 || *e == '\n')) {
+- atomic_set(&rdev->corrected_errors, n);
++ atomic_set_unchecked(&rdev->corrected_errors, n);
+ return len;
+ }
+ return -EINVAL;
+@@ -3086,8 +3086,8 @@ int md_rdev_init(struct md_rdev *rdev)
+ rdev->sb_loaded = 0;
+ rdev->bb_page = NULL;
+ atomic_set(&rdev->nr_pending, 0);
+- atomic_set(&rdev->read_errors, 0);
+- atomic_set(&rdev->corrected_errors, 0);
++ atomic_set_unchecked(&rdev->read_errors, 0);
++ atomic_set_unchecked(&rdev->corrected_errors, 0);
+
+ INIT_LIST_HEAD(&rdev->same_set);
+ init_waitqueue_head(&rdev->blocked_wait);
+@@ -6738,7 +6738,7 @@ static int md_seq_show(struct seq_file *
+
+ spin_unlock(&pers_lock);
+ seq_printf(seq, "\n");
+- seq->poll_event = atomic_read(&md_event_count);
++ seq->poll_event = atomic_read_unchecked(&md_event_count);
+ return 0;
+ }
+ if (v == (void*)2) {
+@@ -6841,7 +6841,7 @@ static int md_seq_open(struct inode *ino
+ return error;
+
+ seq = file->private_data;
+- seq->poll_event = atomic_read(&md_event_count);
++ seq->poll_event = atomic_read_unchecked(&md_event_count);
+ return error;
+ }
+
+@@ -6855,7 +6855,7 @@ static unsigned int mdstat_poll(struct f
+ /* always allow read */
+ mask = POLLIN | POLLRDNORM;
+
+- if (seq->poll_event != atomic_read(&md_event_count))
++ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
+ mask |= POLLERR | POLLPRI;
+ return mask;
+ }
+@@ -6899,7 +6899,7 @@ static int is_mddev_idle(struct mddev *m
+ struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
+ curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
+ (int)part_stat_read(&disk->part0, sectors[1]) -
+- atomic_read(&disk->sync_io);
++ atomic_read_unchecked(&disk->sync_io);
+ /* sync IO will cause sync_io to increase before the disk_stats
+ * as sync_io is counted when a request starts, and
+ * disk_stats is counted when it completes.
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/md/md.h linux-3.4-pax/drivers/md/md.h
+--- linux-3.4/drivers/md/md.h 2012-05-21 11:33:06.339928141 +0200
++++ linux-3.4-pax/drivers/md/md.h 2012-05-21 12:10:10.384048936 +0200
+@@ -93,13 +93,13 @@ struct md_rdev {
+ * only maintained for arrays that
+ * support hot removal
+ */
+- atomic_t read_errors; /* number of consecutive read errors that
++ atomic_unchecked_t read_errors; /* number of consecutive read errors that
+ * we have tried to ignore.
+ */
+ struct timespec last_read_error; /* monotonic time since our
+ * last read error
+ */
+- atomic_t corrected_errors; /* number of corrected read errors,
++ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
+ * for reporting to userspace and storing
+ * in superblock.
+ */
+@@ -429,7 +429,7 @@ static inline void rdev_dec_pending(stru
+
+ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
+ {
+- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
++ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
+ }
+
+ struct md_personality
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/md/persistent-data/dm-space-map-checker.c linux-3.4-pax/drivers/md/persistent-data/dm-space-map-checker.c
+--- linux-3.4/drivers/md/persistent-data/dm-space-map-checker.c 2012-01-08 19:48:00.675472389 +0100
++++ linux-3.4-pax/drivers/md/persistent-data/dm-space-map-checker.c 2012-05-21 12:10:10.384048936 +0200
+@@ -159,7 +159,7 @@ static void ca_destroy(struct count_arra
+ /*----------------------------------------------------------------*/
+
+ struct sm_checker {
+- struct dm_space_map sm;
++ dm_space_map_no_const sm;
+
+ struct count_array old_counts;
+ struct count_array counts;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/md/persistent-data/dm-space-map-disk.c linux-3.4-pax/drivers/md/persistent-data/dm-space-map-disk.c
+--- linux-3.4/drivers/md/persistent-data/dm-space-map-disk.c 2012-01-08 19:48:00.683472389 +0100
++++ linux-3.4-pax/drivers/md/persistent-data/dm-space-map-disk.c 2012-05-21 12:10:10.388048935 +0200
+@@ -23,7 +23,7 @@
+ * Space map interface.
+ */
+ struct sm_disk {
+- struct dm_space_map sm;
++ dm_space_map_no_const sm;
+
+ struct ll_disk ll;
+ struct ll_disk old_ll;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/md/persistent-data/dm-space-map.h linux-3.4-pax/drivers/md/persistent-data/dm-space-map.h
+--- linux-3.4/drivers/md/persistent-data/dm-space-map.h 2012-01-08 19:48:00.687472389 +0100
++++ linux-3.4-pax/drivers/md/persistent-data/dm-space-map.h 2012-05-21 12:10:10.388048935 +0200
+@@ -60,6 +60,7 @@ struct dm_space_map {
+ int (*root_size)(struct dm_space_map *sm, size_t *result);
+ int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
+ };
++typedef struct dm_space_map __no_const dm_space_map_no_const;
+
+ /*----------------------------------------------------------------*/
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/md/persistent-data/dm-space-map-metadata.c linux-3.4-pax/drivers/md/persistent-data/dm-space-map-metadata.c
+--- linux-3.4/drivers/md/persistent-data/dm-space-map-metadata.c 2012-01-08 19:48:00.687472389 +0100
++++ linux-3.4-pax/drivers/md/persistent-data/dm-space-map-metadata.c 2012-05-21 12:10:10.392048935 +0200
+@@ -43,7 +43,7 @@ struct block_op {
+ };
+
+ struct sm_metadata {
+- struct dm_space_map sm;
++ dm_space_map_no_const sm;
+
+ struct ll_disk ll;
+ struct ll_disk old_ll;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/md/raid10.c linux-3.4-pax/drivers/md/raid10.c
+--- linux-3.4/drivers/md/raid10.c 2012-05-21 11:33:06.375928143 +0200
++++ linux-3.4-pax/drivers/md/raid10.c 2012-05-21 12:10:10.396048936 +0200
+@@ -1684,7 +1684,7 @@ static void end_sync_read(struct bio *bi
+ /* The write handler will notice the lack of
+ * R10BIO_Uptodate and record any errors etc
+ */
+- atomic_add(r10_bio->sectors,
++ atomic_add_unchecked(r10_bio->sectors,
+ &conf->mirrors[d].rdev->corrected_errors);
+
+ /* for reconstruct, we always reschedule after a read.
+@@ -2033,7 +2033,7 @@ static void check_decay_read_errors(stru
+ {
+ struct timespec cur_time_mon;
+ unsigned long hours_since_last;
+- unsigned int read_errors = atomic_read(&rdev->read_errors);
++ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
+
+ ktime_get_ts(&cur_time_mon);
+
+@@ -2055,9 +2055,9 @@ static void check_decay_read_errors(stru
+ * overflowing the shift of read_errors by hours_since_last.
+ */
+ if (hours_since_last >= 8 * sizeof(read_errors))
+- atomic_set(&rdev->read_errors, 0);
++ atomic_set_unchecked(&rdev->read_errors, 0);
+ else
+- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
++ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
+ }
+
+ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
+@@ -2111,8 +2111,8 @@ static void fix_read_error(struct r10con
+ return;
+
+ check_decay_read_errors(mddev, rdev);
+- atomic_inc(&rdev->read_errors);
+- if (atomic_read(&rdev->read_errors) > max_read_errors) {
++ atomic_inc_unchecked(&rdev->read_errors);
++ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
+ char b[BDEVNAME_SIZE];
+ bdevname(rdev->bdev, b);
+
+@@ -2120,7 +2120,7 @@ static void fix_read_error(struct r10con
+ "md/raid10:%s: %s: Raid device exceeded "
+ "read_error threshold [cur %d:max %d]\n",
+ mdname(mddev), b,
+- atomic_read(&rdev->read_errors), max_read_errors);
++ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
+ printk(KERN_NOTICE
+ "md/raid10:%s: %s: Failing raid device\n",
+ mdname(mddev), b);
+@@ -2271,7 +2271,7 @@ static void fix_read_error(struct r10con
+ (unsigned long long)(
+ sect + rdev->data_offset),
+ bdevname(rdev->bdev, b));
+- atomic_add(s, &rdev->corrected_errors);
++ atomic_add_unchecked(s, &rdev->corrected_errors);
+ }
+
+ rdev_dec_pending(rdev, mddev);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/md/raid1.c linux-3.4-pax/drivers/md/raid1.c
+--- linux-3.4/drivers/md/raid1.c 2012-05-21 11:33:06.371928144 +0200
++++ linux-3.4-pax/drivers/md/raid1.c 2012-05-21 12:10:10.400048936 +0200
+@@ -1688,7 +1688,7 @@ static int fix_sync_read_error(struct r1
+ if (r1_sync_page_io(rdev, sect, s,
+ bio->bi_io_vec[idx].bv_page,
+ READ) != 0)
+- atomic_add(s, &rdev->corrected_errors);
++ atomic_add_unchecked(s, &rdev->corrected_errors);
+ }
+ sectors -= s;
+ sect += s;
+@@ -1902,7 +1902,7 @@ static void fix_read_error(struct r1conf
+ test_bit(In_sync, &rdev->flags)) {
+ if (r1_sync_page_io(rdev, sect, s,
+ conf->tmppage, READ)) {
+- atomic_add(s, &rdev->corrected_errors);
++ atomic_add_unchecked(s, &rdev->corrected_errors);
+ printk(KERN_INFO
+ "md/raid1:%s: read error corrected "
+ "(%d sectors at %llu on %s)\n",
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/md/raid5.c linux-3.4-pax/drivers/md/raid5.c
+--- linux-3.4/drivers/md/raid5.c 2012-05-21 11:33:06.379928143 +0200
++++ linux-3.4-pax/drivers/md/raid5.c 2012-05-21 12:10:10.404048936 +0200
+@@ -1686,18 +1686,18 @@ static void raid5_end_read_request(struc
+ (unsigned long long)(sh->sector
+ + rdev->data_offset),
+ bdevname(rdev->bdev, b));
+- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
++ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
+ clear_bit(R5_ReadError, &sh->dev[i].flags);
+ clear_bit(R5_ReWrite, &sh->dev[i].flags);
+ }
+- if (atomic_read(&rdev->read_errors))
+- atomic_set(&rdev->read_errors, 0);
++ if (atomic_read_unchecked(&rdev->read_errors))
++ atomic_set_unchecked(&rdev->read_errors, 0);
+ } else {
+ const char *bdn = bdevname(rdev->bdev, b);
+ int retry = 0;
+
+ clear_bit(R5_UPTODATE, &sh->dev[i].flags);
+- atomic_inc(&rdev->read_errors);
++ atomic_inc_unchecked(&rdev->read_errors);
+ if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
+ printk_ratelimited(
+ KERN_WARNING
+@@ -1726,7 +1726,7 @@ static void raid5_end_read_request(struc
+ (unsigned long long)(sh->sector
+ + rdev->data_offset),
+ bdn);
+- else if (atomic_read(&rdev->read_errors)
++ else if (atomic_read_unchecked(&rdev->read_errors)
+ > conf->max_nr_stripes)
+ printk(KERN_WARNING
+ "md/raid:%s: Too many read errors, failing device %s.\n",
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/media/dvb/ddbridge/ddbridge-core.c linux-3.4-pax/drivers/media/dvb/ddbridge/ddbridge-core.c
+--- linux-3.4/drivers/media/dvb/ddbridge/ddbridge-core.c 2012-05-21 11:33:06.415928145 +0200
++++ linux-3.4-pax/drivers/media/dvb/ddbridge/ddbridge-core.c 2012-05-21 12:10:10.412048937 +0200
+@@ -1679,7 +1679,7 @@ static struct ddb_info ddb_v6 = {
+ .subvendor = _subvend, .subdevice = _subdev, \
+ .driver_data = (unsigned long)&_driverdata }
+
+-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
++static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
+ DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
+ DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
+ DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.4-pax/drivers/media/dvb/dvb-core/dvb_demux.h
+--- linux-3.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/media/dvb/dvb-core/dvb_demux.h 2012-05-21 12:10:10.412048937 +0200
+@@ -73,7 +73,7 @@ struct dvb_demux_feed {
+ union {
+ dmx_ts_cb ts;
+ dmx_section_cb sec;
+- } cb;
++ } __no_const cb;
+
+ struct dvb_demux *demux;
+ void *priv;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/media/dvb/dvb-core/dvbdev.c linux-3.4-pax/drivers/media/dvb/dvb-core/dvbdev.c
+--- linux-3.4/drivers/media/dvb/dvb-core/dvbdev.c 2012-03-19 10:39:00.232049802 +0100
++++ linux-3.4-pax/drivers/media/dvb/dvb-core/dvbdev.c 2012-05-21 12:10:10.416048937 +0200
+@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
+ const struct dvb_device *template, void *priv, int type)
+ {
+ struct dvb_device *dvbdev;
+- struct file_operations *dvbdevfops;
++ file_operations_no_const *dvbdevfops;
+ struct device *clsdev;
+ int minor;
+ int id;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/media/dvb/dvb-usb/cxusb.c linux-3.4-pax/drivers/media/dvb/dvb-usb/cxusb.c
+--- linux-3.4/drivers/media/dvb/dvb-usb/cxusb.c 2012-03-19 10:39:00.260049801 +0100
++++ linux-3.4-pax/drivers/media/dvb/dvb-usb/cxusb.c 2012-05-21 12:10:10.416048937 +0200
+@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_di
+
+ struct dib0700_adapter_state {
+ int (*set_param_save) (struct dvb_frontend *);
+-};
++} __no_const;
+
+ static int dib7070_set_param_override(struct dvb_frontend *fe)
+ {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/media/dvb/dvb-usb/dw2102.c linux-3.4-pax/drivers/media/dvb/dvb-usb/dw2102.c
+--- linux-3.4/drivers/media/dvb/dvb-usb/dw2102.c 2012-03-19 10:39:00.276049799 +0100
++++ linux-3.4-pax/drivers/media/dvb/dvb-usb/dw2102.c 2012-05-21 12:10:10.420048937 +0200
+@@ -95,7 +95,7 @@ struct su3000_state {
+
+ struct s6x0_state {
+ int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
+-};
++} __no_const;
+
+ /* debug */
+ static int dvb_usb_dw2102_debug;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/media/dvb/frontends/dib3000.h linux-3.4-pax/drivers/media/dvb/frontends/dib3000.h
+--- linux-3.4/drivers/media/dvb/frontends/dib3000.h 2012-01-08 19:48:01.019472371 +0100
++++ linux-3.4-pax/drivers/media/dvb/frontends/dib3000.h 2012-05-21 12:10:10.424048937 +0200
+@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
+ int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
+ int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
+ int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
+-};
++} __no_const;
+
+ #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
+ extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/media/dvb/ngene/ngene-cards.c linux-3.4-pax/drivers/media/dvb/ngene/ngene-cards.c
+--- linux-3.4/drivers/media/dvb/ngene/ngene-cards.c 2012-05-21 11:33:06.651928158 +0200
++++ linux-3.4-pax/drivers/media/dvb/ngene/ngene-cards.c 2012-05-21 12:10:10.424048937 +0200
+@@ -478,7 +478,7 @@ static struct ngene_info ngene_info_m780
+
+ /****************************************************************************/
+
+-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
++static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
+ NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
+ NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
+ NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/media/radio/radio-cadet.c linux-3.4-pax/drivers/media/radio/radio-cadet.c
+--- linux-3.4/drivers/media/radio/radio-cadet.c 2011-10-24 12:48:29.795091586 +0200
++++ linux-3.4-pax/drivers/media/radio/radio-cadet.c 2012-05-21 12:10:10.428048937 +0200
+@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *f
+ unsigned char readbuf[RDS_BUFFER];
+ int i = 0;
+
++ if (count > RDS_BUFFER)
++ return -EFAULT;
+ mutex_lock(&dev->lock);
+ if (dev->rdsstat == 0) {
+ dev->rdsstat = 1;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/media/video/au0828/au0828.h linux-3.4-pax/drivers/media/video/au0828/au0828.h
+--- linux-3.4/drivers/media/video/au0828/au0828.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/media/video/au0828/au0828.h 2012-05-21 12:10:10.428048937 +0200
+@@ -191,7 +191,7 @@ struct au0828_dev {
+
+ /* I2C */
+ struct i2c_adapter i2c_adap;
+- struct i2c_algorithm i2c_algo;
++ i2c_algorithm_no_const i2c_algo;
+ struct i2c_client i2c_client;
+ u32 i2c_rc;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/media/video/cx88/cx88-alsa.c linux-3.4-pax/drivers/media/video/cx88/cx88-alsa.c
+--- linux-3.4/drivers/media/video/cx88/cx88-alsa.c 2012-03-19 10:39:00.928049727 +0100
++++ linux-3.4-pax/drivers/media/video/cx88/cx88-alsa.c 2012-05-21 12:10:10.440048938 +0200
+@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_
+ * Only boards with eeprom and byte 1 at eeprom=1 have it
+ */
+
+-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
++static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
+ {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
+ {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
+ {0, }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/media/video/omap/omap_vout.c linux-3.4-pax/drivers/media/video/omap/omap_vout.c
+--- linux-3.4/drivers/media/video/omap/omap_vout.c 2012-05-21 11:33:07.387928198 +0200
++++ linux-3.4-pax/drivers/media/video/omap/omap_vout.c 2012-05-21 12:10:10.448048939 +0200
+@@ -64,7 +64,6 @@ enum omap_vout_channels {
+ OMAP_VIDEO2,
+ };
+
+-static struct videobuf_queue_ops video_vbq_ops;
+ /* Variables configurable through module params*/
+ static u32 video1_numbuffers = 3;
+ static u32 video2_numbuffers = 3;
+@@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *f
+ {
+ struct videobuf_queue *q;
+ struct omap_vout_device *vout = NULL;
++ static struct videobuf_queue_ops video_vbq_ops = {
++ .buf_setup = omap_vout_buffer_setup,
++ .buf_prepare = omap_vout_buffer_prepare,
++ .buf_release = omap_vout_buffer_release,
++ .buf_queue = omap_vout_buffer_queue,
++ };
+
+ vout = video_drvdata(file);
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+@@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *f
+ vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+ q = &vout->vbq;
+- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
+- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
+- video_vbq_ops.buf_release = omap_vout_buffer_release;
+- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
+ spin_lock_init(&vout->vbq_lock);
+
+ videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-3.4-pax/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
+--- linux-3.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2012-05-21 12:10:10.452048939 +0200
+@@ -196,7 +196,7 @@ struct pvr2_hdw {
+
+ /* I2C stuff */
+ struct i2c_adapter i2c_adap;
+- struct i2c_algorithm i2c_algo;
++ i2c_algorithm_no_const i2c_algo;
+ pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
+ int i2c_cx25840_hack_state;
+ int i2c_linked;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/media/video/timblogiw.c linux-3.4-pax/drivers/media/video/timblogiw.c
+--- linux-3.4/drivers/media/video/timblogiw.c 2012-05-21 11:33:07.843928223 +0200
++++ linux-3.4-pax/drivers/media/video/timblogiw.c 2012-05-21 12:10:10.460048938 +0200
+@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
+
+ /* Platform device functions */
+
+-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
++static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
+ .vidioc_querycap = timblogiw_querycap,
+ .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
+ .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
+@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_
+ .vidioc_enum_framesizes = timblogiw_enum_framesizes,
+ };
+
+-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
++static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
+ .owner = THIS_MODULE,
+ .open = timblogiw_open,
+ .release = timblogiw_close,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/message/fusion/mptsas.c linux-3.4-pax/drivers/message/fusion/mptsas.c
+--- linux-3.4/drivers/message/fusion/mptsas.c 2012-03-19 10:39:01.380049740 +0100
++++ linux-3.4-pax/drivers/message/fusion/mptsas.c 2012-05-21 12:10:10.472048941 +0200
+@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devin
+ return 0;
+ }
+
++static inline void
++mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
++{
++ if (phy_info->port_details) {
++ phy_info->port_details->rphy = rphy;
++ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
++ ioc->name, rphy));
++ }
++
++ if (rphy) {
++ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
++ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
++ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
++ ioc->name, rphy, rphy->dev.release));
++ }
++}
++
+ /* no mutex */
+ static void
+ mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
+@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
+ return NULL;
+ }
+
+-static inline void
+-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
+-{
+- if (phy_info->port_details) {
+- phy_info->port_details->rphy = rphy;
+- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
+- ioc->name, rphy));
+- }
+-
+- if (rphy) {
+- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
+- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
+- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
+- ioc->name, rphy, rphy->dev.release));
+- }
+-}
+-
+ static inline struct sas_port *
+ mptsas_get_port(struct mptsas_phyinfo *phy_info)
+ {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/message/fusion/mptscsih.c linux-3.4-pax/drivers/message/fusion/mptscsih.c
+--- linux-3.4/drivers/message/fusion/mptscsih.c 2012-01-08 19:48:02.635472285 +0100
++++ linux-3.4-pax/drivers/message/fusion/mptscsih.c 2012-05-21 12:10:10.476048941 +0200
+@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
+
+ h = shost_priv(SChost);
+
+- if (h) {
+- if (h->info_kbuf == NULL)
+- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
+- return h->info_kbuf;
+- h->info_kbuf[0] = '\0';
++ if (!h)
++ return NULL;
+
+- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
+- h->info_kbuf[size-1] = '\0';
+- }
++ if (h->info_kbuf == NULL)
++ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
++ return h->info_kbuf;
++ h->info_kbuf[0] = '\0';
++
++ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
++ h->info_kbuf[size-1] = '\0';
+
+ return h->info_kbuf;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/message/i2o/i2o_proc.c linux-3.4-pax/drivers/message/i2o/i2o_proc.c
+--- linux-3.4/drivers/message/i2o/i2o_proc.c 2012-03-19 10:39:01.380049740 +0100
++++ linux-3.4-pax/drivers/message/i2o/i2o_proc.c 2012-05-21 12:10:10.480048940 +0200
+@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
+ "Array Controller Device"
+ };
+
+-static char *chtostr(u8 * chars, int n)
+-{
+- char tmp[256];
+- tmp[0] = 0;
+- return strncat(tmp, (char *)chars, n);
+-}
+-
+ static int i2o_report_query_status(struct seq_file *seq, int block_status,
+ char *group)
+ {
+@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
+
+ seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
+ seq_printf(seq, "%-#8x", ddm_table.module_id);
+- seq_printf(seq, "%-29s",
+- chtostr(ddm_table.module_name_version, 28));
++ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
+ seq_printf(seq, "%9d ", ddm_table.data_size);
+ seq_printf(seq, "%8d", ddm_table.code_size);
+
+@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
+
+ seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
+ seq_printf(seq, "%-#8x", dst->module_id);
+- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
+- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
++ seq_printf(seq, "%-.28s", dst->module_name_version);
++ seq_printf(seq, "%-.8s", dst->date);
+ seq_printf(seq, "%8d ", dst->module_size);
+ seq_printf(seq, "%8d ", dst->mpb_size);
+ seq_printf(seq, "0x%04x", dst->module_flags);
+@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
+ seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
+ seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
+ seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
+- seq_printf(seq, "Vendor info : %s\n",
+- chtostr((u8 *) (work32 + 2), 16));
+- seq_printf(seq, "Product info : %s\n",
+- chtostr((u8 *) (work32 + 6), 16));
+- seq_printf(seq, "Description : %s\n",
+- chtostr((u8 *) (work32 + 10), 16));
+- seq_printf(seq, "Product rev. : %s\n",
+- chtostr((u8 *) (work32 + 14), 8));
++ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
++ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
++ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
++ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
+
+ seq_printf(seq, "Serial number : ");
+ print_serial_number(seq, (u8 *) (work32 + 16),
+@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
+ }
+
+ seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
+- seq_printf(seq, "Module name : %s\n",
+- chtostr(result.module_name, 24));
+- seq_printf(seq, "Module revision : %s\n",
+- chtostr(result.module_rev, 8));
++ seq_printf(seq, "Module name : %.24s\n", result.module_name);
++ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
+
+ seq_printf(seq, "Serial number : ");
+ print_serial_number(seq, result.serial_number, sizeof(result) - 36);
+@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
+ return 0;
+ }
+
+- seq_printf(seq, "Device name : %s\n",
+- chtostr(result.device_name, 64));
+- seq_printf(seq, "Service name : %s\n",
+- chtostr(result.service_name, 64));
+- seq_printf(seq, "Physical name : %s\n",
+- chtostr(result.physical_location, 64));
+- seq_printf(seq, "Instance number : %s\n",
+- chtostr(result.instance_number, 4));
++ seq_printf(seq, "Device name : %.64s\n", result.device_name);
++ seq_printf(seq, "Service name : %.64s\n", result.service_name);
++ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
++ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
+
+ return 0;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/message/i2o/iop.c linux-3.4-pax/drivers/message/i2o/iop.c
+--- linux-3.4/drivers/message/i2o/iop.c 2011-10-24 12:48:30.663091543 +0200
++++ linux-3.4-pax/drivers/message/i2o/iop.c 2012-05-21 12:10:10.484048940 +0200
+@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
+
+ spin_lock_irqsave(&c->context_list_lock, flags);
+
+- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
+- atomic_inc(&c->context_list_counter);
++ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
++ atomic_inc_unchecked(&c->context_list_counter);
+
+- entry->context = atomic_read(&c->context_list_counter);
++ entry->context = atomic_read_unchecked(&c->context_list_counter);
+
+ list_add(&entry->list, &c->context_list);
+
+@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
+
+ #if BITS_PER_LONG == 64
+ spin_lock_init(&c->context_list_lock);
+- atomic_set(&c->context_list_counter, 0);
++ atomic_set_unchecked(&c->context_list_counter, 0);
+ INIT_LIST_HEAD(&c->context_list);
+ #endif
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/mfd/abx500-core.c linux-3.4-pax/drivers/mfd/abx500-core.c
+--- linux-3.4/drivers/mfd/abx500-core.c 2012-01-08 19:48:03.063472262 +0100
++++ linux-3.4-pax/drivers/mfd/abx500-core.c 2012-05-21 12:10:10.488048941 +0200
+@@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
+
+ struct abx500_device_entry {
+ struct list_head list;
+- struct abx500_ops ops;
++ abx500_ops_no_const ops;
+ struct device *dev;
+ };
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/mfd/janz-cmodio.c linux-3.4-pax/drivers/mfd/janz-cmodio.c
+--- linux-3.4/drivers/mfd/janz-cmodio.c 2012-03-19 10:39:01.432049738 +0100
++++ linux-3.4-pax/drivers/mfd/janz-cmodio.c 2012-05-21 12:10:10.488048941 +0200
+@@ -13,6 +13,7 @@
+
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#include <linux/slab.h>
+ #include <linux/init.h>
+ #include <linux/pci.h>
+ #include <linux/interrupt.h>
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.4-pax/drivers/misc/lis3lv02d/lis3lv02d.c
+--- linux-3.4/drivers/misc/lis3lv02d/lis3lv02d.c 2012-03-19 10:39:01.656049719 +0100
++++ linux-3.4-pax/drivers/misc/lis3lv02d/lis3lv02d.c 2012-05-21 12:10:10.492048941 +0200
+@@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(in
+ * the lid is closed. This leads to interrupts as soon as a little move
+ * is done.
+ */
+- atomic_inc(&lis3->count);
++ atomic_inc_unchecked(&lis3->count);
+
+ wake_up_interruptible(&lis3->misc_wait);
+ kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
+@@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct in
+ if (lis3->pm_dev)
+ pm_runtime_get_sync(lis3->pm_dev);
+
+- atomic_set(&lis3->count, 0);
++ atomic_set_unchecked(&lis3->count, 0);
+ return 0;
+ }
+
+@@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struc
+ add_wait_queue(&lis3->misc_wait, &wait);
+ while (true) {
+ set_current_state(TASK_INTERRUPTIBLE);
+- data = atomic_xchg(&lis3->count, 0);
++ data = atomic_xchg_unchecked(&lis3->count, 0);
+ if (data)
+ break;
+
+@@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(
+ struct lis3lv02d, miscdev);
+
+ poll_wait(file, &lis3->misc_wait, wait);
+- if (atomic_read(&lis3->count))
++ if (atomic_read_unchecked(&lis3->count))
+ return POLLIN | POLLRDNORM;
+ return 0;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.4-pax/drivers/misc/lis3lv02d/lis3lv02d.h
+--- linux-3.4/drivers/misc/lis3lv02d/lis3lv02d.h 2012-01-08 19:48:03.371472245 +0100
++++ linux-3.4-pax/drivers/misc/lis3lv02d/lis3lv02d.h 2012-05-21 12:10:10.492048941 +0200
+@@ -266,7 +266,7 @@ struct lis3lv02d {
+ struct input_polled_dev *idev; /* input device */
+ struct platform_device *pdev; /* platform device */
+ struct regulator_bulk_data regulators[2];
+- atomic_t count; /* interrupt count after last read */
++ atomic_unchecked_t count; /* interrupt count after last read */
+ union axis_conversion ac; /* hw -> logical axis */
+ int mapped_btns[3];
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/misc/sgi-gru/gruhandles.c linux-3.4-pax/drivers/misc/sgi-gru/gruhandles.c
+--- linux-3.4/drivers/misc/sgi-gru/gruhandles.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/misc/sgi-gru/gruhandles.c 2012-05-21 12:10:10.496048941 +0200
+@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
+ unsigned long nsec;
+
+ nsec = CLKS2NSEC(clks);
+- atomic_long_inc(&mcs_op_statistics[op].count);
+- atomic_long_add(nsec, &mcs_op_statistics[op].total);
++ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
++ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
+ if (mcs_op_statistics[op].max < nsec)
+ mcs_op_statistics[op].max = nsec;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/misc/sgi-gru/gruprocfs.c linux-3.4-pax/drivers/misc/sgi-gru/gruprocfs.c
+--- linux-3.4/drivers/misc/sgi-gru/gruprocfs.c 2012-03-19 10:39:01.660049719 +0100
++++ linux-3.4-pax/drivers/misc/sgi-gru/gruprocfs.c 2012-05-21 12:10:10.496048941 +0200
+@@ -32,9 +32,9 @@
+
+ #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
+
+-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
++static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
+ {
+- unsigned long val = atomic_long_read(v);
++ unsigned long val = atomic_long_read_unchecked(v);
+
+ seq_printf(s, "%16lu %s\n", val, id);
+ }
+@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
+
+ seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
+ for (op = 0; op < mcsop_last; op++) {
+- count = atomic_long_read(&mcs_op_statistics[op].count);
+- total = atomic_long_read(&mcs_op_statistics[op].total);
++ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
++ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
+ max = mcs_op_statistics[op].max;
+ seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
+ count ? total / count : 0, max);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/misc/sgi-gru/grutables.h linux-3.4-pax/drivers/misc/sgi-gru/grutables.h
+--- linux-3.4/drivers/misc/sgi-gru/grutables.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/misc/sgi-gru/grutables.h 2012-05-21 12:10:10.500048941 +0200
+@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
+ * GRU statistics.
+ */
+ struct gru_stats_s {
+- atomic_long_t vdata_alloc;
+- atomic_long_t vdata_free;
+- atomic_long_t gts_alloc;
+- atomic_long_t gts_free;
+- atomic_long_t gms_alloc;
+- atomic_long_t gms_free;
+- atomic_long_t gts_double_allocate;
+- atomic_long_t assign_context;
+- atomic_long_t assign_context_failed;
+- atomic_long_t free_context;
+- atomic_long_t load_user_context;
+- atomic_long_t load_kernel_context;
+- atomic_long_t lock_kernel_context;
+- atomic_long_t unlock_kernel_context;
+- atomic_long_t steal_user_context;
+- atomic_long_t steal_kernel_context;
+- atomic_long_t steal_context_failed;
+- atomic_long_t nopfn;
+- atomic_long_t asid_new;
+- atomic_long_t asid_next;
+- atomic_long_t asid_wrap;
+- atomic_long_t asid_reuse;
+- atomic_long_t intr;
+- atomic_long_t intr_cbr;
+- atomic_long_t intr_tfh;
+- atomic_long_t intr_spurious;
+- atomic_long_t intr_mm_lock_failed;
+- atomic_long_t call_os;
+- atomic_long_t call_os_wait_queue;
+- atomic_long_t user_flush_tlb;
+- atomic_long_t user_unload_context;
+- atomic_long_t user_exception;
+- atomic_long_t set_context_option;
+- atomic_long_t check_context_retarget_intr;
+- atomic_long_t check_context_unload;
+- atomic_long_t tlb_dropin;
+- atomic_long_t tlb_preload_page;
+- atomic_long_t tlb_dropin_fail_no_asid;
+- atomic_long_t tlb_dropin_fail_upm;
+- atomic_long_t tlb_dropin_fail_invalid;
+- atomic_long_t tlb_dropin_fail_range_active;
+- atomic_long_t tlb_dropin_fail_idle;
+- atomic_long_t tlb_dropin_fail_fmm;
+- atomic_long_t tlb_dropin_fail_no_exception;
+- atomic_long_t tfh_stale_on_fault;
+- atomic_long_t mmu_invalidate_range;
+- atomic_long_t mmu_invalidate_page;
+- atomic_long_t flush_tlb;
+- atomic_long_t flush_tlb_gru;
+- atomic_long_t flush_tlb_gru_tgh;
+- atomic_long_t flush_tlb_gru_zero_asid;
+-
+- atomic_long_t copy_gpa;
+- atomic_long_t read_gpa;
+-
+- atomic_long_t mesq_receive;
+- atomic_long_t mesq_receive_none;
+- atomic_long_t mesq_send;
+- atomic_long_t mesq_send_failed;
+- atomic_long_t mesq_noop;
+- atomic_long_t mesq_send_unexpected_error;
+- atomic_long_t mesq_send_lb_overflow;
+- atomic_long_t mesq_send_qlimit_reached;
+- atomic_long_t mesq_send_amo_nacked;
+- atomic_long_t mesq_send_put_nacked;
+- atomic_long_t mesq_page_overflow;
+- atomic_long_t mesq_qf_locked;
+- atomic_long_t mesq_qf_noop_not_full;
+- atomic_long_t mesq_qf_switch_head_failed;
+- atomic_long_t mesq_qf_unexpected_error;
+- atomic_long_t mesq_noop_unexpected_error;
+- atomic_long_t mesq_noop_lb_overflow;
+- atomic_long_t mesq_noop_qlimit_reached;
+- atomic_long_t mesq_noop_amo_nacked;
+- atomic_long_t mesq_noop_put_nacked;
+- atomic_long_t mesq_noop_page_overflow;
++ atomic_long_unchecked_t vdata_alloc;
++ atomic_long_unchecked_t vdata_free;
++ atomic_long_unchecked_t gts_alloc;
++ atomic_long_unchecked_t gts_free;
++ atomic_long_unchecked_t gms_alloc;
++ atomic_long_unchecked_t gms_free;
++ atomic_long_unchecked_t gts_double_allocate;
++ atomic_long_unchecked_t assign_context;
++ atomic_long_unchecked_t assign_context_failed;
++ atomic_long_unchecked_t free_context;
++ atomic_long_unchecked_t load_user_context;
++ atomic_long_unchecked_t load_kernel_context;
++ atomic_long_unchecked_t lock_kernel_context;
++ atomic_long_unchecked_t unlock_kernel_context;
++ atomic_long_unchecked_t steal_user_context;
++ atomic_long_unchecked_t steal_kernel_context;
++ atomic_long_unchecked_t steal_context_failed;
++ atomic_long_unchecked_t nopfn;
++ atomic_long_unchecked_t asid_new;
++ atomic_long_unchecked_t asid_next;
++ atomic_long_unchecked_t asid_wrap;
++ atomic_long_unchecked_t asid_reuse;
++ atomic_long_unchecked_t intr;
++ atomic_long_unchecked_t intr_cbr;
++ atomic_long_unchecked_t intr_tfh;
++ atomic_long_unchecked_t intr_spurious;
++ atomic_long_unchecked_t intr_mm_lock_failed;
++ atomic_long_unchecked_t call_os;
++ atomic_long_unchecked_t call_os_wait_queue;
++ atomic_long_unchecked_t user_flush_tlb;
++ atomic_long_unchecked_t user_unload_context;
++ atomic_long_unchecked_t user_exception;
++ atomic_long_unchecked_t set_context_option;
++ atomic_long_unchecked_t check_context_retarget_intr;
++ atomic_long_unchecked_t check_context_unload;
++ atomic_long_unchecked_t tlb_dropin;
++ atomic_long_unchecked_t tlb_preload_page;
++ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
++ atomic_long_unchecked_t tlb_dropin_fail_upm;
++ atomic_long_unchecked_t tlb_dropin_fail_invalid;
++ atomic_long_unchecked_t tlb_dropin_fail_range_active;
++ atomic_long_unchecked_t tlb_dropin_fail_idle;
++ atomic_long_unchecked_t tlb_dropin_fail_fmm;
++ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
++ atomic_long_unchecked_t tfh_stale_on_fault;
++ atomic_long_unchecked_t mmu_invalidate_range;
++ atomic_long_unchecked_t mmu_invalidate_page;
++ atomic_long_unchecked_t flush_tlb;
++ atomic_long_unchecked_t flush_tlb_gru;
++ atomic_long_unchecked_t flush_tlb_gru_tgh;
++ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
++
++ atomic_long_unchecked_t copy_gpa;
++ atomic_long_unchecked_t read_gpa;
++
++ atomic_long_unchecked_t mesq_receive;
++ atomic_long_unchecked_t mesq_receive_none;
++ atomic_long_unchecked_t mesq_send;
++ atomic_long_unchecked_t mesq_send_failed;
++ atomic_long_unchecked_t mesq_noop;
++ atomic_long_unchecked_t mesq_send_unexpected_error;
++ atomic_long_unchecked_t mesq_send_lb_overflow;
++ atomic_long_unchecked_t mesq_send_qlimit_reached;
++ atomic_long_unchecked_t mesq_send_amo_nacked;
++ atomic_long_unchecked_t mesq_send_put_nacked;
++ atomic_long_unchecked_t mesq_page_overflow;
++ atomic_long_unchecked_t mesq_qf_locked;
++ atomic_long_unchecked_t mesq_qf_noop_not_full;
++ atomic_long_unchecked_t mesq_qf_switch_head_failed;
++ atomic_long_unchecked_t mesq_qf_unexpected_error;
++ atomic_long_unchecked_t mesq_noop_unexpected_error;
++ atomic_long_unchecked_t mesq_noop_lb_overflow;
++ atomic_long_unchecked_t mesq_noop_qlimit_reached;
++ atomic_long_unchecked_t mesq_noop_amo_nacked;
++ atomic_long_unchecked_t mesq_noop_put_nacked;
++ atomic_long_unchecked_t mesq_noop_page_overflow;
+
+ };
+
+@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
+ tghop_invalidate, mcsop_last};
+
+ struct mcs_op_statistic {
+- atomic_long_t count;
+- atomic_long_t total;
++ atomic_long_unchecked_t count;
++ atomic_long_unchecked_t total;
+ unsigned long max;
+ };
+
+@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
+
+ #define STAT(id) do { \
+ if (gru_options & OPT_STATS) \
+- atomic_long_inc(&gru_stats.id); \
++ atomic_long_inc_unchecked(&gru_stats.id); \
+ } while (0)
+
+ #ifdef CONFIG_SGI_GRU_DEBUG
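The SGI GRU hunks above convert pure statistics counters from atomic_long_t to atomic_long_unchecked_t, so that counters which may legitimately wrap are exempt from PaX's reference-count overflow trapping. The following stand-alone C sketch is not part of the patch; the type and helper names are hypothetical stand-ins that only illustrate the idea of an "unchecked" counter that is updated atomically but wraps silently.

#include <stdio.h>

/* Hypothetical stand-in for the kernel's atomic_long_unchecked_t:
 * atomically updated, but deliberately exempt from any overflow
 * trapping, because wrapping is harmless for pure statistics. */
typedef struct {
    long counter;
} stats_counter_t;

static inline void stats_inc(stats_counter_t *v)
{
    /* GCC/Clang builtin; wraps on overflow without trapping. */
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline long stats_read(const stats_counter_t *v)
{
    return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

int main(void)
{
    stats_counter_t tlb_dropin = { 0 };

    for (int i = 0; i < 5; i++)
        stats_inc(&tlb_dropin);

    printf("tlb_dropin = %ld\n", stats_read(&tlb_dropin));
    return 0;
}

The same split explains the oprofile hunks further down, where the lost-sample counters get the same unchecked treatment.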
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/misc/sgi-xp/xpc.h linux-3.4-pax/drivers/misc/sgi-xp/xpc.h
+--- linux-3.4/drivers/misc/sgi-xp/xpc.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/misc/sgi-xp/xpc.h 2012-05-21 12:10:10.500048941 +0200
+@@ -835,6 +835,7 @@ struct xpc_arch_operations {
+ void (*received_payload) (struct xpc_channel *, void *);
+ void (*notify_senders_of_disconnect) (struct xpc_channel *);
+ };
++typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
+
+ /* struct xpc_partition act_state values (for XPC HB) */
+
+@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_regis
+ /* found in xpc_main.c */
+ extern struct device *xpc_part;
+ extern struct device *xpc_chan;
+-extern struct xpc_arch_operations xpc_arch_ops;
++extern xpc_arch_operations_no_const xpc_arch_ops;
+ extern int xpc_disengage_timelimit;
+ extern int xpc_disengage_timedout;
+ extern int xpc_activate_IRQ_rcvd;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/misc/sgi-xp/xpc_main.c linux-3.4-pax/drivers/misc/sgi-xp/xpc_main.c
+--- linux-3.4/drivers/misc/sgi-xp/xpc_main.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/misc/sgi-xp/xpc_main.c 2012-05-21 12:10:10.504048942 +0200
+@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_not
+ .notifier_call = xpc_system_die,
+ };
+
+-struct xpc_arch_operations xpc_arch_ops;
++xpc_arch_operations_no_const xpc_arch_ops;
+
+ /*
+ * Timer function to enforce the timelimit on the partition disengage.
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/misc/sgi-xp/xp.h linux-3.4-pax/drivers/misc/sgi-xp/xp.h
+--- linux-3.4/drivers/misc/sgi-xp/xp.h 2012-05-21 11:33:08.763928272 +0200
++++ linux-3.4-pax/drivers/misc/sgi-xp/xp.h 2012-05-21 12:10:10.508048942 +0200
+@@ -288,7 +288,7 @@ struct xpc_interface {
+ xpc_notify_func, void *);
+ void (*received) (short, int, void *);
+ enum xp_retval (*partid_to_nasids) (short, void *);
+-};
++} __no_const;
+
+ extern struct xpc_interface xpc_interface;
+
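The __no_const annotation and the *_no_const typedefs introduced here exist because PaX's constify plugin places structures consisting only of function pointers in read-only memory; ops tables that genuinely have to be filled in or swapped at runtime (such as xpc_arch_ops, which is assigned an implementation during init) are opted out. A rough illustration of the distinction, with made-up names and outside the kernel:

#include <stdio.h>

struct dev_ops {
    int (*open)(void);
    int (*close)(void);
};

static int my_open(void)  { puts("open");  return 0; }
static int my_close(void) { puts("close"); return 0; }

/* Fully known at build time: can be const and live in read-only
 * memory, which is what constification enforces for ops-only structs. */
static const struct dev_ops fixed_ops = {
    .open  = my_open,
    .close = my_close,
};

/* Chosen at runtime, so it must stay writable -- the analogue of the
 * structures the patch marks __no_const instead of constifying. */
static struct dev_ops runtime_ops;

int main(void)
{
    runtime_ops = fixed_ops;   /* late binding of the ops table */
    runtime_ops.open();
    runtime_ops.close();
    return 0;
}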
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/mmc/host/sdhci-pci.c linux-3.4-pax/drivers/mmc/host/sdhci-pci.c
+--- linux-3.4/drivers/mmc/host/sdhci-pci.c 2012-05-21 11:33:08.915928281 +0200
++++ linux-3.4-pax/drivers/mmc/host/sdhci-pci.c 2012-05-21 12:10:10.508048942 +0200
+@@ -652,7 +652,7 @@ static const struct sdhci_pci_fixes sdhc
+ .probe = via_probe,
+ };
+
+-static const struct pci_device_id pci_ids[] __devinitdata = {
++static const struct pci_device_id pci_ids[] __devinitconst = {
+ {
+ .vendor = PCI_VENDOR_ID_RICOH,
+ .device = PCI_DEVICE_ID_RICOH_R5C822,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/mtd/devices/doc2000.c linux-3.4-pax/drivers/mtd/devices/doc2000.c
+--- linux-3.4/drivers/mtd/devices/doc2000.c 2012-05-21 11:33:09.043928288 +0200
++++ linux-3.4-pax/drivers/mtd/devices/doc2000.c 2012-05-21 12:10:10.512048942 +0200
+@@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mt
+
+ /* The ECC will not be calculated correctly if less than 512 is written */
+ /* DBB-
+- if (len != 0x200 && eccbuf)
++ if (len != 0x200)
+ printk(KERN_WARNING
+ "ECC needs a full sector write (adr: %lx size %lx)\n",
+ (long) to, (long) len);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/mtd/nand/denali.c linux-3.4-pax/drivers/mtd/nand/denali.c
+--- linux-3.4/drivers/mtd/nand/denali.c 2012-05-21 11:33:09.411928308 +0200
++++ linux-3.4-pax/drivers/mtd/nand/denali.c 2012-05-21 12:10:10.516048942 +0200
+@@ -26,6 +26,7 @@
+ #include <linux/pci.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/module.h>
++#include <linux/slab.h>
+
+ #include "denali.h"
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/mtd/nftlmount.c linux-3.4-pax/drivers/mtd/nftlmount.c
+--- linux-3.4/drivers/mtd/nftlmount.c 2012-03-19 10:39:02.028049709 +0100
++++ linux-3.4-pax/drivers/mtd/nftlmount.c 2012-05-21 12:10:10.520048942 +0200
+@@ -24,6 +24,7 @@
+ #include <asm/errno.h>
+ #include <linux/delay.h>
+ #include <linux/slab.h>
++#include <linux/sched.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/nand.h>
+ #include <linux/mtd/nftl.h>
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/atheros/atlx/atl2.c linux-3.4-pax/drivers/net/ethernet/atheros/atlx/atl2.c
+--- linux-3.4/drivers/net/ethernet/atheros/atlx/atl2.c 2012-05-21 11:33:10.451928365 +0200
++++ linux-3.4-pax/drivers/net/ethernet/atheros/atlx/atl2.c 2012-05-21 12:10:10.524048943 +0200
+@@ -2859,7 +2859,7 @@ static void atl2_force_ps(struct atl2_hw
+ */
+
+ #define ATL2_PARAM(X, desc) \
+- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
++ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
+ MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
+ MODULE_PARM_DESC(X, desc);
+ #else
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h linux-3.4-pax/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+--- linux-3.4/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 2012-05-21 11:33:10.715928379 +0200
++++ linux-3.4-pax/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 2012-05-21 12:10:10.528048943 +0200
+@@ -483,7 +483,7 @@ struct bnx2x_rx_mode_obj {
+
+ int (*wait_comp)(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p);
+-};
++} __no_const;
+
+ /********************** Set multicast group ***********************************/
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/broadcom/tg3.h linux-3.4-pax/drivers/net/ethernet/broadcom/tg3.h
+--- linux-3.4/drivers/net/ethernet/broadcom/tg3.h 2012-05-21 11:33:10.779928382 +0200
++++ linux-3.4-pax/drivers/net/ethernet/broadcom/tg3.h 2012-05-21 12:10:10.532048943 +0200
+@@ -140,6 +140,7 @@
+ #define CHIPREV_ID_5750_A0 0x4000
+ #define CHIPREV_ID_5750_A1 0x4001
+ #define CHIPREV_ID_5750_A3 0x4003
++#define CHIPREV_ID_5750_C1 0x4201
+ #define CHIPREV_ID_5750_C2 0x4202
+ #define CHIPREV_ID_5752_A0_HW 0x5000
+ #define CHIPREV_ID_5752_A0 0x6000
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/chelsio/cxgb3/l2t.h linux-3.4-pax/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+--- linux-3.4/drivers/net/ethernet/chelsio/cxgb3/l2t.h 2012-03-19 10:39:02.864049661 +0100
++++ linux-3.4-pax/drivers/net/ethernet/chelsio/cxgb3/l2t.h 2012-05-21 12:10:10.540048944 +0200
+@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)
+ */
+ struct l2t_skb_cb {
+ arp_failure_handler_func arp_failure_handler;
+-};
++} __no_const;
+
+ #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/dec/tulip/de4x5.c linux-3.4-pax/drivers/net/ethernet/dec/tulip/de4x5.c
+--- linux-3.4/drivers/net/ethernet/dec/tulip/de4x5.c 2012-05-21 11:33:11.043928397 +0200
++++ linux-3.4-pax/drivers/net/ethernet/dec/tulip/de4x5.c 2012-05-21 12:10:10.556048945 +0200
+@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, stru
+ for (i=0; i<ETH_ALEN; i++) {
+ tmp.addr[i] = dev->dev_addr[i];
+ }
+- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
++ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
+ break;
+
+ case DE4X5_SET_HWADDR: /* Set the hardware address */
+@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, stru
+ spin_lock_irqsave(&lp->lock, flags);
+ memcpy(&statbuf, &lp->pktStats, ioc->len);
+ spin_unlock_irqrestore(&lp->lock, flags);
+- if (copy_to_user(ioc->data, &statbuf, ioc->len))
++ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
+ return -EFAULT;
+ break;
+ }
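The de4x5 ioctl hunks add a length check before copy_to_user(), so a caller-controlled ioc->len can no longer copy past the end of the on-stack buffer. The same pattern appears later for the parport procfs handlers. Sketched here with a hypothetical copy helper instead of the real kernel API:

#include <stddef.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for copy_to_user(): returns 0 on success. */
static int copy_out(void *dst, const void *src, size_t len)
{
    memcpy(dst, src, len);
    return 0;
}

/* The caller-controlled 'len' is clamped against the kernel buffer
 * size *before* the copy -- the shape of the added
 * "ioc->len > sizeof tmp.addr" checks. */
static int get_hwaddr(void *user_buf, size_t len)
{
    unsigned char addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

    if (len > sizeof(addr))
        return -EFAULT;          /* reject oversized requests outright */
    if (copy_out(user_buf, addr, len))
        return -EFAULT;
    return 0;
}

int main(void)
{
    unsigned char out[6];

    printf("ok=%d\n", get_hwaddr(out, sizeof(out)) == 0);
    printf("rejected=%d\n", get_hwaddr(out, 64) == -EFAULT);
    return 0;
}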
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/dec/tulip/eeprom.c linux-3.4-pax/drivers/net/ethernet/dec/tulip/eeprom.c
+--- linux-3.4/drivers/net/ethernet/dec/tulip/eeprom.c 2012-05-21 11:33:11.067928398 +0200
++++ linux-3.4-pax/drivers/net/ethernet/dec/tulip/eeprom.c 2012-05-21 12:10:10.560048946 +0200
+@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups
+ {NULL}};
+
+
+-static const char *block_name[] __devinitdata = {
++static const char *block_name[] __devinitconst = {
+ "21140 non-MII",
+ "21140 MII PHY",
+ "21142 Serial PHY",
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/dec/tulip/winbond-840.c linux-3.4-pax/drivers/net/ethernet/dec/tulip/winbond-840.c
+--- linux-3.4/drivers/net/ethernet/dec/tulip/winbond-840.c 2012-05-21 11:33:11.115928401 +0200
++++ linux-3.4-pax/drivers/net/ethernet/dec/tulip/winbond-840.c 2012-05-21 12:10:10.560048946 +0200
+@@ -236,7 +236,7 @@ struct pci_id_info {
+ int drv_flags; /* Driver use, intended as capability flags. */
+ };
+
+-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
++static const struct pci_id_info pci_id_tbl[] __devinitconst = {
+ { /* Sometime a Level-One switch card. */
+ "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
+ { "Winbond W89c840", CanHaveMII | HasBrokenTx},
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/dlink/sundance.c linux-3.4-pax/drivers/net/ethernet/dlink/sundance.c
+--- linux-3.4/drivers/net/ethernet/dlink/sundance.c 2012-05-21 11:33:11.147928402 +0200
++++ linux-3.4-pax/drivers/net/ethernet/dlink/sundance.c 2012-05-21 12:10:10.564048946 +0200
+@@ -218,7 +218,7 @@ enum {
+ struct pci_id_info {
+ const char *name;
+ };
+-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
++static const struct pci_id_info pci_id_tbl[] __devinitconst = {
+ {"D-Link DFE-550TX FAST Ethernet Adapter"},
+ {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
+ {"D-Link DFE-580TX 4 port Server Adapter"},
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/emulex/benet/be_main.c linux-3.4-pax/drivers/net/ethernet/emulex/benet/be_main.c
+--- linux-3.4/drivers/net/ethernet/emulex/benet/be_main.c 2012-05-21 11:33:11.191928406 +0200
++++ linux-3.4-pax/drivers/net/ethernet/emulex/benet/be_main.c 2012-05-21 12:10:10.568048946 +0200
+@@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *ac
+
+ if (wrapped)
+ newacc += 65536;
+- ACCESS_ONCE(*acc) = newacc;
++ ACCESS_ONCE_RW(*acc) = newacc;
+ }
+
+ void be_parse_stats(struct be_adapter *adapter)
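be_main.c is the first of many places in this section where a store through ACCESS_ONCE() becomes ACCESS_ONCE_RW(); the ath9k descriptor-setup hunks below do the same wholesale. The reason is that the PaX patch makes the plain ACCESS_ONCE() go through a const-qualified volatile lvalue, so writes need the explicitly read-write variant. The macro definitions below are reconstructed for illustration only (the authoritative ones are in the patch's compiler.h changes, not shown here):

#include <stdio.h>

/* Reconstructed for illustration -- not copied from the patch.
 * Reads go through a const volatile lvalue, so an accidental store
 * fails to compile; the _RW form is the deliberate opt-in for stores. */
#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
    unsigned int acc = 0;

    unsigned int snap = ACCESS_ONCE(acc);   /* read: fine */
    ACCESS_ONCE_RW(acc) = snap + 65536;     /* write: needs the RW form */
    /* ACCESS_ONCE(acc) = 1;  would not compile: assignment to const lvalue */

    printf("acc = %u\n", acc);
    return 0;
}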
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/faraday/ftgmac100.c linux-3.4-pax/drivers/net/ethernet/faraday/ftgmac100.c
+--- linux-3.4/drivers/net/ethernet/faraday/ftgmac100.c 2012-05-21 11:33:11.199928405 +0200
++++ linux-3.4-pax/drivers/net/ethernet/faraday/ftgmac100.c 2012-05-21 12:10:10.572048945 +0200
+@@ -31,6 +31,8 @@
+ #include <linux/netdevice.h>
+ #include <linux/phy.h>
+ #include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/irqreturn.h>
+ #include <net/ip.h>
+
+ #include "ftgmac100.h"
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/faraday/ftmac100.c linux-3.4-pax/drivers/net/ethernet/faraday/ftmac100.c
+--- linux-3.4/drivers/net/ethernet/faraday/ftmac100.c 2012-05-21 11:33:11.203928405 +0200
++++ linux-3.4-pax/drivers/net/ethernet/faraday/ftmac100.c 2012-05-21 12:10:10.576048945 +0200
+@@ -31,6 +31,8 @@
+ #include <linux/module.h>
+ #include <linux/netdevice.h>
+ #include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/irqreturn.h>
+
+ #include "ftmac100.h"
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/fealnx.c linux-3.4-pax/drivers/net/ethernet/fealnx.c
+--- linux-3.4/drivers/net/ethernet/fealnx.c 2012-05-21 11:33:11.207928406 +0200
++++ linux-3.4-pax/drivers/net/ethernet/fealnx.c 2012-05-21 12:10:10.576048945 +0200
+@@ -150,7 +150,7 @@ struct chip_info {
+ int flags;
+ };
+
+-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
++static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
+ { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
+ { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
+ { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/intel/e1000e/hw.h linux-3.4-pax/drivers/net/ethernet/intel/e1000e/hw.h
+--- linux-3.4/drivers/net/ethernet/intel/e1000e/hw.h 2012-05-21 11:33:11.643928429 +0200
++++ linux-3.4-pax/drivers/net/ethernet/intel/e1000e/hw.h 2012-05-21 12:10:10.580048946 +0200
+@@ -784,6 +784,7 @@ struct e1000_mac_operations {
+ void (*config_collision_dist)(struct e1000_hw *);
+ s32 (*read_mac_addr)(struct e1000_hw *);
+ };
++typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
+
+ /*
+ * When to use various PHY register access functions:
+@@ -824,6 +825,7 @@ struct e1000_phy_operations {
+ void (*power_up)(struct e1000_hw *);
+ void (*power_down)(struct e1000_hw *);
+ };
++typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
+
+ /* Function pointers for the NVM. */
+ struct e1000_nvm_operations {
+@@ -836,9 +838,10 @@ struct e1000_nvm_operations {
+ s32 (*validate)(struct e1000_hw *);
+ s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+ };
++typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
+
+ struct e1000_mac_info {
+- struct e1000_mac_operations ops;
++ e1000_mac_operations_no_const ops;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+
+@@ -879,7 +882,7 @@ struct e1000_mac_info {
+ };
+
+ struct e1000_phy_info {
+- struct e1000_phy_operations ops;
++ e1000_phy_operations_no_const ops;
+
+ enum e1000_phy_type type;
+
+@@ -913,7 +916,7 @@ struct e1000_phy_info {
+ };
+
+ struct e1000_nvm_info {
+- struct e1000_nvm_operations ops;
++ e1000_nvm_operations_no_const ops;
+
+ enum e1000_nvm_type type;
+ enum e1000_nvm_override override;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/intel/igb/e1000_hw.h linux-3.4-pax/drivers/net/ethernet/intel/igb/e1000_hw.h
+--- linux-3.4/drivers/net/ethernet/intel/igb/e1000_hw.h 2012-03-19 10:39:03.032049652 +0100
++++ linux-3.4-pax/drivers/net/ethernet/intel/igb/e1000_hw.h 2012-05-21 12:10:10.584048946 +0200
+@@ -314,6 +314,7 @@ struct e1000_mac_operations {
+ s32 (*read_mac_addr)(struct e1000_hw *);
+ s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
+ };
++typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
+
+ struct e1000_phy_operations {
+ s32 (*acquire)(struct e1000_hw *);
+@@ -330,6 +331,7 @@ struct e1000_phy_operations {
+ s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
+ s32 (*write_reg)(struct e1000_hw *, u32, u16);
+ };
++typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
+
+ struct e1000_nvm_operations {
+ s32 (*acquire)(struct e1000_hw *);
+@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
+ s32 (*update)(struct e1000_hw *);
+ s32 (*validate)(struct e1000_hw *);
+ };
++typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
+
+ struct e1000_info {
+ s32 (*get_invariants)(struct e1000_hw *);
+@@ -350,7 +353,7 @@ struct e1000_info {
+ extern const struct e1000_info e1000_82575_info;
+
+ struct e1000_mac_info {
+- struct e1000_mac_operations ops;
++ e1000_mac_operations_no_const ops;
+
+ u8 addr[6];
+ u8 perm_addr[6];
+@@ -388,7 +391,7 @@ struct e1000_mac_info {
+ };
+
+ struct e1000_phy_info {
+- struct e1000_phy_operations ops;
++ e1000_phy_operations_no_const ops;
+
+ enum e1000_phy_type type;
+
+@@ -423,7 +426,7 @@ struct e1000_phy_info {
+ };
+
+ struct e1000_nvm_info {
+- struct e1000_nvm_operations ops;
++ e1000_nvm_operations_no_const ops;
+ enum e1000_nvm_type type;
+ enum e1000_nvm_override override;
+
+@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
+ s32 (*check_for_ack)(struct e1000_hw *, u16);
+ s32 (*check_for_rst)(struct e1000_hw *, u16);
+ };
++typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
+
+ struct e1000_mbx_stats {
+ u32 msgs_tx;
+@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
+ };
+
+ struct e1000_mbx_info {
+- struct e1000_mbx_operations ops;
++ e1000_mbx_operations_no_const ops;
+ struct e1000_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/intel/igbvf/vf.h linux-3.4-pax/drivers/net/ethernet/intel/igbvf/vf.h
+--- linux-3.4/drivers/net/ethernet/intel/igbvf/vf.h 2012-03-19 10:39:03.076049650 +0100
++++ linux-3.4-pax/drivers/net/ethernet/intel/igbvf/vf.h 2012-05-21 12:10:10.584048946 +0200
+@@ -189,9 +189,10 @@ struct e1000_mac_operations {
+ s32 (*read_mac_addr)(struct e1000_hw *);
+ s32 (*set_vfta)(struct e1000_hw *, u16, bool);
+ };
++typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
+
+ struct e1000_mac_info {
+- struct e1000_mac_operations ops;
++ e1000_mac_operations_no_const ops;
+ u8 addr[6];
+ u8 perm_addr[6];
+
+@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
+ s32 (*check_for_ack)(struct e1000_hw *);
+ s32 (*check_for_rst)(struct e1000_hw *);
+ };
++typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
+
+ struct e1000_mbx_stats {
+ u32 msgs_tx;
+@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
+ };
+
+ struct e1000_mbx_info {
+- struct e1000_mbx_operations ops;
++ e1000_mbx_operations_no_const ops;
+ struct e1000_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h linux-3.4-pax/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+--- linux-3.4/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h 2012-05-21 11:33:11.943928446 +0200
++++ linux-3.4-pax/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h 2012-05-21 12:10:10.588048946 +0200
+@@ -2710,6 +2710,7 @@ struct ixgbe_eeprom_operations {
+ s32 (*update_checksum)(struct ixgbe_hw *);
+ u16 (*calc_checksum)(struct ixgbe_hw *);
+ };
++typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
+
+ struct ixgbe_mac_operations {
+ s32 (*init_hw)(struct ixgbe_hw *);
+@@ -2773,6 +2774,7 @@ struct ixgbe_mac_operations {
+ /* Manageability interface */
+ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
+ };
++typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
+
+ struct ixgbe_phy_operations {
+ s32 (*identify)(struct ixgbe_hw *);
+@@ -2792,9 +2794,10 @@ struct ixgbe_phy_operations {
+ s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+ s32 (*check_overtemp)(struct ixgbe_hw *);
+ };
++typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
+
+ struct ixgbe_eeprom_info {
+- struct ixgbe_eeprom_operations ops;
++ ixgbe_eeprom_operations_no_const ops;
+ enum ixgbe_eeprom_type type;
+ u32 semaphore_delay;
+ u16 word_size;
+@@ -2804,7 +2807,7 @@ struct ixgbe_eeprom_info {
+
+ #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
+ struct ixgbe_mac_info {
+- struct ixgbe_mac_operations ops;
++ ixgbe_mac_operations_no_const ops;
+ enum ixgbe_mac_type type;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+@@ -2832,7 +2835,7 @@ struct ixgbe_mac_info {
+ };
+
+ struct ixgbe_phy_info {
+- struct ixgbe_phy_operations ops;
++ ixgbe_phy_operations_no_const ops;
+ struct mdio_if_info mdio;
+ enum ixgbe_phy_type type;
+ u32 id;
+@@ -2860,6 +2863,7 @@ struct ixgbe_mbx_operations {
+ s32 (*check_for_ack)(struct ixgbe_hw *, u16);
+ s32 (*check_for_rst)(struct ixgbe_hw *, u16);
+ };
++typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
+
+ struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+@@ -2871,7 +2875,7 @@ struct ixgbe_mbx_stats {
+ };
+
+ struct ixgbe_mbx_info {
+- struct ixgbe_mbx_operations ops;
++ ixgbe_mbx_operations_no_const ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/intel/ixgbevf/vf.h linux-3.4-pax/drivers/net/ethernet/intel/ixgbevf/vf.h
+--- linux-3.4/drivers/net/ethernet/intel/ixgbevf/vf.h 2012-03-19 10:39:03.144049646 +0100
++++ linux-3.4-pax/drivers/net/ethernet/intel/ixgbevf/vf.h 2012-05-21 12:10:10.592048946 +0200
+@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
+ s32 (*clear_vfta)(struct ixgbe_hw *);
+ s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+ };
++typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
+
+ enum ixgbe_mac_type {
+ ixgbe_mac_unknown = 0,
+@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
+ };
+
+ struct ixgbe_mac_info {
+- struct ixgbe_mac_operations ops;
++ ixgbe_mac_operations_no_const ops;
+ u8 addr[6];
+ u8 perm_addr[6];
+
+@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
+ s32 (*check_for_ack)(struct ixgbe_hw *);
+ s32 (*check_for_rst)(struct ixgbe_hw *);
+ };
++typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
+
+ struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
+ };
+
+ struct ixgbe_mbx_info {
+- struct ixgbe_mbx_operations ops;
++ ixgbe_mbx_operations_no_const ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 udelay;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/mellanox/mlx4/main.c linux-3.4-pax/drivers/net/ethernet/mellanox/mlx4/main.c
+--- linux-3.4/drivers/net/ethernet/mellanox/mlx4/main.c 2012-05-21 11:33:12.051928452 +0200
++++ linux-3.4-pax/drivers/net/ethernet/mellanox/mlx4/main.c 2012-05-21 12:10:10.596048947 +0200
+@@ -41,6 +41,7 @@
+ #include <linux/slab.h>
+ #include <linux/io-mapping.h>
+ #include <linux/delay.h>
++#include <linux/sched.h>
+
+ #include <linux/mlx4/device.h>
+ #include <linux/mlx4/doorbell.h>
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/neterion/vxge/vxge-config.h linux-3.4-pax/drivers/net/ethernet/neterion/vxge/vxge-config.h
+--- linux-3.4/drivers/net/ethernet/neterion/vxge/vxge-config.h 2012-01-08 19:48:08.607471966 +0100
++++ linux-3.4-pax/drivers/net/ethernet/neterion/vxge/vxge-config.h 2012-05-21 12:10:10.600048947 +0200
+@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
+ void (*link_down)(struct __vxge_hw_device *devh);
+ void (*crit_err)(struct __vxge_hw_device *devh,
+ enum vxge_hw_event type, u64 ext_data);
+-};
++} __no_const;
+
+ /*
+ * struct __vxge_hw_blockpool_entry - Block private data structure
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/neterion/vxge/vxge-traffic.h linux-3.4-pax/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
+--- linux-3.4/drivers/net/ethernet/neterion/vxge/vxge-traffic.h 2012-01-08 19:48:08.667471962 +0100
++++ linux-3.4-pax/drivers/net/ethernet/neterion/vxge/vxge-traffic.h 2012-05-21 12:10:10.608048947 +0200
+@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
+ struct vxge_hw_mempool_dma *dma_object,
+ u32 index,
+ u32 is_last);
+-};
++} __no_const;
+
+ #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
+ ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/realtek/r8169.c linux-3.4-pax/drivers/net/ethernet/realtek/r8169.c
+--- linux-3.4/drivers/net/ethernet/realtek/r8169.c 2012-05-21 11:33:12.627928484 +0200
++++ linux-3.4-pax/drivers/net/ethernet/realtek/r8169.c 2012-05-21 12:10:10.612048947 +0200
+@@ -708,17 +708,17 @@ struct rtl8169_private {
+ struct mdio_ops {
+ void (*write)(void __iomem *, int, int);
+ int (*read)(void __iomem *, int);
+- } mdio_ops;
++ } __no_const mdio_ops;
+
+ struct pll_power_ops {
+ void (*down)(struct rtl8169_private *);
+ void (*up)(struct rtl8169_private *);
+- } pll_power_ops;
++ } __no_const pll_power_ops;
+
+ struct jumbo_ops {
+ void (*enable)(struct rtl8169_private *);
+ void (*disable)(struct rtl8169_private *);
+- } jumbo_ops;
++ } __no_const jumbo_ops;
+
+ int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
+ int (*get_settings)(struct net_device *, struct ethtool_cmd *);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/sis/sis190.c linux-3.4-pax/drivers/net/ethernet/sis/sis190.c
+--- linux-3.4/drivers/net/ethernet/sis/sis190.c 2012-05-21 11:33:12.859928495 +0200
++++ linux-3.4-pax/drivers/net/ethernet/sis/sis190.c 2012-05-21 12:10:10.616048948 +0200
+@@ -1620,7 +1620,7 @@ static int __devinit sis190_get_mac_addr
+ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
+ struct net_device *dev)
+ {
+- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
++ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
+ struct sis190_private *tp = netdev_priv(dev);
+ struct pci_dev *isa_bridge;
+ u8 reg, tmp8;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ethernet/stmicro/stmmac/mmc_core.c linux-3.4-pax/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+--- linux-3.4/drivers/net/ethernet/stmicro/stmmac/mmc_core.c 2012-03-19 10:39:03.516049627 +0100
++++ linux-3.4-pax/drivers/net/ethernet/stmicro/stmmac/mmc_core.c 2012-05-21 12:10:10.620048948 +0200
+@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr
+
+ writel(value, ioaddr + MMC_CNTRL);
+
+- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
+- MMC_CNTRL, value);
++// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
++// MMC_CNTRL, value);
+ }
+
+ /* To mask all all interrupts.*/
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/hyperv/hyperv_net.h linux-3.4-pax/drivers/net/hyperv/hyperv_net.h
+--- linux-3.4/drivers/net/hyperv/hyperv_net.h 2012-05-21 11:33:13.507928532 +0200
++++ linux-3.4-pax/drivers/net/hyperv/hyperv_net.h 2012-05-21 12:10:10.620048948 +0200
+@@ -98,7 +98,7 @@ struct rndis_device {
+
+ enum rndis_device_state state;
+ bool link_state;
+- atomic_t new_req_id;
++ atomic_unchecked_t new_req_id;
+
+ spinlock_t request_lock;
+ struct list_head req_list;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/hyperv/rndis_filter.c linux-3.4-pax/drivers/net/hyperv/rndis_filter.c
+--- linux-3.4/drivers/net/hyperv/rndis_filter.c 2012-05-21 11:33:13.523928531 +0200
++++ linux-3.4-pax/drivers/net/hyperv/rndis_filter.c 2012-05-21 12:10:10.624048948 +0200
+@@ -97,7 +97,7 @@ static struct rndis_request *get_rndis_r
+ * template
+ */
+ set = &rndis_msg->msg.set_req;
+- set->req_id = atomic_inc_return(&dev->new_req_id);
++ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
+
+ /* Add to the request list */
+ spin_lock_irqsave(&dev->request_lock, flags);
+@@ -648,7 +648,7 @@ static void rndis_filter_halt_device(str
+
+ /* Setup the rndis set */
+ halt = &request->request_msg.msg.halt_req;
+- halt->req_id = atomic_inc_return(&dev->new_req_id);
++ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
+
+ /* Ignore return since this msg is optional. */
+ rndis_filter_send_request(dev, request);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/ppp/ppp_generic.c linux-3.4-pax/drivers/net/ppp/ppp_generic.c
+--- linux-3.4/drivers/net/ppp/ppp_generic.c 2012-05-21 11:33:13.647928538 +0200
++++ linux-3.4-pax/drivers/net/ppp/ppp_generic.c 2012-05-21 12:10:10.628048948 +0200
+@@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, st
+ void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
+ struct ppp_stats stats;
+ struct ppp_comp_stats cstats;
+- char *vers;
+
+ switch (cmd) {
+ case SIOCGPPPSTATS:
+@@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, st
+ break;
+
+ case SIOCGPPPVER:
+- vers = PPP_VERSION;
+- if (copy_to_user(addr, vers, strlen(vers) + 1))
++ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
+ break;
+ err = 0;
+ break;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/tokenring/abyss.c linux-3.4-pax/drivers/net/tokenring/abyss.c
+--- linux-3.4/drivers/net/tokenring/abyss.c 2012-05-21 11:33:13.699928542 +0200
++++ linux-3.4-pax/drivers/net/tokenring/abyss.c 2012-05-21 12:10:10.632048949 +0200
+@@ -450,10 +450,12 @@ static struct pci_driver abyss_driver =
+
+ static int __init abyss_init (void)
+ {
+- abyss_netdev_ops = tms380tr_netdev_ops;
++ pax_open_kernel();
++ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
+
+- abyss_netdev_ops.ndo_open = abyss_open;
+- abyss_netdev_ops.ndo_stop = abyss_close;
++ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
++ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
++ pax_close_kernel();
+
+ return pci_register_driver(&abyss_driver);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/tokenring/madgemc.c linux-3.4-pax/drivers/net/tokenring/madgemc.c
+--- linux-3.4/drivers/net/tokenring/madgemc.c 2012-05-21 11:33:13.727928543 +0200
++++ linux-3.4-pax/drivers/net/tokenring/madgemc.c 2012-05-21 12:10:10.632048949 +0200
+@@ -742,9 +742,11 @@ static struct mca_driver madgemc_driver
+
+ static int __init madgemc_init (void)
+ {
+- madgemc_netdev_ops = tms380tr_netdev_ops;
+- madgemc_netdev_ops.ndo_open = madgemc_open;
+- madgemc_netdev_ops.ndo_stop = madgemc_close;
++ pax_open_kernel();
++ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
++ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
++ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
++ pax_close_kernel();
+
+ return mca_register_driver (&madgemc_driver);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/tokenring/proteon.c linux-3.4-pax/drivers/net/tokenring/proteon.c
+--- linux-3.4/drivers/net/tokenring/proteon.c 2012-05-21 11:33:13.755928544 +0200
++++ linux-3.4-pax/drivers/net/tokenring/proteon.c 2012-05-21 12:10:10.636048949 +0200
+@@ -352,9 +352,11 @@ static int __init proteon_init(void)
+ struct platform_device *pdev;
+ int i, num = 0, err = 0;
+
+- proteon_netdev_ops = tms380tr_netdev_ops;
+- proteon_netdev_ops.ndo_open = proteon_open;
+- proteon_netdev_ops.ndo_stop = tms380tr_close;
++ pax_open_kernel();
++ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
++ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
++ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
++ pax_close_kernel();
+
+ err = platform_driver_register(&proteon_driver);
+ if (err)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/tokenring/skisa.c linux-3.4-pax/drivers/net/tokenring/skisa.c
+--- linux-3.4/drivers/net/tokenring/skisa.c 2012-05-21 11:33:13.755928544 +0200
++++ linux-3.4-pax/drivers/net/tokenring/skisa.c 2012-05-21 12:10:10.636048949 +0200
+@@ -362,9 +362,11 @@ static int __init sk_isa_init(void)
+ struct platform_device *pdev;
+ int i, num = 0, err = 0;
+
+- sk_isa_netdev_ops = tms380tr_netdev_ops;
+- sk_isa_netdev_ops.ndo_open = sk_isa_open;
+- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
++ pax_open_kernel();
++ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
++ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
++ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
++ pax_close_kernel();
+
+ err = platform_driver_register(&sk_isa_driver);
+ if (err)
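The token-ring init hunks above (abyss, madgemc, proteon, skisa) and the iwlegacy/mac80211_hwsim hunks below share one pattern: instead of assigning to a writable ops structure, the template is copied and its callbacks patched inside a pax_open_kernel()/pax_close_kernel() pair, because under PaX those ops structures are otherwise read-only. The kernel primitives work differently from anything shown here; the following user-space mprotect() sketch only mirrors the shape of "briefly make it writable, patch it, lock it again".

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct net_ops {
    int (*ndo_open)(void);
    int (*ndo_stop)(void);
};

static int template_open(void) { puts("template open"); return 0; }
static int template_stop(void) { puts("template stop"); return 0; }
static int driver_open(void)   { puts("driver open");   return 0; }

static const struct net_ops template_ops = {
    .ndo_open = template_open,
    .ndo_stop = template_stop,
};

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    struct net_ops *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (ops == MAP_FAILED)
        return 1;

    memcpy(ops, &template_ops, sizeof(*ops));
    mprotect(ops, pagesz, PROT_READ);             /* ops now lives read-only */

    /* pax_open_kernel() analogue: briefly writable... */
    mprotect(ops, pagesz, PROT_READ | PROT_WRITE);
    memcpy(ops, &template_ops, sizeof(*ops));     /* copy the template */
    ops->ndo_open = driver_open;                  /* patch one callback */
    /* ...pax_close_kernel() analogue: read-only again. */
    mprotect(ops, pagesz, PROT_READ);

    ops->ndo_open();
    ops->ndo_stop();
    return 0;
}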
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/usb/hso.c linux-3.4-pax/drivers/net/usb/hso.c
+--- linux-3.4/drivers/net/usb/hso.c 2012-05-21 11:33:13.819928548 +0200
++++ linux-3.4-pax/drivers/net/usb/hso.c 2012-05-21 12:10:10.640048948 +0200
+@@ -71,7 +71,7 @@
+ #include <asm/byteorder.h>
+ #include <linux/serial_core.h>
+ #include <linux/serial.h>
+-
++#include <asm/local.h>
+
+ #define MOD_AUTHOR "Option Wireless"
+ #define MOD_DESCRIPTION "USB High Speed Option driver"
+@@ -257,7 +257,7 @@ struct hso_serial {
+
+ /* from usb_serial_port */
+ struct tty_struct *tty;
+- int open_count;
++ local_t open_count;
+ spinlock_t serial_lock;
+
+ int (*write_data) (struct hso_serial *serial);
+@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
+ struct urb *urb;
+
+ urb = serial->rx_urb[0];
+- if (serial->open_count > 0) {
++ if (local_read(&serial->open_count) > 0) {
+ count = put_rxbuf_data(urb, serial);
+ if (count == -1)
+ return;
+@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
+ DUMP1(urb->transfer_buffer, urb->actual_length);
+
+ /* Anyone listening? */
+- if (serial->open_count == 0)
++ if (local_read(&serial->open_count) == 0)
+ return;
+
+ if (status == 0) {
+@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
+ spin_unlock_irq(&serial->serial_lock);
+
+ /* check for port already opened, if not set the termios */
+- serial->open_count++;
+- if (serial->open_count == 1) {
++ if (local_inc_return(&serial->open_count) == 1) {
+ serial->rx_state = RX_IDLE;
+ /* Force default termio settings */
+ _hso_serial_set_termios(tty, NULL);
+@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
+ result = hso_start_serial_device(serial->parent, GFP_KERNEL);
+ if (result) {
+ hso_stop_serial_device(serial->parent);
+- serial->open_count--;
++ local_dec(&serial->open_count);
+ kref_put(&serial->parent->ref, hso_serial_ref_free);
+ }
+ } else {
+@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
+
+ /* reset the rts and dtr */
+ /* do the actual close */
+- serial->open_count--;
++ local_dec(&serial->open_count);
+
+- if (serial->open_count <= 0) {
+- serial->open_count = 0;
++ if (local_read(&serial->open_count) <= 0) {
++ local_set(&serial->open_count, 0);
+ spin_lock_irq(&serial->serial_lock);
+ if (serial->tty == tty) {
+ serial->tty->driver_data = NULL;
+@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
+
+ /* the actual setup */
+ spin_lock_irqsave(&serial->serial_lock, flags);
+- if (serial->open_count)
++ if (local_read(&serial->open_count))
+ _hso_serial_set_termios(tty, old);
+ else
+ tty->termios = old;
+@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
+ D1("Pending read interrupt on port %d\n", i);
+ spin_lock(&serial->serial_lock);
+ if (serial->rx_state == RX_IDLE &&
+- serial->open_count > 0) {
++ local_read(&serial->open_count) > 0) {
+ /* Setup and send a ctrl req read on
+ * port i */
+ if (!serial->rx_urb_filled[0]) {
+@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
+ /* Start all serial ports */
+ for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
+ if (serial_table[i] && (serial_table[i]->interface == iface)) {
+- if (dev2ser(serial_table[i])->open_count) {
++ if (local_read(&dev2ser(serial_table[i])->open_count)) {
+ result =
+ hso_start_serial_device(serial_table[i], GFP_NOIO);
+ hso_kick_transmit(dev2ser(serial_table[i]));
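In the hso.c hunks the plain int open_count becomes a local_t driven by local_inc_return()/local_dec(), turning the open/close accounting into single atomic operations instead of racy read-modify-write sequences on a plain int. A sketch of the same open-count pattern, using C11 atomics as a stand-in for the kernel's local_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count;

/* First opener (count goes 0 -> 1) does the one-time setup -- the shape
 * of "if (local_inc_return(&serial->open_count) == 1)" in the driver. */
static void serial_open(void)
{
    if (atomic_fetch_add(&open_count, 1) + 1 == 1)
        puts("first open: initialise hardware");
    else
        puts("already open: reuse existing state");
}

static void serial_close(void)
{
    if (atomic_fetch_sub(&open_count, 1) - 1 <= 0) {
        atomic_store(&open_count, 0);   /* clamp at zero, as the driver does */
        puts("last close: tear down");
    }
}

int main(void)
{
    serial_open();
    serial_open();
    serial_close();
    serial_close();
    return 0;
}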
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/wireless/ath/ath9k/ar9002_mac.c linux-3.4-pax/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+--- linux-3.4/drivers/net/wireless/ath/ath9k/ar9002_mac.c 2012-05-21 11:33:14.303928574 +0200
++++ linux-3.4-pax/drivers/net/wireless/ath/ath9k/ar9002_mac.c 2012-05-21 12:10:10.644048948 +0200
+@@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, voi
+ ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
+ ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
+
+- ACCESS_ONCE(ads->ds_link) = i->link;
+- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
++ ACCESS_ONCE_RW(ads->ds_link) = i->link;
++ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
+
+ ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
+ ctl6 = SM(i->keytype, AR_EncrType);
+@@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, voi
+
+ if ((i->is_first || i->is_last) &&
+ i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
+- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
+ | set11nTries(i->rates, 1)
+ | set11nTries(i->rates, 2)
+ | set11nTries(i->rates, 3)
+ | (i->dur_update ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
+ | set11nRate(i->rates, 1)
+ | set11nRate(i->rates, 2)
+ | set11nRate(i->rates, 3);
+ } else {
+- ACCESS_ONCE(ads->ds_ctl2) = 0;
+- ACCESS_ONCE(ads->ds_ctl3) = 0;
++ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
++ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
+ }
+
+ if (!i->is_first) {
+- ACCESS_ONCE(ads->ds_ctl0) = 0;
+- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
+- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
++ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
++ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
++ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
+ return;
+ }
+
+@@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, voi
+ break;
+ }
+
+- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
++ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
+ | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(i->txpower, AR_XmitPower)
+ | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+@@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, voi
+ | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
+ (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
+
+- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
+- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
++ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
++ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
+
+ if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
+ return;
+
+- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
+ | set11nPktDurRTSCTS(i->rates, 1);
+
+- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
++ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
+ | set11nPktDurRTSCTS(i->rates, 3);
+
+- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
+ | set11nRateFlags(i->rates, 1)
+ | set11nRateFlags(i->rates, 2)
+ | set11nRateFlags(i->rates, 3)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/wireless/ath/ath9k/ar9003_mac.c linux-3.4-pax/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+--- linux-3.4/drivers/net/wireless/ath/ath9k/ar9003_mac.c 2012-05-21 11:33:14.359928577 +0200
++++ linux-3.4-pax/drivers/net/wireless/ath/ath9k/ar9003_mac.c 2012-05-21 12:10:10.648048950 +0200
+@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, voi
+ (i->qcu << AR_TxQcuNum_S) | desc_len;
+
+ checksum += val;
+- ACCESS_ONCE(ads->info) = val;
++ ACCESS_ONCE_RW(ads->info) = val;
+
+ checksum += i->link;
+- ACCESS_ONCE(ads->link) = i->link;
++ ACCESS_ONCE_RW(ads->link) = i->link;
+
+ checksum += i->buf_addr[0];
+- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
++ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
+ checksum += i->buf_addr[1];
+- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
++ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
+ checksum += i->buf_addr[2];
+- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
++ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
+ checksum += i->buf_addr[3];
+- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
++ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
+
+ checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
+- ACCESS_ONCE(ads->ctl3) = val;
++ ACCESS_ONCE_RW(ads->ctl3) = val;
+ checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
+- ACCESS_ONCE(ads->ctl5) = val;
++ ACCESS_ONCE_RW(ads->ctl5) = val;
+ checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
+- ACCESS_ONCE(ads->ctl7) = val;
++ ACCESS_ONCE_RW(ads->ctl7) = val;
+ checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
+- ACCESS_ONCE(ads->ctl9) = val;
++ ACCESS_ONCE_RW(ads->ctl9) = val;
+
+ checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
+- ACCESS_ONCE(ads->ctl10) = checksum;
++ ACCESS_ONCE_RW(ads->ctl10) = checksum;
+
+ if (i->is_first || i->is_last) {
+- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
+ | set11nTries(i->rates, 1)
+ | set11nTries(i->rates, 2)
+ | set11nTries(i->rates, 3)
+ | (i->dur_update ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
+ | set11nRate(i->rates, 1)
+ | set11nRate(i->rates, 2)
+ | set11nRate(i->rates, 3);
+ } else {
+- ACCESS_ONCE(ads->ctl13) = 0;
+- ACCESS_ONCE(ads->ctl14) = 0;
++ ACCESS_ONCE_RW(ads->ctl13) = 0;
++ ACCESS_ONCE_RW(ads->ctl14) = 0;
+ }
+
+ ads->ctl20 = 0;
+@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, voi
+
+ ctl17 = SM(i->keytype, AR_EncrType);
+ if (!i->is_first) {
+- ACCESS_ONCE(ads->ctl11) = 0;
+- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
+- ACCESS_ONCE(ads->ctl15) = 0;
+- ACCESS_ONCE(ads->ctl16) = 0;
+- ACCESS_ONCE(ads->ctl17) = ctl17;
+- ACCESS_ONCE(ads->ctl18) = 0;
+- ACCESS_ONCE(ads->ctl19) = 0;
++ ACCESS_ONCE_RW(ads->ctl11) = 0;
++ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
++ ACCESS_ONCE_RW(ads->ctl15) = 0;
++ ACCESS_ONCE_RW(ads->ctl16) = 0;
++ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
++ ACCESS_ONCE_RW(ads->ctl18) = 0;
++ ACCESS_ONCE_RW(ads->ctl19) = 0;
+ return;
+ }
+
+- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
++ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
+ | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(i->txpower, AR_XmitPower)
+ | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, voi
+ val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
+ ctl12 |= SM(val, AR_PAPRDChainMask);
+
+- ACCESS_ONCE(ads->ctl12) = ctl12;
+- ACCESS_ONCE(ads->ctl17) = ctl17;
++ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
++ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
+
+- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
+ | set11nPktDurRTSCTS(i->rates, 1);
+
+- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
++ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
+ | set11nPktDurRTSCTS(i->rates, 3);
+
+- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
+ | set11nRateFlags(i->rates, 1)
+ | set11nRateFlags(i->rates, 2)
+ | set11nRateFlags(i->rates, 3)
+ | SM(i->rtscts_rate, AR_RTSCTSRate);
+
+- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
++ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
+ }
+
+ static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/wireless/ath/ath9k/hw.h linux-3.4-pax/drivers/net/wireless/ath/ath9k/hw.h
+--- linux-3.4/drivers/net/wireless/ath/ath9k/hw.h 2012-05-21 11:33:14.547928587 +0200
++++ linux-3.4-pax/drivers/net/wireless/ath/ath9k/hw.h 2012-05-21 12:10:10.656048951 +0200
+@@ -614,7 +614,7 @@ struct ath_hw_private_ops {
+
+ /* ANI */
+ void (*ani_cache_ini_regs)(struct ath_hw *ah);
+-};
++} __no_const;
+
+ /**
+ * struct ath_hw_ops - callbacks used by hardware code and driver code
+@@ -644,7 +644,7 @@ struct ath_hw_ops {
+ void (*antdiv_comb_conf_set)(struct ath_hw *ah,
+ struct ath_hw_antcomb_conf *antconf);
+
+-};
++} __no_const;
+
+ struct ath_nf_limits {
+ s16 max;
+@@ -664,7 +664,7 @@ enum ath_cal_list {
+ #define AH_FASTCC 0x4
+
+ struct ath_hw {
+- struct ath_ops reg_ops;
++ ath_ops_no_const reg_ops;
+
+ struct ieee80211_hw *hw;
+ struct ath_common common;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/wireless/ath/ath.h linux-3.4-pax/drivers/net/wireless/ath/ath.h
+--- linux-3.4/drivers/net/wireless/ath/ath.h 2012-05-21 11:33:14.023928559 +0200
++++ linux-3.4-pax/drivers/net/wireless/ath/ath.h 2012-05-21 12:10:10.660048951 +0200
+@@ -119,6 +119,7 @@ struct ath_ops {
+ void (*write_flush) (void *);
+ u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
+ };
++typedef struct ath_ops __no_const ath_ops_no_const;
+
+ struct ath_common;
+ struct ath_bus_ops;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h linux-3.4-pax/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
+--- linux-3.4/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h 2012-03-19 10:39:04.208049590 +0100
++++ linux-3.4-pax/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h 2012-05-21 12:10:10.660048951 +0200
+@@ -545,7 +545,7 @@ struct phy_func_ptr {
+ void (*carrsuppr)(struct brcms_phy *);
+ s32 (*rxsigpwr)(struct brcms_phy *, s32);
+ void (*detach)(struct brcms_phy *);
+-};
++} __no_const;
+
+ struct brcms_phy {
+ struct brcms_phy_pub pubpi_ro;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/wireless/iwlegacy/3945-mac.c linux-3.4-pax/drivers/net/wireless/iwlegacy/3945-mac.c
+--- linux-3.4/drivers/net/wireless/iwlegacy/3945-mac.c 2012-05-21 11:33:16.335928684 +0200
++++ linux-3.4-pax/drivers/net/wireless/iwlegacy/3945-mac.c 2012-05-21 12:10:10.664048950 +0200
+@@ -3611,7 +3611,9 @@ il3945_pci_probe(struct pci_dev *pdev, c
+ */
+ if (il3945_mod_params.disable_hw_scan) {
+ D_INFO("Disabling hw_scan\n");
+- il3945_mac_ops.hw_scan = NULL;
++ pax_open_kernel();
++ *(void **)&il3945_mac_ops.hw_scan = NULL;
++ pax_close_kernel();
+ }
+
+ D_INFO("*** LOAD DRIVER ***\n");
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/wireless/mac80211_hwsim.c linux-3.4-pax/drivers/net/wireless/mac80211_hwsim.c
+--- linux-3.4/drivers/net/wireless/mac80211_hwsim.c 2012-05-21 11:33:16.635928700 +0200
++++ linux-3.4-pax/drivers/net/wireless/mac80211_hwsim.c 2012-05-21 12:10:10.668048950 +0200
+@@ -1721,9 +1721,11 @@ static int __init init_mac80211_hwsim(vo
+ return -EINVAL;
+
+ if (fake_hw_scan) {
+- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
+- mac80211_hwsim_ops.sw_scan_start = NULL;
+- mac80211_hwsim_ops.sw_scan_complete = NULL;
++ pax_open_kernel();
++ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
++ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
++ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
++ pax_close_kernel();
+ }
+
+ spin_lock_init(&hwsim_radio_lock);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/wireless/mwifiex/main.h linux-3.4-pax/drivers/net/wireless/mwifiex/main.h
+--- linux-3.4/drivers/net/wireless/mwifiex/main.h 2012-05-21 11:33:16.699928704 +0200
++++ linux-3.4-pax/drivers/net/wireless/mwifiex/main.h 2012-05-21 12:10:10.672048951 +0200
+@@ -537,7 +537,7 @@ struct mwifiex_if_ops {
+ void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
+ int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
+ int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
+-};
++} __no_const;
+
+ struct mwifiex_adapter {
+ u8 iface_type;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/wireless/rndis_wlan.c linux-3.4-pax/drivers/net/wireless/rndis_wlan.c
+--- linux-3.4/drivers/net/wireless/rndis_wlan.c 2012-05-21 11:33:16.899928715 +0200
++++ linux-3.4-pax/drivers/net/wireless/rndis_wlan.c 2012-05-21 12:10:10.676048951 +0200
+@@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbn
+
+ netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
+
+- if (rts_threshold < 0 || rts_threshold > 2347)
++ if (rts_threshold > 2347)
+ rts_threshold = 2347;
+
+ tmp = cpu_to_le32(rts_threshold);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/net/wireless/wl1251/wl1251.h linux-3.4-pax/drivers/net/wireless/wl1251/wl1251.h
+--- linux-3.4/drivers/net/wireless/wl1251/wl1251.h 2012-05-21 11:33:17.435928743 +0200
++++ linux-3.4-pax/drivers/net/wireless/wl1251/wl1251.h 2012-05-21 12:10:10.680048951 +0200
+@@ -266,7 +266,7 @@ struct wl1251_if_operations {
+ void (*reset)(struct wl1251 *wl);
+ void (*enable_irq)(struct wl1251 *wl);
+ void (*disable_irq)(struct wl1251 *wl);
+-};
++} __no_const;
+
+ struct wl1251 {
+ struct ieee80211_hw *hw;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/oprofile/buffer_sync.c linux-3.4-pax/drivers/oprofile/buffer_sync.c
+--- linux-3.4/drivers/oprofile/buffer_sync.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/oprofile/buffer_sync.c 2012-05-21 12:10:10.680048951 +0200
+@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
+ if (cookie == NO_COOKIE)
+ offset = pc;
+ if (cookie == INVALID_COOKIE) {
+- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
+ offset = pc;
+ }
+ if (cookie != last_cookie) {
+@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
+ /* add userspace sample */
+
+ if (!mm) {
+- atomic_inc(&oprofile_stats.sample_lost_no_mm);
++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
+ return 0;
+ }
+
+ cookie = lookup_dcookie(mm, s->eip, &offset);
+
+ if (cookie == INVALID_COOKIE) {
+- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
+ return 0;
+ }
+
+@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
+ /* ignore backtraces if failed to add a sample */
+ if (state == sb_bt_start) {
+ state = sb_bt_ignore;
+- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
++ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
+ }
+ }
+ release_mm(mm);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/oprofile/event_buffer.c linux-3.4-pax/drivers/oprofile/event_buffer.c
+--- linux-3.4/drivers/oprofile/event_buffer.c 2012-01-08 19:48:14.763471637 +0100
++++ linux-3.4-pax/drivers/oprofile/event_buffer.c 2012-05-21 12:10:10.684048951 +0200
+@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
+ }
+
+ if (buffer_pos == buffer_size) {
+- atomic_inc(&oprofile_stats.event_lost_overflow);
++ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
+ return;
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/oprofile/oprof.c linux-3.4-pax/drivers/oprofile/oprof.c
+--- linux-3.4/drivers/oprofile/oprof.c 2012-03-19 10:39:04.908049553 +0100
++++ linux-3.4-pax/drivers/oprofile/oprof.c 2012-05-21 12:10:10.684048951 +0200
+@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
+ if (oprofile_ops.switch_events())
+ return;
+
+- atomic_inc(&oprofile_stats.multiplex_counter);
++ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
+ start_switch_worker();
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/oprofile/oprofilefs.c linux-3.4-pax/drivers/oprofile/oprofilefs.c
+--- linux-3.4/drivers/oprofile/oprofilefs.c 2012-05-21 11:33:17.743928761 +0200
++++ linux-3.4-pax/drivers/oprofile/oprofilefs.c 2012-05-22 15:28:30.135384683 +0200
+@@ -185,7 +185,7 @@ static const struct file_operations atom
+
+
+ int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
+- char const *name, atomic_t *val)
++ char const *name, atomic_unchecked_t *val)
+ {
+ return __oprofilefs_create_file(sb, root, name,
+ &atomic_ro_fops, 0444, val);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/oprofile/oprofile_stats.c linux-3.4-pax/drivers/oprofile/oprofile_stats.c
+--- linux-3.4/drivers/oprofile/oprofile_stats.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/oprofile/oprofile_stats.c 2012-05-21 12:10:10.692048952 +0200
+@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
+ cpu_buf->sample_invalid_eip = 0;
+ }
+
+- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
+- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
+- atomic_set(&oprofile_stats.event_lost_overflow, 0);
+- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
+- atomic_set(&oprofile_stats.multiplex_counter, 0);
++ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
++ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
++ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
++ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
++ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
+ }
+
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/oprofile/oprofile_stats.h linux-3.4-pax/drivers/oprofile/oprofile_stats.h
+--- linux-3.4/drivers/oprofile/oprofile_stats.h 2011-10-24 12:48:34.403091338 +0200
++++ linux-3.4-pax/drivers/oprofile/oprofile_stats.h 2012-05-21 12:10:10.692048952 +0200
+@@ -13,11 +13,11 @@
+ #include <linux/atomic.h>
+
+ struct oprofile_stat_struct {
+- atomic_t sample_lost_no_mm;
+- atomic_t sample_lost_no_mapping;
+- atomic_t bt_lost_no_mapping;
+- atomic_t event_lost_overflow;
+- atomic_t multiplex_counter;
++ atomic_unchecked_t sample_lost_no_mm;
++ atomic_unchecked_t sample_lost_no_mapping;
++ atomic_unchecked_t bt_lost_no_mapping;
++ atomic_unchecked_t event_lost_overflow;
++ atomic_unchecked_t multiplex_counter;
+ };
+
+ extern struct oprofile_stat_struct oprofile_stats;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/parport/procfs.c linux-3.4-pax/drivers/parport/procfs.c
+--- linux-3.4/drivers/parport/procfs.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/parport/procfs.c 2012-05-21 12:10:10.696048952 +0200
+@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
+
+ *ppos += len;
+
+- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
++ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
+ }
+
+ #ifdef CONFIG_PARPORT_1284
+@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
+
+ *ppos += len;
+
+- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
++ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
+ }
+ #endif /* IEEE1284.3 support. */
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/pci/hotplug/cpci_hotplug.h linux-3.4-pax/drivers/pci/hotplug/cpci_hotplug.h
+--- linux-3.4/drivers/pci/hotplug/cpci_hotplug.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/pci/hotplug/cpci_hotplug.h 2012-05-21 12:10:10.696048952 +0200
+@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
+ int (*hardware_test) (struct slot* slot, u32 value);
+ u8 (*get_power) (struct slot* slot);
+ int (*set_power) (struct slot* slot, int value);
+-};
++} __no_const;
+
+ struct cpci_hp_controller {
+ unsigned int irq;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/pci/hotplug/cpqphp_nvram.c linux-3.4-pax/drivers/pci/hotplug/cpqphp_nvram.c
+--- linux-3.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/pci/hotplug/cpqphp_nvram.c 2012-05-21 12:10:10.696048952 +0200
+@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
+
+ void compaq_nvram_init (void __iomem *rom_start)
+ {
++
++#ifndef CONFIG_PAX_KERNEXEC
+ if (rom_start) {
+ compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
+ }
++#endif
++
+ dbg("int15 entry = %p\n", compaq_int15_entry_point);
+
+ /* initialize our int15 lock */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/pci/pcie/aspm.c linux-3.4-pax/drivers/pci/pcie/aspm.c
+--- linux-3.4/drivers/pci/pcie/aspm.c 2012-05-21 11:33:17.819928765 +0200
++++ linux-3.4-pax/drivers/pci/pcie/aspm.c 2012-05-21 12:10:10.700048952 +0200
+@@ -27,9 +27,9 @@
+ #define MODULE_PARAM_PREFIX "pcie_aspm."
+
+ /* Note: those are not register definitions */
+-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
+-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
+-#define ASPM_STATE_L1 (4) /* L1 state */
++#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
++#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
++#define ASPM_STATE_L1 (4U) /* L1 state */
+ #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
+ #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/pci/probe.c linux-3.4-pax/drivers/pci/probe.c
+--- linux-3.4/drivers/pci/probe.c 2012-05-21 11:33:17.823928765 +0200
++++ linux-3.4-pax/drivers/pci/probe.c 2012-05-21 12:10:10.704048952 +0200
+@@ -215,7 +215,7 @@ int __pci_read_base(struct pci_dev *dev,
+ u16 orig_cmd;
+ struct pci_bus_region region;
+
+- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
++ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
+
+ if (!dev->mmio_always_on) {
+ pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/platform/x86/thinkpad_acpi.c linux-3.4-pax/drivers/platform/x86/thinkpad_acpi.c
+--- linux-3.4/drivers/platform/x86/thinkpad_acpi.c 2012-05-21 11:33:18.439928798 +0200
++++ linux-3.4-pax/drivers/platform/x86/thinkpad_acpi.c 2012-05-21 12:10:10.712048953 +0200
+@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
+ return 0;
+ }
+
+-void static hotkey_mask_warn_incomplete_mask(void)
++static void hotkey_mask_warn_incomplete_mask(void)
+ {
+ /* log only what the user can fix... */
+ const u32 wantedmask = hotkey_driver_mask &
+@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_
+ }
+ }
+
+-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+- struct tp_nvram_state *newn,
+- const u32 event_mask)
+-{
+-
+ #define TPACPI_COMPARE_KEY(__scancode, __member) \
+ do { \
+ if ((event_mask & (1 << __scancode)) && \
+@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_eve
+ tpacpi_hotkey_send_key(__scancode); \
+ } while (0)
+
+- void issue_volchange(const unsigned int oldvol,
+- const unsigned int newvol)
+- {
+- unsigned int i = oldvol;
+-
+- while (i > newvol) {
+- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
+- i--;
+- }
+- while (i < newvol) {
+- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
+- i++;
+- }
++static void issue_volchange(const unsigned int oldvol,
++ const unsigned int newvol,
++ const u32 event_mask)
++{
++ unsigned int i = oldvol;
++
++ while (i > newvol) {
++ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
++ i--;
+ }
++ while (i < newvol) {
++ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
++ i++;
++ }
++}
+
+- void issue_brightnesschange(const unsigned int oldbrt,
+- const unsigned int newbrt)
+- {
+- unsigned int i = oldbrt;
++static void issue_brightnesschange(const unsigned int oldbrt,
++ const unsigned int newbrt,
++ const u32 event_mask)
++{
++ unsigned int i = oldbrt;
+
+- while (i > newbrt) {
+- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
+- i--;
+- }
+- while (i < newbrt) {
+- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
+- i++;
+- }
++ while (i > newbrt) {
++ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
++ i--;
++ }
++ while (i < newbrt) {
++ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
++ i++;
+ }
++}
+
++static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
++ struct tp_nvram_state *newn,
++ const u32 event_mask)
++{
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
+@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_eve
+ oldn->volume_level != newn->volume_level) {
+ /* recently muted, or repeated mute keypress, or
+ * multiple presses ending in mute */
+- issue_volchange(oldn->volume_level, newn->volume_level);
++ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
+ }
+ } else {
+@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_eve
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
+ }
+ if (oldn->volume_level != newn->volume_level) {
+- issue_volchange(oldn->volume_level, newn->volume_level);
++ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
+ } else if (oldn->volume_toggle != newn->volume_toggle) {
+ /* repeated vol up/down keypress at end of scale ? */
+ if (newn->volume_level == 0)
+@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_eve
+ /* handle brightness */
+ if (oldn->brightness_level != newn->brightness_level) {
+ issue_brightnesschange(oldn->brightness_level,
+- newn->brightness_level);
++ newn->brightness_level,
++ event_mask);
+ } else if (oldn->brightness_toggle != newn->brightness_toggle) {
+ /* repeated key presses that didn't change state */
+ if (newn->brightness_level == 0)
+@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_eve
+ && !tp_features.bright_unkfw)
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
+ }
++}
+
+ #undef TPACPI_COMPARE_KEY
+ #undef TPACPI_MAY_SEND_KEY
+-}
+
+ /*
+ * Polling driver
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/pnp/pnpbios/bioscalls.c linux-3.4-pax/drivers/pnp/pnpbios/bioscalls.c
+--- linux-3.4/drivers/pnp/pnpbios/bioscalls.c 2012-05-21 11:33:18.451928799 +0200
++++ linux-3.4-pax/drivers/pnp/pnpbios/bioscalls.c 2012-05-21 12:10:10.716048953 +0200
+@@ -58,7 +58,7 @@ do { \
+ set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
+ } while(0)
+
+-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
+ (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
+
+ /*
+@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func
+
+ cpu = get_cpu();
+ save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
++
++ pax_open_kernel();
+ get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
++ pax_close_kernel();
+
+ /* On some boxes IRQ's during PnP BIOS calls are deadly. */
+ spin_lock_irqsave(&pnp_bios_lock, flags);
+@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func
+ :"memory");
+ spin_unlock_irqrestore(&pnp_bios_lock, flags);
+
++ pax_open_kernel();
+ get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
++ pax_close_kernel();
++
+ put_cpu();
+
+ /* If we get here and this is set then the PnP BIOS faulted on us. */
+@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 n
+ return status;
+ }
+
+-void pnpbios_calls_init(union pnp_bios_install_struct *header)
++void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
+ {
+ int i;
+
+@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_i
+ pnp_bios_callpoint.offset = header->fields.pm16offset;
+ pnp_bios_callpoint.segment = PNP_CS16;
+
++ pax_open_kernel();
++
+ for_each_possible_cpu(i) {
+ struct desc_struct *gdt = get_cpu_gdt_table(i);
+ if (!gdt)
+@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_i
+ set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
+ (unsigned long)__va(header->fields.pm16dseg));
+ }
++
++ pax_close_kernel();
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/pnp/resource.c linux-3.4-pax/drivers/pnp/resource.c
+--- linux-3.4/drivers/pnp/resource.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/pnp/resource.c 2012-05-21 12:10:10.720048953 +0200
+@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
+ return 1;
+
+ /* check if the resource is valid */
+- if (*irq < 0 || *irq > 15)
++ if (*irq > 15)
+ return 0;
+
+ /* check if the resource is reserved */
+@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
+ return 1;
+
+ /* check if the resource is valid */
+- if (*dma < 0 || *dma == 4 || *dma > 7)
++ if (*dma == 4 || *dma > 7)
+ return 0;
+
+ /* check if the resource is reserved */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/power/bq27x00_battery.c linux-3.4-pax/drivers/power/bq27x00_battery.c
+--- linux-3.4/drivers/power/bq27x00_battery.c 2012-05-21 11:33:18.495928802 +0200
++++ linux-3.4-pax/drivers/power/bq27x00_battery.c 2012-05-21 12:10:10.724048954 +0200
+@@ -72,7 +72,7 @@
+ struct bq27x00_device_info;
+ struct bq27x00_access_methods {
+ int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
+-};
++} __no_const;
+
+ enum bq27x00_chip { BQ27000, BQ27500 };
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/regulator/max8660.c linux-3.4-pax/drivers/regulator/max8660.c
+--- linux-3.4/drivers/regulator/max8660.c 2012-05-21 11:33:18.619928808 +0200
++++ linux-3.4-pax/drivers/regulator/max8660.c 2012-05-21 12:10:10.724048954 +0200
+@@ -385,8 +385,10 @@ static int __devinit max8660_probe(struc
+ max8660->shadow_regs[MAX8660_OVER1] = 5;
+ } else {
+ /* Otherwise devices can be toggled via software */
+- max8660_dcdc_ops.enable = max8660_dcdc_enable;
+- max8660_dcdc_ops.disable = max8660_dcdc_disable;
++ pax_open_kernel();
++ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
++ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
++ pax_close_kernel();
+ }
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/regulator/mc13892-regulator.c linux-3.4-pax/drivers/regulator/mc13892-regulator.c
+--- linux-3.4/drivers/regulator/mc13892-regulator.c 2012-05-21 11:33:18.647928810 +0200
++++ linux-3.4-pax/drivers/regulator/mc13892-regulator.c 2012-05-21 12:10:10.728048954 +0200
+@@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_p
+ }
+ mc13xxx_unlock(mc13892);
+
+- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
++ pax_open_kernel();
++ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
+ = mc13892_vcam_set_mode;
+- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
++ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
+ = mc13892_vcam_get_mode;
++ pax_close_kernel();
+
+ mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
+ ARRAY_SIZE(mc13892_regulators));
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/aacraid/aacraid.h linux-3.4-pax/drivers/scsi/aacraid/aacraid.h
+--- linux-3.4/drivers/scsi/aacraid/aacraid.h 2012-05-21 11:33:21.895928984 +0200
++++ linux-3.4-pax/drivers/scsi/aacraid/aacraid.h 2012-05-21 12:10:10.732048953 +0200
+@@ -492,7 +492,7 @@ struct adapter_ops
+ int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
+ /* Administrative operations */
+ int (*adapter_comm)(struct aac_dev * dev, int comm);
+-};
++} __no_const;
+
+ /*
+ * Define which interrupt handler needs to be installed
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/aacraid/linit.c linux-3.4-pax/drivers/scsi/aacraid/linit.c
+--- linux-3.4/drivers/scsi/aacraid/linit.c 2012-05-21 11:33:22.975929045 +0200
++++ linux-3.4-pax/drivers/scsi/aacraid/linit.c 2012-05-21 12:10:10.732048953 +0200
+@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_
+ #elif defined(__devinitconst)
+ static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
+ #else
+-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
++static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
+ #endif
+ { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
+ { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/aic94xx/aic94xx_init.c linux-3.4-pax/drivers/scsi/aic94xx/aic94xx_init.c
+--- linux-3.4/drivers/scsi/aic94xx/aic94xx_init.c 2012-05-21 11:33:24.139929108 +0200
++++ linux-3.4-pax/drivers/scsi/aic94xx/aic94xx_init.c 2012-05-21 12:10:10.736048953 +0200
+@@ -1012,7 +1012,7 @@ static struct sas_domain_function_templa
+ .lldd_ata_set_dmamode = asd_set_dmamode,
+ };
+
+-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
++static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/bfa/bfa_fcpim.c linux-3.4-pax/drivers/scsi/bfa/bfa_fcpim.c
+--- linux-3.4/drivers/scsi/bfa/bfa_fcpim.c 2012-03-19 10:39:05.576049517 +0100
++++ linux-3.4-pax/drivers/scsi/bfa/bfa_fcpim.c 2012-05-21 12:10:10.740048955 +0200
+@@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *
+
+ bfa_iotag_attach(fcp);
+
+- fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
++ fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
+ bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
+ (fcp->num_itns * sizeof(struct bfa_itn_s));
+ memset(fcp->itn_arr, 0,
+@@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct
+ void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
+ {
+ struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+- struct bfa_itn_s *itn;
++ bfa_itn_s_no_const *itn;
+
+ itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
+ itn->isr = isr;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/bfa/bfa_fcpim.h linux-3.4-pax/drivers/scsi/bfa/bfa_fcpim.h
+--- linux-3.4/drivers/scsi/bfa/bfa_fcpim.h 2012-03-19 10:39:05.576049517 +0100
++++ linux-3.4-pax/drivers/scsi/bfa/bfa_fcpim.h 2012-05-21 12:10:10.744048956 +0200
+@@ -37,6 +37,7 @@ struct bfa_iotag_s {
+ struct bfa_itn_s {
+ bfa_isr_func_t isr;
+ };
++typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
+
+ void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
+ void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
+@@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
+ struct list_head iotag_tio_free_q; /* free IO resources */
+ struct list_head iotag_unused_q; /* unused IO resources*/
+ struct bfa_iotag_s *iotag_arr;
+- struct bfa_itn_s *itn_arr;
++ bfa_itn_s_no_const *itn_arr;
+ int num_ioim_reqs;
+ int num_fwtio_reqs;
+ int num_itns;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/bfa/bfa.h linux-3.4-pax/drivers/scsi/bfa/bfa.h
+--- linux-3.4/drivers/scsi/bfa/bfa.h 2012-05-21 11:33:24.187929111 +0200
++++ linux-3.4-pax/drivers/scsi/bfa/bfa.h 2012-05-21 12:10:10.744048956 +0200
+@@ -196,7 +196,7 @@ struct bfa_hwif_s {
+ u32 *end);
+ int cpe_vec_q0;
+ int rme_vec_q0;
+-};
++} __no_const;
+ typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
+
+ struct bfa_faa_cbfn_s {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/bfa/bfa_ioc.h linux-3.4-pax/drivers/scsi/bfa/bfa_ioc.h
+--- linux-3.4/drivers/scsi/bfa/bfa_ioc.h 2012-05-21 11:33:24.251929114 +0200
++++ linux-3.4-pax/drivers/scsi/bfa/bfa_ioc.h 2012-05-21 12:10:10.748048956 +0200
+@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
+ bfa_ioc_disable_cbfn_t disable_cbfn;
+ bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
+ bfa_ioc_reset_cbfn_t reset_cbfn;
+-};
++} __no_const;
+
+ /*
+ * IOC event notification mechanism.
+@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
+ void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
+ bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
+ bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
+-};
++} __no_const;
+
+ /*
+ * Queue element to wait for room in request queue. FIFO order is
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/hosts.c linux-3.4-pax/drivers/scsi/hosts.c
+--- linux-3.4/drivers/scsi/hosts.c 2012-05-21 11:33:24.475929126 +0200
++++ linux-3.4-pax/drivers/scsi/hosts.c 2012-05-21 12:10:10.752048956 +0200
+@@ -42,7 +42,7 @@
+ #include "scsi_logging.h"
+
+
+-static atomic_t scsi_host_next_hn; /* host_no for next new host */
++static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
+
+
+ static void scsi_host_cls_release(struct device *dev)
+@@ -360,7 +360,7 @@ struct Scsi_Host *scsi_host_alloc(struct
+ * subtract one because we increment first then return, but we need to
+ * know what the next host number was before increment
+ */
+- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
++ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
+ shost->dma_channel = 0xff;
+
+ /* These three are default values which can be overridden */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/hpsa.c linux-3.4-pax/drivers/scsi/hpsa.c
+--- linux-3.4/drivers/scsi/hpsa.c 2012-05-21 11:33:24.491929127 +0200
++++ linux-3.4-pax/drivers/scsi/hpsa.c 2012-05-21 12:10:10.756048955 +0200
+@@ -521,7 +521,7 @@ static inline u32 next_command(struct ct
+ u32 a;
+
+ if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+- return h->access.command_completed(h);
++ return h->access->command_completed(h);
+
+ if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+ a = *(h->reply_pool_head); /* Next cmd in ring buffer */
+@@ -3002,7 +3002,7 @@ static void start_io(struct ctlr_info *h
+ while (!list_empty(&h->reqQ)) {
+ c = list_entry(h->reqQ.next, struct CommandList, list);
+ /* can't do anything if fifo is full */
+- if ((h->access.fifo_full(h))) {
++ if ((h->access->fifo_full(h))) {
+ dev_warn(&h->pdev->dev, "fifo full\n");
+ break;
+ }
+@@ -3012,7 +3012,7 @@ static void start_io(struct ctlr_info *h
+ h->Qdepth--;
+
+ /* Tell the controller execute command */
+- h->access.submit_command(h, c);
++ h->access->submit_command(h, c);
+
+ /* Put job onto the completed Q */
+ addQ(&h->cmpQ, c);
+@@ -3021,17 +3021,17 @@ static void start_io(struct ctlr_info *h
+
+ static inline unsigned long get_next_completion(struct ctlr_info *h)
+ {
+- return h->access.command_completed(h);
++ return h->access->command_completed(h);
+ }
+
+ static inline bool interrupt_pending(struct ctlr_info *h)
+ {
+- return h->access.intr_pending(h);
++ return h->access->intr_pending(h);
+ }
+
+ static inline long interrupt_not_for_us(struct ctlr_info *h)
+ {
+- return (h->access.intr_pending(h) == 0) ||
++ return (h->access->intr_pending(h) == 0) ||
+ (h->interrupts_enabled == 0);
+ }
+
+@@ -3930,7 +3930,7 @@ static int __devinit hpsa_pci_init(struc
+ if (prod_index < 0)
+ return -ENODEV;
+ h->product_name = products[prod_index].product_name;
+- h->access = *(products[prod_index].access);
++ h->access = products[prod_index].access;
+
+ if (hpsa_board_disabled(h->pdev)) {
+ dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
+@@ -4175,7 +4175,7 @@ static void controller_lockup_detected(s
+
+ assert_spin_locked(&lockup_detector_lock);
+ remove_ctlr_from_lockup_detector_list(h);
+- h->access.set_intr_mask(h, HPSA_INTR_OFF);
++ h->access->set_intr_mask(h, HPSA_INTR_OFF);
+ spin_lock_irqsave(&h->lock, flags);
+ h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+ spin_unlock_irqrestore(&h->lock, flags);
+@@ -4355,7 +4355,7 @@ reinit_after_soft_reset:
+ }
+
+ /* make sure the board interrupts are off */
+- h->access.set_intr_mask(h, HPSA_INTR_OFF);
++ h->access->set_intr_mask(h, HPSA_INTR_OFF);
+
+ if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
+ goto clean2;
+@@ -4389,7 +4389,7 @@ reinit_after_soft_reset:
+ * fake ones to scoop up any residual completions.
+ */
+ spin_lock_irqsave(&h->lock, flags);
+- h->access.set_intr_mask(h, HPSA_INTR_OFF);
++ h->access->set_intr_mask(h, HPSA_INTR_OFF);
+ spin_unlock_irqrestore(&h->lock, flags);
+ free_irq(h->intr[h->intr_mode], h);
+ rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
+@@ -4408,9 +4408,9 @@ reinit_after_soft_reset:
+ dev_info(&h->pdev->dev, "Board READY.\n");
+ dev_info(&h->pdev->dev,
+ "Waiting for stale completions to drain.\n");
+- h->access.set_intr_mask(h, HPSA_INTR_ON);
++ h->access->set_intr_mask(h, HPSA_INTR_ON);
+ msleep(10000);
+- h->access.set_intr_mask(h, HPSA_INTR_OFF);
++ h->access->set_intr_mask(h, HPSA_INTR_OFF);
+
+ rc = controller_reset_failed(h->cfgtable);
+ if (rc)
+@@ -4431,7 +4431,7 @@ reinit_after_soft_reset:
+ }
+
+ /* Turn the interrupts on so we can service requests */
+- h->access.set_intr_mask(h, HPSA_INTR_ON);
++ h->access->set_intr_mask(h, HPSA_INTR_ON);
+
+ hpsa_hba_inquiry(h);
+ hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
+@@ -4483,7 +4483,7 @@ static void hpsa_shutdown(struct pci_dev
+ * To write all data in the battery backed cache to disks
+ */
+ hpsa_flush_cache(h);
+- h->access.set_intr_mask(h, HPSA_INTR_OFF);
++ h->access->set_intr_mask(h, HPSA_INTR_OFF);
+ free_irq(h->intr[h->intr_mode], h);
+ #ifdef CONFIG_PCI_MSI
+ if (h->msix_vector)
+@@ -4657,7 +4657,7 @@ static __devinit void hpsa_enter_perform
+ return;
+ }
+ /* Change the access methods to the performant access methods */
+- h->access = SA5_performant_access;
++ h->access = &SA5_performant_access;
+ h->transMethod = CFGTBL_Trans_Performant;
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/hpsa.h linux-3.4-pax/drivers/scsi/hpsa.h
+--- linux-3.4/drivers/scsi/hpsa.h 2012-05-21 11:33:24.499929128 +0200
++++ linux-3.4-pax/drivers/scsi/hpsa.h 2012-05-21 12:10:10.760048955 +0200
+@@ -72,7 +72,7 @@ struct ctlr_info {
+ unsigned int msix_vector;
+ unsigned int msi_vector;
+ int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
+- struct access_method access;
++ struct access_method *access;
+
+ /* queue and queue Info */
+ struct list_head reqQ;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/ips.h linux-3.4-pax/drivers/scsi/ips.h
+--- linux-3.4/drivers/scsi/ips.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/scsi/ips.h 2012-05-21 12:10:10.760048955 +0200
+@@ -1027,7 +1027,7 @@ typedef struct {
+ int (*intr)(struct ips_ha *);
+ void (*enableint)(struct ips_ha *);
+ uint32_t (*statupd)(struct ips_ha *);
+-} ips_hw_func_t;
++} __no_const ips_hw_func_t;
+
+ typedef struct ips_ha {
+ uint8_t ha_id[IPS_MAX_CHANNELS+1];
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/libfc/fc_exch.c linux-3.4-pax/drivers/scsi/libfc/fc_exch.c
+--- linux-3.4/drivers/scsi/libfc/fc_exch.c 2012-05-21 11:33:24.807929144 +0200
++++ linux-3.4-pax/drivers/scsi/libfc/fc_exch.c 2012-05-21 12:10:10.764048956 +0200
+@@ -105,12 +105,12 @@ struct fc_exch_mgr {
+ * all together if not used XXX
+ */
+ struct {
+- atomic_t no_free_exch;
+- atomic_t no_free_exch_xid;
+- atomic_t xid_not_found;
+- atomic_t xid_busy;
+- atomic_t seq_not_found;
+- atomic_t non_bls_resp;
++ atomic_unchecked_t no_free_exch;
++ atomic_unchecked_t no_free_exch_xid;
++ atomic_unchecked_t xid_not_found;
++ atomic_unchecked_t xid_busy;
++ atomic_unchecked_t seq_not_found;
++ atomic_unchecked_t non_bls_resp;
+ } stats;
+ };
+
+@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(
+ /* allocate memory for exchange */
+ ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
+ if (!ep) {
+- atomic_inc(&mp->stats.no_free_exch);
++ atomic_inc_unchecked(&mp->stats.no_free_exch);
+ goto out;
+ }
+ memset(ep, 0, sizeof(*ep));
+@@ -780,7 +780,7 @@ out:
+ return ep;
+ err:
+ spin_unlock_bh(&pool->lock);
+- atomic_inc(&mp->stats.no_free_exch_xid);
++ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
+ mempool_free(ep, mp->ep_pool);
+ return NULL;
+ }
+@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_look
+ xid = ntohs(fh->fh_ox_id); /* we originated exch */
+ ep = fc_exch_find(mp, xid);
+ if (!ep) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ reject = FC_RJT_OX_ID;
+ goto out;
+ }
+@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_look
+ ep = fc_exch_find(mp, xid);
+ if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
+ if (ep) {
+- atomic_inc(&mp->stats.xid_busy);
++ atomic_inc_unchecked(&mp->stats.xid_busy);
+ reject = FC_RJT_RX_ID;
+ goto rel;
+ }
+@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
+ }
+ xid = ep->xid; /* get our XID */
+ } else if (!ep) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ reject = FC_RJT_RX_ID; /* XID not found */
+ goto out;
+ }
+@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_look
+ } else {
+ sp = &ep->seq;
+ if (sp->id != fh->fh_seq_id) {
+- atomic_inc(&mp->stats.seq_not_found);
++ atomic_inc_unchecked(&mp->stats.seq_not_found);
+ if (f_ctl & FC_FC_END_SEQ) {
+ /*
+ * Update sequence_id based on incoming last
+@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct
+
+ ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
+ if (!ep) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ goto out;
+ }
+ if (ep->esb_stat & ESB_ST_COMPLETE) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ if (ep->rxid == FC_XID_UNKNOWN)
+ ep->rxid = ntohs(fh->fh_rx_id);
+ if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ if (ep->did != ntoh24(fh->fh_s_id) &&
+ ep->did != FC_FID_FLOGI) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ sof = fr_sof(fp);
+@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct
+ sp->ssb_stat |= SSB_ST_RESP;
+ sp->id = fh->fh_seq_id;
+ } else if (sp->id != fh->fh_seq_id) {
+- atomic_inc(&mp->stats.seq_not_found);
++ atomic_inc_unchecked(&mp->stats.seq_not_found);
+ goto rel;
+ }
+
+@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_
+ sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
+
+ if (!sp)
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ else
+- atomic_inc(&mp->stats.non_bls_resp);
++ atomic_inc_unchecked(&mp->stats.non_bls_resp);
+
+ fc_frame_free(fp);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/libsas/sas_ata.c linux-3.4-pax/drivers/scsi/libsas/sas_ata.c
+--- linux-3.4/drivers/scsi/libsas/sas_ata.c 2012-05-21 11:33:24.883929148 +0200
++++ linux-3.4-pax/drivers/scsi/libsas/sas_ata.c 2012-05-21 12:10:10.768048956 +0200
+@@ -529,7 +529,7 @@ static struct ata_port_operations sas_sa
+ .postreset = ata_std_postreset,
+ .error_handler = ata_std_error_handler,
+ .post_internal_cmd = sas_ata_post_internal,
+- .qc_defer = ata_std_qc_defer,
++ .qc_defer = ata_std_qc_defer,
+ .qc_prep = ata_noop_qc_prep,
+ .qc_issue = sas_ata_qc_issue,
+ .qc_fill_rtf = sas_ata_qc_fill_rtf,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.4-pax/drivers/scsi/lpfc/lpfc_debugfs.c
+--- linux-3.4/drivers/scsi/lpfc/lpfc_debugfs.c 2012-05-21 11:33:24.971929153 +0200
++++ linux-3.4-pax/drivers/scsi/lpfc/lpfc_debugfs.c 2012-05-21 12:10:10.772048956 +0200
+@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
+
+ #include <linux/debugfs.h>
+
+-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
++static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
+ static unsigned long lpfc_debugfs_start_time = 0L;
+
+ /* iDiag */
+@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
+ lpfc_debugfs_enable = 0;
+
+ len = 0;
+- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
++ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
+ (lpfc_debugfs_max_disc_trc - 1);
+ for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
+ dtp = vport->disc_trc + i;
+@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
+ lpfc_debugfs_enable = 0;
+
+ len = 0;
+- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
++ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
+ (lpfc_debugfs_max_slow_ring_trc - 1);
+ for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
+ dtp = phba->slow_ring_trc + i;
+@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
+ !vport || !vport->disc_trc)
+ return;
+
+- index = atomic_inc_return(&vport->disc_trc_cnt) &
++ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
+ (lpfc_debugfs_max_disc_trc - 1);
+ dtp = vport->disc_trc + index;
+ dtp->fmt = fmt;
+ dtp->data1 = data1;
+ dtp->data2 = data2;
+ dtp->data3 = data3;
+- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
++ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
+ dtp->jif = jiffies;
+ #endif
+ return;
+@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
+ !phba || !phba->slow_ring_trc)
+ return;
+
+- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
++ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
+ (lpfc_debugfs_max_slow_ring_trc - 1);
+ dtp = phba->slow_ring_trc + index;
+ dtp->fmt = fmt;
+ dtp->data1 = data1;
+ dtp->data2 = data2;
+ dtp->data3 = data3;
+- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
++ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
+ dtp->jif = jiffies;
+ #endif
+ return;
+@@ -4090,7 +4090,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
+ "slow_ring buffer\n");
+ goto debug_failed;
+ }
+- atomic_set(&phba->slow_ring_trc_cnt, 0);
++ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
+ memset(phba->slow_ring_trc, 0,
+ (sizeof(struct lpfc_debugfs_trc) *
+ lpfc_debugfs_max_slow_ring_trc));
+@@ -4136,7 +4136,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
+ "buffer\n");
+ goto debug_failed;
+ }
+- atomic_set(&vport->disc_trc_cnt, 0);
++ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
+
+ snprintf(name, sizeof(name), "discovery_trace");
+ vport->debug_disc_trc =
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/lpfc/lpfc.h linux-3.4-pax/drivers/scsi/lpfc/lpfc.h
+--- linux-3.4/drivers/scsi/lpfc/lpfc.h 2012-05-21 11:33:24.923929151 +0200
++++ linux-3.4-pax/drivers/scsi/lpfc/lpfc.h 2012-05-21 12:10:10.776048956 +0200
+@@ -413,7 +413,7 @@ struct lpfc_vport {
+ struct dentry *debug_nodelist;
+ struct dentry *vport_debugfs_root;
+ struct lpfc_debugfs_trc *disc_trc;
+- atomic_t disc_trc_cnt;
++ atomic_unchecked_t disc_trc_cnt;
+ #endif
+ uint8_t stat_data_enabled;
+ uint8_t stat_data_blocked;
+@@ -826,8 +826,8 @@ struct lpfc_hba {
+ struct timer_list fabric_block_timer;
+ unsigned long bit_flags;
+ #define FABRIC_COMANDS_BLOCKED 0
+- atomic_t num_rsrc_err;
+- atomic_t num_cmd_success;
++ atomic_unchecked_t num_rsrc_err;
++ atomic_unchecked_t num_cmd_success;
+ unsigned long last_rsrc_error_time;
+ unsigned long last_ramp_down_time;
+ unsigned long last_ramp_up_time;
+@@ -863,7 +863,7 @@ struct lpfc_hba {
+
+ struct dentry *debug_slow_ring_trc;
+ struct lpfc_debugfs_trc *slow_ring_trc;
+- atomic_t slow_ring_trc_cnt;
++ atomic_unchecked_t slow_ring_trc_cnt;
+ /* iDiag debugfs sub-directory */
+ struct dentry *idiag_root;
+ struct dentry *idiag_pci_cfg;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/lpfc/lpfc_init.c linux-3.4-pax/drivers/scsi/lpfc/lpfc_init.c
+--- linux-3.4/drivers/scsi/lpfc/lpfc_init.c 2012-05-21 11:33:25.019929156 +0200
++++ linux-3.4-pax/drivers/scsi/lpfc/lpfc_init.c 2012-05-21 12:10:10.784048957 +0200
+@@ -10266,8 +10266,10 @@ lpfc_init(void)
+ "misc_register returned with status %d", error);
+
+ if (lpfc_enable_npiv) {
+- lpfc_transport_functions.vport_create = lpfc_vport_create;
+- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
++ pax_open_kernel();
++ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
++ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
++ pax_close_kernel();
+ }
+ lpfc_transport_template =
+ fc_attach_transport(&lpfc_transport_functions);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/lpfc/lpfc_scsi.c linux-3.4-pax/drivers/scsi/lpfc/lpfc_scsi.c
+--- linux-3.4/drivers/scsi/lpfc/lpfc_scsi.c 2012-05-21 11:33:25.055929159 +0200
++++ linux-3.4-pax/drivers/scsi/lpfc/lpfc_scsi.c 2012-05-21 12:10:10.792048957 +0200
+@@ -311,7 +311,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
+ uint32_t evt_posted;
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+- atomic_inc(&phba->num_rsrc_err);
++ atomic_inc_unchecked(&phba->num_rsrc_err);
+ phba->last_rsrc_error_time = jiffies;
+
+ if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
+@@ -352,7 +352,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
+ unsigned long flags;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t evt_posted;
+- atomic_inc(&phba->num_cmd_success);
++ atomic_inc_unchecked(&phba->num_cmd_success);
+
+ if (vport->cfg_lun_queue_depth <= queue_depth)
+ return;
+@@ -396,8 +396,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
+ unsigned long num_rsrc_err, num_cmd_success;
+ int i;
+
+- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
+- num_cmd_success = atomic_read(&phba->num_cmd_success);
++ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
++ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+@@ -417,8 +417,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+- atomic_set(&phba->num_rsrc_err, 0);
+- atomic_set(&phba->num_cmd_success, 0);
++ atomic_set_unchecked(&phba->num_rsrc_err, 0);
++ atomic_set_unchecked(&phba->num_cmd_success, 0);
+ }
+
+ /**
+@@ -452,8 +452,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+- atomic_set(&phba->num_rsrc_err, 0);
+- atomic_set(&phba->num_cmd_success, 0);
++ atomic_set_unchecked(&phba->num_rsrc_err, 0);
++ atomic_set_unchecked(&phba->num_cmd_success, 0);
+ }
+
+ /**
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/pmcraid.c linux-3.4-pax/drivers/scsi/pmcraid.c
+--- linux-3.4/drivers/scsi/pmcraid.c 2012-03-19 10:39:05.800049505 +0100
++++ linux-3.4-pax/drivers/scsi/pmcraid.c 2012-05-21 12:10:10.800048958 +0200
+@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct sc
+ res->scsi_dev = scsi_dev;
+ scsi_dev->hostdata = res;
+ res->change_detected = 0;
+- atomic_set(&res->read_failures, 0);
+- atomic_set(&res->write_failures, 0);
++ atomic_set_unchecked(&res->read_failures, 0);
++ atomic_set_unchecked(&res->write_failures, 0);
+ rc = 0;
+ }
+ spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
+@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct
+
+ /* If this was a SCSI read/write command keep count of errors */
+ if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
+- atomic_inc(&res->read_failures);
++ atomic_inc_unchecked(&res->read_failures);
+ else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
+- atomic_inc(&res->write_failures);
++ atomic_inc_unchecked(&res->write_failures);
+
+ if (!RES_IS_GSCSI(res->cfg_entry) &&
+ masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
+@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
+ * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
+ * hrrq_id assigned here in queuecommand
+ */
+- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
++ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
+ pinstance->num_hrrq;
+ cmd->cmd_done = pmcraid_io_done;
+
+@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
+ * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
+ * hrrq_id assigned here in queuecommand
+ */
+- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
++ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
+ pinstance->num_hrrq;
+
+ if (request_size) {
+@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(stru
+
+ pinstance = container_of(workp, struct pmcraid_instance, worker_q);
+ /* add resources only after host is added into system */
+- if (!atomic_read(&pinstance->expose_resources))
++ if (!atomic_read_unchecked(&pinstance->expose_resources))
+ return;
+
+ fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
+@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instan
+ init_waitqueue_head(&pinstance->reset_wait_q);
+
+ atomic_set(&pinstance->outstanding_cmds, 0);
+- atomic_set(&pinstance->last_message_id, 0);
+- atomic_set(&pinstance->expose_resources, 0);
++ atomic_set_unchecked(&pinstance->last_message_id, 0);
++ atomic_set_unchecked(&pinstance->expose_resources, 0);
+
+ INIT_LIST_HEAD(&pinstance->free_res_q);
+ INIT_LIST_HEAD(&pinstance->used_res_q);
+@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
+ /* Schedule worker thread to handle CCN and take care of adding and
+ * removing devices to OS
+ */
+- atomic_set(&pinstance->expose_resources, 1);
++ atomic_set_unchecked(&pinstance->expose_resources, 1);
+ schedule_work(&pinstance->worker_q);
+ return rc;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/pmcraid.h linux-3.4-pax/drivers/scsi/pmcraid.h
+--- linux-3.4/drivers/scsi/pmcraid.h 2012-05-21 11:33:25.747929195 +0200
++++ linux-3.4-pax/drivers/scsi/pmcraid.h 2012-05-21 12:10:10.804048958 +0200
+@@ -748,7 +748,7 @@ struct pmcraid_instance {
+ struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
+
+ /* Message id as filled in last fired IOARCB, used to identify HRRQ */
+- atomic_t last_message_id;
++ atomic_unchecked_t last_message_id;
+
+ /* configuration table */
+ struct pmcraid_config_table *cfg_table;
+@@ -777,7 +777,7 @@ struct pmcraid_instance {
+ atomic_t outstanding_cmds;
+
+ /* should add/delete resources to mid-layer now ?*/
+- atomic_t expose_resources;
++ atomic_unchecked_t expose_resources;
+
+
+
+@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
+ struct pmcraid_config_table_entry_ext cfg_entry_ext;
+ };
+ struct scsi_device *scsi_dev; /* Link scsi_device structure */
+- atomic_t read_failures; /* count of failed READ commands */
+- atomic_t write_failures; /* count of failed WRITE commands */
++ atomic_unchecked_t read_failures; /* count of failed READ commands */
++ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
+
+ /* To indicate add/delete/modify during CCN */
+ u8 change_detected;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/qla2xxx/qla_def.h linux-3.4-pax/drivers/scsi/qla2xxx/qla_def.h
+--- linux-3.4/drivers/scsi/qla2xxx/qla_def.h 2012-05-21 11:33:25.827929200 +0200
++++ linux-3.4-pax/drivers/scsi/qla2xxx/qla_def.h 2012-05-21 12:10:10.808048958 +0200
+@@ -2264,7 +2264,7 @@ struct isp_operations {
+ int (*start_scsi) (srb_t *);
+ int (*abort_isp) (struct scsi_qla_host *);
+ int (*iospace_config)(struct qla_hw_data*);
+-};
++} __no_const;
+
+ /* MSI-X Support *************************************************************/
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/qla4xxx/ql4_def.h linux-3.4-pax/drivers/scsi/qla4xxx/ql4_def.h
+--- linux-3.4/drivers/scsi/qla4xxx/ql4_def.h 2012-05-21 11:33:25.951929206 +0200
++++ linux-3.4-pax/drivers/scsi/qla4xxx/ql4_def.h 2012-05-21 12:10:10.812048958 +0200
+@@ -268,7 +268,7 @@ struct ddb_entry {
+ * (4000 only) */
+ atomic_t relogin_timer; /* Max Time to wait for
+ * relogin to complete */
+- atomic_t relogin_retry_count; /* Num of times relogin has been
++ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
+ * retried */
+ uint32_t default_time2wait; /* Default Min time between
+ * relogins (+aens) */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/qla4xxx/ql4_os.c linux-3.4-pax/drivers/scsi/qla4xxx/ql4_os.c
+--- linux-3.4/drivers/scsi/qla4xxx/ql4_os.c 2012-05-21 11:33:25.995929209 +0200
++++ linux-3.4-pax/drivers/scsi/qla4xxx/ql4_os.c 2012-05-21 12:10:10.820048959 +0200
+@@ -2551,12 +2551,12 @@ static void qla4xxx_check_relogin_flash_
+ */
+ if (!iscsi_is_session_online(cls_sess)) {
+ /* Reset retry relogin timer */
+- atomic_inc(&ddb_entry->relogin_retry_count);
++ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: index[%d] relogin timed out-retrying"
+ " relogin (%d), retry (%d)\n", __func__,
+ ddb_entry->fw_ddb_index,
+- atomic_read(&ddb_entry->relogin_retry_count),
++ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
+ ddb_entry->default_time2wait + 4));
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ atomic_set(&ddb_entry->retry_relogin_timer,
+@@ -4453,7 +4453,7 @@ static void qla4xxx_setup_flash_ddb_entr
+
+ atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+ atomic_set(&ddb_entry->relogin_timer, 0);
+- atomic_set(&ddb_entry->relogin_retry_count, 0);
++ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
+ def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+ ddb_entry->default_relogin_timeout =
+ (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/scsi.c linux-3.4-pax/drivers/scsi/scsi.c
+--- linux-3.4/drivers/scsi/scsi.c 2012-05-21 11:33:26.023929211 +0200
++++ linux-3.4-pax/drivers/scsi/scsi.c 2012-05-21 12:10:10.824048958 +0200
+@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
+ unsigned long timeout;
+ int rtn = 0;
+
+- atomic_inc(&cmd->device->iorequest_cnt);
++ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
+
+ /* check if the device is still usable */
+ if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/scsi_lib.c linux-3.4-pax/drivers/scsi/scsi_lib.c
+--- linux-3.4/drivers/scsi/scsi_lib.c 2012-05-21 11:33:26.051929212 +0200
++++ linux-3.4-pax/drivers/scsi/scsi_lib.c 2012-05-21 12:10:10.828048958 +0200
+@@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
+ shost = sdev->host;
+ scsi_init_cmd_errh(cmd);
+ cmd->result = DID_NO_CONNECT << 16;
+- atomic_inc(&cmd->device->iorequest_cnt);
++ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
+
+ /*
+ * SCSI request completion path will do scsi_device_unbusy(),
+@@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
+
+ INIT_LIST_HEAD(&cmd->eh_entry);
+
+- atomic_inc(&cmd->device->iodone_cnt);
++ atomic_inc_unchecked(&cmd->device->iodone_cnt);
+ if (cmd->result)
+- atomic_inc(&cmd->device->ioerr_cnt);
++ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
+
+ disposition = scsi_decide_disposition(cmd);
+ if (disposition != SUCCESS &&
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/scsi_sysfs.c linux-3.4-pax/drivers/scsi/scsi_sysfs.c
+--- linux-3.4/drivers/scsi/scsi_sysfs.c 2012-01-08 19:48:16.295471555 +0100
++++ linux-3.4-pax/drivers/scsi/scsi_sysfs.c 2012-05-21 12:10:10.832048960 +0200
+@@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev,
+ char *buf) \
+ { \
+ struct scsi_device *sdev = to_scsi_device(dev); \
+- unsigned long long count = atomic_read(&sdev->field); \
++ unsigned long long count = atomic_read_unchecked(&sdev->field); \
+ return snprintf(buf, 20, "0x%llx\n", count); \
+ } \
+ static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/scsi_tgt_lib.c linux-3.4-pax/drivers/scsi/scsi_tgt_lib.c
+--- linux-3.4/drivers/scsi/scsi_tgt_lib.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/scsi/scsi_tgt_lib.c 2012-05-21 12:10:10.832048960 +0200
+@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct sc
+ int err;
+
+ dprintk("%lx %u\n", uaddr, len);
+- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
++ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
+ if (err) {
+ /*
+ * TODO: need to fixup sg_tablesize, max_segment_size,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/scsi_transport_fc.c linux-3.4-pax/drivers/scsi/scsi_transport_fc.c
+--- linux-3.4/drivers/scsi/scsi_transport_fc.c 2012-05-21 11:33:26.059929212 +0200
++++ linux-3.4-pax/drivers/scsi/scsi_transport_fc.c 2012-05-21 12:10:10.840048961 +0200
+@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
+ * Netlink Infrastructure
+ */
+
+-static atomic_t fc_event_seq;
++static atomic_unchecked_t fc_event_seq;
+
+ /**
+ * fc_get_event_number - Obtain the next sequential FC event number
+@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
+ u32
+ fc_get_event_number(void)
+ {
+- return atomic_add_return(1, &fc_event_seq);
++ return atomic_add_return_unchecked(1, &fc_event_seq);
+ }
+ EXPORT_SYMBOL(fc_get_event_number);
+
+@@ -659,7 +659,7 @@ static __init int fc_transport_init(void
+ {
+ int error;
+
+- atomic_set(&fc_event_seq, 0);
++ atomic_set_unchecked(&fc_event_seq, 0);
+
+ error = transport_class_register(&fc_host_class);
+ if (error)
+@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char
+ char *cp;
+
+ *val = simple_strtoul(buf, &cp, 0);
+- if ((*cp && (*cp != '\n')) || (*val < 0))
++ if (*cp && (*cp != '\n'))
+ return -EINVAL;
+ /*
+ * Check for overflow; dev_loss_tmo is u32
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/scsi_transport_iscsi.c linux-3.4-pax/drivers/scsi/scsi_transport_iscsi.c
+--- linux-3.4/drivers/scsi/scsi_transport_iscsi.c 2012-05-21 11:33:26.063929213 +0200
++++ linux-3.4-pax/drivers/scsi/scsi_transport_iscsi.c 2012-05-21 12:10:10.844048961 +0200
+@@ -79,7 +79,7 @@ struct iscsi_internal {
+ struct transport_container session_cont;
+ };
+
+-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
++static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
+ static struct workqueue_struct *iscsi_eh_timer_workq;
+
+ static DEFINE_IDA(iscsi_sess_ida);
+@@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_s
+ int err;
+
+ ihost = shost->shost_data;
+- session->sid = atomic_add_return(1, &iscsi_session_nr);
++ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
+
+ if (target_id == ISCSI_MAX_TARGET) {
+ id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
+@@ -2940,7 +2940,7 @@ static __init int iscsi_transport_init(v
+ printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
+ ISCSI_TRANSPORT_VERSION);
+
+- atomic_set(&iscsi_session_nr, 0);
++ atomic_set_unchecked(&iscsi_session_nr, 0);
+
+ err = class_register(&iscsi_transport_class);
+ if (err)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/scsi_transport_srp.c linux-3.4-pax/drivers/scsi/scsi_transport_srp.c
+--- linux-3.4/drivers/scsi/scsi_transport_srp.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/scsi/scsi_transport_srp.c 2012-05-21 12:10:10.848048960 +0200
+@@ -33,7 +33,7 @@
+ #include "scsi_transport_srp_internal.h"
+
+ struct srp_host_attrs {
+- atomic_t next_port_id;
++ atomic_unchecked_t next_port_id;
+ };
+ #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
+
+@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
+
+- atomic_set(&srp_host->next_port_id, 0);
++ atomic_set_unchecked(&srp_host->next_port_id, 0);
+ return 0;
+ }
+
+@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
+ memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
+ rport->roles = ids->roles;
+
+- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
++ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
+ dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
+
+ transport_setup_device(&rport->dev);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/scsi/sg.c linux-3.4-pax/drivers/scsi/sg.c
+--- linux-3.4/drivers/scsi/sg.c 2012-03-19 10:39:05.888049499 +0100
++++ linux-3.4-pax/drivers/scsi/sg.c 2012-05-21 12:10:10.852048960 +0200
+@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int
+ sdp->disk->disk_name,
+ MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
+ NULL,
+- (char *)arg);
++ (char __user *)arg);
+ case BLKTRACESTART:
+ return blk_trace_startstop(sdp->device->request_queue, 1);
+ case BLKTRACESTOP:
+@@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
+ const struct file_operations * fops;
+ };
+
+-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
++static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
+ {"allow_dio", &adio_fops},
+ {"debug", &debug_fops},
+ {"def_reserved_size", &dressz_fops},
+@@ -2332,7 +2332,7 @@ sg_proc_init(void)
+ if (!sg_proc_sgp)
+ return 1;
+ for (k = 0; k < num_leaves; ++k) {
+- struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
++ const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
+ umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
+ proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/spi/spi.c linux-3.4-pax/drivers/spi/spi.c
+--- linux-3.4/drivers/spi/spi.c 2012-05-21 11:33:26.455929234 +0200
++++ linux-3.4-pax/drivers/spi/spi.c 2012-05-21 12:10:10.856048961 +0200
+@@ -1361,7 +1361,7 @@ int spi_bus_unlock(struct spi_master *ma
+ EXPORT_SYMBOL_GPL(spi_bus_unlock);
+
+ /* portable code must never pass more than 32 bytes */
+-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
++#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
+
+ static u8 *buf;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/staging/octeon/ethernet.c linux-3.4-pax/drivers/staging/octeon/ethernet.c
+--- linux-3.4/drivers/staging/octeon/ethernet.c 2012-05-21 11:33:27.591929296 +0200
++++ linux-3.4-pax/drivers/staging/octeon/ethernet.c 2012-05-21 12:10:10.856048961 +0200
+@@ -259,11 +259,11 @@ static struct net_device_stats *cvm_oct_
+ * since the RX tasklet also increments it.
+ */
+ #ifdef CONFIG_64BIT
+- atomic64_add(rx_status.dropped_packets,
+- (atomic64_t *)&priv->stats.rx_dropped);
++ atomic64_add_unchecked(rx_status.dropped_packets,
++ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
+ #else
+- atomic_add(rx_status.dropped_packets,
+- (atomic_t *)&priv->stats.rx_dropped);
++ atomic_add_unchecked(rx_status.dropped_packets,
++ (atomic_unchecked_t *)&priv->stats.rx_dropped);
+ #endif
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/staging/octeon/ethernet-rx.c linux-3.4-pax/drivers/staging/octeon/ethernet-rx.c
+--- linux-3.4/drivers/staging/octeon/ethernet-rx.c 2012-05-21 11:33:27.583929295 +0200
++++ linux-3.4-pax/drivers/staging/octeon/ethernet-rx.c 2012-05-21 12:10:10.860048961 +0200
+@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi
+ /* Increment RX stats for virtual ports */
+ if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
+ #ifdef CONFIG_64BIT
+- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
+- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
++ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
++ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
+ #else
+- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
+- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
++ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
++ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
+ #endif
+ }
+ netif_receive_skb(skb);
+@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi
+ dev->name);
+ */
+ #ifdef CONFIG_64BIT
+- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
++ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
+ #else
+- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
++ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
+ #endif
+ dev_kfree_skb_irq(skb);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/staging/rtl8712/rtl871x_io.h linux-3.4-pax/drivers/staging/rtl8712/rtl871x_io.h
+--- linux-3.4/drivers/staging/rtl8712/rtl871x_io.h 2012-05-21 11:33:27.951929315 +0200
++++ linux-3.4-pax/drivers/staging/rtl8712/rtl871x_io.h 2012-05-21 12:10:10.864048961 +0200
+@@ -108,7 +108,7 @@ struct _io_ops {
+ u8 *pmem);
+ u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
+ u8 *pmem);
+-};
++} __no_const;
+
+ struct io_req {
+ struct list_head list;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/staging/sbe-2t3e3/netdev.c linux-3.4-pax/drivers/staging/sbe-2t3e3/netdev.c
+--- linux-3.4/drivers/staging/sbe-2t3e3/netdev.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/staging/sbe-2t3e3/netdev.c 2012-05-21 12:10:10.868048961 +0200
+@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
+ t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
+
+ if (rlen)
+- if (copy_to_user(data, &resp, rlen))
++ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
+ return -EFAULT;
+
+ return 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/staging/speakup/speakup_soft.c linux-3.4-pax/drivers/staging/speakup/speakup_soft.c
+--- linux-3.4/drivers/staging/speakup/speakup_soft.c 2011-10-24 12:48:37.219091188 +0200
++++ linux-3.4-pax/drivers/staging/speakup/speakup_soft.c 2012-05-21 12:10:10.868048961 +0200
+@@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct fil
+ break;
+ } else if (!initialized) {
+ if (*init) {
+- ch = *init;
+ init++;
+ } else {
+ initialized = 1;
+ }
++ ch = *init;
+ } else {
+ ch = synth_buffer_getc();
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/staging/usbip/usbip_common.h linux-3.4-pax/drivers/staging/usbip/usbip_common.h
+--- linux-3.4/drivers/staging/usbip/usbip_common.h 2012-05-21 11:33:28.723929358 +0200
++++ linux-3.4-pax/drivers/staging/usbip/usbip_common.h 2012-05-21 12:10:10.872048962 +0200
+@@ -289,7 +289,7 @@ struct usbip_device {
+ void (*shutdown)(struct usbip_device *);
+ void (*reset)(struct usbip_device *);
+ void (*unusable)(struct usbip_device *);
+- } eh_ops;
++ } __no_const eh_ops;
+ };
+
+ /* usbip_common.c */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/staging/usbip/vhci.h linux-3.4-pax/drivers/staging/usbip/vhci.h
+--- linux-3.4/drivers/staging/usbip/vhci.h 2012-01-08 19:48:19.835471366 +0100
++++ linux-3.4-pax/drivers/staging/usbip/vhci.h 2012-05-21 12:10:10.872048962 +0200
+@@ -88,7 +88,7 @@ struct vhci_hcd {
+ unsigned resuming:1;
+ unsigned long re_timeout;
+
+- atomic_t seqnum;
++ atomic_unchecked_t seqnum;
+
+ /*
+ * NOTE:
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/staging/usbip/vhci_hcd.c linux-3.4-pax/drivers/staging/usbip/vhci_hcd.c
+--- linux-3.4/drivers/staging/usbip/vhci_hcd.c 2012-05-21 11:33:28.739929358 +0200
++++ linux-3.4-pax/drivers/staging/usbip/vhci_hcd.c 2012-05-21 12:10:10.876048962 +0200
+@@ -488,7 +488,7 @@ static void vhci_tx_urb(struct urb *urb)
+ return;
+ }
+
+- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
++ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
+ if (priv->seqnum == 0xffff)
+ dev_info(&urb->dev->dev, "seqnum max\n");
+
+@@ -740,7 +740,7 @@ static int vhci_urb_dequeue(struct usb_h
+ return -ENOMEM;
+ }
+
+- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
++ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
+ if (unlink->seqnum == 0xffff)
+ pr_info("seqnum max\n");
+
+@@ -928,7 +928,7 @@ static int vhci_start(struct usb_hcd *hc
+ vdev->rhport = rhport;
+ }
+
+- atomic_set(&vhci->seqnum, 0);
++ atomic_set_unchecked(&vhci->seqnum, 0);
+ spin_lock_init(&vhci->lock);
+
+ hcd->power_budget = 0; /* no limit */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/staging/usbip/vhci_rx.c linux-3.4-pax/drivers/staging/usbip/vhci_rx.c
+--- linux-3.4/drivers/staging/usbip/vhci_rx.c 2012-05-21 11:33:28.743929358 +0200
++++ linux-3.4-pax/drivers/staging/usbip/vhci_rx.c 2012-05-21 12:10:10.880048962 +0200
+@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct
+ if (!urb) {
+ pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
+ pr_info("max seqnum %d\n",
+- atomic_read(&the_controller->seqnum));
++ atomic_read_unchecked(&the_controller->seqnum));
+ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+ return;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/staging/vt6655/hostap.c linux-3.4-pax/drivers/staging/vt6655/hostap.c
+--- linux-3.4/drivers/staging/vt6655/hostap.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/staging/vt6655/hostap.c 2012-05-21 12:10:10.880048962 +0200
+@@ -79,14 +79,13 @@ static int msglevel
+ *
+ */
+
++static net_device_ops_no_const apdev_netdev_ops;
++
+ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
+ {
+ PSDevice apdev_priv;
+ struct net_device *dev = pDevice->dev;
+ int ret;
+- const struct net_device_ops apdev_netdev_ops = {
+- .ndo_start_xmit = pDevice->tx_80211,
+- };
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
+
+@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
+ *apdev_priv = *pDevice;
+ memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
+
++ /* only half broken now */
++ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
+ pDevice->apdev->netdev_ops = &apdev_netdev_ops;
+
+ pDevice->apdev->type = ARPHRD_IEEE80211;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/staging/vt6656/hostap.c linux-3.4-pax/drivers/staging/vt6656/hostap.c
+--- linux-3.4/drivers/staging/vt6656/hostap.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/staging/vt6656/hostap.c 2012-05-21 12:10:10.884048962 +0200
+@@ -80,14 +80,13 @@ static int msglevel
+ *
+ */
+
++static net_device_ops_no_const apdev_netdev_ops;
++
+ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
+ {
+ PSDevice apdev_priv;
+ struct net_device *dev = pDevice->dev;
+ int ret;
+- const struct net_device_ops apdev_netdev_ops = {
+- .ndo_start_xmit = pDevice->tx_80211,
+- };
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
+
+@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
+ *apdev_priv = *pDevice;
+ memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
+
++ /* only half broken now */
++ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
+ pDevice->apdev->netdev_ops = &apdev_netdev_ops;
+
+ pDevice->apdev->type = ARPHRD_IEEE80211;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.4-pax/drivers/staging/wlan-ng/hfa384x_usb.c
+--- linux-3.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/staging/wlan-ng/hfa384x_usb.c 2012-05-21 12:10:10.888048962 +0200
+@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
+
+ struct usbctlx_completor {
+ int (*complete) (struct usbctlx_completor *);
+-};
++} __no_const;
+
+ static int
+ hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/staging/zcache/tmem.c linux-3.4-pax/drivers/staging/zcache/tmem.c
+--- linux-3.4/drivers/staging/zcache/tmem.c 2011-10-24 12:48:37.639091165 +0200
++++ linux-3.4-pax/drivers/staging/zcache/tmem.c 2012-05-21 12:10:10.892048963 +0200
+@@ -39,7 +39,7 @@
+ * A tmem host implementation must use this function to register callbacks
+ * for memory allocation.
+ */
+-static struct tmem_hostops tmem_hostops;
++static tmem_hostops_no_const tmem_hostops;
+
+ static void tmem_objnode_tree_init(void);
+
+@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
+ * A tmem host implementation must use this function to register
+ * callbacks for a page-accessible memory (PAM) implementation
+ */
+-static struct tmem_pamops tmem_pamops;
++static tmem_pamops_no_const tmem_pamops;
+
+ void tmem_register_pamops(struct tmem_pamops *m)
+ {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/staging/zcache/tmem.h linux-3.4-pax/drivers/staging/zcache/tmem.h
+--- linux-3.4/drivers/staging/zcache/tmem.h 2012-05-21 11:33:29.007929372 +0200
++++ linux-3.4-pax/drivers/staging/zcache/tmem.h 2012-05-21 12:10:10.892048963 +0200
+@@ -180,6 +180,7 @@ struct tmem_pamops {
+ void (*new_obj)(struct tmem_obj *);
+ int (*replace_in_obj)(void *, struct tmem_obj *);
+ };
++typedef struct tmem_pamops __no_const tmem_pamops_no_const;
+ extern void tmem_register_pamops(struct tmem_pamops *m);
+
+ /* memory allocation methods provided by the host implementation */
+@@ -189,6 +190,7 @@ struct tmem_hostops {
+ struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
+ void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
+ };
++typedef struct tmem_hostops __no_const tmem_hostops_no_const;
+ extern void tmem_register_hostops(struct tmem_hostops *m);
+
+ /* core tmem accessor functions */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/target/target_core_tmr.c linux-3.4-pax/drivers/target/target_core_tmr.c
+--- linux-3.4/drivers/target/target_core_tmr.c 2012-05-21 11:33:29.231929385 +0200
++++ linux-3.4-pax/drivers/target/target_core_tmr.c 2012-05-21 12:10:10.896048963 +0200
+@@ -327,7 +327,7 @@ static void core_tmr_drain_task_list(
+ cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
+ cmd->t_task_list_num,
+ atomic_read(&cmd->t_task_cdbs_left),
+- atomic_read(&cmd->t_task_cdbs_sent),
++ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
+ (cmd->transport_state & CMD_T_ACTIVE) != 0,
+ (cmd->transport_state & CMD_T_STOP) != 0,
+ (cmd->transport_state & CMD_T_SENT) != 0);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/target/target_core_transport.c linux-3.4-pax/drivers/target/target_core_transport.c
+--- linux-3.4/drivers/target/target_core_transport.c 2012-05-21 11:33:29.239929385 +0200
++++ linux-3.4-pax/drivers/target/target_core_transport.c 2012-05-21 12:10:10.900048963 +0200
+@@ -1355,7 +1355,7 @@ struct se_device *transport_add_device_t
+ spin_lock_init(&dev->se_port_lock);
+ spin_lock_init(&dev->se_tmr_lock);
+ spin_lock_init(&dev->qf_cmd_lock);
+- atomic_set(&dev->dev_ordered_id, 0);
++ atomic_set_unchecked(&dev->dev_ordered_id, 0);
+
+ se_dev_set_default_attribs(dev, dev_limits);
+
+@@ -1542,7 +1542,7 @@ static int transport_check_alloc_task_at
+ * Used to determine when ORDERED commands should go from
+ * Dormant to Active status.
+ */
+- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
++ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
+ smp_mb__after_atomic_inc();
+ pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
+ cmd->se_ordered_id, cmd->sam_task_attr,
+@@ -1956,7 +1956,7 @@ void transport_generic_request_failure(s
+ " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
+ cmd->t_task_list_num,
+ atomic_read(&cmd->t_task_cdbs_left),
+- atomic_read(&cmd->t_task_cdbs_sent),
++ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
+ atomic_read(&cmd->t_task_cdbs_ex_left),
+ (cmd->transport_state & CMD_T_ACTIVE) != 0,
+ (cmd->transport_state & CMD_T_STOP) != 0,
+@@ -2216,9 +2216,9 @@ check_depth:
+ cmd = task->task_se_cmd;
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ task->task_flags |= (TF_ACTIVE | TF_SENT);
+- atomic_inc(&cmd->t_task_cdbs_sent);
++ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
+
+- if (atomic_read(&cmd->t_task_cdbs_sent) ==
++ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
+ cmd->t_task_list_num)
+ cmd->transport_state |= CMD_T_SENT;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/tty/hvc/hvcs.c linux-3.4-pax/drivers/tty/hvc/hvcs.c
+--- linux-3.4/drivers/tty/hvc/hvcs.c 2012-05-21 11:33:29.359929392 +0200
++++ linux-3.4-pax/drivers/tty/hvc/hvcs.c 2012-05-21 12:10:10.908048963 +0200
+@@ -83,6 +83,7 @@
+ #include <asm/hvcserver.h>
+ #include <asm/uaccess.h>
+ #include <asm/vio.h>
++#include <asm/local.h>
+
+ /*
+ * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
+@@ -270,7 +271,7 @@ struct hvcs_struct {
+ unsigned int index;
+
+ struct tty_struct *tty;
+- int open_count;
++ local_t open_count;
+
+ /*
+ * Used to tell the driver kernel_thread what operations need to take
+@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+
+- if (hvcsd->open_count > 0) {
++ if (local_read(&hvcsd->open_count) > 0) {
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ printk(KERN_INFO "HVCS: vterm state unchanged. "
+ "The hvcs device node is still in use.\n");
+@@ -1138,7 +1139,7 @@ static int hvcs_open(struct tty_struct *
+ if ((retval = hvcs_partner_connect(hvcsd)))
+ goto error_release;
+
+- hvcsd->open_count = 1;
++ local_set(&hvcsd->open_count, 1);
+ hvcsd->tty = tty;
+ tty->driver_data = hvcsd;
+
+@@ -1172,7 +1173,7 @@ fast_open:
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ kref_get(&hvcsd->kref);
+- hvcsd->open_count++;
++ local_inc(&hvcsd->open_count);
+ hvcsd->todo_mask |= HVCS_SCHED_READ;
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+
+@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct
+ hvcsd = tty->driver_data;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+- if (--hvcsd->open_count == 0) {
++ if (local_dec_and_test(&hvcsd->open_count)) {
+
+ vio_disable_interrupts(hvcsd->vdev);
+
+@@ -1242,10 +1243,10 @@ static void hvcs_close(struct tty_struct
+ free_irq(irq, hvcsd);
+ kref_put(&hvcsd->kref, destroy_hvcs_struct);
+ return;
+- } else if (hvcsd->open_count < 0) {
++ } else if (local_read(&hvcsd->open_count) < 0) {
+ printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
+ " is missmanaged.\n",
+- hvcsd->vdev->unit_address, hvcsd->open_count);
++ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
+ }
+
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+@@ -1261,7 +1262,7 @@ static void hvcs_hangup(struct tty_struc
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ /* Preserve this so that we know how many kref refs to put */
+- temp_open_count = hvcsd->open_count;
++ temp_open_count = local_read(&hvcsd->open_count);
+
+ /*
+ * Don't kref put inside the spinlock because the destruction
+@@ -1276,7 +1277,7 @@ static void hvcs_hangup(struct tty_struc
+ hvcsd->tty->driver_data = NULL;
+ hvcsd->tty = NULL;
+
+- hvcsd->open_count = 0;
++ local_set(&hvcsd->open_count, 0);
+
+ /* This will drop any buffered data on the floor which is OK in a hangup
+ * scenario. */
+@@ -1347,7 +1348,7 @@ static int hvcs_write(struct tty_struct
+ * the middle of a write operation? This is a crummy place to do this
+ * but we want to keep it all in the spinlock.
+ */
+- if (hvcsd->open_count <= 0) {
++ if (local_read(&hvcsd->open_count) <= 0) {
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return -ENODEV;
+ }
+@@ -1421,7 +1422,7 @@ static int hvcs_write_room(struct tty_st
+ {
+ struct hvcs_struct *hvcsd = tty->driver_data;
+
+- if (!hvcsd || hvcsd->open_count <= 0)
++ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
+ return 0;
+
+ return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/tty/ipwireless/tty.c linux-3.4-pax/drivers/tty/ipwireless/tty.c
+--- linux-3.4/drivers/tty/ipwireless/tty.c 2012-05-21 11:33:29.375929392 +0200
++++ linux-3.4-pax/drivers/tty/ipwireless/tty.c 2012-05-21 12:10:10.912048964 +0200
+@@ -29,6 +29,7 @@
+ #include <linux/tty_driver.h>
+ #include <linux/tty_flip.h>
+ #include <linux/uaccess.h>
++#include <asm/local.h>
+
+ #include "tty.h"
+ #include "network.h"
+@@ -51,7 +52,7 @@ struct ipw_tty {
+ int tty_type;
+ struct ipw_network *network;
+ struct tty_struct *linux_tty;
+- int open_count;
++ local_t open_count;
+ unsigned int control_lines;
+ struct mutex ipw_tty_mutex;
+ int tx_bytes_queued;
+@@ -117,10 +118,10 @@ static int ipw_open(struct tty_struct *l
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return -ENODEV;
+ }
+- if (tty->open_count == 0)
++ if (local_read(&tty->open_count) == 0)
+ tty->tx_bytes_queued = 0;
+
+- tty->open_count++;
++ local_inc(&tty->open_count);
+
+ tty->linux_tty = linux_tty;
+ linux_tty->driver_data = tty;
+@@ -136,9 +137,7 @@ static int ipw_open(struct tty_struct *l
+
+ static void do_ipw_close(struct ipw_tty *tty)
+ {
+- tty->open_count--;
+-
+- if (tty->open_count == 0) {
++ if (local_dec_return(&tty->open_count) == 0) {
+ struct tty_struct *linux_tty = tty->linux_tty;
+
+ if (linux_tty != NULL) {
+@@ -159,7 +158,7 @@ static void ipw_hangup(struct tty_struct
+ return;
+
+ mutex_lock(&tty->ipw_tty_mutex);
+- if (tty->open_count == 0) {
++ if (local_read(&tty->open_count) == 0) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return;
+ }
+@@ -188,7 +187,7 @@ void ipwireless_tty_received(struct ipw_
+ return;
+ }
+
+- if (!tty->open_count) {
++ if (!local_read(&tty->open_count)) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return;
+ }
+@@ -230,7 +229,7 @@ static int ipw_write(struct tty_struct *
+ return -ENODEV;
+
+ mutex_lock(&tty->ipw_tty_mutex);
+- if (!tty->open_count) {
++ if (!local_read(&tty->open_count)) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return -EINVAL;
+ }
+@@ -270,7 +269,7 @@ static int ipw_write_room(struct tty_str
+ if (!tty)
+ return -ENODEV;
+
+- if (!tty->open_count)
++ if (!local_read(&tty->open_count))
+ return -EINVAL;
+
+ room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
+@@ -312,7 +311,7 @@ static int ipw_chars_in_buffer(struct tt
+ if (!tty)
+ return 0;
+
+- if (!tty->open_count)
++ if (!local_read(&tty->open_count))
+ return 0;
+
+ return tty->tx_bytes_queued;
+@@ -393,7 +392,7 @@ static int ipw_tiocmget(struct tty_struc
+ if (!tty)
+ return -ENODEV;
+
+- if (!tty->open_count)
++ if (!local_read(&tty->open_count))
+ return -EINVAL;
+
+ return get_control_lines(tty);
+@@ -409,7 +408,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
+ if (!tty)
+ return -ENODEV;
+
+- if (!tty->open_count)
++ if (!local_read(&tty->open_count))
+ return -EINVAL;
+
+ return set_control_lines(tty, set, clear);
+@@ -423,7 +422,7 @@ static int ipw_ioctl(struct tty_struct *
+ if (!tty)
+ return -ENODEV;
+
+- if (!tty->open_count)
++ if (!local_read(&tty->open_count))
+ return -EINVAL;
+
+ /* FIXME: Exactly how is the tty object locked here .. */
+@@ -572,7 +571,7 @@ void ipwireless_tty_free(struct ipw_tty
+ against a parallel ioctl etc */
+ mutex_lock(&ttyj->ipw_tty_mutex);
+ }
+- while (ttyj->open_count)
++ while (local_read(&ttyj->open_count))
+ do_ipw_close(ttyj);
+ ipwireless_disassociate_network_ttys(network,
+ ttyj->channel_idx);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/tty/n_gsm.c linux-3.4-pax/drivers/tty/n_gsm.c
+--- linux-3.4/drivers/tty/n_gsm.c 2012-05-21 11:33:29.387929393 +0200
++++ linux-3.4-pax/drivers/tty/n_gsm.c 2012-05-21 12:10:10.916048964 +0200
+@@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
+ kref_init(&dlci->ref);
+ mutex_init(&dlci->mutex);
+ dlci->fifo = &dlci->_fifo;
+- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
++ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
+ kfree(dlci);
+ return NULL;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/tty/n_tty.c linux-3.4-pax/drivers/tty/n_tty.c
+--- linux-3.4/drivers/tty/n_tty.c 2012-05-21 11:33:29.395929394 +0200
++++ linux-3.4-pax/drivers/tty/n_tty.c 2012-05-21 12:10:10.920048964 +0200
+@@ -2122,6 +2122,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
+ {
+ *ops = tty_ldisc_N_TTY;
+ ops->owner = NULL;
+- ops->refcount = ops->flags = 0;
++ atomic_set(&ops->refcount, 0);
++ ops->flags = 0;
+ }
+ EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/tty/pty.c linux-3.4-pax/drivers/tty/pty.c
+--- linux-3.4/drivers/tty/pty.c 2012-05-21 11:33:29.407929394 +0200
++++ linux-3.4-pax/drivers/tty/pty.c 2012-05-21 12:10:10.920048964 +0200
+@@ -707,8 +707,10 @@ static void __init unix98_pty_init(void)
+ panic("Couldn't register Unix98 pts driver");
+
+ /* Now create the /dev/ptmx special device */
++ pax_open_kernel();
+ tty_default_fops(&ptmx_fops);
+- ptmx_fops.open = ptmx_open;
++ *(void **)&ptmx_fops.open = ptmx_open;
++ pax_close_kernel();
+
+ cdev_init(&ptmx_cdev, &ptmx_fops);
+ if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/tty/serial/kgdboc.c linux-3.4-pax/drivers/tty/serial/kgdboc.c
+--- linux-3.4/drivers/tty/serial/kgdboc.c 2012-01-08 19:48:20.619471324 +0100
++++ linux-3.4-pax/drivers/tty/serial/kgdboc.c 2012-05-21 12:10:10.924048964 +0200
+@@ -24,8 +24,9 @@
+ #define MAX_CONFIG_LEN 40
+
+ static struct kgdb_io kgdboc_io_ops;
++static struct kgdb_io kgdboc_io_ops_console;
+
+-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
++/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
+ static int configured = -1;
+
+ static char config[MAX_CONFIG_LEN];
+@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
+ kgdboc_unregister_kbd();
+ if (configured == 1)
+ kgdb_unregister_io_module(&kgdboc_io_ops);
++ else if (configured == 2)
++ kgdb_unregister_io_module(&kgdboc_io_ops_console);
+ }
+
+ static int configure_kgdboc(void)
+@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
+ int err;
+ char *cptr = config;
+ struct console *cons;
++ int is_console = 0;
+
+ err = kgdboc_option_setup(config);
+ if (err || !strlen(config) || isspace(config[0]))
+ goto noconfig;
+
+ err = -ENODEV;
+- kgdboc_io_ops.is_console = 0;
+ kgdb_tty_driver = NULL;
+
+ kgdboc_use_kms = 0;
+@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
+ int idx;
+ if (cons->device && cons->device(cons, &idx) == p &&
+ idx == tty_line) {
+- kgdboc_io_ops.is_console = 1;
++ is_console = 1;
+ break;
+ }
+ cons = cons->next;
+@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
+ kgdb_tty_line = tty_line;
+
+ do_register:
+- err = kgdb_register_io_module(&kgdboc_io_ops);
++ if (is_console) {
++ err = kgdb_register_io_module(&kgdboc_io_ops_console);
++ configured = 2;
++ } else {
++ err = kgdb_register_io_module(&kgdboc_io_ops);
++ configured = 1;
++ }
+ if (err)
+ goto noconfig;
+
+- configured = 1;
+-
+ return 0;
+
+ noconfig:
+@@ -213,7 +220,7 @@ noconfig:
+ static int __init init_kgdboc(void)
+ {
+ /* Already configured? */
+- if (configured == 1)
++ if (configured >= 1)
+ return 0;
+
+ return configure_kgdboc();
+@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const ch
+ if (config[len - 1] == '\n')
+ config[len - 1] = '\0';
+
+- if (configured == 1)
++ if (configured >= 1)
+ cleanup_kgdboc();
+
+ /* Go and configure with the new params. */
+@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
+ .post_exception = kgdboc_post_exp_handler,
+ };
+
++static struct kgdb_io kgdboc_io_ops_console = {
++ .name = "kgdboc",
++ .read_char = kgdboc_get_char,
++ .write_char = kgdboc_put_char,
++ .pre_exception = kgdboc_pre_exp_handler,
++ .post_exception = kgdboc_post_exp_handler,
++ .is_console = 1
++};
++
+ #ifdef CONFIG_KGDB_SERIAL_CONSOLE
+ /* This is only available if kgdboc is a built in for early debugging */
+ static int __init kgdboc_early_init(char *opt)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/tty/tty_io.c linux-3.4-pax/drivers/tty/tty_io.c
+--- linux-3.4/drivers/tty/tty_io.c 2012-05-21 11:33:29.991929426 +0200
++++ linux-3.4-pax/drivers/tty/tty_io.c 2012-05-21 12:10:10.928048965 +0200
+@@ -3278,7 +3278,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
+
+ void tty_default_fops(struct file_operations *fops)
+ {
+- *fops = tty_fops;
++ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
+ }
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/tty/tty_ldisc.c linux-3.4-pax/drivers/tty/tty_ldisc.c
+--- linux-3.4/drivers/tty/tty_ldisc.c 2012-03-19 10:39:07.980049389 +0100
++++ linux-3.4-pax/drivers/tty/tty_ldisc.c 2012-05-21 12:10:10.932048965 +0200
+@@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *
+ if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
+ struct tty_ldisc_ops *ldo = ld->ops;
+
+- ldo->refcount--;
++ atomic_dec(&ldo->refcount);
+ module_put(ldo->owner);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+@@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ tty_ldiscs[disc] = new_ldisc;
+ new_ldisc->num = disc;
+- new_ldisc->refcount = 0;
++ atomic_set(&new_ldisc->refcount, 0);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+ return ret;
+@@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
+ return -EINVAL;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+- if (tty_ldiscs[disc]->refcount)
++ if (atomic_read(&tty_ldiscs[disc]->refcount))
+ ret = -EBUSY;
+ else
+ tty_ldiscs[disc] = NULL;
+@@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(i
+ if (ldops) {
+ ret = ERR_PTR(-EAGAIN);
+ if (try_module_get(ldops->owner)) {
+- ldops->refcount++;
++ atomic_inc(&ldops->refcount);
+ ret = ldops;
+ }
+ }
+@@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_o
+ unsigned long flags;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+- ldops->refcount--;
++ atomic_dec(&ldops->refcount);
+ module_put(ldops->owner);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/uio/uio.c linux-3.4-pax/drivers/uio/uio.c
+--- linux-3.4/drivers/uio/uio.c 2012-01-08 19:48:20.775471316 +0100
++++ linux-3.4-pax/drivers/uio/uio.c 2012-05-21 12:10:10.932048965 +0200
+@@ -25,6 +25,7 @@
+ #include <linux/kobject.h>
+ #include <linux/cdev.h>
+ #include <linux/uio_driver.h>
++#include <asm/local.h>
+
+ #define UIO_MAX_DEVICES (1U << MINORBITS)
+
+@@ -32,10 +33,10 @@ struct uio_device {
+ struct module *owner;
+ struct device *dev;
+ int minor;
+- atomic_t event;
++ atomic_unchecked_t event;
+ struct fasync_struct *async_queue;
+ wait_queue_head_t wait;
+- int vma_count;
++ local_t vma_count;
+ struct uio_info *info;
+ struct kobject *map_dir;
+ struct kobject *portio_dir;
+@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
+ struct device_attribute *attr, char *buf)
+ {
+ struct uio_device *idev = dev_get_drvdata(dev);
+- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
++ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
+ }
+
+ static struct device_attribute uio_class_attributes[] = {
+@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
+ {
+ struct uio_device *idev = info->uio_dev;
+
+- atomic_inc(&idev->event);
++ atomic_inc_unchecked(&idev->event);
+ wake_up_interruptible(&idev->wait);
+ kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
+ }
+@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
+ }
+
+ listener->dev = idev;
+- listener->event_count = atomic_read(&idev->event);
++ listener->event_count = atomic_read_unchecked(&idev->event);
+ filep->private_data = listener;
+
+ if (idev->info->open) {
+@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
+ return -EIO;
+
+ poll_wait(filep, &idev->wait, wait);
+- if (listener->event_count != atomic_read(&idev->event))
++ if (listener->event_count != atomic_read_unchecked(&idev->event))
+ return POLLIN | POLLRDNORM;
+ return 0;
+ }
+@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+- event_count = atomic_read(&idev->event);
++ event_count = atomic_read_unchecked(&idev->event);
+ if (event_count != listener->event_count) {
+ if (copy_to_user(buf, &event_count, count))
+ retval = -EFAULT;
+@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
+ static void uio_vma_open(struct vm_area_struct *vma)
+ {
+ struct uio_device *idev = vma->vm_private_data;
+- idev->vma_count++;
++ local_inc(&idev->vma_count);
+ }
+
+ static void uio_vma_close(struct vm_area_struct *vma)
+ {
+ struct uio_device *idev = vma->vm_private_data;
+- idev->vma_count--;
++ local_dec(&idev->vma_count);
+ }
+
+ static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+@@ -821,7 +822,7 @@ int __uio_register_device(struct module
+ idev->owner = owner;
+ idev->info = info;
+ init_waitqueue_head(&idev->wait);
+- atomic_set(&idev->event, 0);
++ atomic_set_unchecked(&idev->event, 0);
+
+ ret = uio_get_minor(idev);
+ if (ret)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/usb/atm/cxacru.c linux-3.4-pax/drivers/usb/atm/cxacru.c
+--- linux-3.4/drivers/usb/atm/cxacru.c 2012-03-19 10:39:08.024049385 +0100
++++ linux-3.4-pax/drivers/usb/atm/cxacru.c 2012-05-21 12:10:10.936048965 +0200
+@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
+ ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
+ if (ret < 2)
+ return -EINVAL;
+- if (index < 0 || index > 0x7f)
++ if (index > 0x7f)
+ return -EINVAL;
+ pos += tmp;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/usb/atm/usbatm.c linux-3.4-pax/drivers/usb/atm/usbatm.c
+--- linux-3.4/drivers/usb/atm/usbatm.c 2011-10-24 12:48:38.263091132 +0200
++++ linux-3.4-pax/drivers/usb/atm/usbatm.c 2012-05-21 12:10:10.940048965 +0200
+@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
+ if (printk_ratelimit())
+ atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
+ __func__, vpi, vci);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+
+@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
+ if (length > ATM_MAX_AAL5_PDU) {
+ atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
+ __func__, length, vcc);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out;
+ }
+
+@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
+ if (sarb->len < pdu_length) {
+ atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
+ __func__, pdu_length, sarb->len, vcc);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out;
+ }
+
+ if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
+ atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
+ __func__, vcc);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out;
+ }
+
+@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
+ if (printk_ratelimit())
+ atm_err(instance, "%s: no memory for skb (length: %u)!\n",
+ __func__, length);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ goto out;
+ }
+
+@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
+
+ vcc->push(vcc, skb);
+
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ out:
+ skb_trim(sarb, 0);
+ }
+@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned l
+ struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
+
+ usbatm_pop(vcc, skb);
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ skb = skb_dequeue(&instance->sndqueue);
+ }
+@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
+ if (!left--)
+ return sprintf(page,
+ "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
+- atomic_read(&atm_dev->stats.aal5.tx),
+- atomic_read(&atm_dev->stats.aal5.tx_err),
+- atomic_read(&atm_dev->stats.aal5.rx),
+- atomic_read(&atm_dev->stats.aal5.rx_err),
+- atomic_read(&atm_dev->stats.aal5.rx_drop));
++ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
++ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
++ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
++ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
++ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
+
+ if (!left--) {
+ if (instance->disconnected)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/usb/core/devices.c linux-3.4-pax/drivers/usb/core/devices.c
+--- linux-3.4/drivers/usb/core/devices.c 2012-01-08 19:48:20.807471314 +0100
++++ linux-3.4-pax/drivers/usb/core/devices.c 2012-05-21 12:10:10.944048965 +0200
+@@ -126,7 +126,7 @@ static const char format_endpt[] =
+ * time it gets called.
+ */
+ static struct device_connect_event {
+- atomic_t count;
++ atomic_unchecked_t count;
+ wait_queue_head_t wait;
+ } device_event = {
+ .count = ATOMIC_INIT(1),
+@@ -164,7 +164,7 @@ static const struct class_info clas_info
+
+ void usbfs_conn_disc_event(void)
+ {
+- atomic_add(2, &device_event.count);
++ atomic_add_unchecked(2, &device_event.count);
+ wake_up(&device_event.wait);
+ }
+
+@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
+
+ poll_wait(file, &device_event.wait, wait);
+
+- event_count = atomic_read(&device_event.count);
++ event_count = atomic_read_unchecked(&device_event.count);
+ if (file->f_version != event_count) {
+ file->f_version = event_count;
+ return POLLIN | POLLRDNORM;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/usb/early/ehci-dbgp.c linux-3.4-pax/drivers/usb/early/ehci-dbgp.c
+--- linux-3.4/drivers/usb/early/ehci-dbgp.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/usb/early/ehci-dbgp.c 2012-05-21 12:10:10.944048965 +0200
+@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
+
+ #ifdef CONFIG_KGDB
+ static struct kgdb_io kgdbdbgp_io_ops;
+-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
++static struct kgdb_io kgdbdbgp_io_ops_console;
++#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
+ #else
+ #define dbgp_kgdb_mode (0)
+ #endif
+@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
+ .write_char = kgdbdbgp_write_char,
+ };
+
++static struct kgdb_io kgdbdbgp_io_ops_console = {
++ .name = "kgdbdbgp",
++ .read_char = kgdbdbgp_read_char,
++ .write_char = kgdbdbgp_write_char,
++ .is_console = 1
++};
++
+ static int kgdbdbgp_wait_time;
+
+ static int __init kgdbdbgp_parse_config(char *str)
+@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
+ ptr++;
+ kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
+ }
+- kgdb_register_io_module(&kgdbdbgp_io_ops);
+- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
++ if (early_dbgp_console.index != -1)
++ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
++ else
++ kgdb_register_io_module(&kgdbdbgp_io_ops);
+
+ return 0;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/usb/wusbcore/wa-hc.h linux-3.4-pax/drivers/usb/wusbcore/wa-hc.h
+--- linux-3.4/drivers/usb/wusbcore/wa-hc.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/usb/wusbcore/wa-hc.h 2012-05-21 12:10:10.948048966 +0200
+@@ -192,7 +192,7 @@ struct wahc {
+ struct list_head xfer_delayed_list;
+ spinlock_t xfer_list_lock;
+ struct work_struct xfer_work;
+- atomic_t xfer_id_count;
++ atomic_unchecked_t xfer_id_count;
+ };
+
+
+@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
+ INIT_LIST_HEAD(&wa->xfer_delayed_list);
+ spin_lock_init(&wa->xfer_list_lock);
+ INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
+- atomic_set(&wa->xfer_id_count, 1);
++ atomic_set_unchecked(&wa->xfer_id_count, 1);
+ }
+
+ /**
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/usb/wusbcore/wa-xfer.c linux-3.4-pax/drivers/usb/wusbcore/wa-xfer.c
+--- linux-3.4/drivers/usb/wusbcore/wa-xfer.c 2012-01-08 19:48:21.567471274 +0100
++++ linux-3.4-pax/drivers/usb/wusbcore/wa-xfer.c 2012-05-21 12:10:10.948048966 +0200
+@@ -296,7 +296,7 @@ out:
+ */
+ static void wa_xfer_id_init(struct wa_xfer *xfer)
+ {
+- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
++ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
+ }
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/vhost/vhost.c linux-3.4-pax/drivers/vhost/vhost.c
+--- linux-3.4/drivers/vhost/vhost.c 2012-05-21 11:33:31.547929510 +0200
++++ linux-3.4-pax/drivers/vhost/vhost.c 2012-05-21 12:10:10.952048966 +0200
+@@ -632,7 +632,7 @@ static long vhost_set_memory(struct vhos
+ return 0;
+ }
+
+-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
++static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
+ {
+ struct file *eventfp, *filep = NULL,
+ *pollstart = NULL, *pollstop = NULL;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/video/aty/aty128fb.c linux-3.4-pax/drivers/video/aty/aty128fb.c
+--- linux-3.4/drivers/video/aty/aty128fb.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/video/aty/aty128fb.c 2012-05-21 12:10:10.956048966 +0200
+@@ -148,7 +148,7 @@ enum {
+ };
+
+ /* Must match above enum */
+-static const char *r128_family[] __devinitdata = {
++static const char *r128_family[] __devinitconst = {
+ "AGP",
+ "PCI",
+ "PRO AGP",
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/video/fbcmap.c linux-3.4-pax/drivers/video/fbcmap.c
+--- linux-3.4/drivers/video/fbcmap.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/video/fbcmap.c 2012-05-21 12:10:10.960048966 +0200
+@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
+ rc = -ENODEV;
+ goto out;
+ }
+- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
+- !info->fbops->fb_setcmap)) {
++ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
+ rc = -EINVAL;
+ goto out1;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/video/fbmem.c linux-3.4-pax/drivers/video/fbmem.c
+--- linux-3.4/drivers/video/fbmem.c 2012-05-21 11:33:31.799929524 +0200
++++ linux-3.4-pax/drivers/video/fbmem.c 2012-05-21 12:10:10.964048967 +0200
+@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
+ image->dx += image->width + 8;
+ }
+ } else if (rotate == FB_ROTATE_UD) {
+- for (x = 0; x < num && image->dx >= 0; x++) {
++ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
+ info->fbops->fb_imageblit(info, image);
+ image->dx -= image->width + 8;
+ }
+@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
+ image->dy += image->height + 8;
+ }
+ } else if (rotate == FB_ROTATE_CCW) {
+- for (x = 0; x < num && image->dy >= 0; x++) {
++ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
+ info->fbops->fb_imageblit(info, image);
+ image->dy -= image->height + 8;
+ }
+@@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *
+ return -EFAULT;
+ if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
+ return -EINVAL;
+- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
++ if (con2fb.framebuffer >= FB_MAX)
+ return -EINVAL;
+ if (!registered_fb[con2fb.framebuffer])
+ request_module("fb%d", con2fb.framebuffer);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/video/geode/gx1fb_core.c linux-3.4-pax/drivers/video/geode/gx1fb_core.c
+--- linux-3.4/drivers/video/geode/gx1fb_core.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/video/geode/gx1fb_core.c 2012-05-21 12:10:10.964048967 +0200
+@@ -29,7 +29,7 @@ static int crt_option = 1;
+ static char panel_option[32] = "";
+
+ /* Modes relevant to the GX1 (taken from modedb.c) */
+-static const struct fb_videomode __devinitdata gx1_modedb[] = {
++static const struct fb_videomode __devinitconst gx1_modedb[] = {
+ /* 640x480-60 VESA */
+ { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/video/gxt4500.c linux-3.4-pax/drivers/video/gxt4500.c
+--- linux-3.4/drivers/video/gxt4500.c 2012-01-08 19:48:21.727471265 +0100
++++ linux-3.4-pax/drivers/video/gxt4500.c 2012-05-21 12:10:10.968048967 +0200
+@@ -156,7 +156,7 @@ struct gxt4500_par {
+ static char *mode_option;
+
+ /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
+-static const struct fb_videomode defaultmode __devinitdata = {
++static const struct fb_videomode defaultmode __devinitconst = {
+ .refresh = 60,
+ .xres = 1280,
+ .yres = 1024,
+@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, stru
+ return 0;
+ }
+
+-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
++static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
+ .id = "IBM GXT4500P",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_PSEUDOCOLOR,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/video/i810/i810_accel.c linux-3.4-pax/drivers/video/i810/i810_accel.c
+--- linux-3.4/drivers/video/i810/i810_accel.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/video/i810/i810_accel.c 2012-05-21 12:10:10.968048967 +0200
+@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
+ }
+ }
+ printk("ringbuffer lockup!!!\n");
++ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
+ i810_report_error(mmio);
+ par->dev_flags |= LOCKUP;
+ info->pixmap.scan_align = 1;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/video/i810/i810_main.c linux-3.4-pax/drivers/video/i810/i810_main.c
+--- linux-3.4/drivers/video/i810/i810_main.c 2012-03-19 10:39:08.860049346 +0100
++++ linux-3.4-pax/drivers/video/i810/i810_main.c 2012-05-21 12:10:10.972048967 +0200
+@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_
+ static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
+
+ /* PCI */
+-static const char *i810_pci_list[] __devinitdata = {
++static const char *i810_pci_list[] __devinitconst = {
+ "Intel(R) 810 Framebuffer Device" ,
+ "Intel(R) 810-DC100 Framebuffer Device" ,
+ "Intel(R) 810E Framebuffer Device" ,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/video/jz4740_fb.c linux-3.4-pax/drivers/video/jz4740_fb.c
+--- linux-3.4/drivers/video/jz4740_fb.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/video/jz4740_fb.c 2012-05-21 12:10:10.976048967 +0200
+@@ -136,7 +136,7 @@ struct jzfb {
+ uint32_t pseudo_palette[16];
+ };
+
+-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
++static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
+ .id = "JZ4740 FB",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_TRUECOLOR,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/video/udlfb.c linux-3.4-pax/drivers/video/udlfb.c
+--- linux-3.4/drivers/video/udlfb.c 2012-05-21 11:33:32.263929549 +0200
++++ linux-3.4-pax/drivers/video/udlfb.c 2012-05-21 12:10:10.980048967 +0200
+@@ -620,11 +620,11 @@ int dlfb_handle_damage(struct dlfb_data
+ dlfb_urb_completion(urb);
+
+ error:
+- atomic_add(bytes_sent, &dev->bytes_sent);
+- atomic_add(bytes_identical, &dev->bytes_identical);
+- atomic_add(width*height*2, &dev->bytes_rendered);
++ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
++ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
++ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
+ end_cycles = get_cycles();
+- atomic_add(((unsigned int) ((end_cycles - start_cycles)
++ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
+ >> 10)), /* Kcycles */
+ &dev->cpu_kcycles_used);
+
+@@ -745,11 +745,11 @@ static void dlfb_dpy_deferred_io(struct
+ dlfb_urb_completion(urb);
+
+ error:
+- atomic_add(bytes_sent, &dev->bytes_sent);
+- atomic_add(bytes_identical, &dev->bytes_identical);
+- atomic_add(bytes_rendered, &dev->bytes_rendered);
++ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
++ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
++ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
+ end_cycles = get_cycles();
+- atomic_add(((unsigned int) ((end_cycles - start_cycles)
++ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
+ >> 10)), /* Kcycles */
+ &dev->cpu_kcycles_used);
+ }
+@@ -1373,7 +1373,7 @@ static ssize_t metrics_bytes_rendered_sh
+ struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ struct dlfb_data *dev = fb_info->par;
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+- atomic_read(&dev->bytes_rendered));
++ atomic_read_unchecked(&dev->bytes_rendered));
+ }
+
+ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
+@@ -1381,7 +1381,7 @@ static ssize_t metrics_bytes_identical_s
+ struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ struct dlfb_data *dev = fb_info->par;
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+- atomic_read(&dev->bytes_identical));
++ atomic_read_unchecked(&dev->bytes_identical));
+ }
+
+ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
+@@ -1389,7 +1389,7 @@ static ssize_t metrics_bytes_sent_show(s
+ struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ struct dlfb_data *dev = fb_info->par;
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+- atomic_read(&dev->bytes_sent));
++ atomic_read_unchecked(&dev->bytes_sent));
+ }
+
+ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
+@@ -1397,7 +1397,7 @@ static ssize_t metrics_cpu_kcycles_used_
+ struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ struct dlfb_data *dev = fb_info->par;
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+- atomic_read(&dev->cpu_kcycles_used));
++ atomic_read_unchecked(&dev->cpu_kcycles_used));
+ }
+
+ static ssize_t edid_show(
+@@ -1457,10 +1457,10 @@ static ssize_t metrics_reset_store(struc
+ struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ struct dlfb_data *dev = fb_info->par;
+
+- atomic_set(&dev->bytes_rendered, 0);
+- atomic_set(&dev->bytes_identical, 0);
+- atomic_set(&dev->bytes_sent, 0);
+- atomic_set(&dev->cpu_kcycles_used, 0);
++ atomic_set_unchecked(&dev->bytes_rendered, 0);
++ atomic_set_unchecked(&dev->bytes_identical, 0);
++ atomic_set_unchecked(&dev->bytes_sent, 0);
++ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
+
+ return count;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/video/uvesafb.c linux-3.4-pax/drivers/video/uvesafb.c
+--- linux-3.4/drivers/video/uvesafb.c 2012-05-21 11:33:32.267929550 +0200
++++ linux-3.4-pax/drivers/video/uvesafb.c 2012-05-21 12:10:10.984048968 +0200
+@@ -19,6 +19,7 @@
+ #include <linux/io.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
++#include <linux/moduleloader.h>
+ #include <video/edid.h>
+ #include <video/uvesafb.h>
+ #ifdef CONFIG_X86
+@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
+ if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
+ par->pmi_setpal = par->ypan = 0;
+ } else {
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_MODULES
++ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
++#endif
++ if (!par->pmi_code) {
++ par->pmi_setpal = par->ypan = 0;
++ return 0;
++ }
++#endif
++
+ par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
+ + task->t.regs.edi);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pax_open_kernel();
++ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
++ pax_close_kernel();
++
++ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
++ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
++#else
+ par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
+ par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
++#endif
++
+ printk(KERN_INFO "uvesafb: protected mode interface info at "
+ "%04x:%04x\n",
+ (u16)task->t.regs.es, (u16)task->t.regs.edi);
+@@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(st
+ par->ypan = ypan;
+
+ if (par->pmi_setpal || par->ypan) {
++#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
+ if (__supported_pte_mask & _PAGE_NX) {
+ par->pmi_setpal = par->ypan = 0;
+ printk(KERN_WARNING "uvesafb: NX protection is actively."
+ "We have better not to use the PMI.\n");
+- } else {
++ } else
++#endif
+ uvesafb_vbe_getpmi(task, par);
+- }
+ }
+ #else
+ /* The protected mode interface is not available on non-x86. */
+@@ -1836,6 +1860,11 @@ out:
+ if (par->vbe_modes)
+ kfree(par->vbe_modes);
+
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ if (par->pmi_code)
++ module_free_exec(NULL, par->pmi_code);
++#endif
++
+ framebuffer_release(info);
+ return err;
+ }
+@@ -1862,6 +1891,12 @@ static int uvesafb_remove(struct platfor
+ kfree(par->vbe_state_orig);
+ if (par->vbe_state_saved)
+ kfree(par->vbe_state_saved);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ if (par->pmi_code)
++ module_free_exec(NULL, par->pmi_code);
++#endif
++
+ }
+
+ framebuffer_release(info);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/video/vesafb.c linux-3.4-pax/drivers/video/vesafb.c
+--- linux-3.4/drivers/video/vesafb.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/video/vesafb.c 2012-05-21 12:10:10.984048968 +0200
+@@ -9,6 +9,7 @@
+ */
+
+ #include <linux/module.h>
++#include <linux/moduleloader.h>
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/string.h>
+@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
+ static int vram_total __initdata; /* Set total amount of memory */
+ static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
+ static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
+-static void (*pmi_start)(void) __read_mostly;
+-static void (*pmi_pal) (void) __read_mostly;
++static void (*pmi_start)(void) __read_only;
++static void (*pmi_pal) (void) __read_only;
+ static int depth __read_mostly;
+ static int vga_compat __read_mostly;
+ /* --------------------------------------------------------------------- */
+@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
+ unsigned int size_vmode;
+ unsigned int size_remap;
+ unsigned int size_total;
++ void *pmi_code = NULL;
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
+ return -ENODEV;
+@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
+ size_remap = size_total;
+ vesafb_fix.smem_len = size_remap;
+
+-#ifndef __i386__
+- screen_info.vesapm_seg = 0;
+-#endif
+-
+ if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
+ printk(KERN_WARNING
+ "vesafb: cannot reserve video memory at 0x%lx\n",
+@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
+ printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
+ vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
+
++#ifdef __i386__
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pmi_code = module_alloc_exec(screen_info.vesapm_size);
++ if (!pmi_code)
++#elif !defined(CONFIG_PAX_KERNEXEC)
++ if (0)
++#endif
++
++#endif
++ screen_info.vesapm_seg = 0;
++
+ if (screen_info.vesapm_seg) {
+- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
+- screen_info.vesapm_seg,screen_info.vesapm_off);
++ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
++ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
+ }
+
+ if (screen_info.vesapm_seg < 0xc000)
+@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
+
+ if (ypan || pmi_setpal) {
+ unsigned short *pmi_base;
++
+ pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
+- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
+- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pax_open_kernel();
++ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
++#else
++ pmi_code = pmi_base;
++#endif
++
++ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
++ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pmi_start = ktva_ktla(pmi_start);
++ pmi_pal = ktva_ktla(pmi_pal);
++ pax_close_kernel();
++#endif
++
+ printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
+ if (pmi_base[3]) {
+ printk(KERN_INFO "vesafb: pmi: ports = ");
+@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
+ info->node, info->fix.id);
+ return 0;
+ err:
++
++#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ module_free_exec(NULL, pmi_code);
++#endif
++
+ if (info->screen_base)
+ iounmap(info->screen_base);
+ framebuffer_release(info);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/video/via/via_clock.h linux-3.4-pax/drivers/video/via/via_clock.h
+--- linux-3.4/drivers/video/via/via_clock.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/drivers/video/via/via_clock.h 2012-05-21 12:10:10.988048968 +0200
+@@ -56,7 +56,7 @@ struct via_clock {
+
+ void (*set_engine_pll_state)(u8 state);
+ void (*set_engine_pll)(struct via_pll_config config);
+-};
++} __no_const;
+
+
+ static inline u32 get_pll_internal_frequency(u32 ref_freq,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/drivers/xen/xen-pciback/conf_space.h linux-3.4-pax/drivers/xen/xen-pciback/conf_space.h
+--- linux-3.4/drivers/xen/xen-pciback/conf_space.h 2011-10-24 12:48:39.087091088 +0200
++++ linux-3.4-pax/drivers/xen/xen-pciback/conf_space.h 2012-05-21 12:10:10.988048968 +0200
+@@ -44,15 +44,15 @@ struct config_field {
+ struct {
+ conf_dword_write write;
+ conf_dword_read read;
+- } dw;
++ } __no_const dw;
+ struct {
+ conf_word_write write;
+ conf_word_read read;
+- } w;
++ } __no_const w;
+ struct {
+ conf_byte_write write;
+ conf_byte_read read;
+- } b;
++ } __no_const b;
+ } u;
+ struct list_head list;
+ };
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/9p/vfs_inode.c linux-3.4-pax/fs/9p/vfs_inode.c
+--- linux-3.4/fs/9p/vfs_inode.c 2012-03-19 10:39:09.400049313 +0100
++++ linux-3.4-pax/fs/9p/vfs_inode.c 2012-05-21 12:10:10.992048968 +0200
+@@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct
+ void
+ v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+ {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+
+ p9_debug(P9_DEBUG_VFS, " %s %s\n",
+ dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/aio.c linux-3.4-pax/fs/aio.c
+--- linux-3.4/fs/aio.c 2012-05-21 11:33:34.247929657 +0200
++++ linux-3.4-pax/fs/aio.c 2012-05-21 12:10:10.996048968 +0200
+@@ -118,7 +118,7 @@ static int aio_setup_ring(struct kioctx
+ size += sizeof(struct io_event) * nr_events;
+ nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
+
+- if (nr_pages < 0)
++ if (nr_pages <= 0)
+ return -EINVAL;
+
+ nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
+@@ -1440,22 +1440,27 @@ static ssize_t aio_fsync(struct kiocb *i
+ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
+ {
+ ssize_t ret;
++ struct iovec iovstack;
+
+ #ifdef CONFIG_COMPAT
+ if (compat)
+ ret = compat_rw_copy_check_uvector(type,
+ (struct compat_iovec __user *)kiocb->ki_buf,
+- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
++ kiocb->ki_nbytes, 1, &iovstack,
+ &kiocb->ki_iovec, 1);
+ else
+ #endif
+ ret = rw_copy_check_uvector(type,
+ (struct iovec __user *)kiocb->ki_buf,
+- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
++ kiocb->ki_nbytes, 1, &iovstack,
+ &kiocb->ki_iovec, 1);
+ if (ret < 0)
+ goto out;
+
++ if (kiocb->ki_iovec == &iovstack) {
++ kiocb->ki_inline_vec = iovstack;
++ kiocb->ki_iovec = &kiocb->ki_inline_vec;
++ }
+ kiocb->ki_nr_segs = kiocb->ki_nbytes;
+ kiocb->ki_cur_seg = 0;
+ /* ki_nbytes/left now reflect bytes instead of segs */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/autofs4/waitq.c linux-3.4-pax/fs/autofs4/waitq.c
+--- linux-3.4/fs/autofs4/waitq.c 2012-05-21 11:33:34.255929657 +0200
++++ linux-3.4-pax/fs/autofs4/waitq.c 2012-05-21 12:10:10.996048968 +0200
+@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_s
+ {
+ unsigned long sigpipe, flags;
+ mm_segment_t fs;
+- const char *data = (const char *)addr;
++ const char __user *data = (const char __force_user *)addr;
+ ssize_t wr = 0;
+
+ sigpipe = sigismember(&current->pending.signal, SIGPIPE);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/befs/linuxvfs.c linux-3.4-pax/fs/befs/linuxvfs.c
+--- linux-3.4/fs/befs/linuxvfs.c 2012-05-21 11:33:34.259929658 +0200
++++ linux-3.4-pax/fs/befs/linuxvfs.c 2012-05-21 12:10:11.000048968 +0200
+@@ -502,7 +502,7 @@ static void befs_put_link(struct dentry
+ {
+ befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
+ if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
+- char *link = nd_get_link(nd);
++ const char *link = nd_get_link(nd);
+ if (!IS_ERR(link))
+ kfree(link);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/binfmt_aout.c linux-3.4-pax/fs/binfmt_aout.c
+--- linux-3.4/fs/binfmt_aout.c 2012-05-21 11:33:34.275929659 +0200
++++ linux-3.4-pax/fs/binfmt_aout.c 2012-05-21 12:10:11.004048969 +0200
+@@ -265,6 +265,27 @@ static int load_aout_binary(struct linux
+
+ install_exec_creds(bprm);
+
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ current->mm->pax_flags = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
++ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
++ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
++ current->mm->pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++ }
++#endif
++
+ if (N_MAGIC(ex) == OMAGIC) {
+ unsigned long text_addr, map_size;
+ loff_t pos;
+@@ -330,7 +351,7 @@ static int load_aout_binary(struct linux
+ }
+
+ error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
+- PROT_READ | PROT_WRITE | PROT_EXEC,
++ PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+ fd_offset + ex.a_text);
+ if (error != N_DATADDR(ex)) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/binfmt_elf.c linux-3.4-pax/fs/binfmt_elf.c
+--- linux-3.4/fs/binfmt_elf.c 2012-05-21 11:33:34.279929659 +0200
++++ linux-3.4-pax/fs/binfmt_elf.c 2012-05-21 12:10:11.008048969 +0200
+@@ -32,6 +32,7 @@
+ #include <linux/elf.h>
+ #include <linux/utsname.h>
+ #include <linux/coredump.h>
++#include <linux/xattr.h>
+ #include <asm/uaccess.h>
+ #include <asm/param.h>
+ #include <asm/page.h>
+@@ -52,6 +53,10 @@ static int elf_core_dump(struct coredump
+ #define elf_core_dump NULL
+ #endif
+
++#ifdef CONFIG_PAX_MPROTECT
++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
++#endif
++
+ #if ELF_EXEC_PAGESIZE > PAGE_SIZE
+ #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
+ #else
+@@ -71,6 +76,11 @@ static struct linux_binfmt elf_format =
+ .load_binary = load_elf_binary,
+ .load_shlib = load_elf_library,
+ .core_dump = elf_core_dump,
++
++#ifdef CONFIG_PAX_MPROTECT
++ .handle_mprotect= elf_handle_mprotect,
++#endif
++
+ .min_coredump = ELF_EXEC_PAGESIZE,
+ };
+
+@@ -78,6 +88,8 @@ static struct linux_binfmt elf_format =
+
+ static int set_brk(unsigned long start, unsigned long end)
+ {
++ unsigned long e = end;
++
+ start = ELF_PAGEALIGN(start);
+ end = ELF_PAGEALIGN(end);
+ if (end > start) {
+@@ -86,7 +98,7 @@ static int set_brk(unsigned long start,
+ if (BAD_ADDR(addr))
+ return addr;
+ }
+- current->mm->start_brk = current->mm->brk = end;
++ current->mm->start_brk = current->mm->brk = e;
+ return 0;
+ }
+
+@@ -147,12 +159,13 @@ create_elf_tables(struct linux_binprm *b
+ elf_addr_t __user *u_rand_bytes;
+ const char *k_platform = ELF_PLATFORM;
+ const char *k_base_platform = ELF_BASE_PLATFORM;
+- unsigned char k_rand_bytes[16];
++ u32 k_rand_bytes[4];
+ int items;
+ elf_addr_t *elf_info;
+ int ei_index = 0;
+ const struct cred *cred = current_cred();
+ struct vm_area_struct *vma;
++ unsigned long saved_auxv[AT_VECTOR_SIZE];
+
+ /*
+ * In some cases (e.g. Hyper-Threading), we want to avoid L1
+@@ -194,8 +207,12 @@ create_elf_tables(struct linux_binprm *b
+ * Generate 16 random bytes for userspace PRNG seeding.
+ */
+ get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
+- u_rand_bytes = (elf_addr_t __user *)
+- STACK_ALLOC(p, sizeof(k_rand_bytes));
++ srandom32(k_rand_bytes[0] ^ random32());
++ srandom32(k_rand_bytes[1] ^ random32());
++ srandom32(k_rand_bytes[2] ^ random32());
++ srandom32(k_rand_bytes[3] ^ random32());
++ p = STACK_ROUND(p, sizeof(k_rand_bytes));
++ u_rand_bytes = (elf_addr_t __user *) p;
+ if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
+ return -EFAULT;
+
+@@ -307,9 +324,11 @@ create_elf_tables(struct linux_binprm *b
+ return -EFAULT;
+ current->mm->env_end = p;
+
++ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
++
+ /* Put the elf_info on the stack in the right place. */
+ sp = (elf_addr_t __user *)envp + 1;
+- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
++ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
+ return -EFAULT;
+ return 0;
+ }
+@@ -380,10 +399,10 @@ static unsigned long load_elf_interp(str
+ {
+ struct elf_phdr *elf_phdata;
+ struct elf_phdr *eppnt;
+- unsigned long load_addr = 0;
++ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
+ int load_addr_set = 0;
+ unsigned long last_bss = 0, elf_bss = 0;
+- unsigned long error = ~0UL;
++ unsigned long error = -EINVAL;
+ unsigned long total_size;
+ int retval, i, size;
+
+@@ -429,6 +448,11 @@ static unsigned long load_elf_interp(str
+ goto out_close;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
+ eppnt = elf_phdata;
+ for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
+ if (eppnt->p_type == PT_LOAD) {
+@@ -472,8 +496,8 @@ static unsigned long load_elf_interp(str
+ k = load_addr + eppnt->p_vaddr;
+ if (BAD_ADDR(k) ||
+ eppnt->p_filesz > eppnt->p_memsz ||
+- eppnt->p_memsz > TASK_SIZE ||
+- TASK_SIZE - eppnt->p_memsz < k) {
++ eppnt->p_memsz > pax_task_size ||
++ pax_task_size - eppnt->p_memsz < k) {
+ error = -ENOMEM;
+ goto out_close;
+ }
+@@ -525,6 +549,351 @@ out:
+ return error;
+ }
+
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
++static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (elf_phdata->p_flags & PF_PAGEEXEC)
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (elf_phdata->p_flags & PF_SEGMEXEC)
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if ((__supported_pte_mask & _PAGE_NX))
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (elf_phdata->p_flags & PF_EMUTRAMP)
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (elf_phdata->p_flags & PF_MPROTECT)
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++#endif
++
++ return pax_flags;
++}
++
++static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if ((__supported_pte_mask & _PAGE_NX))
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++#endif
++
++ return pax_flags;
++}
++
++static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_EI_PAX
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if ((__supported_pte_mask & _PAGE_NX))
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++#else
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ pax_flags |= MF_PAX_SEGMEXEC;
++ }
++#endif
++
++#endif
++
++ return pax_flags;
++}
++
++static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
++{
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++ unsigned long i;
++
++ for (i = 0UL; i < elf_ex->e_phnum; i++)
++ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
++ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
++ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
++ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
++ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
++ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
++ return ~0UL;
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_softmode)
++ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
++ else
++#endif
++
++ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
++ break;
++ }
++#endif
++
++ return ~0UL;
++}
++
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if ((__supported_pte_mask & _PAGE_NX))
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (pax_flags_softmode & MF_PAX_MPROTECT)
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++ return pax_flags;
++}
++
++static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if ((__supported_pte_mask & _PAGE_NX))
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++static unsigned long pax_parse_xattr_pax(struct file * const file)
++{
++
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++ ssize_t xattr_size, i;
++ unsigned char xattr_value[5];
++ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
++
++ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
++ if (xattr_size <= 0)
++ return ~0UL;
++
++ for (i = 0; i < xattr_size; i++)
++ switch (xattr_value[i]) {
++ default:
++ return ~0UL;
++
++#define parse_flag(option1, option2, flag) \
++ case option1: \
++ pax_flags_hardmode |= MF_PAX_##flag; \
++ break; \
++ case option2: \
++ pax_flags_softmode |= MF_PAX_##flag; \
++ break;
++
++ parse_flag('p', 'P', PAGEEXEC);
++ parse_flag('e', 'E', EMUTRAMP);
++ parse_flag('m', 'M', MPROTECT);
++ parse_flag('r', 'R', RANDMMAP);
++ parse_flag('s', 'S', SEGMEXEC);
++
++#undef parse_flag
++ }
++
++ if (pax_flags_hardmode & pax_flags_softmode)
++ return ~0UL;
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_softmode)
++ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
++ else
++#endif
++
++ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
++#else
++ return ~0UL;
++#endif
++
++}
++
++static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
++{
++ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
++
++ pax_flags = pax_parse_ei_pax(elf_ex);
++ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
++ xattr_pax_flags = pax_parse_xattr_pax(file);
++
++ if (pt_pax_flags == ~0UL)
++ pt_pax_flags = xattr_pax_flags;
++ else if (xattr_pax_flags == ~0UL)
++ xattr_pax_flags = pt_pax_flags;
++ if (pt_pax_flags != xattr_pax_flags)
++ return -EINVAL;
++ if (pt_pax_flags != ~0UL)
++ pax_flags = pt_pax_flags;
++
++ if (0 > pax_check_flags(&pax_flags))
++ return -EINVAL;
++
++ current->mm->pax_flags = pax_flags;
++ return 0;
++}
++#endif
++
+ /*
+ * These are the functions used to load ELF style executables and shared
+ * libraries. There is no binary dependent code anywhere else.
+@@ -541,6 +910,11 @@ static unsigned long randomize_stack_top
+ {
+ unsigned int random_variable = 0;
+
++#ifdef CONFIG_PAX_RANDUSTACK
++ if (randomize_va_space)
++ return stack_top - current->mm->delta_stack;
++#endif
++
+ if ((current->flags & PF_RANDOMIZE) &&
+ !(current->personality & ADDR_NO_RANDOMIZE)) {
+ random_variable = get_random_int() & STACK_RND_MASK;
+@@ -559,7 +933,7 @@ static int load_elf_binary(struct linux_
+ unsigned long load_addr = 0, load_bias = 0;
+ int load_addr_set = 0;
+ char * elf_interpreter = NULL;
+- unsigned long error;
++ unsigned long error = 0;
+ struct elf_phdr *elf_ppnt, *elf_phdata;
+ unsigned long elf_bss, elf_brk;
+ int retval, i;
+@@ -569,11 +943,11 @@ static int load_elf_binary(struct linux_
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long reloc_func_desc __maybe_unused = 0;
+ int executable_stack = EXSTACK_DEFAULT;
+- unsigned long def_flags = 0;
+ struct {
+ struct elfhdr elf_ex;
+ struct elfhdr interp_elf_ex;
+ } *loc;
++ unsigned long pax_task_size = TASK_SIZE;
+
+ loc = kmalloc(sizeof(*loc), GFP_KERNEL);
+ if (!loc) {
+@@ -709,11 +1083,81 @@ static int load_elf_binary(struct linux_
+ goto out_free_dentry;
+
+ /* OK, This is the point of no return */
+- current->mm->def_flags = def_flags;
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ current->mm->pax_flags = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ current->mm->call_dl_resolve = 0UL;
++#endif
++
++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
++ current->mm->call_syscall = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ current->mm->delta_mmap = 0UL;
++ current->mm->delta_stack = 0UL;
++#endif
++
++ current->mm->def_flags = 0;
++
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
++ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
++ send_sig(SIGKILL, current, 0);
++ goto out_free_dentry;
++ }
++#endif
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++ pax_set_initial_flags(bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++ if (pax_set_initial_flags_func)
++ (pax_set_initial_flags_func)(bprm);
++#endif
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
++ current->mm->context.user_cs_limit = PAGE_SIZE;
++ current->mm->def_flags |= VM_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
++ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++ current->mm->def_flags |= VM_NOHUGEPAGE;
++ }
++#endif
++
++#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
++ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
++ put_cpu();
++ }
++#endif
+
+ /* Do this immediately, since STACK_TOP as used in setup_arg_pages
+ may depend on the personality. */
+ SET_PERSONALITY(loc->elf_ex);
++
++#ifdef CONFIG_PAX_ASLR
++ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
++ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
++ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
++ }
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ executable_stack = EXSTACK_DISABLE_X;
++ current->personality &= ~READ_IMPLIES_EXEC;
++ } else
++#endif
++
+ if (elf_read_implies_exec(loc->elf_ex, executable_stack))
+ current->personality |= READ_IMPLIES_EXEC;
+
+@@ -804,6 +1248,20 @@ static int load_elf_binary(struct linux_
+ #else
+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ /* PaX: randomize base address at the default exe base if requested */
++ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
++#ifdef CONFIG_SPARC64
++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
++#else
++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
++#endif
++ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
++ elf_flags |= MAP_FIXED;
++ }
++#endif
++
+ }
+
+ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
+@@ -836,9 +1294,9 @@ static int load_elf_binary(struct linux_
+ * allowed task size. Note that p_filesz must always be
+ * <= p_memsz so it is only necessary to check p_memsz.
+ */
+- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
+- elf_ppnt->p_memsz > TASK_SIZE ||
+- TASK_SIZE - elf_ppnt->p_memsz < k) {
++ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
++ elf_ppnt->p_memsz > pax_task_size ||
++ pax_task_size - elf_ppnt->p_memsz < k) {
+ /* set_brk can never work. Avoid overflows. */
+ send_sig(SIGKILL, current, 0);
+ retval = -EINVAL;
+@@ -877,11 +1335,40 @@ static int load_elf_binary(struct linux_
+ goto out_free_dentry;
+ }
+ if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
+- send_sig(SIGSEGV, current, 0);
+- retval = -EFAULT; /* Nobody gets to see this, but.. */
+- goto out_free_dentry;
++ /*
++ * This bss-zeroing can fail if the ELF
++ * file specifies odd protections. So
++ * we don't check the return value
++ */
+ }
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
++ unsigned long start, size;
++
++ start = ELF_PAGEALIGN(elf_brk);
++ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
++ down_write(&current->mm->mmap_sem);
++ retval = -ENOMEM;
++ if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
++ unsigned long prot = PROT_NONE;
++
++ current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
++// if (current->personality & ADDR_NO_RANDOMIZE)
++// prot = PROT_READ;
++ start = do_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
++ retval = IS_ERR_VALUE(start) ? start : 0;
++ }
++ up_write(&current->mm->mmap_sem);
++ if (retval == 0)
++ retval = set_brk(start + size, start + size + PAGE_SIZE);
++ if (retval < 0) {
++ send_sig(SIGKILL, current, 0);
++ goto out_free_dentry;
++ }
++ }
++#endif
++
+ if (elf_interpreter) {
+ unsigned long uninitialized_var(interp_map_addr);
+
+@@ -1109,7 +1596,7 @@ static bool always_dump_vma(struct vm_ar
+ * Decide what to dump of a segment, part, all or none.
+ */
+ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+- unsigned long mm_flags)
++ unsigned long mm_flags, long signr)
+ {
+ #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
+
+@@ -1146,7 +1633,7 @@ static unsigned long vma_dump_size(struc
+ if (vma->vm_file == NULL)
+ return 0;
+
+- if (FILTER(MAPPED_PRIVATE))
++ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
+ goto whole;
+
+ /*
+@@ -1368,9 +1855,9 @@ static void fill_auxv_note(struct memelf
+ {
+ elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
+ int i = 0;
+- do
++ do {
+ i += 2;
+- while (auxv[i - 2] != AT_NULL);
++ } while (auxv[i - 2] != AT_NULL);
+ fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
+ }
+
+@@ -1892,14 +2379,14 @@ static void fill_extnum_info(struct elfh
+ }
+
+ static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
+- unsigned long mm_flags)
++ struct coredump_params *cprm)
+ {
+ struct vm_area_struct *vma;
+ size_t size = 0;
+
+ for (vma = first_vma(current, gate_vma); vma != NULL;
+ vma = next_vma(vma, gate_vma))
+- size += vma_dump_size(vma, mm_flags);
++ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
+ return size;
+ }
+
+@@ -1993,7 +2480,7 @@ static int elf_core_dump(struct coredump
+
+ dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
+
+- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
++ offset += elf_core_vma_data_size(gate_vma, cprm);
+ offset += elf_core_extra_data_size();
+ e_shoff = offset;
+
+@@ -2024,7 +2511,7 @@ static int elf_core_dump(struct coredump
+ phdr.p_offset = offset;
+ phdr.p_vaddr = vma->vm_start;
+ phdr.p_paddr = 0;
+- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
++ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
+ phdr.p_memsz = vma->vm_end - vma->vm_start;
+ offset += phdr.p_filesz;
+ phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
+@@ -2059,7 +2546,7 @@ static int elf_core_dump(struct coredump
+ unsigned long addr;
+ unsigned long end;
+
+- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
++ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
+
+ for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
+ struct page *page;
+@@ -2105,6 +2592,96 @@ out:
+
+ #endif /* CONFIG_ELF_CORE */
+
++#ifdef CONFIG_PAX_MPROTECT
++/* PaX: non-PIC ELF libraries need relocations on their executable segments
++ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
++ * we'll remove VM_MAYWRITE for good on RELRO segments.
++ *
++ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
++ * basis because we want to allow the common case and not the special ones.
++ */
++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
++{
++ struct elfhdr elf_h;
++ struct elf_phdr elf_p;
++ unsigned long i;
++ unsigned long oldflags;
++ bool is_textrel_rw, is_textrel_rx, is_relro;
++
++ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
++ return;
++
++ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
++ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
++
++#ifdef CONFIG_PAX_ELFRELOCS
++ /* possible TEXTREL */
++ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
++ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
++#else
++ is_textrel_rw = false;
++ is_textrel_rx = false;
++#endif
++
++ /* possible RELRO */
++ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
++
++ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
++ return;
++
++ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
++ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
++
++#ifdef CONFIG_PAX_ETEXECRELOCS
++ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
++#else
++ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
++#endif
++
++ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
++ !elf_check_arch(&elf_h) ||
++ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
++ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
++ return;
++
++ for (i = 0UL; i < elf_h.e_phnum; i++) {
++ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
++ return;
++ switch (elf_p.p_type) {
++ case PT_DYNAMIC:
++ if (!is_textrel_rw && !is_textrel_rx)
++ continue;
++ i = 0UL;
++ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
++ elf_dyn dyn;
++
++ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
++ return;
++ if (dyn.d_tag == DT_NULL)
++ return;
++ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
++ if (is_textrel_rw)
++ vma->vm_flags |= VM_MAYWRITE;
++ else
++ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
++ vma->vm_flags &= ~VM_MAYWRITE;
++ return;
++ }
++ i++;
++ }
++ return;
++
++ case PT_GNU_RELRO:
++ if (!is_relro)
++ continue;
++ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
++ vma->vm_flags &= ~VM_MAYWRITE;
++ return;
++ }
++ }
++}
++#endif
++
+ static int __init init_elf_binfmt(void)
+ {
+ register_binfmt(&elf_format);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/binfmt_flat.c linux-3.4-pax/fs/binfmt_flat.c
+--- linux-3.4/fs/binfmt_flat.c 2012-05-21 11:33:34.315929661 +0200
++++ linux-3.4-pax/fs/binfmt_flat.c 2012-05-21 12:10:11.012048969 +0200
+@@ -562,7 +562,9 @@ static int load_flat_file(struct linux_b
+ realdatastart = (unsigned long) -ENOMEM;
+ printk("Unable to allocate RAM for process data, errno %d\n",
+ (int)-realdatastart);
++ down_write(&current->mm->mmap_sem);
+ do_munmap(current->mm, textpos, text_len);
++ up_write(&current->mm->mmap_sem);
+ ret = realdatastart;
+ goto err;
+ }
+@@ -586,8 +588,10 @@ static int load_flat_file(struct linux_b
+ }
+ if (IS_ERR_VALUE(result)) {
+ printk("Unable to read data+bss, errno %d\n", (int)-result);
++ down_write(&current->mm->mmap_sem);
+ do_munmap(current->mm, textpos, text_len);
+ do_munmap(current->mm, realdatastart, len);
++ up_write(&current->mm->mmap_sem);
+ ret = result;
+ goto err;
+ }
+@@ -654,8 +658,10 @@ static int load_flat_file(struct linux_b
+ }
+ if (IS_ERR_VALUE(result)) {
+ printk("Unable to read code+data+bss, errno %d\n",(int)-result);
++ down_write(&current->mm->mmap_sem);
+ do_munmap(current->mm, textpos, text_len + data_len + extra +
+ MAX_SHARED_LIBS * sizeof(unsigned long));
++ up_write(&current->mm->mmap_sem);
+ ret = result;
+ goto err;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/bio.c linux-3.4-pax/fs/bio.c
+--- linux-3.4/fs/bio.c 2012-05-21 11:33:34.331929662 +0200
++++ linux-3.4-pax/fs/bio.c 2012-05-21 12:10:11.012048969 +0200
+@@ -838,7 +838,7 @@ struct bio *bio_copy_user_iov(struct req
+ /*
+ * Overflow, abort
+ */
+- if (end < start)
++ if (end < start || end - start > INT_MAX - nr_pages)
+ return ERR_PTR(-EINVAL);
+
+ nr_pages += end - start;
+@@ -1234,7 +1234,7 @@ static void bio_copy_kern_endio(struct b
+ const int read = bio_data_dir(bio) == READ;
+ struct bio_map_data *bmd = bio->bi_private;
+ int i;
+- char *p = bmd->sgvecs[0].iov_base;
++ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
+
+ __bio_for_each_segment(bvec, bio, i, 0) {
+ char *addr = page_address(bvec->bv_page);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/block_dev.c linux-3.4-pax/fs/block_dev.c
+--- linux-3.4/fs/block_dev.c 2012-05-21 11:33:34.347929662 +0200
++++ linux-3.4-pax/fs/block_dev.c 2012-05-21 12:10:11.020048970 +0200
+@@ -704,7 +704,7 @@ static bool bd_may_claim(struct block_de
+ else if (bdev->bd_contains == bdev)
+ return true; /* is a whole device which isn't held */
+
+- else if (whole->bd_holder == bd_may_claim)
++ else if (whole->bd_holder == (void *)bd_may_claim)
+ return true; /* is a partition of a device that is being partitioned */
+ else if (whole->bd_holder != NULL)
+ return false; /* is a partition of a held device */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/btrfs/check-integrity.c linux-3.4-pax/fs/btrfs/check-integrity.c
+--- linux-3.4/fs/btrfs/check-integrity.c 2012-05-21 11:33:34.375929664 +0200
++++ linux-3.4-pax/fs/btrfs/check-integrity.c 2012-05-21 12:10:11.024048970 +0200
+@@ -156,7 +156,7 @@ struct btrfsic_block {
+ union {
+ bio_end_io_t *bio;
+ bh_end_io_t *bh;
+- } orig_bio_bh_end_io;
++ } __no_const orig_bio_bh_end_io;
+ int submit_bio_bh_rw;
+ u64 flush_gen; /* only valid if !never_written */
+ };
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/btrfs/ctree.c linux-3.4-pax/fs/btrfs/ctree.c
+--- linux-3.4/fs/btrfs/ctree.c 2012-05-21 11:33:34.391929665 +0200
++++ linux-3.4-pax/fs/btrfs/ctree.c 2012-05-21 12:10:11.028048970 +0200
+@@ -513,9 +513,12 @@ static noinline int __btrfs_cow_block(st
+ free_extent_buffer(buf);
+ add_root_to_dirty_list(root);
+ } else {
+- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+- parent_start = parent->start;
+- else
++ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
++ if (parent)
++ parent_start = parent->start;
++ else
++ parent_start = 0;
++ } else
+ parent_start = 0;
+
+ WARN_ON(trans->transid != btrfs_header_generation(parent));
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/btrfs/ioctl.c linux-3.4-pax/fs/btrfs/ioctl.c
+--- linux-3.4/fs/btrfs/ioctl.c 2012-05-21 11:33:34.431929667 +0200
++++ linux-3.4-pax/fs/btrfs/ioctl.c 2012-05-21 12:10:11.036048970 +0200
+@@ -2914,7 +2914,7 @@ long btrfs_ioctl_space_info(struct btrfs
+ up_read(&info->groups_sem);
+ }
+
+- user_dest = (struct btrfs_ioctl_space_info *)
++ user_dest = (struct btrfs_ioctl_space_info __user *)
+ (arg + sizeof(struct btrfs_ioctl_space_args));
+
+ if (copy_to_user(user_dest, dest_orig, alloc_size))
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/btrfs/relocation.c linux-3.4-pax/fs/btrfs/relocation.c
+--- linux-3.4/fs/btrfs/relocation.c 2012-05-21 11:33:34.459929669 +0200
++++ linux-3.4-pax/fs/btrfs/relocation.c 2012-05-21 12:10:11.040048971 +0200
+@@ -1268,7 +1268,7 @@ static int __update_reloc_root(struct bt
+ }
+ spin_unlock(&rc->reloc_root_tree.lock);
+
+- BUG_ON((struct btrfs_root *)node->data != root);
++ BUG_ON(!node || (struct btrfs_root *)node->data != root);
+
+ if (!del) {
+ spin_lock(&rc->reloc_root_tree.lock);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/cachefiles/bind.c linux-3.4-pax/fs/cachefiles/bind.c
+--- linux-3.4/fs/cachefiles/bind.c 2011-10-24 12:48:39.203091082 +0200
++++ linux-3.4-pax/fs/cachefiles/bind.c 2012-05-21 12:10:11.044048971 +0200
+@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
+ args);
+
+ /* start by checking things over */
+- ASSERT(cache->fstop_percent >= 0 &&
+- cache->fstop_percent < cache->fcull_percent &&
++ ASSERT(cache->fstop_percent < cache->fcull_percent &&
+ cache->fcull_percent < cache->frun_percent &&
+ cache->frun_percent < 100);
+
+- ASSERT(cache->bstop_percent >= 0 &&
+- cache->bstop_percent < cache->bcull_percent &&
++ ASSERT(cache->bstop_percent < cache->bcull_percent &&
+ cache->bcull_percent < cache->brun_percent &&
+ cache->brun_percent < 100);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/cachefiles/daemon.c linux-3.4-pax/fs/cachefiles/daemon.c
+--- linux-3.4/fs/cachefiles/daemon.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/fs/cachefiles/daemon.c 2012-05-21 12:10:11.044048971 +0200
+@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
+ if (n > buflen)
+ return -EMSGSIZE;
+
+- if (copy_to_user(_buffer, buffer, n) != 0)
++ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
+ return -EFAULT;
+
+ return n;
+@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
+ if (test_bit(CACHEFILES_DEAD, &cache->flags))
+ return -EIO;
+
+- if (datalen < 0 || datalen > PAGE_SIZE - 1)
++ if (datalen > PAGE_SIZE - 1)
+ return -EOPNOTSUPP;
+
+ /* drag the command string into the kernel so we can parse it */
+@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
+ if (args[0] != '%' || args[1] != '\0')
+ return -EINVAL;
+
+- if (fstop < 0 || fstop >= cache->fcull_percent)
++ if (fstop >= cache->fcull_percent)
+ return cachefiles_daemon_range_error(cache, args);
+
+ cache->fstop_percent = fstop;
+@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
+ if (args[0] != '%' || args[1] != '\0')
+ return -EINVAL;
+
+- if (bstop < 0 || bstop >= cache->bcull_percent)
++ if (bstop >= cache->bcull_percent)
+ return cachefiles_daemon_range_error(cache, args);
+
+ cache->bstop_percent = bstop;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/cachefiles/internal.h linux-3.4-pax/fs/cachefiles/internal.h
+--- linux-3.4/fs/cachefiles/internal.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/fs/cachefiles/internal.h 2012-05-21 12:10:11.048048971 +0200
+@@ -57,7 +57,7 @@ struct cachefiles_cache {
+ wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
+ struct rb_root active_nodes; /* active nodes (can't be culled) */
+ rwlock_t active_lock; /* lock for active_nodes */
+- atomic_t gravecounter; /* graveyard uniquifier */
++ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
+ unsigned frun_percent; /* when to stop culling (% files) */
+ unsigned fcull_percent; /* when to start culling (% files) */
+ unsigned fstop_percent; /* when to stop allocating (% files) */
+@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
+ * proc.c
+ */
+ #ifdef CONFIG_CACHEFILES_HISTOGRAM
+-extern atomic_t cachefiles_lookup_histogram[HZ];
+-extern atomic_t cachefiles_mkdir_histogram[HZ];
+-extern atomic_t cachefiles_create_histogram[HZ];
++extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
++extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
++extern atomic_unchecked_t cachefiles_create_histogram[HZ];
+
+ extern int __init cachefiles_proc_init(void);
+ extern void cachefiles_proc_cleanup(void);
+ static inline
+-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
++void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
+ {
+ unsigned long jif = jiffies - start_jif;
+ if (jif >= HZ)
+ jif = HZ - 1;
+- atomic_inc(&histogram[jif]);
++ atomic_inc_unchecked(&histogram[jif]);
+ }
+
+ #else
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/cachefiles/namei.c linux-3.4-pax/fs/cachefiles/namei.c
+--- linux-3.4/fs/cachefiles/namei.c 2012-05-21 11:33:34.519929673 +0200
++++ linux-3.4-pax/fs/cachefiles/namei.c 2012-05-21 12:10:11.052048971 +0200
+@@ -318,7 +318,7 @@ try_again:
+ /* first step is to make up a grave dentry in the graveyard */
+ sprintf(nbuffer, "%08x%08x",
+ (uint32_t) get_seconds(),
+- (uint32_t) atomic_inc_return(&cache->gravecounter));
++ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
+
+ /* do the multiway lock magic */
+ trap = lock_rename(cache->graveyard, dir);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/cachefiles/proc.c linux-3.4-pax/fs/cachefiles/proc.c
+--- linux-3.4/fs/cachefiles/proc.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/fs/cachefiles/proc.c 2012-05-21 12:10:11.052048971 +0200
+@@ -14,9 +14,9 @@
+ #include <linux/seq_file.h>
+ #include "internal.h"
+
+-atomic_t cachefiles_lookup_histogram[HZ];
+-atomic_t cachefiles_mkdir_histogram[HZ];
+-atomic_t cachefiles_create_histogram[HZ];
++atomic_unchecked_t cachefiles_lookup_histogram[HZ];
++atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
++atomic_unchecked_t cachefiles_create_histogram[HZ];
+
+ /*
+ * display the latency histogram
+@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
+ return 0;
+ default:
+ index = (unsigned long) v - 3;
+- x = atomic_read(&cachefiles_lookup_histogram[index]);
+- y = atomic_read(&cachefiles_mkdir_histogram[index]);
+- z = atomic_read(&cachefiles_create_histogram[index]);
++ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
++ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
++ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
+ if (x == 0 && y == 0 && z == 0)
+ return 0;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/cachefiles/rdwr.c linux-3.4-pax/fs/cachefiles/rdwr.c
+--- linux-3.4/fs/cachefiles/rdwr.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/fs/cachefiles/rdwr.c 2012-05-21 12:10:11.056048972 +0200
+@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = file->f_op->write(
+- file, (const void __user *) data, len, &pos);
++ file, (const void __force_user *) data, len, &pos);
+ set_fs(old_fs);
+ kunmap(page);
+ if (ret != len)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/ceph/dir.c linux-3.4-pax/fs/ceph/dir.c
+--- linux-3.4/fs/ceph/dir.c 2012-03-19 10:39:09.692049302 +0100
++++ linux-3.4-pax/fs/ceph/dir.c 2012-05-21 12:10:11.060048972 +0200
+@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *fil
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ unsigned frag = fpos_frag(filp->f_pos);
+- int off = fpos_off(filp->f_pos);
++ unsigned int off = fpos_off(filp->f_pos);
+ int err;
+ u32 ftype;
+ struct ceph_mds_reply_info_parsed *rinfo;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/cifs/cifs_debug.c linux-3.4-pax/fs/cifs/cifs_debug.c
+--- linux-3.4/fs/cifs/cifs_debug.c 2012-05-21 11:33:34.547929673 +0200
++++ linux-3.4-pax/fs/cifs/cifs_debug.c 2012-05-21 12:10:11.064048972 +0200
+@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
+
+ if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
+ #ifdef CONFIG_CIFS_STATS2
+- atomic_set(&totBufAllocCount, 0);
+- atomic_set(&totSmBufAllocCount, 0);
++ atomic_set_unchecked(&totBufAllocCount, 0);
++ atomic_set_unchecked(&totSmBufAllocCount, 0);
+ #endif /* CONFIG_CIFS_STATS2 */
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each(tmp1, &cifs_tcp_ses_list) {
+@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
+ tcon = list_entry(tmp3,
+ struct cifs_tcon,
+ tcon_list);
+- atomic_set(&tcon->num_smbs_sent, 0);
+- atomic_set(&tcon->num_writes, 0);
+- atomic_set(&tcon->num_reads, 0);
+- atomic_set(&tcon->num_oplock_brks, 0);
+- atomic_set(&tcon->num_opens, 0);
+- atomic_set(&tcon->num_posixopens, 0);
+- atomic_set(&tcon->num_posixmkdirs, 0);
+- atomic_set(&tcon->num_closes, 0);
+- atomic_set(&tcon->num_deletes, 0);
+- atomic_set(&tcon->num_mkdirs, 0);
+- atomic_set(&tcon->num_rmdirs, 0);
+- atomic_set(&tcon->num_renames, 0);
+- atomic_set(&tcon->num_t2renames, 0);
+- atomic_set(&tcon->num_ffirst, 0);
+- atomic_set(&tcon->num_fnext, 0);
+- atomic_set(&tcon->num_fclose, 0);
+- atomic_set(&tcon->num_hardlinks, 0);
+- atomic_set(&tcon->num_symlinks, 0);
+- atomic_set(&tcon->num_locks, 0);
++ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
++ atomic_set_unchecked(&tcon->num_writes, 0);
++ atomic_set_unchecked(&tcon->num_reads, 0);
++ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
++ atomic_set_unchecked(&tcon->num_opens, 0);
++ atomic_set_unchecked(&tcon->num_posixopens, 0);
++ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
++ atomic_set_unchecked(&tcon->num_closes, 0);
++ atomic_set_unchecked(&tcon->num_deletes, 0);
++ atomic_set_unchecked(&tcon->num_mkdirs, 0);
++ atomic_set_unchecked(&tcon->num_rmdirs, 0);
++ atomic_set_unchecked(&tcon->num_renames, 0);
++ atomic_set_unchecked(&tcon->num_t2renames, 0);
++ atomic_set_unchecked(&tcon->num_ffirst, 0);
++ atomic_set_unchecked(&tcon->num_fnext, 0);
++ atomic_set_unchecked(&tcon->num_fclose, 0);
++ atomic_set_unchecked(&tcon->num_hardlinks, 0);
++ atomic_set_unchecked(&tcon->num_symlinks, 0);
++ atomic_set_unchecked(&tcon->num_locks, 0);
+ }
+ }
+ }
+@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
+ smBufAllocCount.counter, cifs_min_small);
+ #ifdef CONFIG_CIFS_STATS2
+ seq_printf(m, "Total Large %d Small %d Allocations\n",
+- atomic_read(&totBufAllocCount),
+- atomic_read(&totSmBufAllocCount));
++ atomic_read_unchecked(&totBufAllocCount),
++ atomic_read_unchecked(&totSmBufAllocCount));
+ #endif /* CONFIG_CIFS_STATS2 */
+
+ seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
+@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
+ if (tcon->need_reconnect)
+ seq_puts(m, "\tDISCONNECTED ");
+ seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
+- atomic_read(&tcon->num_smbs_sent),
+- atomic_read(&tcon->num_oplock_brks));
++ atomic_read_unchecked(&tcon->num_smbs_sent),
++ atomic_read_unchecked(&tcon->num_oplock_brks));
+ seq_printf(m, "\nReads: %d Bytes: %lld",
+- atomic_read(&tcon->num_reads),
++ atomic_read_unchecked(&tcon->num_reads),
+ (long long)(tcon->bytes_read));
+ seq_printf(m, "\nWrites: %d Bytes: %lld",
+- atomic_read(&tcon->num_writes),
++ atomic_read_unchecked(&tcon->num_writes),
+ (long long)(tcon->bytes_written));
+ seq_printf(m, "\nFlushes: %d",
+- atomic_read(&tcon->num_flushes));
++ atomic_read_unchecked(&tcon->num_flushes));
+ seq_printf(m, "\nLocks: %d HardLinks: %d "
+ "Symlinks: %d",
+- atomic_read(&tcon->num_locks),
+- atomic_read(&tcon->num_hardlinks),
+- atomic_read(&tcon->num_symlinks));
++ atomic_read_unchecked(&tcon->num_locks),
++ atomic_read_unchecked(&tcon->num_hardlinks),
++ atomic_read_unchecked(&tcon->num_symlinks));
+ seq_printf(m, "\nOpens: %d Closes: %d "
+ "Deletes: %d",
+- atomic_read(&tcon->num_opens),
+- atomic_read(&tcon->num_closes),
+- atomic_read(&tcon->num_deletes));
++ atomic_read_unchecked(&tcon->num_opens),
++ atomic_read_unchecked(&tcon->num_closes),
++ atomic_read_unchecked(&tcon->num_deletes));
+ seq_printf(m, "\nPosix Opens: %d "
+ "Posix Mkdirs: %d",
+- atomic_read(&tcon->num_posixopens),
+- atomic_read(&tcon->num_posixmkdirs));
++ atomic_read_unchecked(&tcon->num_posixopens),
++ atomic_read_unchecked(&tcon->num_posixmkdirs));
+ seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
+- atomic_read(&tcon->num_mkdirs),
+- atomic_read(&tcon->num_rmdirs));
++ atomic_read_unchecked(&tcon->num_mkdirs),
++ atomic_read_unchecked(&tcon->num_rmdirs));
+ seq_printf(m, "\nRenames: %d T2 Renames %d",
+- atomic_read(&tcon->num_renames),
+- atomic_read(&tcon->num_t2renames));
++ atomic_read_unchecked(&tcon->num_renames),
++ atomic_read_unchecked(&tcon->num_t2renames));
+ seq_printf(m, "\nFindFirst: %d FNext %d "
+ "FClose %d",
+- atomic_read(&tcon->num_ffirst),
+- atomic_read(&tcon->num_fnext),
+- atomic_read(&tcon->num_fclose));
++ atomic_read_unchecked(&tcon->num_ffirst),
++ atomic_read_unchecked(&tcon->num_fnext),
++ atomic_read_unchecked(&tcon->num_fclose));
+ }
+ }
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/cifs/cifsfs.c linux-3.4-pax/fs/cifs/cifsfs.c
+--- linux-3.4/fs/cifs/cifsfs.c 2012-05-21 11:33:34.551929674 +0200
++++ linux-3.4-pax/fs/cifs/cifsfs.c 2012-05-21 12:10:11.068048972 +0200
+@@ -985,7 +985,7 @@ cifs_init_request_bufs(void)
+ cifs_req_cachep = kmem_cache_create("cifs_request",
+ CIFSMaxBufSize +
+ MAX_CIFS_HDR_SIZE, 0,
+- SLAB_HWCACHE_ALIGN, NULL);
++ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
+ if (cifs_req_cachep == NULL)
+ return -ENOMEM;
+
+@@ -1012,7 +1012,7 @@ cifs_init_request_bufs(void)
+ efficient to alloc 1 per page off the slab compared to 17K (5page)
+ alloc of large cifs buffers even when page debugging is on */
+ cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
+- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
++ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
+ NULL);
+ if (cifs_sm_req_cachep == NULL) {
+ mempool_destroy(cifs_req_poolp);
+@@ -1097,8 +1097,8 @@ init_cifs(void)
+ atomic_set(&bufAllocCount, 0);
+ atomic_set(&smBufAllocCount, 0);
+ #ifdef CONFIG_CIFS_STATS2
+- atomic_set(&totBufAllocCount, 0);
+- atomic_set(&totSmBufAllocCount, 0);
++ atomic_set_unchecked(&totBufAllocCount, 0);
++ atomic_set_unchecked(&totSmBufAllocCount, 0);
+ #endif /* CONFIG_CIFS_STATS2 */
+
+ atomic_set(&midCount, 0);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/cifs/cifsglob.h linux-3.4-pax/fs/cifs/cifsglob.h
+--- linux-3.4/fs/cifs/cifsglob.h 2012-05-21 11:33:34.555929674 +0200
++++ linux-3.4-pax/fs/cifs/cifsglob.h 2012-05-21 12:10:11.068048972 +0200
+@@ -438,28 +438,28 @@ struct cifs_tcon {
+ __u16 Flags; /* optional support bits */
+ enum statusEnum tidStatus;
+ #ifdef CONFIG_CIFS_STATS
+- atomic_t num_smbs_sent;
+- atomic_t num_writes;
+- atomic_t num_reads;
+- atomic_t num_flushes;
+- atomic_t num_oplock_brks;
+- atomic_t num_opens;
+- atomic_t num_closes;
+- atomic_t num_deletes;
+- atomic_t num_mkdirs;
+- atomic_t num_posixopens;
+- atomic_t num_posixmkdirs;
+- atomic_t num_rmdirs;
+- atomic_t num_renames;
+- atomic_t num_t2renames;
+- atomic_t num_ffirst;
+- atomic_t num_fnext;
+- atomic_t num_fclose;
+- atomic_t num_hardlinks;
+- atomic_t num_symlinks;
+- atomic_t num_locks;
+- atomic_t num_acl_get;
+- atomic_t num_acl_set;
++ atomic_unchecked_t num_smbs_sent;
++ atomic_unchecked_t num_writes;
++ atomic_unchecked_t num_reads;
++ atomic_unchecked_t num_flushes;
++ atomic_unchecked_t num_oplock_brks;
++ atomic_unchecked_t num_opens;
++ atomic_unchecked_t num_closes;
++ atomic_unchecked_t num_deletes;
++ atomic_unchecked_t num_mkdirs;
++ atomic_unchecked_t num_posixopens;
++ atomic_unchecked_t num_posixmkdirs;
++ atomic_unchecked_t num_rmdirs;
++ atomic_unchecked_t num_renames;
++ atomic_unchecked_t num_t2renames;
++ atomic_unchecked_t num_ffirst;
++ atomic_unchecked_t num_fnext;
++ atomic_unchecked_t num_fclose;
++ atomic_unchecked_t num_hardlinks;
++ atomic_unchecked_t num_symlinks;
++ atomic_unchecked_t num_locks;
++ atomic_unchecked_t num_acl_get;
++ atomic_unchecked_t num_acl_set;
+ #ifdef CONFIG_CIFS_STATS2
+ unsigned long long time_writes;
+ unsigned long long time_reads;
+@@ -676,7 +676,7 @@ convert_delimiter(char *path, char delim
+ }
+
+ #ifdef CONFIG_CIFS_STATS
+-#define cifs_stats_inc atomic_inc
++#define cifs_stats_inc atomic_inc_unchecked
+
+ static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
+ unsigned int bytes)
+@@ -1035,8 +1035,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
+ /* Various Debug counters */
+ GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
+ #ifdef CONFIG_CIFS_STATS2
+-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
+-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
++GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
++GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
+ #endif
+ GLOBAL_EXTERN atomic_t smBufAllocCount;
+ GLOBAL_EXTERN atomic_t midCount;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/cifs/link.c linux-3.4-pax/fs/cifs/link.c
+--- linux-3.4/fs/cifs/link.c 2012-01-08 19:48:22.771471209 +0100
++++ linux-3.4-pax/fs/cifs/link.c 2012-05-21 12:10:11.072048972 +0200
+@@ -600,7 +600,7 @@ symlink_exit:
+
+ void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
+ {
+- char *p = nd_get_link(nd);
++ const char *p = nd_get_link(nd);
+ if (!IS_ERR(p))
+ kfree(p);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/cifs/misc.c linux-3.4-pax/fs/cifs/misc.c
+--- linux-3.4/fs/cifs/misc.c 2012-05-21 11:33:34.603929677 +0200
++++ linux-3.4-pax/fs/cifs/misc.c 2012-05-21 12:10:11.076048973 +0200
+@@ -156,7 +156,7 @@ cifs_buf_get(void)
+ memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
+ atomic_inc(&bufAllocCount);
+ #ifdef CONFIG_CIFS_STATS2
+- atomic_inc(&totBufAllocCount);
++ atomic_inc_unchecked(&totBufAllocCount);
+ #endif /* CONFIG_CIFS_STATS2 */
+ }
+
+@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
+ /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
+ atomic_inc(&smBufAllocCount);
+ #ifdef CONFIG_CIFS_STATS2
+- atomic_inc(&totSmBufAllocCount);
++ atomic_inc_unchecked(&totSmBufAllocCount);
+ #endif /* CONFIG_CIFS_STATS2 */
+
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/coda/cache.c linux-3.4-pax/fs/coda/cache.c
+--- linux-3.4/fs/coda/cache.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/fs/coda/cache.c 2012-05-21 12:10:11.076048973 +0200
+@@ -24,7 +24,7 @@
+ #include "coda_linux.h"
+ #include "coda_cache.h"
+
+-static atomic_t permission_epoch = ATOMIC_INIT(0);
++static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
+
+ /* replace or extend an acl cache hit */
+ void coda_cache_enter(struct inode *inode, int mask)
+@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
+ struct coda_inode_info *cii = ITOC(inode);
+
+ spin_lock(&cii->c_lock);
+- cii->c_cached_epoch = atomic_read(&permission_epoch);
++ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
+ if (cii->c_uid != current_fsuid()) {
+ cii->c_uid = current_fsuid();
+ cii->c_cached_perm = mask;
+@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
+ {
+ struct coda_inode_info *cii = ITOC(inode);
+ spin_lock(&cii->c_lock);
+- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
++ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
+ spin_unlock(&cii->c_lock);
+ }
+
+ /* remove all acl caches */
+ void coda_cache_clear_all(struct super_block *sb)
+ {
+- atomic_inc(&permission_epoch);
++ atomic_inc_unchecked(&permission_epoch);
+ }
+
+
+@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
+ spin_lock(&cii->c_lock);
+ hit = (mask & cii->c_cached_perm) == mask &&
+ cii->c_uid == current_fsuid() &&
+- cii->c_cached_epoch == atomic_read(&permission_epoch);
++ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
+ spin_unlock(&cii->c_lock);
+
+ return hit;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/compat_binfmt_elf.c linux-3.4-pax/fs/compat_binfmt_elf.c
+--- linux-3.4/fs/compat_binfmt_elf.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/fs/compat_binfmt_elf.c 2012-05-21 12:10:11.080048973 +0200
+@@ -30,11 +30,13 @@
+ #undef elf_phdr
+ #undef elf_shdr
+ #undef elf_note
++#undef elf_dyn
+ #undef elf_addr_t
+ #define elfhdr elf32_hdr
+ #define elf_phdr elf32_phdr
+ #define elf_shdr elf32_shdr
+ #define elf_note elf32_note
++#define elf_dyn Elf32_Dyn
+ #define elf_addr_t Elf32_Addr
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/compat.c linux-3.4-pax/fs/compat.c
+--- linux-3.4/fs/compat.c 2012-05-21 11:33:34.631929678 +0200
++++ linux-3.4-pax/fs/compat.c 2012-05-21 12:10:11.080048973 +0200
+@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u3
+
+ set_fs(KERNEL_DS);
+ /* The __user pointer cast is valid because of the set_fs() */
+- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
++ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
+ set_fs(oldfs);
+ /* truncating is ok because it's a user address */
+ if (!ret)
+@@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int
+ goto out;
+
+ ret = -EINVAL;
+- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
++ if (nr_segs > UIO_MAXIOV)
+ goto out;
+ if (nr_segs > fast_segs) {
+ ret = -ENOMEM;
+@@ -1062,7 +1062,7 @@ asmlinkage long compat_sys_getdents64(un
+ error = buf.error;
+ lastdirent = buf.previous;
+ if (lastdirent) {
+- typeof(lastdirent->d_off) d_off = file->f_pos;
++ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
+ if (__put_user_unaligned(d_off, &lastdirent->d_off))
+ error = -EFAULT;
+ else
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/compat_ioctl.c linux-3.4-pax/fs/compat_ioctl.c
+--- linux-3.4/fs/compat_ioctl.c 2012-05-21 11:33:34.635929678 +0200
++++ linux-3.4-pax/fs/compat_ioctl.c 2012-05-21 12:10:11.084048973 +0200
+@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsi
+
+ err = get_user(palp, &up->palette);
+ err |= get_user(length, &up->length);
++ if (err)
++ return -EFAULT;
+
+ up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
+ err = put_user(compat_ptr(palp), &up_native->palette);
+@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned
+ return -EFAULT;
+ if (__get_user(udata, &ss32->iomem_base))
+ return -EFAULT;
+- ss.iomem_base = compat_ptr(udata);
++ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
+ if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
+ __get_user(ss.port_high, &ss32->port_high))
+ return -EFAULT;
+@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(stru
+ copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
+ copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
+ copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
+- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
++ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
+ return -EFAULT;
+
+ return ioctl_preallocate(file, p);
+@@ -1610,8 +1612,8 @@ asmlinkage long compat_sys_ioctl(unsigne
+ static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
+ {
+ unsigned int a, b;
+- a = *(unsigned int *)p;
+- b = *(unsigned int *)q;
++ a = *(const unsigned int *)p;
++ b = *(const unsigned int *)q;
+ if (a > b)
+ return 1;
+ if (a < b)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/configfs/dir.c linux-3.4-pax/fs/configfs/dir.c
+--- linux-3.4/fs/configfs/dir.c 2012-05-21 11:33:34.639929678 +0200
++++ linux-3.4-pax/fs/configfs/dir.c 2012-05-21 12:10:11.088048973 +0200
+@@ -1564,7 +1564,8 @@ static int configfs_readdir(struct file
+ }
+ for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
+ struct configfs_dirent *next;
+- const char * name;
++ const unsigned char * name;
++ char d_name[sizeof(next->s_dentry->d_iname)];
+ int len;
+ struct inode *inode = NULL;
+
+@@ -1574,7 +1575,12 @@ static int configfs_readdir(struct file
+ continue;
+
+ name = configfs_get_name(next);
+- len = strlen(name);
++ if (next->s_dentry && name == next->s_dentry->d_iname) {
++ len = next->s_dentry->d_name.len;
++ memcpy(d_name, name, len);
++ name = d_name;
++ } else
++ len = strlen(name);
+
+ /*
+ * We'll have a dentry and an inode for
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/dcache.c linux-3.4-pax/fs/dcache.c
+--- linux-3.4/fs/dcache.c 2012-05-21 11:33:34.647929679 +0200
++++ linux-3.4-pax/fs/dcache.c 2012-05-21 12:10:11.096048974 +0200
+@@ -3084,7 +3084,7 @@ void __init vfs_caches_init(unsigned lon
+ mempages -= reserve;
+
+ names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
+- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
++ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
+
+ dcache_init();
+ inode_init();
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/ecryptfs/inode.c linux-3.4-pax/fs/ecryptfs/inode.c
+--- linux-3.4/fs/ecryptfs/inode.c 2012-03-19 10:39:09.800049293 +0100
++++ linux-3.4-pax/fs/ecryptfs/inode.c 2012-05-21 12:10:11.096048974 +0200
+@@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struc
+ old_fs = get_fs();
+ set_fs(get_ds());
+ rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
+- (char __user *)lower_buf,
++ (char __force_user *)lower_buf,
+ lower_bufsiz);
+ set_fs(old_fs);
+ if (rc < 0)
+@@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct
+ }
+ old_fs = get_fs();
+ set_fs(get_ds());
+- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
++ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
+ set_fs(old_fs);
+ if (rc < 0) {
+ kfree(buf);
+@@ -733,7 +733,7 @@ out:
+ static void
+ ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
+ {
+- char *buf = nd_get_link(nd);
++ const char *buf = nd_get_link(nd);
+ if (!IS_ERR(buf)) {
+ /* Free the char* */
+ kfree(buf);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/ecryptfs/miscdev.c linux-3.4-pax/fs/ecryptfs/miscdev.c
+--- linux-3.4/fs/ecryptfs/miscdev.c 2012-03-19 10:39:09.804049293 +0100
++++ linux-3.4-pax/fs/ecryptfs/miscdev.c 2012-05-21 12:10:11.100048974 +0200
+@@ -345,7 +345,7 @@ check_list:
+ goto out_unlock_msg_ctx;
+ i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
+ if (msg_ctx->msg) {
+- if (copy_to_user(&buf[i], packet_length, packet_length_size))
++ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
+ goto out_unlock_msg_ctx;
+ i += packet_length_size;
+ if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/ecryptfs/read_write.c linux-3.4-pax/fs/ecryptfs/read_write.c
+--- linux-3.4/fs/ecryptfs/read_write.c 2012-03-19 10:39:09.808049292 +0100
++++ linux-3.4-pax/fs/ecryptfs/read_write.c 2012-05-21 12:10:11.104048974 +0200
+@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *e
+ return -EIO;
+ fs_save = get_fs();
+ set_fs(get_ds());
+- rc = vfs_write(lower_file, data, size, &offset);
++ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
+ set_fs(fs_save);
+ mark_inode_dirty_sync(ecryptfs_inode);
+ return rc;
+@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff
+ return -EIO;
+ fs_save = get_fs();
+ set_fs(get_ds());
+- rc = vfs_read(lower_file, data, size, &offset);
++ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
+ set_fs(fs_save);
+ return rc;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/exec.c linux-3.4-pax/fs/exec.c
+--- linux-3.4/fs/exec.c 2012-05-21 11:33:34.851929690 +0200
++++ linux-3.4-pax/fs/exec.c 2012-05-21 12:10:11.108048974 +0200
+@@ -55,6 +55,13 @@
+ #include <linux/pipe_fs_i.h>
+ #include <linux/oom.h>
+ #include <linux/compat.h>
++#include <linux/random.h>
++#include <linux/seq_file.h>
++
++#ifdef CONFIG_PAX_REFCOUNT
++#include <linux/kallsyms.h>
++#include <linux/kdebug.h>
++#endif
+
+ #include <asm/uaccess.h>
+ #include <asm/mmu_context.h>
+@@ -66,6 +73,15 @@
+
+ #include <trace/events/sched.h>
+
++#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
++void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
++#endif
++
++#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
++void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
++EXPORT_SYMBOL(pax_set_initial_flags_func);
++#endif
++
+ int core_uses_pid;
+ char core_pattern[CORENAME_MAX_SIZE] = "core";
+ unsigned int core_pipe_limit;
+@@ -75,7 +91,7 @@ struct core_name {
+ char *corename;
+ int used, size;
+ };
+-static atomic_t call_count = ATOMIC_INIT(1);
++static atomic_unchecked_t call_count = ATOMIC_INIT(1);
+
+ /* The maximal length of core_pattern is also specified in sysctl.c */
+
+@@ -191,18 +207,10 @@ static struct page *get_arg_page(struct
+ int write)
+ {
+ struct page *page;
+- int ret;
+
+-#ifdef CONFIG_STACK_GROWSUP
+- if (write) {
+- ret = expand_downwards(bprm->vma, pos);
+- if (ret < 0)
+- return NULL;
+- }
+-#endif
+- ret = get_user_pages(current, bprm->mm, pos,
+- 1, write, 1, &page, NULL);
+- if (ret <= 0)
++ if (0 > expand_downwards(bprm->vma, pos))
++ return NULL;
++ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
+ return NULL;
+
+ if (write) {
+@@ -277,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
+ vma->vm_end = STACK_TOP_MAX;
+ vma->vm_start = vma->vm_end - PAGE_SIZE;
+ vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++#endif
++
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ INIT_LIST_HEAD(&vma->anon_vma_chain);
+
+@@ -291,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
+ mm->stack_vm = mm->total_vm = 1;
+ up_write(&mm->mmap_sem);
+ bprm->p = vma->vm_end - sizeof(void *);
++
++#ifdef CONFIG_PAX_RANDUSTACK
++ if (randomize_va_space)
++ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
++#endif
++
+ return 0;
+ err:
+ up_write(&mm->mmap_sem);
+@@ -420,14 +439,14 @@ static const char __user *get_user_arg_p
+ compat_uptr_t compat;
+
+ if (get_user(compat, argv.ptr.compat + nr))
+- return ERR_PTR(-EFAULT);
++ return (const char __force_user *)ERR_PTR(-EFAULT);
+
+ return compat_ptr(compat);
+ }
+ #endif
+
+ if (get_user(native, argv.ptr.native + nr))
+- return ERR_PTR(-EFAULT);
++ return (const char __force_user *)ERR_PTR(-EFAULT);
+
+ return native;
+ }
+@@ -446,7 +465,7 @@ static int count(struct user_arg_ptr arg
+ if (!p)
+ break;
+
+- if (IS_ERR(p))
++ if (IS_ERR((const char __force_kernel *)p))
+ return -EFAULT;
+
+ if (i++ >= max)
+@@ -480,7 +499,7 @@ static int copy_strings(int argc, struct
+
+ ret = -EFAULT;
+ str = get_user_arg_ptr(argv, argc);
+- if (IS_ERR(str))
++ if (IS_ERR((const char __force_kernel *)str))
+ goto out;
+
+ len = strnlen_user(str, MAX_ARG_STRLEN);
+@@ -562,7 +581,7 @@ int copy_strings_kernel(int argc, const
+ int r;
+ mm_segment_t oldfs = get_fs();
+ struct user_arg_ptr argv = {
+- .ptr.native = (const char __user *const __user *)__argv,
++ .ptr.native = (const char __force_user *const __force_user *)__argv,
+ };
+
+ set_fs(KERNEL_DS);
+@@ -597,7 +616,8 @@ static int shift_arg_pages(struct vm_are
+ unsigned long new_end = old_end - shift;
+ struct mmu_gather tlb;
+
+- BUG_ON(new_start > new_end);
++ if (new_start >= new_end || new_start < mmap_min_addr)
++ return -ENOMEM;
+
+ /*
+ * ensure there are no vmas between where we want to go
+@@ -606,6 +626,10 @@ static int shift_arg_pages(struct vm_are
+ if (vma != find_vma(mm, new_start))
+ return -EFAULT;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ BUG_ON(pax_find_mirror_vma(vma));
++#endif
++
+ /*
+ * cover the whole range: [new_start, old_end)
+ */
+@@ -686,10 +710,6 @@ int setup_arg_pages(struct linux_binprm
+ stack_top = arch_align_stack(stack_top);
+ stack_top = PAGE_ALIGN(stack_top);
+
+- if (unlikely(stack_top < mmap_min_addr) ||
+- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
+- return -ENOMEM;
+-
+ stack_shift = vma->vm_end - stack_top;
+
+ bprm->p -= stack_shift;
+@@ -701,8 +721,28 @@ int setup_arg_pages(struct linux_binprm
+ bprm->exec -= stack_shift;
+
+ down_write(&mm->mmap_sem);
++
++ /* Move stack pages down in memory. */
++ if (stack_shift) {
++ ret = shift_arg_pages(vma, stack_shift);
++ if (ret)
++ goto out_unlock;
++ }
++
+ vm_flags = VM_STACK_FLAGS;
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->pax_flags & MF_PAX_MPROTECT)
++ vm_flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ /*
+ * Adjust stack execute permissions; explicitly enable for
+ * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
+@@ -721,13 +761,6 @@ int setup_arg_pages(struct linux_binprm
+ goto out_unlock;
+ BUG_ON(prev != vma);
+
+- /* Move stack pages down in memory. */
+- if (stack_shift) {
+- ret = shift_arg_pages(vma, stack_shift);
+- if (ret)
+- goto out_unlock;
+- }
+-
+ /* mprotect_fixup is overkill to remove the temporary stack flags */
+ vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
+
+@@ -808,7 +841,7 @@ int kernel_read(struct file *file, loff_
+ old_fs = get_fs();
+ set_fs(get_ds());
+ /* The cast to a user pointer is valid due to the set_fs() */
+- result = vfs_read(file, (void __user *)addr, count, &pos);
++ result = vfs_read(file, (void __force_user *)addr, count, &pos);
+ set_fs(old_fs);
+ return result;
+ }
+@@ -1254,7 +1287,7 @@ static int check_unsafe_exec(struct linu
+ }
+ rcu_read_unlock();
+
+- if (p->fs->users > n_fs) {
++ if (atomic_read(&p->fs->users) > n_fs) {
+ bprm->unsafe |= LSM_UNSAFE_SHARE;
+ } else {
+ res = -EAGAIN;
+@@ -1627,7 +1660,7 @@ static int expand_corename(struct core_n
+ {
+ char *old_corename = cn->corename;
+
+- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
++ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
+ cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
+
+ if (!cn->corename) {
+@@ -1724,7 +1757,7 @@ static int format_corename(struct core_n
+ int pid_in_pattern = 0;
+ int err = 0;
+
+- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
++ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
+ cn->corename = kmalloc(cn->size, GFP_KERNEL);
+ cn->used = 0;
+
+@@ -1821,6 +1854,216 @@ out:
+ return ispipe;
+ }
+
++int pax_check_flags(unsigned long *flags)
++{
++ int retval = 0;
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
++ if (*flags & MF_PAX_SEGMEXEC)
++ {
++ *flags &= ~MF_PAX_SEGMEXEC;
++ retval = -EINVAL;
++ }
++#endif
++
++ if ((*flags & MF_PAX_PAGEEXEC)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ && (*flags & MF_PAX_SEGMEXEC)
++#endif
++
++ )
++ {
++ *flags &= ~MF_PAX_PAGEEXEC;
++ retval = -EINVAL;
++ }
++
++ if ((*flags & MF_PAX_MPROTECT)
++
++#ifdef CONFIG_PAX_MPROTECT
++ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
++#endif
++
++ )
++ {
++ *flags &= ~MF_PAX_MPROTECT;
++ retval = -EINVAL;
++ }
++
++ if ((*flags & MF_PAX_EMUTRAMP)
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
++#endif
++
++ )
++ {
++ *flags &= ~MF_PAX_EMUTRAMP;
++ retval = -EINVAL;
++ }
++
++ return retval;
++}
++
++EXPORT_SYMBOL(pax_check_flags);
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
++{
++ struct task_struct *tsk = current;
++ struct mm_struct *mm = current->mm;
++ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
++ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
++ char *path_exec = NULL;
++ char *path_fault = NULL;
++ unsigned long start = 0UL, end = 0UL, offset = 0UL;
++
++ if (buffer_exec && buffer_fault) {
++ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
++
++ down_read(&mm->mmap_sem);
++ vma = mm->mmap;
++ while (vma && (!vma_exec || !vma_fault)) {
++ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
++ vma_exec = vma;
++ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
++ vma_fault = vma;
++ vma = vma->vm_next;
++ }
++ if (vma_exec) {
++ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
++ if (IS_ERR(path_exec))
++ path_exec = "<path too long>";
++ else {
++ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
++ if (path_exec) {
++ *path_exec = 0;
++ path_exec = buffer_exec;
++ } else
++ path_exec = "<path too long>";
++ }
++ }
++ if (vma_fault) {
++ start = vma_fault->vm_start;
++ end = vma_fault->vm_end;
++ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
++ if (vma_fault->vm_file) {
++ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
++ if (IS_ERR(path_fault))
++ path_fault = "<path too long>";
++ else {
++ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
++ if (path_fault) {
++ *path_fault = 0;
++ path_fault = buffer_fault;
++ } else
++ path_fault = "<path too long>";
++ }
++ } else
++ path_fault = "<anonymous mapping>";
++ }
++ up_read(&mm->mmap_sem);
++ }
++ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
++ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
++ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
++ task_uid(tsk), task_euid(tsk), pc, sp);
++ free_page((unsigned long)buffer_exec);
++ free_page((unsigned long)buffer_fault);
++ pax_report_insns(regs, pc, sp);
++ do_coredump(SIGKILL, SIGKILL, regs);
++}
++#endif
++
++#ifdef CONFIG_PAX_REFCOUNT
++void pax_report_refcount_overflow(struct pt_regs *regs)
++{
++ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
++ current->comm, task_pid_nr(current), current_uid(), current_euid());
++	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
++ show_regs(regs);
++ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
++}
++#endif
++
++#ifdef CONFIG_PAX_USERCOPY
++/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
++int object_is_on_stack(const void *obj, unsigned long len)
++{
++ const void * const stack = task_stack_page(current);
++ const void * const stackend = stack + THREAD_SIZE;
++
++#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
++ const void *frame = NULL;
++ const void *oldframe;
++#endif
++
++ if (obj + len < obj)
++ return -1;
++
++ if (obj + len <= stack || stackend <= obj)
++ return 0;
++
++ if (obj < stack || stackend < obj + len)
++ return -1;
++
++#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
++ oldframe = __builtin_frame_address(1);
++ if (oldframe)
++ frame = __builtin_frame_address(2);
++ /*
++ low ----------------------------------------------> high
++ [saved bp][saved ip][args][local vars][saved bp][saved ip]
++ ^----------------^
++ allow copies only within here
++ */
++ while (stack <= frame && frame < stackend) {
++ /* if obj + len extends past the last frame, this
++ check won't pass and the next frame will be 0,
++ causing us to bail out and correctly report
++ the copy as invalid
++ */
++ if (obj + len <= frame)
++ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
++ oldframe = frame;
++ frame = *(const void * const *)frame;
++ }
++ return -1;
++#else
++ return 1;
++#endif
++}
++
++__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
++{
++ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
++ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
++ dump_stack();
++ do_group_exit(SIGKILL);
++}
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++void pax_track_stack(void)
++{
++ unsigned long sp = (unsigned long)&sp;
++ if (sp < current_thread_info()->lowest_stack &&
++ sp > (unsigned long)task_stack_page(current))
++ current_thread_info()->lowest_stack = sp;
++}
++EXPORT_SYMBOL(pax_track_stack);
++#endif
++
++#ifdef CONFIG_PAX_SIZE_OVERFLOW
++void report_size_overflow(const char *file, unsigned int line, const char *func)
++{
++ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
++ dump_stack();
++ do_group_exit(SIGKILL);
++}
++EXPORT_SYMBOL(report_size_overflow);
++#endif
++
+ static int zap_process(struct task_struct *start, int exit_code)
+ {
+ struct task_struct *t;
+@@ -2018,17 +2261,17 @@ static void wait_for_dump_helpers(struct
+ pipe = file->f_path.dentry->d_inode->i_pipe;
+
+ pipe_lock(pipe);
+- pipe->readers++;
+- pipe->writers--;
++ atomic_inc(&pipe->readers);
++ atomic_dec(&pipe->writers);
+
+- while ((pipe->readers > 1) && (!signal_pending(current))) {
++ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
+ wake_up_interruptible_sync(&pipe->wait);
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+ pipe_wait(pipe);
+ }
+
+- pipe->readers--;
+- pipe->writers++;
++ atomic_dec(&pipe->readers);
++ atomic_inc(&pipe->writers);
+ pipe_unlock(pipe);
+
+ }
+@@ -2089,7 +2332,7 @@ void do_coredump(long signr, int exit_co
+ int retval = 0;
+ int flag = 0;
+ int ispipe;
+- static atomic_t core_dump_count = ATOMIC_INIT(0);
++ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
+ struct coredump_params cprm = {
+ .signr = signr,
+ .regs = regs,
+@@ -2171,7 +2414,7 @@ void do_coredump(long signr, int exit_co
+ }
+ cprm.limit = RLIM_INFINITY;
+
+- dump_count = atomic_inc_return(&core_dump_count);
++ dump_count = atomic_inc_return_unchecked(&core_dump_count);
+ if (core_pipe_limit && (core_pipe_limit < dump_count)) {
+ printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
+ task_tgid_vnr(current), current->comm);
+@@ -2241,7 +2484,7 @@ close_fail:
+ filp_close(cprm.file, NULL);
+ fail_dropcount:
+ if (ispipe)
+- atomic_dec(&core_dump_count);
++ atomic_dec_unchecked(&core_dump_count);
+ fail_unlock:
+ kfree(cn.corename);
+ fail_corename:
+@@ -2260,7 +2503,7 @@ fail:
+ */
+ int dump_write(struct file *file, const void *addr, int nr)
+ {
+- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
++ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
+ }
+ EXPORT_SYMBOL(dump_write);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/ext4/ext4.h linux-3.4-pax/fs/ext4/ext4.h
+--- linux-3.4/fs/ext4/ext4.h 2012-05-21 11:33:34.955929696 +0200
++++ linux-3.4-pax/fs/ext4/ext4.h 2012-05-21 12:10:11.112048975 +0200
+@@ -1225,19 +1225,19 @@ struct ext4_sb_info {
+ unsigned long s_mb_last_start;
+
+ /* stats for buddy allocator */
+- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
+- atomic_t s_bal_success; /* we found long enough chunks */
+- atomic_t s_bal_allocated; /* in blocks */
+- atomic_t s_bal_ex_scanned; /* total extents scanned */
+- atomic_t s_bal_goals; /* goal hits */
+- atomic_t s_bal_breaks; /* too long searches */
+- atomic_t s_bal_2orders; /* 2^order hits */
++ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
++ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
++ atomic_unchecked_t s_bal_allocated; /* in blocks */
++ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
++ atomic_unchecked_t s_bal_goals; /* goal hits */
++ atomic_unchecked_t s_bal_breaks; /* too long searches */
++ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
+ spinlock_t s_bal_lock;
+ unsigned long s_mb_buddies_generated;
+ unsigned long long s_mb_generation_time;
+- atomic_t s_mb_lost_chunks;
+- atomic_t s_mb_preallocated;
+- atomic_t s_mb_discarded;
++ atomic_unchecked_t s_mb_lost_chunks;
++ atomic_unchecked_t s_mb_preallocated;
++ atomic_unchecked_t s_mb_discarded;
+ atomic_t s_lock_busy;
+
+ /* locality groups */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/ext4/mballoc.c linux-3.4-pax/fs/ext4/mballoc.c
+--- linux-3.4/fs/ext4/mballoc.c 2012-05-21 11:33:34.995929698 +0200
++++ linux-3.4-pax/fs/ext4/mballoc.c 2012-05-21 12:10:11.116048975 +0200
+@@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ex
+ BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
+
+ if (EXT4_SB(sb)->s_mb_stats)
+- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
++ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
+
+ break;
+ }
+@@ -2041,7 +2041,7 @@ repeat:
+ ac->ac_status = AC_STATUS_CONTINUE;
+ ac->ac_flags |= EXT4_MB_HINT_FIRST;
+ cr = 3;
+- atomic_inc(&sbi->s_mb_lost_chunks);
++ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
+ goto repeat;
+ }
+ }
+@@ -2542,25 +2542,25 @@ int ext4_mb_release(struct super_block *
+ if (sbi->s_mb_stats) {
+ ext4_msg(sb, KERN_INFO,
+ "mballoc: %u blocks %u reqs (%u success)",
+- atomic_read(&sbi->s_bal_allocated),
+- atomic_read(&sbi->s_bal_reqs),
+- atomic_read(&sbi->s_bal_success));
++ atomic_read_unchecked(&sbi->s_bal_allocated),
++ atomic_read_unchecked(&sbi->s_bal_reqs),
++ atomic_read_unchecked(&sbi->s_bal_success));
+ ext4_msg(sb, KERN_INFO,
+ "mballoc: %u extents scanned, %u goal hits, "
+ "%u 2^N hits, %u breaks, %u lost",
+- atomic_read(&sbi->s_bal_ex_scanned),
+- atomic_read(&sbi->s_bal_goals),
+- atomic_read(&sbi->s_bal_2orders),
+- atomic_read(&sbi->s_bal_breaks),
+- atomic_read(&sbi->s_mb_lost_chunks));
++ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
++ atomic_read_unchecked(&sbi->s_bal_goals),
++ atomic_read_unchecked(&sbi->s_bal_2orders),
++ atomic_read_unchecked(&sbi->s_bal_breaks),
++ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
+ ext4_msg(sb, KERN_INFO,
+ "mballoc: %lu generated and it took %Lu",
+ sbi->s_mb_buddies_generated,
+ sbi->s_mb_generation_time);
+ ext4_msg(sb, KERN_INFO,
+ "mballoc: %u preallocated, %u discarded",
+- atomic_read(&sbi->s_mb_preallocated),
+- atomic_read(&sbi->s_mb_discarded));
++ atomic_read_unchecked(&sbi->s_mb_preallocated),
++ atomic_read_unchecked(&sbi->s_mb_discarded));
+ }
+
+ free_percpu(sbi->s_locality_groups);
+@@ -3044,16 +3044,16 @@ static void ext4_mb_collect_stats(struct
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+
+ if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
+- atomic_inc(&sbi->s_bal_reqs);
+- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
++ atomic_inc_unchecked(&sbi->s_bal_reqs);
++ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
+ if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
+- atomic_inc(&sbi->s_bal_success);
+- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
++ atomic_inc_unchecked(&sbi->s_bal_success);
++ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
+ if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
+ ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
+- atomic_inc(&sbi->s_bal_goals);
++ atomic_inc_unchecked(&sbi->s_bal_goals);
+ if (ac->ac_found > sbi->s_mb_max_to_scan)
+- atomic_inc(&sbi->s_bal_breaks);
++ atomic_inc_unchecked(&sbi->s_bal_breaks);
+ }
+
+ if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
+@@ -3457,7 +3457,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
+ trace_ext4_mb_new_inode_pa(ac, pa);
+
+ ext4_mb_use_inode_pa(ac, pa);
+- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
++ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
+
+ ei = EXT4_I(ac->ac_inode);
+ grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
+@@ -3517,7 +3517,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
+ trace_ext4_mb_new_group_pa(ac, pa);
+
+ ext4_mb_use_group_pa(ac, pa);
+- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
++ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
+
+ grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
+ lg = ac->ac_lg;
+@@ -3606,7 +3606,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
+ * from the bitmap and continue.
+ */
+ }
+- atomic_add(free, &sbi->s_mb_discarded);
++ atomic_add_unchecked(free, &sbi->s_mb_discarded);
+
+ return err;
+ }
+@@ -3624,7 +3624,7 @@ ext4_mb_release_group_pa(struct ext4_bud
+ ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
+ BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
+ mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
+- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
++ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
+ trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
+
+ return 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/fcntl.c linux-3.4-pax/fs/fcntl.c
+--- linux-3.4/fs/fcntl.c 2012-05-21 11:33:35.043929700 +0200
++++ linux-3.4-pax/fs/fcntl.c 2012-05-21 12:10:11.120048975 +0200
+@@ -266,7 +266,7 @@ pid_t f_getown(struct file *filp)
+
+ static int f_setown_ex(struct file *filp, unsigned long arg)
+ {
+- struct f_owner_ex * __user owner_p = (void * __user)arg;
++ struct f_owner_ex __user *owner_p = (void __user *)arg;
+ struct f_owner_ex owner;
+ struct pid *pid;
+ int type;
+@@ -306,7 +306,7 @@ static int f_setown_ex(struct file *filp
+
+ static int f_getown_ex(struct file *filp, unsigned long arg)
+ {
+- struct f_owner_ex * __user owner_p = (void * __user)arg;
++ struct f_owner_ex __user *owner_p = (void __user *)arg;
+ struct f_owner_ex owner;
+ int ret = 0;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/fifo.c linux-3.4-pax/fs/fifo.c
+--- linux-3.4/fs/fifo.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/fs/fifo.c 2012-05-21 12:10:11.124048975 +0200
+@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
+ */
+ filp->f_op = &read_pipefifo_fops;
+ pipe->r_counter++;
+- if (pipe->readers++ == 0)
++ if (atomic_inc_return(&pipe->readers) == 1)
+ wake_up_partner(inode);
+
+- if (!pipe->writers) {
++ if (!atomic_read(&pipe->writers)) {
+ if ((filp->f_flags & O_NONBLOCK)) {
+ /* suppress POLLHUP until we have
+ * seen a writer */
+@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
+ * errno=ENXIO when there is no process reading the FIFO.
+ */
+ ret = -ENXIO;
+- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
++ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
+ goto err;
+
+ filp->f_op = &write_pipefifo_fops;
+ pipe->w_counter++;
+- if (!pipe->writers++)
++ if (atomic_inc_return(&pipe->writers) == 1)
+ wake_up_partner(inode);
+
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ wait_for_partner(inode, &pipe->r_counter);
+ if (signal_pending(current))
+ goto err_wr;
+@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
+ */
+ filp->f_op = &rdwr_pipefifo_fops;
+
+- pipe->readers++;
+- pipe->writers++;
++ atomic_inc(&pipe->readers);
++ atomic_inc(&pipe->writers);
+ pipe->r_counter++;
+ pipe->w_counter++;
+- if (pipe->readers == 1 || pipe->writers == 1)
++ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
+ wake_up_partner(inode);
+ break;
+
+@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
+ return 0;
+
+ err_rd:
+- if (!--pipe->readers)
++ if (atomic_dec_and_test(&pipe->readers))
+ wake_up_interruptible(&pipe->wait);
+ ret = -ERESTARTSYS;
+ goto err;
+
+ err_wr:
+- if (!--pipe->writers)
++ if (atomic_dec_and_test(&pipe->writers))
+ wake_up_interruptible(&pipe->wait);
+ ret = -ERESTARTSYS;
+ goto err;
+
+ err:
+- if (!pipe->readers && !pipe->writers)
++ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
+ free_pipe_info(inode);
+
+ err_nocleanup:
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/fscache/cookie.c linux-3.4-pax/fs/fscache/cookie.c
+--- linux-3.4/fs/fscache/cookie.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/fs/fscache/cookie.c 2012-05-21 12:10:11.124048975 +0200
+@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
+ parent ? (char *) parent->def->name : "<no-parent>",
+ def->name, netfs_data);
+
+- fscache_stat(&fscache_n_acquires);
++ fscache_stat_unchecked(&fscache_n_acquires);
+
+ /* if there's no parent cookie, then we don't create one here either */
+ if (!parent) {
+- fscache_stat(&fscache_n_acquires_null);
++ fscache_stat_unchecked(&fscache_n_acquires_null);
+ _leave(" [no parent]");
+ return NULL;
+ }
+@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
+ /* allocate and initialise a cookie */
+ cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
+ if (!cookie) {
+- fscache_stat(&fscache_n_acquires_oom);
++ fscache_stat_unchecked(&fscache_n_acquires_oom);
+ _leave(" [ENOMEM]");
+ return NULL;
+ }
+@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
+
+ switch (cookie->def->type) {
+ case FSCACHE_COOKIE_TYPE_INDEX:
+- fscache_stat(&fscache_n_cookie_index);
++ fscache_stat_unchecked(&fscache_n_cookie_index);
+ break;
+ case FSCACHE_COOKIE_TYPE_DATAFILE:
+- fscache_stat(&fscache_n_cookie_data);
++ fscache_stat_unchecked(&fscache_n_cookie_data);
+ break;
+ default:
+- fscache_stat(&fscache_n_cookie_special);
++ fscache_stat_unchecked(&fscache_n_cookie_special);
+ break;
+ }
+
+@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
+ if (fscache_acquire_non_index_cookie(cookie) < 0) {
+ atomic_dec(&parent->n_children);
+ __fscache_cookie_put(cookie);
+- fscache_stat(&fscache_n_acquires_nobufs);
++ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
+ _leave(" = NULL");
+ return NULL;
+ }
+ }
+
+- fscache_stat(&fscache_n_acquires_ok);
++ fscache_stat_unchecked(&fscache_n_acquires_ok);
+ _leave(" = %p", cookie);
+ return cookie;
+ }
+@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
+ cache = fscache_select_cache_for_object(cookie->parent);
+ if (!cache) {
+ up_read(&fscache_addremove_sem);
+- fscache_stat(&fscache_n_acquires_no_cache);
++ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
+ _leave(" = -ENOMEDIUM [no cache]");
+ return -ENOMEDIUM;
+ }
+@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
+ object = cache->ops->alloc_object(cache, cookie);
+ fscache_stat_d(&fscache_n_cop_alloc_object);
+ if (IS_ERR(object)) {
+- fscache_stat(&fscache_n_object_no_alloc);
++ fscache_stat_unchecked(&fscache_n_object_no_alloc);
+ ret = PTR_ERR(object);
+ goto error;
+ }
+
+- fscache_stat(&fscache_n_object_alloc);
++ fscache_stat_unchecked(&fscache_n_object_alloc);
+
+ object->debug_id = atomic_inc_return(&fscache_object_debug_id);
+
+@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
+ struct fscache_object *object;
+ struct hlist_node *_p;
+
+- fscache_stat(&fscache_n_updates);
++ fscache_stat_unchecked(&fscache_n_updates);
+
+ if (!cookie) {
+- fscache_stat(&fscache_n_updates_null);
++ fscache_stat_unchecked(&fscache_n_updates_null);
+ _leave(" [no cookie]");
+ return;
+ }
+@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
+ struct fscache_object *object;
+ unsigned long event;
+
+- fscache_stat(&fscache_n_relinquishes);
++ fscache_stat_unchecked(&fscache_n_relinquishes);
+ if (retire)
+- fscache_stat(&fscache_n_relinquishes_retire);
++ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
+
+ if (!cookie) {
+- fscache_stat(&fscache_n_relinquishes_null);
++ fscache_stat_unchecked(&fscache_n_relinquishes_null);
+ _leave(" [no cookie]");
+ return;
+ }
+@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
+
+ /* wait for the cookie to finish being instantiated (or to fail) */
+ if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
+- fscache_stat(&fscache_n_relinquishes_waitcrt);
++ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
+ wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
+ fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/fscache/internal.h linux-3.4-pax/fs/fscache/internal.h
+--- linux-3.4/fs/fscache/internal.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/fs/fscache/internal.h 2012-05-21 12:10:11.128048975 +0200
+@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
+ extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
+ extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
+
+-extern atomic_t fscache_n_op_pend;
+-extern atomic_t fscache_n_op_run;
+-extern atomic_t fscache_n_op_enqueue;
+-extern atomic_t fscache_n_op_deferred_release;
+-extern atomic_t fscache_n_op_release;
+-extern atomic_t fscache_n_op_gc;
+-extern atomic_t fscache_n_op_cancelled;
+-extern atomic_t fscache_n_op_rejected;
+-
+-extern atomic_t fscache_n_attr_changed;
+-extern atomic_t fscache_n_attr_changed_ok;
+-extern atomic_t fscache_n_attr_changed_nobufs;
+-extern atomic_t fscache_n_attr_changed_nomem;
+-extern atomic_t fscache_n_attr_changed_calls;
+-
+-extern atomic_t fscache_n_allocs;
+-extern atomic_t fscache_n_allocs_ok;
+-extern atomic_t fscache_n_allocs_wait;
+-extern atomic_t fscache_n_allocs_nobufs;
+-extern atomic_t fscache_n_allocs_intr;
+-extern atomic_t fscache_n_allocs_object_dead;
+-extern atomic_t fscache_n_alloc_ops;
+-extern atomic_t fscache_n_alloc_op_waits;
+-
+-extern atomic_t fscache_n_retrievals;
+-extern atomic_t fscache_n_retrievals_ok;
+-extern atomic_t fscache_n_retrievals_wait;
+-extern atomic_t fscache_n_retrievals_nodata;
+-extern atomic_t fscache_n_retrievals_nobufs;
+-extern atomic_t fscache_n_retrievals_intr;
+-extern atomic_t fscache_n_retrievals_nomem;
+-extern atomic_t fscache_n_retrievals_object_dead;
+-extern atomic_t fscache_n_retrieval_ops;
+-extern atomic_t fscache_n_retrieval_op_waits;
+-
+-extern atomic_t fscache_n_stores;
+-extern atomic_t fscache_n_stores_ok;
+-extern atomic_t fscache_n_stores_again;
+-extern atomic_t fscache_n_stores_nobufs;
+-extern atomic_t fscache_n_stores_oom;
+-extern atomic_t fscache_n_store_ops;
+-extern atomic_t fscache_n_store_calls;
+-extern atomic_t fscache_n_store_pages;
+-extern atomic_t fscache_n_store_radix_deletes;
+-extern atomic_t fscache_n_store_pages_over_limit;
+-
+-extern atomic_t fscache_n_store_vmscan_not_storing;
+-extern atomic_t fscache_n_store_vmscan_gone;
+-extern atomic_t fscache_n_store_vmscan_busy;
+-extern atomic_t fscache_n_store_vmscan_cancelled;
+-
+-extern atomic_t fscache_n_marks;
+-extern atomic_t fscache_n_uncaches;
+-
+-extern atomic_t fscache_n_acquires;
+-extern atomic_t fscache_n_acquires_null;
+-extern atomic_t fscache_n_acquires_no_cache;
+-extern atomic_t fscache_n_acquires_ok;
+-extern atomic_t fscache_n_acquires_nobufs;
+-extern atomic_t fscache_n_acquires_oom;
+-
+-extern atomic_t fscache_n_updates;
+-extern atomic_t fscache_n_updates_null;
+-extern atomic_t fscache_n_updates_run;
+-
+-extern atomic_t fscache_n_relinquishes;
+-extern atomic_t fscache_n_relinquishes_null;
+-extern atomic_t fscache_n_relinquishes_waitcrt;
+-extern atomic_t fscache_n_relinquishes_retire;
+-
+-extern atomic_t fscache_n_cookie_index;
+-extern atomic_t fscache_n_cookie_data;
+-extern atomic_t fscache_n_cookie_special;
+-
+-extern atomic_t fscache_n_object_alloc;
+-extern atomic_t fscache_n_object_no_alloc;
+-extern atomic_t fscache_n_object_lookups;
+-extern atomic_t fscache_n_object_lookups_negative;
+-extern atomic_t fscache_n_object_lookups_positive;
+-extern atomic_t fscache_n_object_lookups_timed_out;
+-extern atomic_t fscache_n_object_created;
+-extern atomic_t fscache_n_object_avail;
+-extern atomic_t fscache_n_object_dead;
+-
+-extern atomic_t fscache_n_checkaux_none;
+-extern atomic_t fscache_n_checkaux_okay;
+-extern atomic_t fscache_n_checkaux_update;
+-extern atomic_t fscache_n_checkaux_obsolete;
++extern atomic_unchecked_t fscache_n_op_pend;
++extern atomic_unchecked_t fscache_n_op_run;
++extern atomic_unchecked_t fscache_n_op_enqueue;
++extern atomic_unchecked_t fscache_n_op_deferred_release;
++extern atomic_unchecked_t fscache_n_op_release;
++extern atomic_unchecked_t fscache_n_op_gc;
++extern atomic_unchecked_t fscache_n_op_cancelled;
++extern atomic_unchecked_t fscache_n_op_rejected;
++
++extern atomic_unchecked_t fscache_n_attr_changed;
++extern atomic_unchecked_t fscache_n_attr_changed_ok;
++extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
++extern atomic_unchecked_t fscache_n_attr_changed_nomem;
++extern atomic_unchecked_t fscache_n_attr_changed_calls;
++
++extern atomic_unchecked_t fscache_n_allocs;
++extern atomic_unchecked_t fscache_n_allocs_ok;
++extern atomic_unchecked_t fscache_n_allocs_wait;
++extern atomic_unchecked_t fscache_n_allocs_nobufs;
++extern atomic_unchecked_t fscache_n_allocs_intr;
++extern atomic_unchecked_t fscache_n_allocs_object_dead;
++extern atomic_unchecked_t fscache_n_alloc_ops;
++extern atomic_unchecked_t fscache_n_alloc_op_waits;
++
++extern atomic_unchecked_t fscache_n_retrievals;
++extern atomic_unchecked_t fscache_n_retrievals_ok;
++extern atomic_unchecked_t fscache_n_retrievals_wait;
++extern atomic_unchecked_t fscache_n_retrievals_nodata;
++extern atomic_unchecked_t fscache_n_retrievals_nobufs;
++extern atomic_unchecked_t fscache_n_retrievals_intr;
++extern atomic_unchecked_t fscache_n_retrievals_nomem;
++extern atomic_unchecked_t fscache_n_retrievals_object_dead;
++extern atomic_unchecked_t fscache_n_retrieval_ops;
++extern atomic_unchecked_t fscache_n_retrieval_op_waits;
++
++extern atomic_unchecked_t fscache_n_stores;
++extern atomic_unchecked_t fscache_n_stores_ok;
++extern atomic_unchecked_t fscache_n_stores_again;
++extern atomic_unchecked_t fscache_n_stores_nobufs;
++extern atomic_unchecked_t fscache_n_stores_oom;
++extern atomic_unchecked_t fscache_n_store_ops;
++extern atomic_unchecked_t fscache_n_store_calls;
++extern atomic_unchecked_t fscache_n_store_pages;
++extern atomic_unchecked_t fscache_n_store_radix_deletes;
++extern atomic_unchecked_t fscache_n_store_pages_over_limit;
++
++extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
++extern atomic_unchecked_t fscache_n_store_vmscan_gone;
++extern atomic_unchecked_t fscache_n_store_vmscan_busy;
++extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
++
++extern atomic_unchecked_t fscache_n_marks;
++extern atomic_unchecked_t fscache_n_uncaches;
++
++extern atomic_unchecked_t fscache_n_acquires;
++extern atomic_unchecked_t fscache_n_acquires_null;
++extern atomic_unchecked_t fscache_n_acquires_no_cache;
++extern atomic_unchecked_t fscache_n_acquires_ok;
++extern atomic_unchecked_t fscache_n_acquires_nobufs;
++extern atomic_unchecked_t fscache_n_acquires_oom;
++
++extern atomic_unchecked_t fscache_n_updates;
++extern atomic_unchecked_t fscache_n_updates_null;
++extern atomic_unchecked_t fscache_n_updates_run;
++
++extern atomic_unchecked_t fscache_n_relinquishes;
++extern atomic_unchecked_t fscache_n_relinquishes_null;
++extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
++extern atomic_unchecked_t fscache_n_relinquishes_retire;
++
++extern atomic_unchecked_t fscache_n_cookie_index;
++extern atomic_unchecked_t fscache_n_cookie_data;
++extern atomic_unchecked_t fscache_n_cookie_special;
++
++extern atomic_unchecked_t fscache_n_object_alloc;
++extern atomic_unchecked_t fscache_n_object_no_alloc;
++extern atomic_unchecked_t fscache_n_object_lookups;
++extern atomic_unchecked_t fscache_n_object_lookups_negative;
++extern atomic_unchecked_t fscache_n_object_lookups_positive;
++extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
++extern atomic_unchecked_t fscache_n_object_created;
++extern atomic_unchecked_t fscache_n_object_avail;
++extern atomic_unchecked_t fscache_n_object_dead;
++
++extern atomic_unchecked_t fscache_n_checkaux_none;
++extern atomic_unchecked_t fscache_n_checkaux_okay;
++extern atomic_unchecked_t fscache_n_checkaux_update;
++extern atomic_unchecked_t fscache_n_checkaux_obsolete;
+
+ extern atomic_t fscache_n_cop_alloc_object;
+ extern atomic_t fscache_n_cop_lookup_object;
+@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
+ atomic_inc(stat);
+ }
+
++static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
++{
++ atomic_inc_unchecked(stat);
++}
++
+ static inline void fscache_stat_d(atomic_t *stat)
+ {
+ atomic_dec(stat);
+@@ -267,6 +272,7 @@ extern const struct file_operations fsca
+
+ #define __fscache_stat(stat) (NULL)
+ #define fscache_stat(stat) do {} while (0)
++#define fscache_stat_unchecked(stat) do {} while (0)
+ #define fscache_stat_d(stat) do {} while (0)
+ #endif
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/fscache/object.c linux-3.4-pax/fs/fscache/object.c
+--- linux-3.4/fs/fscache/object.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/fs/fscache/object.c 2012-05-21 12:10:11.128048975 +0200
+@@ -128,7 +128,7 @@ static void fscache_object_state_machine
+ /* update the object metadata on disk */
+ case FSCACHE_OBJECT_UPDATING:
+ clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
+- fscache_stat(&fscache_n_updates_run);
++ fscache_stat_unchecked(&fscache_n_updates_run);
+ fscache_stat(&fscache_n_cop_update_object);
+ object->cache->ops->update_object(object);
+ fscache_stat_d(&fscache_n_cop_update_object);
+@@ -217,7 +217,7 @@ static void fscache_object_state_machine
+ spin_lock(&object->lock);
+ object->state = FSCACHE_OBJECT_DEAD;
+ spin_unlock(&object->lock);
+- fscache_stat(&fscache_n_object_dead);
++ fscache_stat_unchecked(&fscache_n_object_dead);
+ goto terminal_transit;
+
+ /* handle the parent cache of this object being withdrawn from
+@@ -232,7 +232,7 @@ static void fscache_object_state_machine
+ spin_lock(&object->lock);
+ object->state = FSCACHE_OBJECT_DEAD;
+ spin_unlock(&object->lock);
+- fscache_stat(&fscache_n_object_dead);
++ fscache_stat_unchecked(&fscache_n_object_dead);
+ goto terminal_transit;
+
+ /* complain about the object being woken up once it is
+@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
+ parent->cookie->def->name, cookie->def->name,
+ object->cache->tag->name);
+
+- fscache_stat(&fscache_n_object_lookups);
++ fscache_stat_unchecked(&fscache_n_object_lookups);
+ fscache_stat(&fscache_n_cop_lookup_object);
+ ret = object->cache->ops->lookup_object(object);
+ fscache_stat_d(&fscache_n_cop_lookup_object);
+@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
+ if (ret == -ETIMEDOUT) {
+ /* probably stuck behind another object, so move this one to
+ * the back of the queue */
+- fscache_stat(&fscache_n_object_lookups_timed_out);
++ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
+ set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+ }
+
+@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
+
+ spin_lock(&object->lock);
+ if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
+- fscache_stat(&fscache_n_object_lookups_negative);
++ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
+
+ /* transit here to allow write requests to begin stacking up
+ * and read requests to begin returning ENODATA */
+@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
+ * result, in which case there may be data available */
+ spin_lock(&object->lock);
+ if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
+- fscache_stat(&fscache_n_object_lookups_positive);
++ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
+
+ clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+
+@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
+ set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+ } else {
+ ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
+- fscache_stat(&fscache_n_object_created);
++ fscache_stat_unchecked(&fscache_n_object_created);
+
+ object->state = FSCACHE_OBJECT_AVAILABLE;
+ spin_unlock(&object->lock);
+@@ -602,7 +602,7 @@ static void fscache_object_available(str
+ fscache_enqueue_dependents(object);
+
+ fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
+- fscache_stat(&fscache_n_object_avail);
++ fscache_stat_unchecked(&fscache_n_object_avail);
+
+ _leave("");
+ }
+@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
+ enum fscache_checkaux result;
+
+ if (!object->cookie->def->check_aux) {
+- fscache_stat(&fscache_n_checkaux_none);
++ fscache_stat_unchecked(&fscache_n_checkaux_none);
+ return FSCACHE_CHECKAUX_OKAY;
+ }
+
+@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
+ switch (result) {
+ /* entry okay as is */
+ case FSCACHE_CHECKAUX_OKAY:
+- fscache_stat(&fscache_n_checkaux_okay);
++ fscache_stat_unchecked(&fscache_n_checkaux_okay);
+ break;
+
+ /* entry requires update */
+ case FSCACHE_CHECKAUX_NEEDS_UPDATE:
+- fscache_stat(&fscache_n_checkaux_update);
++ fscache_stat_unchecked(&fscache_n_checkaux_update);
+ break;
+
+ /* entry requires deletion */
+ case FSCACHE_CHECKAUX_OBSOLETE:
+- fscache_stat(&fscache_n_checkaux_obsolete);
++ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
+ break;
+
+ default:
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/fscache/operation.c linux-3.4-pax/fs/fscache/operation.c
+--- linux-3.4/fs/fscache/operation.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/fs/fscache/operation.c 2012-05-21 12:10:11.132048976 +0200
+@@ -17,7 +17,7 @@
+ #include <linux/slab.h>
+ #include "internal.h"
+
+-atomic_t fscache_op_debug_id;
++atomic_unchecked_t fscache_op_debug_id;
+ EXPORT_SYMBOL(fscache_op_debug_id);
+
+ /**
+@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
+ ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
+ ASSERTCMP(atomic_read(&op->usage), >, 0);
+
+- fscache_stat(&fscache_n_op_enqueue);
++ fscache_stat_unchecked(&fscache_n_op_enqueue);
+ switch (op->flags & FSCACHE_OP_TYPE) {
+ case FSCACHE_OP_ASYNC:
+ _debug("queue async");
+@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
+ wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
+ if (op->processor)
+ fscache_enqueue_operation(op);
+- fscache_stat(&fscache_n_op_run);
++ fscache_stat_unchecked(&fscache_n_op_run);
+ }
+
+ /*
+@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
+ if (object->n_ops > 1) {
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+- fscache_stat(&fscache_n_op_pend);
++ fscache_stat_unchecked(&fscache_n_op_pend);
+ } else if (!list_empty(&object->pending_ops)) {
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+- fscache_stat(&fscache_n_op_pend);
++ fscache_stat_unchecked(&fscache_n_op_pend);
+ fscache_start_operations(object);
+ } else {
+ ASSERTCMP(object->n_in_progress, ==, 0);
+@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
+ object->n_exclusive++; /* reads and writes must wait */
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+- fscache_stat(&fscache_n_op_pend);
++ fscache_stat_unchecked(&fscache_n_op_pend);
+ ret = 0;
+ } else {
+ /* not allowed to submit ops in any other state */
+@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
+ if (object->n_exclusive > 0) {
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+- fscache_stat(&fscache_n_op_pend);
++ fscache_stat_unchecked(&fscache_n_op_pend);
+ } else if (!list_empty(&object->pending_ops)) {
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+- fscache_stat(&fscache_n_op_pend);
++ fscache_stat_unchecked(&fscache_n_op_pend);
+ fscache_start_operations(object);
+ } else {
+ ASSERTCMP(object->n_exclusive, ==, 0);
+@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
+ object->n_ops++;
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+- fscache_stat(&fscache_n_op_pend);
++ fscache_stat_unchecked(&fscache_n_op_pend);
+ ret = 0;
+ } else if (object->state == FSCACHE_OBJECT_DYING ||
+ object->state == FSCACHE_OBJECT_LC_DYING ||
+ object->state == FSCACHE_OBJECT_WITHDRAWING) {
+- fscache_stat(&fscache_n_op_rejected);
++ fscache_stat_unchecked(&fscache_n_op_rejected);
+ ret = -ENOBUFS;
+ } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
+ fscache_report_unexpected_submission(object, op, ostate);
+@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
+
+ ret = -EBUSY;
+ if (!list_empty(&op->pend_link)) {
+- fscache_stat(&fscache_n_op_cancelled);
++ fscache_stat_unchecked(&fscache_n_op_cancelled);
+ list_del_init(&op->pend_link);
+ object->n_ops--;
+ if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
+@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
+ if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
+ BUG();
+
+- fscache_stat(&fscache_n_op_release);
++ fscache_stat_unchecked(&fscache_n_op_release);
+
+ if (op->release) {
+ op->release(op);
+@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
+ * lock, and defer it otherwise */
+ if (!spin_trylock(&object->lock)) {
+ _debug("defer put");
+- fscache_stat(&fscache_n_op_deferred_release);
++ fscache_stat_unchecked(&fscache_n_op_deferred_release);
+
+ cache = object->cache;
+ spin_lock(&cache->op_gc_list_lock);
+@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
+
+ _debug("GC DEFERRED REL OBJ%x OP%x",
+ object->debug_id, op->debug_id);
+- fscache_stat(&fscache_n_op_gc);
++ fscache_stat_unchecked(&fscache_n_op_gc);
+
+ ASSERTCMP(atomic_read(&op->usage), ==, 0);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/fscache/page.c linux-3.4-pax/fs/fscache/page.c
+--- linux-3.4/fs/fscache/page.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/fs/fscache/page.c 2012-05-21 12:10:11.136048976 +0200
+@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
+ val = radix_tree_lookup(&cookie->stores, page->index);
+ if (!val) {
+ rcu_read_unlock();
+- fscache_stat(&fscache_n_store_vmscan_not_storing);
++ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
+ __fscache_uncache_page(cookie, page);
+ return true;
+ }
+@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
+ spin_unlock(&cookie->stores_lock);
+
+ if (xpage) {
+- fscache_stat(&fscache_n_store_vmscan_cancelled);
+- fscache_stat(&fscache_n_store_radix_deletes);
++ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
++ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
+ ASSERTCMP(xpage, ==, page);
+ } else {
+- fscache_stat(&fscache_n_store_vmscan_gone);
++ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
+ }
+
+ wake_up_bit(&cookie->flags, 0);
+@@ -107,7 +107,7 @@ page_busy:
+ /* we might want to wait here, but that could deadlock the allocator as
+ * the work threads writing to the cache may all end up sleeping
+ * on memory allocation */
+- fscache_stat(&fscache_n_store_vmscan_busy);
++ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
+ return false;
+ }
+ EXPORT_SYMBOL(__fscache_maybe_release_page);
+@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
+ FSCACHE_COOKIE_STORING_TAG);
+ if (!radix_tree_tag_get(&cookie->stores, page->index,
+ FSCACHE_COOKIE_PENDING_TAG)) {
+- fscache_stat(&fscache_n_store_radix_deletes);
++ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
+ xpage = radix_tree_delete(&cookie->stores, page->index);
+ }
+ spin_unlock(&cookie->stores_lock);
+@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
+
+ _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
+
+- fscache_stat(&fscache_n_attr_changed_calls);
++ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
+
+ if (fscache_object_is_active(object)) {
+ fscache_stat(&fscache_n_cop_attr_changed);
+@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
+
+ ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+
+- fscache_stat(&fscache_n_attr_changed);
++ fscache_stat_unchecked(&fscache_n_attr_changed);
+
+ op = kzalloc(sizeof(*op), GFP_KERNEL);
+ if (!op) {
+- fscache_stat(&fscache_n_attr_changed_nomem);
++ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
+ if (fscache_submit_exclusive_op(object, op) < 0)
+ goto nobufs;
+ spin_unlock(&cookie->lock);
+- fscache_stat(&fscache_n_attr_changed_ok);
++ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
+ fscache_put_operation(op);
+ _leave(" = 0");
+ return 0;
+@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
+ nobufs:
+ spin_unlock(&cookie->lock);
+ kfree(op);
+- fscache_stat(&fscache_n_attr_changed_nobufs);
++ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
+ _leave(" = %d", -ENOBUFS);
+ return -ENOBUFS;
+ }
+@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
+ /* allocate a retrieval operation and attempt to submit it */
+ op = kzalloc(sizeof(*op), GFP_NOIO);
+ if (!op) {
+- fscache_stat(&fscache_n_retrievals_nomem);
++ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
+ return NULL;
+ }
+
+@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
+ return 0;
+ }
+
+- fscache_stat(&fscache_n_retrievals_wait);
++ fscache_stat_unchecked(&fscache_n_retrievals_wait);
+
+ jif = jiffies;
+ if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
+ fscache_wait_bit_interruptible,
+ TASK_INTERRUPTIBLE) != 0) {
+- fscache_stat(&fscache_n_retrievals_intr);
++ fscache_stat_unchecked(&fscache_n_retrievals_intr);
+ _leave(" = -ERESTARTSYS");
+ return -ERESTARTSYS;
+ }
+@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
+ */
+ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
+ struct fscache_retrieval *op,
+- atomic_t *stat_op_waits,
+- atomic_t *stat_object_dead)
++ atomic_unchecked_t *stat_op_waits,
++ atomic_unchecked_t *stat_object_dead)
+ {
+ int ret;
+
+@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
+ goto check_if_dead;
+
+ _debug(">>> WT");
+- fscache_stat(stat_op_waits);
++ fscache_stat_unchecked(stat_op_waits);
+ if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
+ fscache_wait_bit_interruptible,
+ TASK_INTERRUPTIBLE) < 0) {
+@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
+
+ check_if_dead:
+ if (unlikely(fscache_object_is_dead(object))) {
+- fscache_stat(stat_object_dead);
++ fscache_stat_unchecked(stat_object_dead);
+ return -ENOBUFS;
+ }
+ return 0;
+@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
+
+ _enter("%p,%p,,,", cookie, page);
+
+- fscache_stat(&fscache_n_retrievals);
++ fscache_stat_unchecked(&fscache_n_retrievals);
+
+ if (hlist_empty(&cookie->backing_objects))
+ goto nobufs;
+@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
+ goto nobufs_unlock;
+ spin_unlock(&cookie->lock);
+
+- fscache_stat(&fscache_n_retrieval_ops);
++ fscache_stat_unchecked(&fscache_n_retrieval_ops);
+
+ /* pin the netfs read context in case we need to do the actual netfs
+ * read because we've encountered a cache read failure */
+@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
+
+ error:
+ if (ret == -ENOMEM)
+- fscache_stat(&fscache_n_retrievals_nomem);
++ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
+ else if (ret == -ERESTARTSYS)
+- fscache_stat(&fscache_n_retrievals_intr);
++ fscache_stat_unchecked(&fscache_n_retrievals_intr);
+ else if (ret == -ENODATA)
+- fscache_stat(&fscache_n_retrievals_nodata);
++ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
+ else if (ret < 0)
+- fscache_stat(&fscache_n_retrievals_nobufs);
++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+ else
+- fscache_stat(&fscache_n_retrievals_ok);
++ fscache_stat_unchecked(&fscache_n_retrievals_ok);
+
+ fscache_put_retrieval(op);
+ _leave(" = %d", ret);
+@@ -429,7 +429,7 @@ nobufs_unlock:
+ spin_unlock(&cookie->lock);
+ kfree(op);
+ nobufs:
+- fscache_stat(&fscache_n_retrievals_nobufs);
++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+ }
+@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
+
+ _enter("%p,,%d,,,", cookie, *nr_pages);
+
+- fscache_stat(&fscache_n_retrievals);
++ fscache_stat_unchecked(&fscache_n_retrievals);
+
+ if (hlist_empty(&cookie->backing_objects))
+ goto nobufs;
+@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
+ goto nobufs_unlock;
+ spin_unlock(&cookie->lock);
+
+- fscache_stat(&fscache_n_retrieval_ops);
++ fscache_stat_unchecked(&fscache_n_retrieval_ops);
+
+ /* pin the netfs read context in case we need to do the actual netfs
+ * read because we've encountered a cache read failure */
+@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
+
+ error:
+ if (ret == -ENOMEM)
+- fscache_stat(&fscache_n_retrievals_nomem);
++ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
+ else if (ret == -ERESTARTSYS)
+- fscache_stat(&fscache_n_retrievals_intr);
++ fscache_stat_unchecked(&fscache_n_retrievals_intr);
+ else if (ret == -ENODATA)
+- fscache_stat(&fscache_n_retrievals_nodata);
++ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
+ else if (ret < 0)
+- fscache_stat(&fscache_n_retrievals_nobufs);
++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+ else
+- fscache_stat(&fscache_n_retrievals_ok);
++ fscache_stat_unchecked(&fscache_n_retrievals_ok);
+
+ fscache_put_retrieval(op);
+ _leave(" = %d", ret);
+@@ -545,7 +545,7 @@ nobufs_unlock:
+ spin_unlock(&cookie->lock);
+ kfree(op);
+ nobufs:
+- fscache_stat(&fscache_n_retrievals_nobufs);
++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+ }
+@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
+
+ _enter("%p,%p,,,", cookie, page);
+
+- fscache_stat(&fscache_n_allocs);
++ fscache_stat_unchecked(&fscache_n_allocs);
+
+ if (hlist_empty(&cookie->backing_objects))
+ goto nobufs;
+@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
+ goto nobufs_unlock;
+ spin_unlock(&cookie->lock);
+
+- fscache_stat(&fscache_n_alloc_ops);
++ fscache_stat_unchecked(&fscache_n_alloc_ops);
+
+ ret = fscache_wait_for_retrieval_activation(
+ object, op,
+@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
+
+ error:
+ if (ret == -ERESTARTSYS)
+- fscache_stat(&fscache_n_allocs_intr);
++ fscache_stat_unchecked(&fscache_n_allocs_intr);
+ else if (ret < 0)
+- fscache_stat(&fscache_n_allocs_nobufs);
++ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
+ else
+- fscache_stat(&fscache_n_allocs_ok);
++ fscache_stat_unchecked(&fscache_n_allocs_ok);
+
+ fscache_put_retrieval(op);
+ _leave(" = %d", ret);
+@@ -625,7 +625,7 @@ nobufs_unlock:
+ spin_unlock(&cookie->lock);
+ kfree(op);
+ nobufs:
+- fscache_stat(&fscache_n_allocs_nobufs);
++ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+ }
+@@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
+
+ spin_lock(&cookie->stores_lock);
+
+- fscache_stat(&fscache_n_store_calls);
++ fscache_stat_unchecked(&fscache_n_store_calls);
+
+ /* find a page to store */
+ page = NULL;
+@@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
+ page = results[0];
+ _debug("gang %d [%lx]", n, page->index);
+ if (page->index > op->store_limit) {
+- fscache_stat(&fscache_n_store_pages_over_limit);
++ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
+ goto superseded;
+ }
+
+@@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
+ spin_unlock(&cookie->stores_lock);
+ spin_unlock(&object->lock);
+
+- fscache_stat(&fscache_n_store_pages);
++ fscache_stat_unchecked(&fscache_n_store_pages);
+ fscache_stat(&fscache_n_cop_write_page);
+ ret = object->cache->ops->write_page(op, page);
+ fscache_stat_d(&fscache_n_cop_write_page);
+@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
+ ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+ ASSERT(PageFsCache(page));
+
+- fscache_stat(&fscache_n_stores);
++ fscache_stat_unchecked(&fscache_n_stores);
+
+ op = kzalloc(sizeof(*op), GFP_NOIO);
+ if (!op)
+@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
+ spin_unlock(&cookie->stores_lock);
+ spin_unlock(&object->lock);
+
+- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
++ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
+ op->store_limit = object->store_limit;
+
+ if (fscache_submit_op(object, &op->op) < 0)
+@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
+
+ spin_unlock(&cookie->lock);
+ radix_tree_preload_end();
+- fscache_stat(&fscache_n_store_ops);
+- fscache_stat(&fscache_n_stores_ok);
++ fscache_stat_unchecked(&fscache_n_store_ops);
++ fscache_stat_unchecked(&fscache_n_stores_ok);
+
+ /* the work queue now carries its own ref on the object */
+ fscache_put_operation(&op->op);
+@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
+ return 0;
+
+ already_queued:
+- fscache_stat(&fscache_n_stores_again);
++ fscache_stat_unchecked(&fscache_n_stores_again);
+ already_pending:
+ spin_unlock(&cookie->stores_lock);
+ spin_unlock(&object->lock);
+ spin_unlock(&cookie->lock);
+ radix_tree_preload_end();
+ kfree(op);
+- fscache_stat(&fscache_n_stores_ok);
++ fscache_stat_unchecked(&fscache_n_stores_ok);
+ _leave(" = 0");
+ return 0;
+
+@@ -851,14 +851,14 @@ nobufs:
+ spin_unlock(&cookie->lock);
+ radix_tree_preload_end();
+ kfree(op);
+- fscache_stat(&fscache_n_stores_nobufs);
++ fscache_stat_unchecked(&fscache_n_stores_nobufs);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+
+ nomem_free:
+ kfree(op);
+ nomem:
+- fscache_stat(&fscache_n_stores_oom);
++ fscache_stat_unchecked(&fscache_n_stores_oom);
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
+ ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+ ASSERTCMP(page, !=, NULL);
+
+- fscache_stat(&fscache_n_uncaches);
++ fscache_stat_unchecked(&fscache_n_uncaches);
+
+ /* cache withdrawal may beat us to it */
+ if (!PageFsCache(page))
+@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
+ unsigned long loop;
+
+ #ifdef CONFIG_FSCACHE_STATS
+- atomic_add(pagevec->nr, &fscache_n_marks);
++ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
+ #endif
+
+ for (loop = 0; loop < pagevec->nr; loop++) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/fscache/stats.c linux-3.4-pax/fs/fscache/stats.c
+--- linux-3.4/fs/fscache/stats.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/fs/fscache/stats.c 2012-05-21 12:10:11.136048976 +0200
+@@ -18,95 +18,95 @@
+ /*
+ * operation counters
+ */
+-atomic_t fscache_n_op_pend;
+-atomic_t fscache_n_op_run;
+-atomic_t fscache_n_op_enqueue;
+-atomic_t fscache_n_op_requeue;
+-atomic_t fscache_n_op_deferred_release;
+-atomic_t fscache_n_op_release;
+-atomic_t fscache_n_op_gc;
+-atomic_t fscache_n_op_cancelled;
+-atomic_t fscache_n_op_rejected;
+-
+-atomic_t fscache_n_attr_changed;
+-atomic_t fscache_n_attr_changed_ok;
+-atomic_t fscache_n_attr_changed_nobufs;
+-atomic_t fscache_n_attr_changed_nomem;
+-atomic_t fscache_n_attr_changed_calls;
+-
+-atomic_t fscache_n_allocs;
+-atomic_t fscache_n_allocs_ok;
+-atomic_t fscache_n_allocs_wait;
+-atomic_t fscache_n_allocs_nobufs;
+-atomic_t fscache_n_allocs_intr;
+-atomic_t fscache_n_allocs_object_dead;
+-atomic_t fscache_n_alloc_ops;
+-atomic_t fscache_n_alloc_op_waits;
+-
+-atomic_t fscache_n_retrievals;
+-atomic_t fscache_n_retrievals_ok;
+-atomic_t fscache_n_retrievals_wait;
+-atomic_t fscache_n_retrievals_nodata;
+-atomic_t fscache_n_retrievals_nobufs;
+-atomic_t fscache_n_retrievals_intr;
+-atomic_t fscache_n_retrievals_nomem;
+-atomic_t fscache_n_retrievals_object_dead;
+-atomic_t fscache_n_retrieval_ops;
+-atomic_t fscache_n_retrieval_op_waits;
+-
+-atomic_t fscache_n_stores;
+-atomic_t fscache_n_stores_ok;
+-atomic_t fscache_n_stores_again;
+-atomic_t fscache_n_stores_nobufs;
+-atomic_t fscache_n_stores_oom;
+-atomic_t fscache_n_store_ops;
+-atomic_t fscache_n_store_calls;
+-atomic_t fscache_n_store_pages;
+-atomic_t fscache_n_store_radix_deletes;
+-atomic_t fscache_n_store_pages_over_limit;
+-
+-atomic_t fscache_n_store_vmscan_not_storing;
+-atomic_t fscache_n_store_vmscan_gone;
+-atomic_t fscache_n_store_vmscan_busy;
+-atomic_t fscache_n_store_vmscan_cancelled;
+-
+-atomic_t fscache_n_marks;
+-atomic_t fscache_n_uncaches;
+-
+-atomic_t fscache_n_acquires;
+-atomic_t fscache_n_acquires_null;
+-atomic_t fscache_n_acquires_no_cache;
+-atomic_t fscache_n_acquires_ok;
+-atomic_t fscache_n_acquires_nobufs;
+-atomic_t fscache_n_acquires_oom;
+-
+-atomic_t fscache_n_updates;
+-atomic_t fscache_n_updates_null;
+-atomic_t fscache_n_updates_run;
+-
+-atomic_t fscache_n_relinquishes;
+-atomic_t fscache_n_relinquishes_null;
+-atomic_t fscache_n_relinquishes_waitcrt;
+-atomic_t fscache_n_relinquishes_retire;
+-
+-atomic_t fscache_n_cookie_index;
+-atomic_t fscache_n_cookie_data;
+-atomic_t fscache_n_cookie_special;
+-
+-atomic_t fscache_n_object_alloc;
+-atomic_t fscache_n_object_no_alloc;
+-atomic_t fscache_n_object_lookups;
+-atomic_t fscache_n_object_lookups_negative;
+-atomic_t fscache_n_object_lookups_positive;
+-atomic_t fscache_n_object_lookups_timed_out;
+-atomic_t fscache_n_object_created;
+-atomic_t fscache_n_object_avail;
+-atomic_t fscache_n_object_dead;
+-
+-atomic_t fscache_n_checkaux_none;
+-atomic_t fscache_n_checkaux_okay;
+-atomic_t fscache_n_checkaux_update;
+-atomic_t fscache_n_checkaux_obsolete;
++atomic_unchecked_t fscache_n_op_pend;
++atomic_unchecked_t fscache_n_op_run;
++atomic_unchecked_t fscache_n_op_enqueue;
++atomic_unchecked_t fscache_n_op_requeue;
++atomic_unchecked_t fscache_n_op_deferred_release;
++atomic_unchecked_t fscache_n_op_release;
++atomic_unchecked_t fscache_n_op_gc;
++atomic_unchecked_t fscache_n_op_cancelled;
++atomic_unchecked_t fscache_n_op_rejected;
++
++atomic_unchecked_t fscache_n_attr_changed;
++atomic_unchecked_t fscache_n_attr_changed_ok;
++atomic_unchecked_t fscache_n_attr_changed_nobufs;
++atomic_unchecked_t fscache_n_attr_changed_nomem;
++atomic_unchecked_t fscache_n_attr_changed_calls;
++
++atomic_unchecked_t fscache_n_allocs;
++atomic_unchecked_t fscache_n_allocs_ok;
++atomic_unchecked_t fscache_n_allocs_wait;
++atomic_unchecked_t fscache_n_allocs_nobufs;
++atomic_unchecked_t fscache_n_allocs_intr;
++atomic_unchecked_t fscache_n_allocs_object_dead;
++atomic_unchecked_t fscache_n_alloc_ops;
++atomic_unchecked_t fscache_n_alloc_op_waits;
++
++atomic_unchecked_t fscache_n_retrievals;
++atomic_unchecked_t fscache_n_retrievals_ok;
++atomic_unchecked_t fscache_n_retrievals_wait;
++atomic_unchecked_t fscache_n_retrievals_nodata;
++atomic_unchecked_t fscache_n_retrievals_nobufs;
++atomic_unchecked_t fscache_n_retrievals_intr;
++atomic_unchecked_t fscache_n_retrievals_nomem;
++atomic_unchecked_t fscache_n_retrievals_object_dead;
++atomic_unchecked_t fscache_n_retrieval_ops;
++atomic_unchecked_t fscache_n_retrieval_op_waits;
++
++atomic_unchecked_t fscache_n_stores;
++atomic_unchecked_t fscache_n_stores_ok;
++atomic_unchecked_t fscache_n_stores_again;
++atomic_unchecked_t fscache_n_stores_nobufs;
++atomic_unchecked_t fscache_n_stores_oom;
++atomic_unchecked_t fscache_n_store_ops;
++atomic_unchecked_t fscache_n_store_calls;
++atomic_unchecked_t fscache_n_store_pages;
++atomic_unchecked_t fscache_n_store_radix_deletes;
++atomic_unchecked_t fscache_n_store_pages_over_limit;
++
++atomic_unchecked_t fscache_n_store_vmscan_not_storing;
++atomic_unchecked_t fscache_n_store_vmscan_gone;
++atomic_unchecked_t fscache_n_store_vmscan_busy;
++atomic_unchecked_t fscache_n_store_vmscan_cancelled;
++
++atomic_unchecked_t fscache_n_marks;
++atomic_unchecked_t fscache_n_uncaches;
++
++atomic_unchecked_t fscache_n_acquires;
++atomic_unchecked_t fscache_n_acquires_null;
++atomic_unchecked_t fscache_n_acquires_no_cache;
++atomic_unchecked_t fscache_n_acquires_ok;
++atomic_unchecked_t fscache_n_acquires_nobufs;
++atomic_unchecked_t fscache_n_acquires_oom;
++
++atomic_unchecked_t fscache_n_updates;
++atomic_unchecked_t fscache_n_updates_null;
++atomic_unchecked_t fscache_n_updates_run;
++
++atomic_unchecked_t fscache_n_relinquishes;
++atomic_unchecked_t fscache_n_relinquishes_null;
++atomic_unchecked_t fscache_n_relinquishes_waitcrt;
++atomic_unchecked_t fscache_n_relinquishes_retire;
++
++atomic_unchecked_t fscache_n_cookie_index;
++atomic_unchecked_t fscache_n_cookie_data;
++atomic_unchecked_t fscache_n_cookie_special;
++
++atomic_unchecked_t fscache_n_object_alloc;
++atomic_unchecked_t fscache_n_object_no_alloc;
++atomic_unchecked_t fscache_n_object_lookups;
++atomic_unchecked_t fscache_n_object_lookups_negative;
++atomic_unchecked_t fscache_n_object_lookups_positive;
++atomic_unchecked_t fscache_n_object_lookups_timed_out;
++atomic_unchecked_t fscache_n_object_created;
++atomic_unchecked_t fscache_n_object_avail;
++atomic_unchecked_t fscache_n_object_dead;
++
++atomic_unchecked_t fscache_n_checkaux_none;
++atomic_unchecked_t fscache_n_checkaux_okay;
++atomic_unchecked_t fscache_n_checkaux_update;
++atomic_unchecked_t fscache_n_checkaux_obsolete;
+
+ atomic_t fscache_n_cop_alloc_object;
+ atomic_t fscache_n_cop_lookup_object;
+@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
+ seq_puts(m, "FS-Cache statistics\n");
+
+ seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
+- atomic_read(&fscache_n_cookie_index),
+- atomic_read(&fscache_n_cookie_data),
+- atomic_read(&fscache_n_cookie_special));
++ atomic_read_unchecked(&fscache_n_cookie_index),
++ atomic_read_unchecked(&fscache_n_cookie_data),
++ atomic_read_unchecked(&fscache_n_cookie_special));
+
+ seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
+- atomic_read(&fscache_n_object_alloc),
+- atomic_read(&fscache_n_object_no_alloc),
+- atomic_read(&fscache_n_object_avail),
+- atomic_read(&fscache_n_object_dead));
++ atomic_read_unchecked(&fscache_n_object_alloc),
++ atomic_read_unchecked(&fscache_n_object_no_alloc),
++ atomic_read_unchecked(&fscache_n_object_avail),
++ atomic_read_unchecked(&fscache_n_object_dead));
+ seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
+- atomic_read(&fscache_n_checkaux_none),
+- atomic_read(&fscache_n_checkaux_okay),
+- atomic_read(&fscache_n_checkaux_update),
+- atomic_read(&fscache_n_checkaux_obsolete));
++ atomic_read_unchecked(&fscache_n_checkaux_none),
++ atomic_read_unchecked(&fscache_n_checkaux_okay),
++ atomic_read_unchecked(&fscache_n_checkaux_update),
++ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
+
+ seq_printf(m, "Pages : mrk=%u unc=%u\n",
+- atomic_read(&fscache_n_marks),
+- atomic_read(&fscache_n_uncaches));
++ atomic_read_unchecked(&fscache_n_marks),
++ atomic_read_unchecked(&fscache_n_uncaches));
+
+ seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
+ " oom=%u\n",
+- atomic_read(&fscache_n_acquires),
+- atomic_read(&fscache_n_acquires_null),
+- atomic_read(&fscache_n_acquires_no_cache),
+- atomic_read(&fscache_n_acquires_ok),
+- atomic_read(&fscache_n_acquires_nobufs),
+- atomic_read(&fscache_n_acquires_oom));
++ atomic_read_unchecked(&fscache_n_acquires),
++ atomic_read_unchecked(&fscache_n_acquires_null),
++ atomic_read_unchecked(&fscache_n_acquires_no_cache),
++ atomic_read_unchecked(&fscache_n_acquires_ok),
++ atomic_read_unchecked(&fscache_n_acquires_nobufs),
++ atomic_read_unchecked(&fscache_n_acquires_oom));
+
+ seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
+- atomic_read(&fscache_n_object_lookups),
+- atomic_read(&fscache_n_object_lookups_negative),
+- atomic_read(&fscache_n_object_lookups_positive),
+- atomic_read(&fscache_n_object_created),
+- atomic_read(&fscache_n_object_lookups_timed_out));
++ atomic_read_unchecked(&fscache_n_object_lookups),
++ atomic_read_unchecked(&fscache_n_object_lookups_negative),
++ atomic_read_unchecked(&fscache_n_object_lookups_positive),
++ atomic_read_unchecked(&fscache_n_object_created),
++ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
+
+ seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
+- atomic_read(&fscache_n_updates),
+- atomic_read(&fscache_n_updates_null),
+- atomic_read(&fscache_n_updates_run));
++ atomic_read_unchecked(&fscache_n_updates),
++ atomic_read_unchecked(&fscache_n_updates_null),
++ atomic_read_unchecked(&fscache_n_updates_run));
+
+ seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
+- atomic_read(&fscache_n_relinquishes),
+- atomic_read(&fscache_n_relinquishes_null),
+- atomic_read(&fscache_n_relinquishes_waitcrt),
+- atomic_read(&fscache_n_relinquishes_retire));
++ atomic_read_unchecked(&fscache_n_relinquishes),
++ atomic_read_unchecked(&fscache_n_relinquishes_null),
++ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
++ atomic_read_unchecked(&fscache_n_relinquishes_retire));
+
+ seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
+- atomic_read(&fscache_n_attr_changed),
+- atomic_read(&fscache_n_attr_changed_ok),
+- atomic_read(&fscache_n_attr_changed_nobufs),
+- atomic_read(&fscache_n_attr_changed_nomem),
+- atomic_read(&fscache_n_attr_changed_calls));
++ atomic_read_unchecked(&fscache_n_attr_changed),
++ atomic_read_unchecked(&fscache_n_attr_changed_ok),
++ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
++ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
++ atomic_read_unchecked(&fscache_n_attr_changed_calls));
+
+ seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
+- atomic_read(&fscache_n_allocs),
+- atomic_read(&fscache_n_allocs_ok),
+- atomic_read(&fscache_n_allocs_wait),
+- atomic_read(&fscache_n_allocs_nobufs),
+- atomic_read(&fscache_n_allocs_intr));
++ atomic_read_unchecked(&fscache_n_allocs),
++ atomic_read_unchecked(&fscache_n_allocs_ok),
++ atomic_read_unchecked(&fscache_n_allocs_wait),
++ atomic_read_unchecked(&fscache_n_allocs_nobufs),
++ atomic_read_unchecked(&fscache_n_allocs_intr));
+ seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
+- atomic_read(&fscache_n_alloc_ops),
+- atomic_read(&fscache_n_alloc_op_waits),
+- atomic_read(&fscache_n_allocs_object_dead));
++ atomic_read_unchecked(&fscache_n_alloc_ops),
++ atomic_read_unchecked(&fscache_n_alloc_op_waits),
++ atomic_read_unchecked(&fscache_n_allocs_object_dead));
+
+ seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
+ " int=%u oom=%u\n",
+- atomic_read(&fscache_n_retrievals),
+- atomic_read(&fscache_n_retrievals_ok),
+- atomic_read(&fscache_n_retrievals_wait),
+- atomic_read(&fscache_n_retrievals_nodata),
+- atomic_read(&fscache_n_retrievals_nobufs),
+- atomic_read(&fscache_n_retrievals_intr),
+- atomic_read(&fscache_n_retrievals_nomem));
++ atomic_read_unchecked(&fscache_n_retrievals),
++ atomic_read_unchecked(&fscache_n_retrievals_ok),
++ atomic_read_unchecked(&fscache_n_retrievals_wait),
++ atomic_read_unchecked(&fscache_n_retrievals_nodata),
++ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
++ atomic_read_unchecked(&fscache_n_retrievals_intr),
++ atomic_read_unchecked(&fscache_n_retrievals_nomem));
+ seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
+- atomic_read(&fscache_n_retrieval_ops),
+- atomic_read(&fscache_n_retrieval_op_waits),
+- atomic_read(&fscache_n_retrievals_object_dead));
++ atomic_read_unchecked(&fscache_n_retrieval_ops),
++ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
++ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
+
+ seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
+- atomic_read(&fscache_n_stores),
+- atomic_read(&fscache_n_stores_ok),
+- atomic_read(&fscache_n_stores_again),
+- atomic_read(&fscache_n_stores_nobufs),
+- atomic_read(&fscache_n_stores_oom));
++ atomic_read_unchecked(&fscache_n_stores),
++ atomic_read_unchecked(&fscache_n_stores_ok),
++ atomic_read_unchecked(&fscache_n_stores_again),
++ atomic_read_unchecked(&fscache_n_stores_nobufs),
++ atomic_read_unchecked(&fscache_n_stores_oom));
+ seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
+- atomic_read(&fscache_n_store_ops),
+- atomic_read(&fscache_n_store_calls),
+- atomic_read(&fscache_n_store_pages),
+- atomic_read(&fscache_n_store_radix_deletes),
+- atomic_read(&fscache_n_store_pages_over_limit));
++ atomic_read_unchecked(&fscache_n_store_ops),
++ atomic_read_unchecked(&fscache_n_store_calls),
++ atomic_read_unchecked(&fscache_n_store_pages),
++ atomic_read_unchecked(&fscache_n_store_radix_deletes),
++ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
+
+ seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
+- atomic_read(&fscache_n_store_vmscan_not_storing),
+- atomic_read(&fscache_n_store_vmscan_gone),
+- atomic_read(&fscache_n_store_vmscan_busy),
+- atomic_read(&fscache_n_store_vmscan_cancelled));
++ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
++ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
++ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
++ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
+
+ seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
+- atomic_read(&fscache_n_op_pend),
+- atomic_read(&fscache_n_op_run),
+- atomic_read(&fscache_n_op_enqueue),
+- atomic_read(&fscache_n_op_cancelled),
+- atomic_read(&fscache_n_op_rejected));
++ atomic_read_unchecked(&fscache_n_op_pend),
++ atomic_read_unchecked(&fscache_n_op_run),
++ atomic_read_unchecked(&fscache_n_op_enqueue),
++ atomic_read_unchecked(&fscache_n_op_cancelled),
++ atomic_read_unchecked(&fscache_n_op_rejected));
+ seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
+- atomic_read(&fscache_n_op_deferred_release),
+- atomic_read(&fscache_n_op_release),
+- atomic_read(&fscache_n_op_gc));
++ atomic_read_unchecked(&fscache_n_op_deferred_release),
++ atomic_read_unchecked(&fscache_n_op_release),
++ atomic_read_unchecked(&fscache_n_op_gc));
+
+ seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
+ atomic_read(&fscache_n_cop_alloc_object),
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/fs_struct.c linux-3.4-pax/fs/fs_struct.c
+--- linux-3.4/fs/fs_struct.c 2012-05-21 11:33:35.055929701 +0200
++++ linux-3.4-pax/fs/fs_struct.c 2012-05-21 12:10:11.140048976 +0200
+@@ -111,7 +111,7 @@ void exit_fs(struct task_struct *tsk)
+ task_lock(tsk);
+ spin_lock(&fs->lock);
+ tsk->fs = NULL;
+- kill = !--fs->users;
++ kill = !atomic_dec_return(&fs->users);
+ spin_unlock(&fs->lock);
+ task_unlock(tsk);
+ if (kill)
+@@ -124,7 +124,7 @@ struct fs_struct *copy_fs_struct(struct
+ struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
+ /* We don't need to lock fs - think why ;-) */
+ if (fs) {
+- fs->users = 1;
++ atomic_set(&fs->users, 1);
+ fs->in_exec = 0;
+ spin_lock_init(&fs->lock);
+ seqcount_init(&fs->seq);
+@@ -151,7 +151,7 @@ int unshare_fs_struct(void)
+
+ task_lock(current);
+ spin_lock(&fs->lock);
+- kill = !--fs->users;
++ kill = !atomic_dec_return(&fs->users);
+ current->fs = new_fs;
+ spin_unlock(&fs->lock);
+ task_unlock(current);
+@@ -171,7 +171,7 @@ EXPORT_SYMBOL(current_umask);
+
+ /* to be mentioned only in INIT_TASK */
+ struct fs_struct init_fs = {
+- .users = 1,
++ .users = ATOMIC_INIT(1),
+ .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
+ .seq = SEQCNT_ZERO,
+ .umask = 0022,
+@@ -187,12 +187,12 @@ void daemonize_fs_struct(void)
+ task_lock(current);
+
+ spin_lock(&init_fs.lock);
+- init_fs.users++;
++ atomic_inc(&init_fs.users);
+ spin_unlock(&init_fs.lock);
+
+ spin_lock(&fs->lock);
+ current->fs = &init_fs;
+- kill = !--fs->users;
++ kill = !atomic_dec_return(&fs->users);
+ spin_unlock(&fs->lock);
+
+ task_unlock(current);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/fuse/cuse.c linux-3.4-pax/fs/fuse/cuse.c
+--- linux-3.4/fs/fuse/cuse.c 2012-01-08 19:48:24.807471101 +0100
++++ linux-3.4-pax/fs/fuse/cuse.c 2012-05-21 12:10:11.140048976 +0200
+@@ -587,10 +587,12 @@ static int __init cuse_init(void)
+ INIT_LIST_HEAD(&cuse_conntbl[i]);
+
+ /* inherit and extend fuse_dev_operations */
+- cuse_channel_fops = fuse_dev_operations;
+- cuse_channel_fops.owner = THIS_MODULE;
+- cuse_channel_fops.open = cuse_channel_open;
+- cuse_channel_fops.release = cuse_channel_release;
++ pax_open_kernel();
++ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
++ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
++ *(void **)&cuse_channel_fops.open = cuse_channel_open;
++ *(void **)&cuse_channel_fops.release = cuse_channel_release;
++ pax_close_kernel();
+
+ cuse_class = class_create(THIS_MODULE, "cuse");
+ if (IS_ERR(cuse_class))
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/fuse/dev.c linux-3.4-pax/fs/fuse/dev.c
+--- linux-3.4/fs/fuse/dev.c 2012-05-21 11:33:35.059929701 +0200
++++ linux-3.4-pax/fs/fuse/dev.c 2012-05-21 12:10:11.144048976 +0200
+@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(stru
+ ret = 0;
+ pipe_lock(pipe);
+
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/fuse/dir.c linux-3.4-pax/fs/fuse/dir.c
+--- linux-3.4/fs/fuse/dir.c 2012-05-21 11:33:35.063929701 +0200
++++ linux-3.4-pax/fs/fuse/dir.c 2012-05-21 12:10:11.148048977 +0200
+@@ -1180,7 +1180,7 @@ static char *read_link(struct dentry *de
+ return link;
+ }
+
+-static void free_link(char *link)
++static void free_link(const char *link)
+ {
+ if (!IS_ERR(link))
+ free_page((unsigned long) link);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/gfs2/inode.c linux-3.4-pax/fs/gfs2/inode.c
+--- linux-3.4/fs/gfs2/inode.c 2012-05-21 11:33:35.111929704 +0200
++++ linux-3.4-pax/fs/gfs2/inode.c 2012-05-21 12:10:11.152048977 +0200
+@@ -1496,7 +1496,7 @@ out:
+
+ static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+ {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+ if (!IS_ERR(s))
+ kfree(s);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/inode.c linux-3.4-pax/fs/inode.c
+--- linux-3.4/fs/inode.c 2012-05-21 11:33:35.231929710 +0200
++++ linux-3.4-pax/fs/inode.c 2012-05-21 12:10:11.152048977 +0200
+@@ -860,8 +860,8 @@ unsigned int get_next_ino(void)
+
+ #ifdef CONFIG_SMP
+ if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
+- static atomic_t shared_last_ino;
+- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
++ static atomic_unchecked_t shared_last_ino;
++ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
+
+ res = next - LAST_INO_BATCH;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/jffs2/erase.c linux-3.4-pax/fs/jffs2/erase.c
+--- linux-3.4/fs/jffs2/erase.c 2012-05-21 11:33:35.619929732 +0200
++++ linux-3.4-pax/fs/jffs2/erase.c 2012-05-21 12:10:11.156048977 +0200
+@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(stru
+ struct jffs2_unknown_node marker = {
+ .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
+ .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
+- .totlen = cpu_to_je32(c->cleanmarker_size)
++ .totlen = cpu_to_je32(c->cleanmarker_size),
++ .hdr_crc = cpu_to_je32(0)
+ };
+
+ jffs2_prealloc_raw_node_refs(c, jeb, 1);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/jffs2/wbuf.c linux-3.4-pax/fs/jffs2/wbuf.c
+--- linux-3.4/fs/jffs2/wbuf.c 2012-05-21 11:33:35.683929735 +0200
++++ linux-3.4-pax/fs/jffs2/wbuf.c 2012-05-21 12:10:11.160048977 +0200
+@@ -1022,7 +1022,8 @@ static const struct jffs2_unknown_node o
+ {
+ .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
+ .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
+- .totlen = constant_cpu_to_je32(8)
++ .totlen = constant_cpu_to_je32(8),
++ .hdr_crc = constant_cpu_to_je32(0)
+ };
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/jfs/super.c linux-3.4-pax/fs/jfs/super.c
+--- linux-3.4/fs/jfs/super.c 2012-05-21 11:33:35.695929736 +0200
++++ linux-3.4-pax/fs/jfs/super.c 2012-05-21 12:10:11.160048977 +0200
+@@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
+
+ jfs_inode_cachep =
+ kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
+- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
++ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
+ init_once);
+ if (jfs_inode_cachep == NULL)
+ return -ENOMEM;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/Kconfig.binfmt linux-3.4-pax/fs/Kconfig.binfmt
+--- linux-3.4/fs/Kconfig.binfmt 2012-03-19 10:39:09.408049311 +0100
++++ linux-3.4-pax/fs/Kconfig.binfmt 2012-05-21 12:10:11.164048977 +0200
+@@ -89,7 +89,7 @@ config HAVE_AOUT
+
+ config BINFMT_AOUT
+ tristate "Kernel support for a.out and ECOFF binaries"
+- depends on HAVE_AOUT
++ depends on HAVE_AOUT && BROKEN
+ ---help---
+ A.out (Assembler.OUTput) is a set of formats for libraries and
+ executables used in the earliest versions of UNIX. Linux used
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/libfs.c linux-3.4-pax/fs/libfs.c
+--- linux-3.4/fs/libfs.c 2012-05-21 11:33:35.695929736 +0200
++++ linux-3.4-pax/fs/libfs.c 2012-05-21 12:10:11.168048978 +0200
+@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, v
+
+ for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
+ struct dentry *next;
++ char d_name[sizeof(next->d_iname)];
++ const unsigned char *name;
++
+ next = list_entry(p, struct dentry, d_u.d_child);
+ spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+ if (!simple_positive(next)) {
+@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, v
+
+ spin_unlock(&next->d_lock);
+ spin_unlock(&dentry->d_lock);
+- if (filldir(dirent, next->d_name.name,
++ name = next->d_name.name;
++ if (name == next->d_iname) {
++ memcpy(d_name, name, next->d_name.len);
++ name = d_name;
++ }
++ if (filldir(dirent, name,
+ next->d_name.len, filp->f_pos,
+ next->d_inode->i_ino,
+ dt_type(next->d_inode)) < 0)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/lockd/clntproc.c linux-3.4-pax/fs/lockd/clntproc.c
+--- linux-3.4/fs/lockd/clntproc.c 2011-10-24 12:48:39.983091040 +0200
++++ linux-3.4-pax/fs/lockd/clntproc.c 2012-05-21 12:10:11.168048978 +0200
+@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
+ /*
+ * Cookie counter for NLM requests
+ */
+-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
++static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
+
+ void nlmclnt_next_cookie(struct nlm_cookie *c)
+ {
+- u32 cookie = atomic_inc_return(&nlm_cookie);
++ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
+
+ memcpy(c->data, &cookie, 4);
+ c->len=4;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/locks.c linux-3.4-pax/fs/locks.c
+--- linux-3.4/fs/locks.c 2012-05-21 11:33:35.707929736 +0200
++++ linux-3.4-pax/fs/locks.c 2012-05-21 12:10:11.172048978 +0200
+@@ -2075,16 +2075,16 @@ void locks_remove_flock(struct file *fil
+ return;
+
+ if (filp->f_op && filp->f_op->flock) {
+- struct file_lock fl = {
++ struct file_lock flock = {
+ .fl_pid = current->tgid,
+ .fl_file = filp,
+ .fl_flags = FL_FLOCK,
+ .fl_type = F_UNLCK,
+ .fl_end = OFFSET_MAX,
+ };
+- filp->f_op->flock(filp, F_SETLKW, &fl);
+- if (fl.fl_ops && fl.fl_ops->fl_release_private)
+- fl.fl_ops->fl_release_private(&fl);
++ filp->f_op->flock(filp, F_SETLKW, &flock);
++ if (flock.fl_ops && flock.fl_ops->fl_release_private)
++ flock.fl_ops->fl_release_private(&flock);
+ }
+
+ lock_flocks();
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/namei.c linux-3.4-pax/fs/namei.c
+--- linux-3.4/fs/namei.c 2012-05-21 11:33:35.791929741 +0200
++++ linux-3.4-pax/fs/namei.c 2012-05-21 12:10:11.176048978 +0200
+@@ -656,7 +656,7 @@ follow_link(struct path *link, struct na
+ *p = dentry->d_inode->i_op->follow_link(dentry, nd);
+ error = PTR_ERR(*p);
+ if (!IS_ERR(*p)) {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+ error = 0;
+ if (s)
+ error = __vfs_follow_link(nd, s);
+@@ -3333,6 +3333,8 @@ SYSCALL_DEFINE2(rename, const char __use
+
+ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
+ {
++ char tmpbuf[64];
++ const char *newlink;
+ int len;
+
+ len = PTR_ERR(link);
+@@ -3342,7 +3344,14 @@ int vfs_readlink(struct dentry *dentry,
+ len = strlen(link);
+ if (len > (unsigned) buflen)
+ len = buflen;
+- if (copy_to_user(buffer, link, len))
++
++ if (len < sizeof(tmpbuf)) {
++ memcpy(tmpbuf, link, len);
++ newlink = tmpbuf;
++ } else
++ newlink = link;
++
++ if (copy_to_user(buffer, newlink, len))
+ len = -EFAULT;
+ out:
+ return len;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/nfs/inode.c linux-3.4-pax/fs/nfs/inode.c
+--- linux-3.4/fs/nfs/inode.c 2012-05-21 11:33:35.907929747 +0200
++++ linux-3.4-pax/fs/nfs/inode.c 2012-05-21 12:10:11.184048978 +0200
+@@ -152,7 +152,7 @@ static void nfs_zap_caches_locked(struct
+ nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
+ nfsi->attrtimeo_timestamp = jiffies;
+
+- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
++ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
+ if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
+ else
+@@ -1005,16 +1005,16 @@ static int nfs_size_need_update(const st
+ return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
+ }
+
+-static atomic_long_t nfs_attr_generation_counter;
++static atomic_long_unchecked_t nfs_attr_generation_counter;
+
+ static unsigned long nfs_read_attr_generation_counter(void)
+ {
+- return atomic_long_read(&nfs_attr_generation_counter);
++ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
+ }
+
+ unsigned long nfs_inc_attr_generation_counter(void)
+ {
+- return atomic_long_inc_return(&nfs_attr_generation_counter);
++ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
+ }
+
+ void nfs_fattr_init(struct nfs_fattr *fattr)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/nfsd/vfs.c linux-3.4-pax/fs/nfsd/vfs.c
+--- linux-3.4/fs/nfsd/vfs.c 2012-05-21 11:33:36.087929757 +0200
++++ linux-3.4-pax/fs/nfsd/vfs.c 2012-05-21 12:10:11.188048979 +0200
+@@ -933,7 +933,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
+ } else {
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
++ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
+ set_fs(oldfs);
+ }
+
+@@ -1037,7 +1037,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
+
+ /* Write the data. */
+ oldfs = get_fs(); set_fs(KERNEL_DS);
+- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
++ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
+ set_fs(oldfs);
+ if (host_err < 0)
+ goto out_nfserr;
+@@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
+ */
+
+ oldfs = get_fs(); set_fs(KERNEL_DS);
+- host_err = inode->i_op->readlink(path.dentry, buf, *lenp);
++ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
+ set_fs(oldfs);
+
+ if (host_err < 0)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/notify/fanotify/fanotify_user.c linux-3.4-pax/fs/notify/fanotify/fanotify_user.c
+--- linux-3.4/fs/notify/fanotify/fanotify_user.c 2012-03-19 10:39:10.316049265 +0100
++++ linux-3.4-pax/fs/notify/fanotify/fanotify_user.c 2012-05-21 12:10:11.188048979 +0200
+@@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct
+ goto out_close_fd;
+
+ ret = -EFAULT;
+- if (copy_to_user(buf, &fanotify_event_metadata,
++ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
++ copy_to_user(buf, &fanotify_event_metadata,
+ fanotify_event_metadata.event_len))
+ goto out_kill_access_response;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/notify/notification.c linux-3.4-pax/fs/notify/notification.c
+--- linux-3.4/fs/notify/notification.c 2012-05-21 11:33:36.119929759 +0200
++++ linux-3.4-pax/fs/notify/notification.c 2012-05-21 12:10:11.192048979 +0200
+@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
+ * get set to 0 so it will never get 'freed'
+ */
+ static struct fsnotify_event *q_overflow_event;
+-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
++static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
+
+ /**
+ * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
+@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
+ */
+ u32 fsnotify_get_cookie(void)
+ {
+- return atomic_inc_return(&fsnotify_sync_cookie);
++ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
+ }
+ EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/ntfs/dir.c linux-3.4-pax/fs/ntfs/dir.c
+--- linux-3.4/fs/ntfs/dir.c 2011-10-24 12:48:40.127091033 +0200
++++ linux-3.4-pax/fs/ntfs/dir.c 2012-05-21 12:10:11.196048979 +0200
+@@ -1329,7 +1329,7 @@ find_next_index_buffer:
+ ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
+ ~(s64)(ndir->itype.index.block_size - 1)));
+ /* Bounds checks. */
+- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
++ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
+ ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
+ "inode 0x%lx or driver bug.", vdir->i_ino);
+ goto err_out;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/ntfs/file.c linux-3.4-pax/fs/ntfs/file.c
+--- linux-3.4/fs/ntfs/file.c 2012-05-21 11:33:36.131929760 +0200
++++ linux-3.4-pax/fs/ntfs/file.c 2012-05-21 12:10:11.200048979 +0200
+@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_
+ #endif /* NTFS_RW */
+ };
+
+-const struct file_operations ntfs_empty_file_ops = {};
++const struct file_operations ntfs_empty_file_ops __read_only;
+
+-const struct inode_operations ntfs_empty_inode_ops = {};
++const struct inode_operations ntfs_empty_inode_ops __read_only;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/ocfs2/localalloc.c linux-3.4-pax/fs/ocfs2/localalloc.c
+--- linux-3.4/fs/ocfs2/localalloc.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/fs/ocfs2/localalloc.c 2012-05-21 12:10:11.204048980 +0200
+@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
+ goto bail;
+ }
+
+- atomic_inc(&osb->alloc_stats.moves);
++ atomic_inc_unchecked(&osb->alloc_stats.moves);
+
+ bail:
+ if (handle)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/ocfs2/ocfs2.h linux-3.4-pax/fs/ocfs2/ocfs2.h
+--- linux-3.4/fs/ocfs2/ocfs2.h 2012-01-08 19:48:26.091471032 +0100
++++ linux-3.4-pax/fs/ocfs2/ocfs2.h 2012-05-21 12:10:11.204048980 +0200
+@@ -235,11 +235,11 @@ enum ocfs2_vol_state
+
+ struct ocfs2_alloc_stats
+ {
+- atomic_t moves;
+- atomic_t local_data;
+- atomic_t bitmap_data;
+- atomic_t bg_allocs;
+- atomic_t bg_extends;
++ atomic_unchecked_t moves;
++ atomic_unchecked_t local_data;
++ atomic_unchecked_t bitmap_data;
++ atomic_unchecked_t bg_allocs;
++ atomic_unchecked_t bg_extends;
+ };
+
+ enum ocfs2_local_alloc_state
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/ocfs2/suballoc.c linux-3.4-pax/fs/ocfs2/suballoc.c
+--- linux-3.4/fs/ocfs2/suballoc.c 2012-05-21 11:33:36.155929761 +0200
++++ linux-3.4-pax/fs/ocfs2/suballoc.c 2012-05-21 12:10:11.208048980 +0200
+@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
+ mlog_errno(status);
+ goto bail;
+ }
+- atomic_inc(&osb->alloc_stats.bg_extends);
++ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
+
+ /* You should never ask for this much metadata */
+ BUG_ON(bits_wanted >
+@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
+ mlog_errno(status);
+ goto bail;
+ }
+- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+
+ *suballoc_loc = res.sr_bg_blkno;
+ *suballoc_bit_start = res.sr_bit_offset;
+@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
+ trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
+ res->sr_bits);
+
+- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+
+ BUG_ON(res->sr_bits != 1);
+
+@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
+ mlog_errno(status);
+ goto bail;
+ }
+- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+
+ BUG_ON(res.sr_bits != 1);
+
+@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
+ cluster_start,
+ num_clusters);
+ if (!status)
+- atomic_inc(&osb->alloc_stats.local_data);
++ atomic_inc_unchecked(&osb->alloc_stats.local_data);
+ } else {
+ if (min_clusters > (osb->bitmap_cpg - 1)) {
+ /* The only paths asking for contiguousness
+@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
+ ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
+ res.sr_bg_blkno,
+ res.sr_bit_offset);
+- atomic_inc(&osb->alloc_stats.bitmap_data);
++ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
+ *num_clusters = res.sr_bits;
+ }
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/ocfs2/super.c linux-3.4-pax/fs/ocfs2/super.c
+--- linux-3.4/fs/ocfs2/super.c 2012-05-21 11:33:36.179929762 +0200
++++ linux-3.4-pax/fs/ocfs2/super.c 2012-05-21 12:10:11.212048980 +0200
+@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
+ "%10s => GlobalAllocs: %d LocalAllocs: %d "
+ "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
+ "Stats",
+- atomic_read(&osb->alloc_stats.bitmap_data),
+- atomic_read(&osb->alloc_stats.local_data),
+- atomic_read(&osb->alloc_stats.bg_allocs),
+- atomic_read(&osb->alloc_stats.moves),
+- atomic_read(&osb->alloc_stats.bg_extends));
++ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
++ atomic_read_unchecked(&osb->alloc_stats.local_data),
++ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
++ atomic_read_unchecked(&osb->alloc_stats.moves),
++ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
+
+ out += snprintf(buf + out, len - out,
+ "%10s => State: %u Descriptor: %llu Size: %u bits "
+@@ -2116,11 +2116,11 @@ static int ocfs2_initialize_super(struct
+ spin_lock_init(&osb->osb_xattr_lock);
+ ocfs2_init_steal_slots(osb);
+
+- atomic_set(&osb->alloc_stats.moves, 0);
+- atomic_set(&osb->alloc_stats.local_data, 0);
+- atomic_set(&osb->alloc_stats.bitmap_data, 0);
+- atomic_set(&osb->alloc_stats.bg_allocs, 0);
+- atomic_set(&osb->alloc_stats.bg_extends, 0);
++ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
++ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
++ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
++ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
++ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
+
+ /* Copy the blockcheck stats from the superblock probe */
+ osb->osb_ecc_stats = *stats;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/ocfs2/symlink.c linux-3.4-pax/fs/ocfs2/symlink.c
+--- linux-3.4/fs/ocfs2/symlink.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/fs/ocfs2/symlink.c 2012-05-21 12:10:11.216048980 +0200
+@@ -142,7 +142,7 @@ bail:
+
+ static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
+ {
+- char *link = nd_get_link(nd);
++ const char *link = nd_get_link(nd);
+ if (!IS_ERR(link))
+ kfree(link);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/pipe.c linux-3.4-pax/fs/pipe.c
+--- linux-3.4/fs/pipe.c 2012-05-21 11:33:36.191929763 +0200
++++ linux-3.4-pax/fs/pipe.c 2012-05-21 12:10:11.220048980 +0200
+@@ -438,9 +438,9 @@ redo:
+ }
+ if (bufs) /* More to do? */
+ continue;
+- if (!pipe->writers)
++ if (!atomic_read(&pipe->writers))
+ break;
+- if (!pipe->waiting_writers) {
++ if (!atomic_read(&pipe->waiting_writers)) {
+ /* syscall merging: Usually we must not sleep
+ * if O_NONBLOCK is set, or if we got some data.
+ * But if a writer sleeps in kernel space, then
+@@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const str
+ mutex_lock(&inode->i_mutex);
+ pipe = inode->i_pipe;
+
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ ret = -EPIPE;
+ goto out;
+@@ -553,7 +553,7 @@ redo1:
+ for (;;) {
+ int bufs;
+
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+@@ -644,9 +644,9 @@ redo2:
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+ do_wakeup = 0;
+ }
+- pipe->waiting_writers++;
++ atomic_inc(&pipe->waiting_writers);
+ pipe_wait(pipe);
+- pipe->waiting_writers--;
++ atomic_dec(&pipe->waiting_writers);
+ }
+ out:
+ mutex_unlock(&inode->i_mutex);
+@@ -713,7 +713,7 @@ pipe_poll(struct file *filp, poll_table
+ mask = 0;
+ if (filp->f_mode & FMODE_READ) {
+ mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
+- if (!pipe->writers && filp->f_version != pipe->w_counter)
++ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
+ mask |= POLLHUP;
+ }
+
+@@ -723,7 +723,7 @@ pipe_poll(struct file *filp, poll_table
+ * Most Unices do not set POLLERR for FIFOs but on Linux they
+ * behave exactly like pipes for poll().
+ */
+- if (!pipe->readers)
++ if (!atomic_read(&pipe->readers))
+ mask |= POLLERR;
+ }
+
+@@ -737,10 +737,10 @@ pipe_release(struct inode *inode, int de
+
+ mutex_lock(&inode->i_mutex);
+ pipe = inode->i_pipe;
+- pipe->readers -= decr;
+- pipe->writers -= decw;
++ atomic_sub(decr, &pipe->readers);
++ atomic_sub(decw, &pipe->writers);
+
+- if (!pipe->readers && !pipe->writers) {
++ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
+ free_pipe_info(inode);
+ } else {
+ wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
+@@ -830,7 +830,7 @@ pipe_read_open(struct inode *inode, stru
+
+ if (inode->i_pipe) {
+ ret = 0;
+- inode->i_pipe->readers++;
++ atomic_inc(&inode->i_pipe->readers);
+ }
+
+ mutex_unlock(&inode->i_mutex);
+@@ -847,7 +847,7 @@ pipe_write_open(struct inode *inode, str
+
+ if (inode->i_pipe) {
+ ret = 0;
+- inode->i_pipe->writers++;
++ atomic_inc(&inode->i_pipe->writers);
+ }
+
+ mutex_unlock(&inode->i_mutex);
+@@ -865,9 +865,9 @@ pipe_rdwr_open(struct inode *inode, stru
+ if (inode->i_pipe) {
+ ret = 0;
+ if (filp->f_mode & FMODE_READ)
+- inode->i_pipe->readers++;
++ atomic_inc(&inode->i_pipe->readers);
+ if (filp->f_mode & FMODE_WRITE)
+- inode->i_pipe->writers++;
++ atomic_inc(&inode->i_pipe->writers);
+ }
+
+ mutex_unlock(&inode->i_mutex);
+@@ -989,7 +989,8 @@ static struct inode * get_pipe_inode(voi
+ goto fail_iput;
+ inode->i_pipe = pipe;
+
+- pipe->readers = pipe->writers = 1;
++ atomic_set(&pipe->readers, 1);
++ atomic_set(&pipe->writers, 1);
+ inode->i_fop = &rdwr_pipefifo_fops;
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/proc/array.c linux-3.4-pax/fs/proc/array.c
+--- linux-3.4/fs/proc/array.c 2012-05-21 11:33:36.199929763 +0200
++++ linux-3.4-pax/fs/proc/array.c 2012-05-21 12:10:11.220048980 +0200
+@@ -337,6 +337,21 @@ static void task_cpus_allowed(struct seq
+ seq_putc(m, '\n');
+ }
+
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++static inline void task_pax(struct seq_file *m, struct task_struct *p)
++{
++ if (p->mm)
++ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
++ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
++ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
++ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
++ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
++ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
++ else
++ seq_printf(m, "PaX:\t-----\n");
++}
++#endif
++
+ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task)
+ {
+@@ -354,6 +369,11 @@ int proc_pid_status(struct seq_file *m,
+ task_cpus_allowed(m, task);
+ cpuset_task_status_allowed(m, task);
+ task_context_switch_counts(m, task);
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ task_pax(m, task);
++#endif
++
+ return 0;
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/proc/base.c linux-3.4-pax/fs/proc/base.c
+--- linux-3.4/fs/proc/base.c 2012-05-21 11:33:36.203929763 +0200
++++ linux-3.4-pax/fs/proc/base.c 2012-05-21 12:10:11.224048981 +0200
+@@ -2763,7 +2763,7 @@ static void *proc_self_follow_link(struc
+ static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
+ void *cookie)
+ {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+ if (!IS_ERR(s))
+ __putname(s);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/proc/kcore.c linux-3.4-pax/fs/proc/kcore.c
+--- linux-3.4/fs/proc/kcore.c 2012-05-21 11:33:36.227929765 +0200
++++ linux-3.4-pax/fs/proc/kcore.c 2012-05-21 12:10:11.228048981 +0200
+@@ -480,9 +480,10 @@ read_kcore(struct file *file, char __use
+ * the addresses in the elf_phdr on our list.
+ */
+ start = kc_offset_to_vaddr(*fpos - elf_buflen);
+- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
++ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
++ if (tsz > buflen)
+ tsz = buflen;
+-
++
+ while (buflen) {
+ struct kcore_list *m;
+
+@@ -511,20 +512,23 @@ read_kcore(struct file *file, char __use
+ kfree(elf_buf);
+ } else {
+ if (kern_addr_valid(start)) {
+- unsigned long n;
++ char *elf_buf;
++ mm_segment_t oldfs;
+
+- n = copy_to_user(buffer, (char *)start, tsz);
+- /*
+- * We cannot distinguish between fault on source
+- * and fault on destination. When this happens
+- * we clear too and hope it will trigger the
+- * EFAULT again.
+- */
+- if (n) {
+- if (clear_user(buffer + tsz - n,
+- n))
++ elf_buf = kmalloc(tsz, GFP_KERNEL);
++ if (!elf_buf)
++ return -ENOMEM;
++ oldfs = get_fs();
++ set_fs(KERNEL_DS);
++ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
++ set_fs(oldfs);
++ if (copy_to_user(buffer, elf_buf, tsz)) {
++ kfree(elf_buf);
+ return -EFAULT;
++ }
+ }
++ set_fs(oldfs);
++ kfree(elf_buf);
+ } else {
+ if (clear_user(buffer, tsz))
+ return -EFAULT;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/proc/meminfo.c linux-3.4-pax/fs/proc/meminfo.c
+--- linux-3.4/fs/proc/meminfo.c 2012-01-08 19:48:26.167471028 +0100
++++ linux-3.4-pax/fs/proc/meminfo.c 2012-05-21 12:10:11.228048981 +0200
+@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_
+ vmi.used >> 10,
+ vmi.largest_chunk >> 10
+ #ifdef CONFIG_MEMORY_FAILURE
+- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
++ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
+ #endif
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/proc/nommu.c linux-3.4-pax/fs/proc/nommu.c
+--- linux-3.4/fs/proc/nommu.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/fs/proc/nommu.c 2012-05-21 12:10:11.232048981 +0200
+@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
+ if (len < 1)
+ len = 1;
+ seq_printf(m, "%*c", len, ' ');
+- seq_path(m, &file->f_path, "");
++ seq_path(m, &file->f_path, "\n\\");
+ }
+
+ seq_putc(m, '\n');
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/proc/task_mmu.c linux-3.4-pax/fs/proc/task_mmu.c
+--- linux-3.4/fs/proc/task_mmu.c 2012-05-21 11:33:36.239929765 +0200
++++ linux-3.4-pax/fs/proc/task_mmu.c 2012-05-30 17:39:11.664992378 +0200
+@@ -52,8 +52,13 @@ void task_mem(struct seq_file *m, struct
+ "VmExe:\t%8lu kB\n"
+ "VmLib:\t%8lu kB\n"
+ "VmPTE:\t%8lu kB\n"
+- "VmSwap:\t%8lu kB\n",
+- hiwater_vm << (PAGE_SHIFT-10),
++ "VmSwap:\t%8lu kB\n"
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
++#endif
++
++ ,hiwater_vm << (PAGE_SHIFT-10),
+ (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
+ mm->locked_vm << (PAGE_SHIFT-10),
+ mm->pinned_vm << (PAGE_SHIFT-10),
+@@ -62,7 +67,13 @@ void task_mem(struct seq_file *m, struct
+ data << (PAGE_SHIFT-10),
+ mm->stack_vm << (PAGE_SHIFT-10), text, lib,
+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
+- swap << (PAGE_SHIFT-10));
++ swap << (PAGE_SHIFT-10)
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ , mm->context.user_cs_base, mm->context.user_cs_limit
++#endif
++
++ );
+ }
+
+ unsigned long task_vsize(struct mm_struct *mm)
+@@ -231,20 +242,23 @@ show_map_vma(struct seq_file *m, struct
+ pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
+ }
+
+- /* We don't show the stack guard page in /proc/maps */
+ start = vma->vm_start;
+- if (stack_guard_page_start(vma, start))
+- start += PAGE_SIZE;
+ end = vma->vm_end;
+- if (stack_guard_page_end(vma, end))
+- end -= PAGE_SIZE;
+
+ seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
+ start,
+ end,
++
++#if 0
++ flags & VM_MAYREAD ? flags & VM_READ ? 'R' : '+' : flags & VM_READ ? 'r' : '-',
++ flags & VM_MAYWRITE ? flags & VM_WRITE ? 'W' : '+' : flags & VM_WRITE ? 'w' : '-',
++ flags & VM_MAYEXEC ? flags & VM_EXEC ? 'X' : '+' : flags & VM_EXEC ? 'x' : '-',
++#else
+ flags & VM_READ ? 'r' : '-',
+ flags & VM_WRITE ? 'w' : '-',
+ flags & VM_EXEC ? 'x' : '-',
++#endif
++
+ flags & VM_MAYSHARE ? 's' : 'p',
+ pgoff,
+ MAJOR(dev), MINOR(dev), ino, &len);
+@@ -255,7 +269,7 @@ show_map_vma(struct seq_file *m, struct
+ */
+ if (file) {
+ pad_len_spaces(m, len);
+- seq_path(m, &file->f_path, "\n");
++ seq_path(m, &file->f_path, "\n\\");
+ goto done;
+ }
+
+@@ -281,8 +295,9 @@ show_map_vma(struct seq_file *m, struct
+ * Thread stack in /proc/PID/task/TID/maps or
+ * the main process stack.
+ */
+- if (!is_pid || (vma->vm_start <= mm->start_stack &&
+- vma->vm_end >= mm->start_stack)) {
++ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
++ (vma->vm_start <= mm->start_stack &&
++ vma->vm_end >= mm->start_stack)) {
+ name = "[stack]";
+ } else {
+ /* Thread stack in /proc/PID/maps */
+@@ -784,7 +799,7 @@ static int pagemap_pte_range(pmd_t *pmd,
+
+ /* find the first VMA at or above 'addr' */
+ vma = find_vma(walk->mm, addr);
+- if (pmd_trans_huge_lock(pmd, vma) == 1) {
++ if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
+ for (; addr != end; addr += PAGE_SIZE) {
+ unsigned long offset;
+
+@@ -1159,7 +1174,7 @@ static int show_numa_map(struct seq_file
+
+ if (file) {
+ seq_printf(m, " file=");
+- seq_path(m, &file->f_path, "\n\t= ");
++ seq_path(m, &file->f_path, "\n\t\\= ");
+ } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
+ seq_printf(m, " heap");
+ } else {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/proc/task_nommu.c linux-3.4-pax/fs/proc/task_nommu.c
+--- linux-3.4/fs/proc/task_nommu.c 2012-05-21 11:33:36.239929765 +0200
++++ linux-3.4-pax/fs/proc/task_nommu.c 2012-05-21 12:10:11.236048981 +0200
+@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
+ else
+ bytes += kobjsize(mm);
+
+- if (current->fs && current->fs->users > 1)
++ if (current->fs && atomic_read(&current->fs->users) > 1)
+ sbytes += kobjsize(current->fs);
+ else
+ bytes += kobjsize(current->fs);
+@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_fil
+
+ if (file) {
+ pad_len_spaces(m, len);
+- seq_path(m, &file->f_path, "");
++ seq_path(m, &file->f_path, "\n\\");
+ } else if (mm) {
+ pid_t tid = vm_is_stack(priv->task, vma, is_pid);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/quota/netlink.c linux-3.4-pax/fs/quota/netlink.c
+--- linux-3.4/fs/quota/netlink.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/fs/quota/netlink.c 2012-05-21 12:10:11.236048981 +0200
+@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
+ void quota_send_warning(short type, unsigned int id, dev_t dev,
+ const char warntype)
+ {
+- static atomic_t seq;
++ static atomic_unchecked_t seq;
+ struct sk_buff *skb;
+ void *msg_head;
+ int ret;
+@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
+ "VFS: Not enough memory to send quota warning.\n");
+ return;
+ }
+- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
++ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
+ &quota_genl_family, 0, QUOTA_NL_C_WARNING);
+ if (!msg_head) {
+ printk(KERN_ERR
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/readdir.c linux-3.4-pax/fs/readdir.c
+--- linux-3.4/fs/readdir.c 2012-05-21 11:33:36.283929768 +0200
++++ linux-3.4-pax/fs/readdir.c 2012-05-21 12:10:11.240048982 +0200
+@@ -299,7 +299,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
+ error = buf.error;
+ lastdirent = buf.previous;
+ if (lastdirent) {
+- typeof(lastdirent->d_off) d_off = file->f_pos;
++ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
+ if (__put_user(d_off, &lastdirent->d_off))
+ error = -EFAULT;
+ else
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/reiserfs/do_balan.c linux-3.4-pax/fs/reiserfs/do_balan.c
+--- linux-3.4/fs/reiserfs/do_balan.c 2012-05-21 11:33:36.307929769 +0200
++++ linux-3.4-pax/fs/reiserfs/do_balan.c 2012-05-21 12:10:11.244048982 +0200
+@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
+ return;
+ }
+
+- atomic_inc(&(fs_generation(tb->tb_sb)));
++ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
+ do_balance_starts(tb);
+
+ /* balance leaf returns 0 except if combining L R and S into
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/reiserfs/procfs.c linux-3.4-pax/fs/reiserfs/procfs.c
+--- linux-3.4/fs/reiserfs/procfs.c 2012-05-21 11:33:36.387929773 +0200
++++ linux-3.4-pax/fs/reiserfs/procfs.c 2012-05-21 12:10:11.244048982 +0200
+@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m
+ "SMALL_TAILS " : "NO_TAILS ",
+ replay_only(sb) ? "REPLAY_ONLY " : "",
+ convert_reiserfs(sb) ? "CONV " : "",
+- atomic_read(&r->s_generation_counter),
++ atomic_read_unchecked(&r->s_generation_counter),
+ SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
+ SF(s_do_balance), SF(s_unneeded_left_neighbor),
+ SF(s_good_search_by_key_reada), SF(s_bmaps),
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/reiserfs/reiserfs.h linux-3.4-pax/fs/reiserfs/reiserfs.h
+--- linux-3.4/fs/reiserfs/reiserfs.h 2012-05-21 11:33:36.395929774 +0200
++++ linux-3.4-pax/fs/reiserfs/reiserfs.h 2012-05-21 12:10:11.248048982 +0200
+@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
+ /* Comment? -Hans */
+ wait_queue_head_t s_wait;
+ /* To be obsoleted soon by per buffer seals.. -Hans */
+- atomic_t s_generation_counter; // increased by one every time the
++ atomic_unchecked_t s_generation_counter; // increased by one every time the
+ // tree gets re-balanced
+ unsigned long s_properties; /* File system properties. Currently holds
+ on-disk FS format */
+@@ -1973,7 +1973,7 @@ static inline loff_t max_reiserfs_offset
+ #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
+
+ #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
+-#define get_generation(s) atomic_read (&fs_generation(s))
++#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
+ #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
+ #define __fs_changed(gen,s) (gen != get_generation (s))
+ #define fs_changed(gen,s) \
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/seq_file.c linux-3.4-pax/fs/seq_file.c
+--- linux-3.4/fs/seq_file.c 2012-05-21 11:33:36.463929777 +0200
++++ linux-3.4-pax/fs/seq_file.c 2012-05-21 12:10:11.252048982 +0200
+@@ -567,7 +567,7 @@ static void single_stop(struct seq_file
+ int single_open(struct file *file, int (*show)(struct seq_file *, void *),
+ void *data)
+ {
+- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
++ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
+ int res = -ENOMEM;
+
+ if (op) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/splice.c linux-3.4-pax/fs/splice.c
+--- linux-3.4/fs/splice.c 2012-05-21 11:33:36.463929777 +0200
++++ linux-3.4-pax/fs/splice.c 2012-05-21 12:10:11.256048982 +0200
+@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
+ pipe_lock(pipe);
+
+ for (;;) {
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
+ do_wakeup = 0;
+ }
+
+- pipe->waiting_writers++;
++ atomic_inc(&pipe->waiting_writers);
+ pipe_wait(pipe);
+- pipe->waiting_writers--;
++ atomic_dec(&pipe->waiting_writers);
+ }
+
+ pipe_unlock(pipe);
+@@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file
+ old_fs = get_fs();
+ set_fs(get_ds());
+ /* The cast to a user pointer is valid due to the set_fs() */
+- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
++ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
+ set_fs(old_fs);
+
+ return res;
+@@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file
+ old_fs = get_fs();
+ set_fs(get_ds());
+ /* The cast to a user pointer is valid due to the set_fs() */
+- res = vfs_write(file, (const char __user *)buf, count, &pos);
++ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
+ set_fs(old_fs);
+
+ return res;
+@@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct
+ goto err;
+
+ this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
+- vec[i].iov_base = (void __user *) page_address(page);
++ vec[i].iov_base = (void __force_user *) page_address(page);
+ vec[i].iov_len = this_len;
+ spd.pages[i] = page;
+ spd.nr_pages++;
+@@ -845,10 +845,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
+ int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
+ {
+ while (!pipe->nrbufs) {
+- if (!pipe->writers)
++ if (!atomic_read(&pipe->writers))
+ return 0;
+
+- if (!pipe->waiting_writers && sd->num_spliced)
++ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
+ return 0;
+
+ if (sd->flags & SPLICE_F_NONBLOCK)
+@@ -1181,7 +1181,7 @@ ssize_t splice_direct_to_actor(struct fi
+ * out of the pipe right after the splice_to_pipe(). So set
+ * PIPE_READERS appropriately.
+ */
+- pipe->readers = 1;
++ atomic_set(&pipe->readers, 1);
+
+ current->splice_pipe = pipe;
+ }
+@@ -1733,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_
+ ret = -ERESTARTSYS;
+ break;
+ }
+- if (!pipe->writers)
++ if (!atomic_read(&pipe->writers))
+ break;
+- if (!pipe->waiting_writers) {
++ if (!atomic_read(&pipe->waiting_writers)) {
+ if (flags & SPLICE_F_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+@@ -1767,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_
+ pipe_lock(pipe);
+
+ while (pipe->nrbufs >= pipe->buffers) {
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ ret = -EPIPE;
+ break;
+@@ -1780,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_
+ ret = -ERESTARTSYS;
+ break;
+ }
+- pipe->waiting_writers++;
++ atomic_inc(&pipe->waiting_writers);
+ pipe_wait(pipe);
+- pipe->waiting_writers--;
++ atomic_dec(&pipe->waiting_writers);
+ }
+
+ pipe_unlock(pipe);
+@@ -1818,14 +1818,14 @@ retry:
+ pipe_double_lock(ipipe, opipe);
+
+ do {
+- if (!opipe->readers) {
++ if (!atomic_read(&opipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+ break;
+ }
+
+- if (!ipipe->nrbufs && !ipipe->writers)
++ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
+ break;
+
+ /*
+@@ -1922,7 +1922,7 @@ static int link_pipe(struct pipe_inode_i
+ pipe_double_lock(ipipe, opipe);
+
+ do {
+- if (!opipe->readers) {
++ if (!atomic_read(&opipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+@@ -1967,7 +1967,7 @@ static int link_pipe(struct pipe_inode_i
+ * return EAGAIN if we have the potential of some data in the
+ * future, otherwise just return 0
+ */
+- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
++ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
+ ret = -EAGAIN;
+
+ pipe_unlock(ipipe);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/sysfs/file.c linux-3.4-pax/fs/sysfs/file.c
+--- linux-3.4/fs/sysfs/file.c 2012-03-19 10:39:10.472049254 +0100
++++ linux-3.4-pax/fs/sysfs/file.c 2012-05-21 12:10:11.260048983 +0200
+@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
+
+ struct sysfs_open_dirent {
+ atomic_t refcnt;
+- atomic_t event;
++ atomic_unchecked_t event;
+ wait_queue_head_t poll;
+ struct list_head buffers; /* goes through sysfs_buffer.list */
+ };
+@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
+ if (!sysfs_get_active(attr_sd))
+ return -ENODEV;
+
+- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
++ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
+ count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
+
+ sysfs_put_active(attr_sd);
+@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
+ return -ENOMEM;
+
+ atomic_set(&new_od->refcnt, 0);
+- atomic_set(&new_od->event, 1);
++ atomic_set_unchecked(&new_od->event, 1);
+ init_waitqueue_head(&new_od->poll);
+ INIT_LIST_HEAD(&new_od->buffers);
+ goto retry;
+@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
+
+ sysfs_put_active(attr_sd);
+
+- if (buffer->event != atomic_read(&od->event))
++ if (buffer->event != atomic_read_unchecked(&od->event))
+ goto trigger;
+
+ return DEFAULT_POLLMASK;
+@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
+
+ od = sd->s_attr.open;
+ if (od) {
+- atomic_inc(&od->event);
++ atomic_inc_unchecked(&od->event);
+ wake_up_interruptible(&od->poll);
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/sysfs/symlink.c linux-3.4-pax/fs/sysfs/symlink.c
+--- linux-3.4/fs/sysfs/symlink.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/fs/sysfs/symlink.c 2012-05-21 12:10:11.264048983 +0200
+@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
+
+ static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
+ {
+- char *page = nd_get_link(nd);
++ const char *page = nd_get_link(nd);
+ if (!IS_ERR(page))
+ free_page((unsigned long)page);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/udf/misc.c linux-3.4-pax/fs/udf/misc.c
+--- linux-3.4/fs/udf/misc.c 2012-01-08 19:48:26.387471016 +0100
++++ linux-3.4-pax/fs/udf/misc.c 2012-05-21 12:10:11.268048983 +0200
+@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t id
+
+ u8 udf_tag_checksum(const struct tag *t)
+ {
+- u8 *data = (u8 *)t;
++ const u8 *data = (const u8 *)t;
+ u8 checksum = 0;
+ int i;
+ for (i = 0; i < sizeof(struct tag); ++i)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/xattr_acl.c linux-3.4-pax/fs/xattr_acl.c
+--- linux-3.4/fs/xattr_acl.c 2012-05-21 11:33:36.571929783 +0200
++++ linux-3.4-pax/fs/xattr_acl.c 2012-05-21 12:10:11.272048983 +0200
+@@ -17,8 +17,8 @@
+ struct posix_acl *
+ posix_acl_from_xattr(const void *value, size_t size)
+ {
+- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
+- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
++ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
++ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
+ int count;
+ struct posix_acl *acl;
+ struct posix_acl_entry *acl_e;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/xfs/xfs_bmap.c linux-3.4-pax/fs/xfs/xfs_bmap.c
+--- linux-3.4/fs/xfs/xfs_bmap.c 2012-05-21 11:33:36.623929786 +0200
++++ linux-3.4-pax/fs/xfs/xfs_bmap.c 2012-05-21 12:10:11.276048983 +0200
+@@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
+ int nmap,
+ int ret_nmap);
+ #else
+-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
++#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
+ #endif /* DEBUG */
+
+ STATIC int
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/xfs/xfs_dir2_sf.c linux-3.4-pax/fs/xfs/xfs_dir2_sf.c
+--- linux-3.4/fs/xfs/xfs_dir2_sf.c 2011-10-24 12:48:40.563091008 +0200
++++ linux-3.4-pax/fs/xfs/xfs_dir2_sf.c 2012-05-21 12:10:11.284048984 +0200
+@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
+ }
+
+ ino = xfs_dir2_sfe_get_ino(sfp, sfep);
+- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
++ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
++ char name[sfep->namelen];
++ memcpy(name, sfep->name, sfep->namelen);
++ if (filldir(dirent, name, sfep->namelen,
++ off & 0x7fffffff, ino, DT_UNKNOWN)) {
++ *offset = off & 0x7fffffff;
++ return 0;
++ }
++ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
+ *offset = off & 0x7fffffff;
+ return 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/xfs/xfs_ioctl.c linux-3.4-pax/fs/xfs/xfs_ioctl.c
+--- linux-3.4/fs/xfs/xfs_ioctl.c 2012-05-21 11:33:36.715929791 +0200
++++ linux-3.4-pax/fs/xfs/xfs_ioctl.c 2012-05-21 12:10:11.284048984 +0200
+@@ -128,7 +128,7 @@ xfs_find_handle(
+ }
+
+ error = -EFAULT;
+- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
++ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
+ copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
+ goto out_put;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/fs/xfs/xfs_iops.c linux-3.4-pax/fs/xfs/xfs_iops.c
+--- linux-3.4/fs/xfs/xfs_iops.c 2012-05-21 11:33:36.723929792 +0200
++++ linux-3.4-pax/fs/xfs/xfs_iops.c 2012-05-21 12:10:11.288048984 +0200
+@@ -397,7 +397,7 @@ xfs_vn_put_link(
+ struct nameidata *nd,
+ void *p)
+ {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+
+ if (!IS_ERR(s))
+ kfree(s);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/acpi/acpi_bus.h linux-3.4-pax/include/acpi/acpi_bus.h
+--- linux-3.4/include/acpi/acpi_bus.h 2012-05-21 11:33:36.963929805 +0200
++++ linux-3.4-pax/include/acpi/acpi_bus.h 2012-05-21 12:10:11.292048984 +0200
+@@ -107,7 +107,7 @@ struct acpi_device_ops {
+ acpi_op_bind bind;
+ acpi_op_unbind unbind;
+ acpi_op_notify notify;
+-};
++} __no_const;
+
+ #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/asm-generic/atomic64.h linux-3.4-pax/include/asm-generic/atomic64.h
+--- linux-3.4/include/asm-generic/atomic64.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/asm-generic/atomic64.h 2012-05-21 12:10:11.292048984 +0200
+@@ -16,6 +16,8 @@ typedef struct {
+ long long counter;
+ } atomic64_t;
+
++typedef atomic64_t atomic64_unchecked_t;
++
+ #define ATOMIC64_INIT(i) { (i) }
+
+ extern long long atomic64_read(const atomic64_t *v);
+@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64
+ #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* _ASM_GENERIC_ATOMIC64_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/asm-generic/atomic-long.h linux-3.4-pax/include/asm-generic/atomic-long.h
+--- linux-3.4/include/asm-generic/atomic-long.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/asm-generic/atomic-long.h 2012-05-21 12:10:11.296048985 +0200
+@@ -22,6 +22,12 @@
+
+ typedef atomic64_t atomic_long_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef atomic64_unchecked_t atomic_long_unchecked_t;
++#else
++typedef atomic64_t atomic_long_unchecked_t;
++#endif
++
+ #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+
+ static inline long atomic_long_read(atomic_long_t *l)
+@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
+ return (long)atomic64_read(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ return (long)atomic64_read_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_set(atomic_long_t *l, long i)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
+ atomic64_set(v, i);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ atomic64_set_unchecked(v, i);
++}
++#endif
++
+ static inline void atomic_long_inc(atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
+ atomic64_inc(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ atomic64_inc_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_dec(atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
+ atomic64_dec(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ atomic64_dec_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_add(long i, atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
+ atomic64_add(i, v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ atomic64_add_unchecked(i, v);
++}
++#endif
++
+ static inline void atomic_long_sub(long i, atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
+ atomic64_sub(i, v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ atomic64_sub_unchecked(i, v);
++}
++#endif
++
+ static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
+ return (long)atomic64_inc_return(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ return (long)atomic64_inc_return_unchecked(v);
++}
++#endif
++
+ static inline long atomic_long_dec_return(atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
+
+ typedef atomic_t atomic_long_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef atomic_unchecked_t atomic_long_unchecked_t;
++#else
++typedef atomic_t atomic_long_unchecked_t;
++#endif
++
+ #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
+ static inline long atomic_long_read(atomic_long_t *l)
+ {
+@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
+ return (long)atomic_read(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ return (long)atomic_read_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_set(atomic_long_t *l, long i)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
+ atomic_set(v, i);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ atomic_set_unchecked(v, i);
++}
++#endif
++
+ static inline void atomic_long_inc(atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
+ atomic_inc(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ atomic_inc_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_dec(atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
+ atomic_dec(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ atomic_dec_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_add(long i, atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
+ atomic_add(i, v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ atomic_add_unchecked(i, v);
++}
++#endif
++
+ static inline void atomic_long_sub(long i, atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
+ atomic_sub(i, v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ atomic_sub_unchecked(i, v);
++}
++#endif
++
+ static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
+ return (long)atomic_inc_return(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ return (long)atomic_inc_return_unchecked(v);
++}
++#endif
++
+ static inline long atomic_long_dec_return(atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
+
+ #endif /* BITS_PER_LONG == 64 */
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void pax_refcount_needs_these_functions(void)
++{
++ atomic_read_unchecked((atomic_unchecked_t *)NULL);
++ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
++ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
++ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
++ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
++ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
++ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
++ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
++ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
++ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
++ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
++
++ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
++ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
++ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
++ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
++ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
++ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
++ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
++}
++#else
++#define atomic_read_unchecked(v) atomic_read(v)
++#define atomic_set_unchecked(v, i) atomic_set((v), (i))
++#define atomic_add_unchecked(i, v) atomic_add((i), (v))
++#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
++#define atomic_inc_unchecked(v) atomic_inc(v)
++#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
++#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
++#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
++#define atomic_dec_unchecked(v) atomic_dec(v)
++#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
++#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
++
++#define atomic_long_read_unchecked(v) atomic_long_read(v)
++#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
++#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
++#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
++#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
++#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
++#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
++#endif
++
+ #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/asm-generic/cache.h linux-3.4-pax/include/asm-generic/cache.h
+--- linux-3.4/include/asm-generic/cache.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/asm-generic/cache.h 2012-05-21 12:10:11.296048985 +0200
+@@ -6,7 +6,7 @@
+ * cache lines need to provide their own cache.h.
+ */
+
+-#define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_SHIFT 5UL
++#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
+
+ #endif /* __ASM_GENERIC_CACHE_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/asm-generic/emergency-restart.h linux-3.4-pax/include/asm-generic/emergency-restart.h
+--- linux-3.4/include/asm-generic/emergency-restart.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/asm-generic/emergency-restart.h 2012-05-21 12:10:11.300048985 +0200
+@@ -1,7 +1,7 @@
+ #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
+ #define _ASM_GENERIC_EMERGENCY_RESTART_H
+
+-static inline void machine_emergency_restart(void)
++static inline __noreturn void machine_emergency_restart(void)
+ {
+ machine_restart(NULL);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/asm-generic/kmap_types.h linux-3.4-pax/include/asm-generic/kmap_types.h
+--- linux-3.4/include/asm-generic/kmap_types.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/asm-generic/kmap_types.h 2012-05-21 12:10:11.300048985 +0200
+@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
+ KMAP_D(17) KM_NMI,
+ KMAP_D(18) KM_NMI_PTE,
+ KMAP_D(19) KM_KDB,
++KMAP_D(20) KM_CLEARPAGE,
+ /*
+ * Remember to update debug_kmap_atomic() when adding new kmap types!
+ */
+-KMAP_D(20) KM_TYPE_NR
++KMAP_D(21) KM_TYPE_NR
+ };
+
+ #undef KMAP_D
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/asm-generic/local.h linux-3.4-pax/include/asm-generic/local.h
+--- linux-3.4/include/asm-generic/local.h 2011-10-24 12:48:40.939091028 +0200
++++ linux-3.4-pax/include/asm-generic/local.h 2012-05-21 12:10:11.300048985 +0200
+@@ -39,6 +39,7 @@ typedef struct
+ #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
+ #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
+ #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
++#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
+
+ #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
+ #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/asm-generic/pgtable.h linux-3.4-pax/include/asm-generic/pgtable.h
+--- linux-3.4/include/asm-generic/pgtable.h 2012-05-21 11:33:37.011929806 +0200
++++ linux-3.4-pax/include/asm-generic/pgtable.h 2012-05-21 12:10:11.304048985 +0200
+@@ -503,6 +503,14 @@ static inline int pmd_trans_unstable(pmd
+ #endif
+ }
+
++#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
++static inline unsigned long pax_open_kernel(void) { return 0; }
++#endif
++
++#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ #endif /* CONFIG_MMU */
+
+ #endif /* !__ASSEMBLY__ */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/asm-generic/pgtable-nopmd.h linux-3.4-pax/include/asm-generic/pgtable-nopmd.h
+--- linux-3.4/include/asm-generic/pgtable-nopmd.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/asm-generic/pgtable-nopmd.h 2012-05-21 12:10:11.304048985 +0200
+@@ -1,14 +1,19 @@
+ #ifndef _PGTABLE_NOPMD_H
+ #define _PGTABLE_NOPMD_H
+
+-#ifndef __ASSEMBLY__
+-
+ #include <asm-generic/pgtable-nopud.h>
+
+-struct mm_struct;
+-
+ #define __PAGETABLE_PMD_FOLDED
+
++#define PMD_SHIFT PUD_SHIFT
++#define PTRS_PER_PMD 1
++#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
++#define PMD_MASK (~(PMD_SIZE-1))
++
++#ifndef __ASSEMBLY__
++
++struct mm_struct;
++
+ /*
+ * Having the pmd type consist of a pud gets the size right, and allows
+ * us to conceptually access the pud entry that this pmd is folded into
+@@ -16,11 +21,6 @@ struct mm_struct;
+ */
+ typedef struct { pud_t pud; } pmd_t;
+
+-#define PMD_SHIFT PUD_SHIFT
+-#define PTRS_PER_PMD 1
+-#define PMD_SIZE (1UL << PMD_SHIFT)
+-#define PMD_MASK (~(PMD_SIZE-1))
+-
+ /*
+ * The "pud_xxx()" functions here are trivial for a folded two-level
+ * setup: the pmd is never bad, and a pmd always exists (as it's folded
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/asm-generic/pgtable-nopud.h linux-3.4-pax/include/asm-generic/pgtable-nopud.h
+--- linux-3.4/include/asm-generic/pgtable-nopud.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/asm-generic/pgtable-nopud.h 2012-05-25 08:41:25.246798329 +0200
+@@ -1,10 +1,15 @@
+ #ifndef _PGTABLE_NOPUD_H
+ #define _PGTABLE_NOPUD_H
+
+-#ifndef __ASSEMBLY__
+-
+ #define __PAGETABLE_PUD_FOLDED
+
++#define PUD_SHIFT PGDIR_SHIFT
++#define PTRS_PER_PUD 1
++#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
++#define PUD_MASK (~(PUD_SIZE-1))
++
++#ifndef __ASSEMBLY__
++
+ /*
+ * Having the pud type consist of a pgd gets the size right, and allows
+ * us to conceptually access the pgd entry that this pud is folded into
+@@ -12,11 +17,6 @@
+ */
+ typedef struct { pgd_t pgd; } pud_t;
+
+-#define PUD_SHIFT PGDIR_SHIFT
+-#define PTRS_PER_PUD 1
+-#define PUD_SIZE (1UL << PUD_SHIFT)
+-#define PUD_MASK (~(PUD_SIZE-1))
+-
+ /*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pud is never bad, and a pud always exists (as it's folded
+@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd)
+ #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
+
+ #define pgd_populate(mm, pgd, pud) do { } while (0)
++#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
+ /*
+ * (puds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/asm-generic/vmlinux.lds.h linux-3.4-pax/include/asm-generic/vmlinux.lds.h
+--- linux-3.4/include/asm-generic/vmlinux.lds.h 2012-05-21 11:33:37.063929810 +0200
++++ linux-3.4-pax/include/asm-generic/vmlinux.lds.h 2012-05-21 12:10:11.312048985 +0200
+@@ -218,6 +218,7 @@
+ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_rodata) = .; \
+ *(.rodata) *(.rodata.*) \
++ *(.data..read_only) \
+ *(__vermagic) /* Kernel version magic */ \
+ . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
+@@ -716,17 +717,18 @@
+ * section in the linker script will go there too. @phdr should have
+ * a leading colon.
+ *
+- * Note that this macros defines __per_cpu_load as an absolute symbol.
++ * Note that this macros defines per_cpu_load as an absolute symbol.
+ * If there is no need to put the percpu section at a predetermined
+ * address, use PERCPU_SECTION.
+ */
+ #define PERCPU_VADDR(cacheline, vaddr, phdr) \
+- VMLINUX_SYMBOL(__per_cpu_load) = .; \
+- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
++ per_cpu_load = .; \
++ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
+ - LOAD_OFFSET) { \
++ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
+ PERCPU_INPUT(cacheline) \
+ } phdr \
+- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
++ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
+
+ /**
+ * PERCPU_SECTION - define output section for percpu area, simple version
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/drm/drm_crtc_helper.h linux-3.4-pax/include/drm/drm_crtc_helper.h
+--- linux-3.4/include/drm/drm_crtc_helper.h 2012-03-19 10:39:10.800049513 +0100
++++ linux-3.4-pax/include/drm/drm_crtc_helper.h 2012-05-21 12:10:11.316048986 +0200
+@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
+
+ /* disable crtc when not in use - more explicit than dpms off */
+ void (*disable)(struct drm_crtc *crtc);
+-};
++} __no_const;
+
+ struct drm_encoder_helper_funcs {
+ void (*dpms)(struct drm_encoder *encoder, int mode);
+@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
+ struct drm_connector *connector);
+ /* disable encoder when not in use - more explicit than dpms off */
+ void (*disable)(struct drm_encoder *encoder);
+-};
++} __no_const;
+
+ struct drm_connector_helper_funcs {
+ int (*get_modes)(struct drm_connector *connector);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/drm/drmP.h linux-3.4-pax/include/drm/drmP.h
+--- linux-3.4/include/drm/drmP.h 2012-05-21 11:33:37.075929811 +0200
++++ linux-3.4-pax/include/drm/drmP.h 2012-05-21 12:10:11.316048986 +0200
+@@ -72,6 +72,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/poll.h>
+ #include <asm/pgalloc.h>
++#include <asm/local.h>
+ #include "drm.h"
+
+ #include <linux/idr.h>
+@@ -1074,7 +1075,7 @@ struct drm_device {
+
+ /** \name Usage Counters */
+ /*@{ */
+- int open_count; /**< Outstanding files open */
++ local_t open_count; /**< Outstanding files open */
+ atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
+ atomic_t vma_count; /**< Outstanding vma areas open */
+ int buf_use; /**< Buffers in use -- cannot alloc */
+@@ -1085,7 +1086,7 @@ struct drm_device {
+ /*@{ */
+ unsigned long counters;
+ enum drm_stat_type types[15];
+- atomic_t counts[15];
++ atomic_unchecked_t counts[15];
+ /*@} */
+
+ struct list_head filelist;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/drm/ttm/ttm_memory.h linux-3.4-pax/include/drm/ttm/ttm_memory.h
+--- linux-3.4/include/drm/ttm/ttm_memory.h 2012-05-21 11:33:37.123929813 +0200
++++ linux-3.4-pax/include/drm/ttm/ttm_memory.h 2012-05-21 12:10:11.320048986 +0200
+@@ -48,7 +48,7 @@
+
+ struct ttm_mem_shrink {
+ int (*do_shrink) (struct ttm_mem_shrink *);
+-};
++} __no_const;
+
+ /**
+ * struct ttm_mem_global - Global memory accounting structure.
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/a.out.h linux-3.4-pax/include/linux/a.out.h
+--- linux-3.4/include/linux/a.out.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/linux/a.out.h 2012-05-21 12:10:11.324048986 +0200
+@@ -39,6 +39,14 @@ enum machine_type {
+ M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
+ };
+
++/* Constants for the N_FLAGS field */
++#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
++#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
++#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
++#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
++/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
++#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
++
+ #if !defined (N_MAGIC)
+ #define N_MAGIC(exec) ((exec).a_info & 0xffff)
+ #endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/atmdev.h linux-3.4-pax/include/linux/atmdev.h
+--- linux-3.4/include/linux/atmdev.h 2012-05-21 11:33:37.191929816 +0200
++++ linux-3.4-pax/include/linux/atmdev.h 2012-05-21 12:10:11.324048986 +0200
+@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
+ #endif
+
+ struct k_atm_aal_stats {
+-#define __HANDLE_ITEM(i) atomic_t i
++#define __HANDLE_ITEM(i) atomic_unchecked_t i
+ __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ };
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/binfmts.h linux-3.4-pax/include/linux/binfmts.h
+--- linux-3.4/include/linux/binfmts.h 2012-05-21 11:33:37.223929819 +0200
++++ linux-3.4-pax/include/linux/binfmts.h 2012-05-21 12:10:11.328048986 +0200
+@@ -89,6 +89,7 @@ struct linux_binfmt {
+ int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
+ int (*load_shlib)(struct file *);
+ int (*core_dump)(struct coredump_params *cprm);
++ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
+ unsigned long min_coredump; /* minimal dump size */
+ };
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/blkdev.h linux-3.4-pax/include/linux/blkdev.h
+--- linux-3.4/include/linux/blkdev.h 2012-05-21 11:33:37.235929819 +0200
++++ linux-3.4-pax/include/linux/blkdev.h 2012-05-21 12:10:11.328048986 +0200
+@@ -1376,7 +1376,7 @@ struct block_device_operations {
+ /* this callback is with swap_lock and sometimes page table lock held */
+ void (*swap_slot_free_notify) (struct block_device *, unsigned long);
+ struct module *owner;
+-};
++} __do_const;
+
+ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
+ unsigned long);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/blktrace_api.h linux-3.4-pax/include/linux/blktrace_api.h
+--- linux-3.4/include/linux/blktrace_api.h 2012-01-08 19:48:26.739470998 +0100
++++ linux-3.4-pax/include/linux/blktrace_api.h 2012-05-21 12:10:11.364048988 +0200
+@@ -162,7 +162,7 @@ struct blk_trace {
+ struct dentry *dir;
+ struct dentry *dropped_file;
+ struct dentry *msg_file;
+- atomic_t dropped;
++ atomic_unchecked_t dropped;
+ };
+
+ extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/byteorder/little_endian.h linux-3.4-pax/include/linux/byteorder/little_endian.h
+--- linux-3.4/include/linux/byteorder/little_endian.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/linux/byteorder/little_endian.h 2012-05-21 12:10:11.364048988 +0200
+@@ -42,51 +42,51 @@
+
+ static inline __le64 __cpu_to_le64p(const __u64 *p)
+ {
+- return (__force __le64)*p;
++ return (__force const __le64)*p;
+ }
+ static inline __u64 __le64_to_cpup(const __le64 *p)
+ {
+- return (__force __u64)*p;
++ return (__force const __u64)*p;
+ }
+ static inline __le32 __cpu_to_le32p(const __u32 *p)
+ {
+- return (__force __le32)*p;
++ return (__force const __le32)*p;
+ }
+ static inline __u32 __le32_to_cpup(const __le32 *p)
+ {
+- return (__force __u32)*p;
++ return (__force const __u32)*p;
+ }
+ static inline __le16 __cpu_to_le16p(const __u16 *p)
+ {
+- return (__force __le16)*p;
++ return (__force const __le16)*p;
+ }
+ static inline __u16 __le16_to_cpup(const __le16 *p)
+ {
+- return (__force __u16)*p;
++ return (__force const __u16)*p;
+ }
+ static inline __be64 __cpu_to_be64p(const __u64 *p)
+ {
+- return (__force __be64)__swab64p(p);
++ return (__force const __be64)__swab64p(p);
+ }
+ static inline __u64 __be64_to_cpup(const __be64 *p)
+ {
+- return __swab64p((__u64 *)p);
++ return __swab64p((const __u64 *)p);
+ }
+ static inline __be32 __cpu_to_be32p(const __u32 *p)
+ {
+- return (__force __be32)__swab32p(p);
++ return (__force const __be32)__swab32p(p);
+ }
+ static inline __u32 __be32_to_cpup(const __be32 *p)
+ {
+- return __swab32p((__u32 *)p);
++ return __swab32p((const __u32 *)p);
+ }
+ static inline __be16 __cpu_to_be16p(const __u16 *p)
+ {
+- return (__force __be16)__swab16p(p);
++ return (__force const __be16)__swab16p(p);
+ }
+ static inline __u16 __be16_to_cpup(const __be16 *p)
+ {
+- return __swab16p((__u16 *)p);
++ return __swab16p((const __u16 *)p);
+ }
+ #define __cpu_to_le64s(x) do { (void)(x); } while (0)
+ #define __le64_to_cpus(x) do { (void)(x); } while (0)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/cache.h linux-3.4-pax/include/linux/cache.h
+--- linux-3.4/include/linux/cache.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/linux/cache.h 2012-05-21 12:10:11.368048988 +0200
+@@ -16,6 +16,10 @@
+ #define __read_mostly
+ #endif
+
++#ifndef __read_only
++#define __read_only __read_mostly
++#endif
++
+ #ifndef ____cacheline_aligned
+ #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
+ #endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/cleancache.h linux-3.4-pax/include/linux/cleancache.h
+--- linux-3.4/include/linux/cleancache.h 2012-05-21 11:33:37.275929822 +0200
++++ linux-3.4-pax/include/linux/cleancache.h 2012-05-21 12:10:11.368048988 +0200
+@@ -31,7 +31,7 @@ struct cleancache_ops {
+ void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
+ void (*invalidate_inode)(int, struct cleancache_filekey);
+ void (*invalidate_fs)(int);
+-};
++} __no_const;
+
+ extern struct cleancache_ops
+ cleancache_register_ops(struct cleancache_ops *ops);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/compiler-gcc4.h linux-3.4-pax/include/linux/compiler-gcc4.h
+--- linux-3.4/include/linux/compiler-gcc4.h 2012-03-19 10:39:10.852049231 +0100
++++ linux-3.4-pax/include/linux/compiler-gcc4.h 2012-05-22 15:28:31.419384614 +0200
+@@ -32,6 +32,16 @@
+ #define __linktime_error(message) __attribute__((__error__(message)))
+
+ #if __GNUC_MINOR__ >= 5
++
++#ifdef CONSTIFY_PLUGIN
++#define __no_const __attribute__((no_const))
++#define __do_const __attribute__((do_const))
++#endif
++
++#ifdef SIZE_OVERFLOW_PLUGIN
++#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
++#endif
++
+ /*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+@@ -47,6 +57,11 @@
+ #define __noclone __attribute__((__noclone__))
+
+ #endif
++
++#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
++#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
++#define __bos0(ptr) __bos((ptr), 0)
++#define __bos1(ptr) __bos((ptr), 1)
+ #endif
+
+ #if __GNUC_MINOR__ > 0
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/compiler.h linux-3.4-pax/include/linux/compiler.h
+--- linux-3.4/include/linux/compiler.h 2012-05-21 11:33:37.311929823 +0200
++++ linux-3.4-pax/include/linux/compiler.h 2012-05-22 15:28:31.483384610 +0200
+@@ -5,31 +5,62 @@
+
+ #ifdef __CHECKER__
+ # define __user __attribute__((noderef, address_space(1)))
++# define __force_user __force __user
+ # define __kernel __attribute__((address_space(0)))
++# define __force_kernel __force __kernel
+ # define __safe __attribute__((safe))
+ # define __force __attribute__((force))
+ # define __nocast __attribute__((nocast))
+ # define __iomem __attribute__((noderef, address_space(2)))
++# define __force_iomem __force __iomem
+ # define __acquires(x) __attribute__((context(x,0,1)))
+ # define __releases(x) __attribute__((context(x,1,0)))
+ # define __acquire(x) __context__(x,1)
+ # define __release(x) __context__(x,-1)
+ # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
+ # define __percpu __attribute__((noderef, address_space(3)))
++# define __force_percpu __force __percpu
+ #ifdef CONFIG_SPARSE_RCU_POINTER
+ # define __rcu __attribute__((noderef, address_space(4)))
++# define __force_rcu __force __rcu
+ #else
+ # define __rcu
++# define __force_rcu
+ #endif
+ extern void __chk_user_ptr(const volatile void __user *);
+ extern void __chk_io_ptr(const volatile void __iomem *);
++#elif defined(CHECKER_PLUGIN)
++//# define __user
++//# define __force_user
++//# define __kernel
++//# define __force_kernel
++# define __safe
++# define __force
++# define __nocast
++# define __iomem
++# define __force_iomem
++# define __chk_user_ptr(x) (void)0
++# define __chk_io_ptr(x) (void)0
++# define __builtin_warning(x, y...) (1)
++# define __acquires(x)
++# define __releases(x)
++# define __acquire(x) (void)0
++# define __release(x) (void)0
++# define __cond_lock(x,c) (c)
++# define __percpu
++# define __force_percpu
++# define __rcu
++# define __force_rcu
+ #else
+ # define __user
++# define __force_user
+ # define __kernel
++# define __force_kernel
+ # define __safe
+ # define __force
+ # define __nocast
+ # define __iomem
++# define __force_iomem
+ # define __chk_user_ptr(x) (void)0
+ # define __chk_io_ptr(x) (void)0
+ # define __builtin_warning(x, y...) (1)
+@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile
+ # define __release(x) (void)0
+ # define __cond_lock(x,c) (c)
+ # define __percpu
++# define __force_percpu
+ # define __rcu
++# define __force_rcu
+ #endif
+
+ #ifdef __KERNEL__
+@@ -264,6 +297,18 @@ void ftrace_likely_update(struct ftrace_
+ # define __attribute_const__ /* unimplemented */
+ #endif
+
++#ifndef __no_const
++# define __no_const
++#endif
++
++#ifndef __do_const
++# define __do_const
++#endif
++
++#ifndef __size_overflow
++# define __size_overflow(...)
++#endif
++
+ /*
+ * Tell gcc if a function is cold. The compiler will assume any path
+ * directly leading to the call is unlikely.
+@@ -273,6 +318,22 @@ void ftrace_likely_update(struct ftrace_
+ #define __cold
+ #endif
+
++#ifndef __alloc_size
++#define __alloc_size(...)
++#endif
++
++#ifndef __bos
++#define __bos(ptr, arg)
++#endif
++
++#ifndef __bos0
++#define __bos0(ptr)
++#endif
++
++#ifndef __bos1
++#define __bos1(ptr)
++#endif
++
+ /* Simple shorthand for a section definition */
+ #ifndef __section
+ # define __section(S) __attribute__ ((__section__(#S)))
+@@ -308,6 +369,7 @@ void ftrace_likely_update(struct ftrace_
+ * use is to mediate communication between process-level code and irq/NMI
+ * handlers, all running on the same CPU.
+ */
+-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
++#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
++#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
+
+ #endif /* __LINUX_COMPILER_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/crypto.h linux-3.4-pax/include/linux/crypto.h
+--- linux-3.4/include/linux/crypto.h 2012-05-21 11:33:37.339929825 +0200
++++ linux-3.4-pax/include/linux/crypto.h 2012-05-21 12:10:11.376048989 +0200
+@@ -373,7 +373,7 @@ struct cipher_tfm {
+ const u8 *key, unsigned int keylen);
+ void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+ void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+-};
++} __no_const;
+
+ struct hash_tfm {
+ int (*init)(struct hash_desc *desc);
+@@ -394,13 +394,13 @@ struct compress_tfm {
+ int (*cot_decompress)(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
+-};
++} __no_const;
+
+ struct rng_tfm {
+ int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
+ unsigned int dlen);
+ int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
+-};
++} __no_const;
+
+ #define crt_ablkcipher crt_u.ablkcipher
+ #define crt_aead crt_u.aead
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/decompress/mm.h linux-3.4-pax/include/linux/decompress/mm.h
+--- linux-3.4/include/linux/decompress/mm.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/linux/decompress/mm.h 2012-05-21 12:10:11.380048989 +0200
+@@ -77,7 +77,7 @@ static void free(void *where)
+ * warnings when not needed (indeed large_malloc / large_free are not
+ * needed by inflate */
+
+-#define malloc(a) kmalloc(a, GFP_KERNEL)
++#define malloc(a) kmalloc((a), GFP_KERNEL)
+ #define free(a) kfree(a)
+
+ #define large_malloc(a) vmalloc(a)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/dma-mapping.h linux-3.4-pax/include/linux/dma-mapping.h
+--- linux-3.4/include/linux/dma-mapping.h 2012-05-21 11:33:37.383929828 +0200
++++ linux-3.4-pax/include/linux/dma-mapping.h 2012-05-21 12:10:11.380048989 +0200
+@@ -51,7 +51,7 @@ struct dma_map_ops {
+ u64 (*get_required_mask)(struct device *dev);
+ #endif
+ int is_phys;
+-};
++} __do_const;
+
+ #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/efi.h linux-3.4-pax/include/linux/efi.h
+--- linux-3.4/include/linux/efi.h 2012-05-21 11:33:37.395929829 +0200
++++ linux-3.4-pax/include/linux/efi.h 2012-05-21 12:10:11.384048989 +0200
+@@ -635,7 +635,7 @@ struct efivar_operations {
+ efi_get_variable_t *get_variable;
+ efi_get_next_variable_t *get_next_variable;
+ efi_set_variable_t *set_variable;
+-};
++} __no_const;
+
+ struct efivars {
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/elf.h linux-3.4-pax/include/linux/elf.h
+--- linux-3.4/include/linux/elf.h 2012-03-19 10:39:10.892049236 +0100
++++ linux-3.4-pax/include/linux/elf.h 2012-05-21 12:10:11.388048990 +0200
+@@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
+ #define PT_GNU_EH_FRAME 0x6474e550
+
+ #define PT_GNU_STACK (PT_LOOS + 0x474e551)
++#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
++
++#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
++
++/* Constants for the e_flags field */
++#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
++#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
++#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
++#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
++/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
++#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
+
+ /*
+ * Extended Numbering
+@@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
+ #define DT_DEBUG 21
+ #define DT_TEXTREL 22
+ #define DT_JMPREL 23
++#define DT_FLAGS 30
++ #define DF_TEXTREL 0x00000004
+ #define DT_ENCODING 32
+ #define OLD_DT_LOOS 0x60000000
+ #define DT_LOOS 0x6000000d
+@@ -243,6 +256,19 @@ typedef struct elf64_hdr {
+ #define PF_W 0x2
+ #define PF_X 0x1
+
++#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
++#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
++#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
++#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
++#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
++#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
++/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
++/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
++#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
++#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
++#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
++#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
++
+ typedef struct elf32_phdr{
+ Elf32_Word p_type;
+ Elf32_Off p_offset;
+@@ -335,6 +361,8 @@ typedef struct elf64_shdr {
+ #define EI_OSABI 7
+ #define EI_PAD 8
+
++#define EI_PAX 14
++
+ #define ELFMAG0 0x7f /* EI_MAG */
+ #define ELFMAG1 'E'
+ #define ELFMAG2 'L'
+@@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
+ #define elf_note elf32_note
+ #define elf_addr_t Elf32_Off
+ #define Elf_Half Elf32_Half
++#define elf_dyn Elf32_Dyn
+
+ #else
+
+@@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
+ #define elf_note elf64_note
+ #define elf_addr_t Elf64_Off
+ #define Elf_Half Elf64_Half
++#define elf_dyn Elf64_Dyn
+
+ #endif
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/filter.h linux-3.4-pax/include/linux/filter.h
+--- linux-3.4/include/linux/filter.h 2012-01-08 19:48:26.931470987 +0100
++++ linux-3.4-pax/include/linux/filter.h 2012-05-21 12:10:11.388048990 +0200
+@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_A
+
+ struct sk_buff;
+ struct sock;
++struct bpf_jit_work;
+
+ struct sk_filter
+ {
+@@ -141,6 +142,9 @@ struct sk_filter
+ unsigned int len; /* Number of filter blocks */
+ unsigned int (*bpf_func)(const struct sk_buff *skb,
+ const struct sock_filter *filter);
++#ifdef CONFIG_BPF_JIT
++ struct bpf_jit_work *work;
++#endif
+ struct rcu_head rcu;
+ struct sock_filter insns[0];
+ };
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/firewire.h linux-3.4-pax/include/linux/firewire.h
+--- linux-3.4/include/linux/firewire.h 2012-05-21 11:33:37.443929831 +0200
++++ linux-3.4-pax/include/linux/firewire.h 2012-05-21 12:10:11.392048990 +0200
+@@ -413,7 +413,7 @@ struct fw_iso_context {
+ union {
+ fw_iso_callback_t sc;
+ fw_iso_mc_callback_t mc;
+- } callback;
++ } __no_const callback;
+ void *callback_data;
+ };
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/fscache-cache.h linux-3.4-pax/include/linux/fscache-cache.h
+--- linux-3.4/include/linux/fscache-cache.h 2012-01-08 19:48:26.947470986 +0100
++++ linux-3.4-pax/include/linux/fscache-cache.h 2012-05-21 12:10:11.392048990 +0200
+@@ -102,7 +102,7 @@ struct fscache_operation {
+ fscache_operation_release_t release;
+ };
+
+-extern atomic_t fscache_op_debug_id;
++extern atomic_unchecked_t fscache_op_debug_id;
+ extern void fscache_op_work_func(struct work_struct *work);
+
+ extern void fscache_enqueue_operation(struct fscache_operation *);
+@@ -122,7 +122,7 @@ static inline void fscache_operation_ini
+ {
+ INIT_WORK(&op->work, fscache_op_work_func);
+ atomic_set(&op->usage, 1);
+- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
++ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
+ op->processor = processor;
+ op->release = release;
+ INIT_LIST_HEAD(&op->pend_link);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/fs.h linux-3.4-pax/include/linux/fs.h
+--- linux-3.4/include/linux/fs.h 2012-05-21 11:33:37.447929831 +0200
++++ linux-3.4-pax/include/linux/fs.h 2012-05-21 12:10:11.396048990 +0200
+@@ -1634,7 +1634,8 @@ struct file_operations {
+ int (*setlease)(struct file *, long, struct file_lock **);
+ long (*fallocate)(struct file *file, int mode, loff_t offset,
+ loff_t len);
+-};
++} __do_const;
++typedef struct file_operations __no_const file_operations_no_const;
+
+ struct inode_operations {
+ struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/fsnotify_backend.h linux-3.4-pax/include/linux/fsnotify_backend.h
+--- linux-3.4/include/linux/fsnotify_backend.h 2011-10-24 12:48:41.163090979 +0200
++++ linux-3.4-pax/include/linux/fsnotify_backend.h 2012-05-21 12:10:11.400048990 +0200
+@@ -105,6 +105,7 @@ struct fsnotify_ops {
+ void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
+ void (*free_event_priv)(struct fsnotify_event_private_data *priv);
+ };
++typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
+
+ /*
+ * A group is a "thing" that wants to receive notification about filesystem
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/fsnotify.h linux-3.4-pax/include/linux/fsnotify.h
+--- linux-3.4/include/linux/fsnotify.h 2012-05-21 11:33:37.455929831 +0200
++++ linux-3.4-pax/include/linux/fsnotify.h 2012-05-21 12:10:11.404048990 +0200
+@@ -315,7 +315,7 @@ static inline void fsnotify_change(struc
+ */
+ static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
+ {
+- return kstrdup(name, GFP_KERNEL);
++ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
+ }
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/fs_struct.h linux-3.4-pax/include/linux/fs_struct.h
+--- linux-3.4/include/linux/fs_struct.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/linux/fs_struct.h 2012-05-21 12:10:11.404048990 +0200
+@@ -6,7 +6,7 @@
+ #include <linux/seqlock.h>
+
+ struct fs_struct {
+- int users;
++ atomic_t users;
+ spinlock_t lock;
+ seqcount_t seq;
+ int umask;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/ftrace_event.h linux-3.4-pax/include/linux/ftrace_event.h
+--- linux-3.4/include/linux/ftrace_event.h 2012-05-21 11:33:37.459929831 +0200
++++ linux-3.4-pax/include/linux/ftrace_event.h 2012-05-21 12:10:11.408048991 +0200
+@@ -97,7 +97,7 @@ struct trace_event_functions {
+ trace_print_func raw;
+ trace_print_func hex;
+ trace_print_func binary;
+-};
++} __no_const;
+
+ struct trace_event {
+ struct hlist_node node;
+@@ -263,7 +263,7 @@ extern int trace_define_field(struct ftr
+ extern int trace_add_event_call(struct ftrace_event_call *call);
+ extern void trace_remove_event_call(struct ftrace_event_call *call);
+
+-#define is_signed_type(type) (((type)(-1)) < 0)
++#define is_signed_type(type) (((type)(-1)) < (type)1)
+
+ int trace_set_clr_event(const char *system, const char *event, int set);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/genhd.h linux-3.4-pax/include/linux/genhd.h
+--- linux-3.4/include/linux/genhd.h 2012-05-21 11:33:37.467929832 +0200
++++ linux-3.4-pax/include/linux/genhd.h 2012-05-21 12:10:11.440048992 +0200
+@@ -185,7 +185,7 @@ struct gendisk {
+ struct kobject *slave_dir;
+
+ struct timer_rand_state *random;
+- atomic_t sync_io; /* RAID */
++ atomic_unchecked_t sync_io; /* RAID */
+ struct disk_events *ev;
+ #ifdef CONFIG_BLK_DEV_INTEGRITY
+ struct blk_integrity *integrity;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/hid.h linux-3.4-pax/include/linux/hid.h
+--- linux-3.4/include/linux/hid.h 2012-03-19 10:39:10.960049233 +0100
++++ linux-3.4-pax/include/linux/hid.h 2012-05-21 12:10:11.440048992 +0200
+@@ -696,7 +696,7 @@ struct hid_ll_driver {
+ unsigned int code, int value);
+
+ int (*parse)(struct hid_device *hdev);
+-};
++} __no_const;
+
+ #define PM_HINT_FULLON 1<<5
+ #define PM_HINT_NORMAL 1<<1
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/highmem.h linux-3.4-pax/include/linux/highmem.h
+--- linux-3.4/include/linux/highmem.h 2012-05-21 11:33:37.519929835 +0200
++++ linux-3.4-pax/include/linux/highmem.h 2012-05-21 12:10:11.444048993 +0200
+@@ -221,6 +221,18 @@ static inline void clear_highpage(struct
+ kunmap_atomic(kaddr);
+ }
+
++static inline void sanitize_highpage(struct page *page)
++{
++ void *kaddr;
++ unsigned long flags;
++
++ local_irq_save(flags);
++ kaddr = kmap_atomic(page);
++ clear_page(kaddr);
++ kunmap_atomic(kaddr);
++ local_irq_restore(flags);
++}
++
+ static inline void zero_user_segments(struct page *page,
+ unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/i2c.h linux-3.4-pax/include/linux/i2c.h
+--- linux-3.4/include/linux/i2c.h 2012-05-21 11:33:37.567929837 +0200
++++ linux-3.4-pax/include/linux/i2c.h 2012-05-21 12:10:11.448048993 +0200
+@@ -365,6 +365,7 @@ struct i2c_algorithm {
+ /* To determine what the adapter supports */
+ u32 (*functionality) (struct i2c_adapter *);
+ };
++typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
+
+ /*
+ * i2c_adapter is the structure used to identify a physical i2c bus along
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/i2o.h linux-3.4-pax/include/linux/i2o.h
+--- linux-3.4/include/linux/i2o.h 2012-05-21 11:33:37.579929838 +0200
++++ linux-3.4-pax/include/linux/i2o.h 2012-05-21 12:10:11.448048993 +0200
+@@ -565,7 +565,7 @@ struct i2o_controller {
+ struct i2o_device *exec; /* Executive */
+ #if BITS_PER_LONG == 64
+ spinlock_t context_list_lock; /* lock for context_list */
+- atomic_t context_list_counter; /* needed for unique contexts */
++ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
+ struct list_head context_list; /* list of context id's
+ and pointers */
+ #endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/if_team.h linux-3.4-pax/include/linux/if_team.h
+--- linux-3.4/include/linux/if_team.h 2012-03-19 10:39:10.972049232 +0100
++++ linux-3.4-pax/include/linux/if_team.h 2012-05-21 12:10:11.452048993 +0200
+@@ -64,6 +64,7 @@ struct team_mode_ops {
+ void (*port_leave)(struct team *team, struct team_port *port);
+ void (*port_change_mac)(struct team *team, struct team_port *port);
+ };
++typedef struct team_mode_ops __no_const team_mode_ops_no_const;
+
+ enum team_option_type {
+ TEAM_OPTION_TYPE_U32,
+@@ -112,7 +113,7 @@ struct team {
+ struct list_head option_list;
+
+ const struct team_mode *mode;
+- struct team_mode_ops ops;
++ team_mode_ops_no_const ops;
+ long mode_priv[TEAM_MODE_PRIV_LONGS];
+ };
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/init.h linux-3.4-pax/include/linux/init.h
+--- linux-3.4/include/linux/init.h 2012-03-19 10:39:10.976049229 +0100
++++ linux-3.4-pax/include/linux/init.h 2012-05-21 12:10:11.456048993 +0200
+@@ -294,13 +294,13 @@ void __init parse_early_options(char *cm
+
+ /* Each module must use one module_init(). */
+ #define module_init(initfn) \
+- static inline initcall_t __inittest(void) \
++ static inline __used initcall_t __inittest(void) \
+ { return initfn; } \
+ int init_module(void) __attribute__((alias(#initfn)));
+
+ /* This is only required if you want to be unloadable. */
+ #define module_exit(exitfn) \
+- static inline exitcall_t __exittest(void) \
++ static inline __used exitcall_t __exittest(void) \
+ { return exitfn; } \
+ void cleanup_module(void) __attribute__((alias(#exitfn)));
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/init_task.h linux-3.4-pax/include/linux/init_task.h
+--- linux-3.4/include/linux/init_task.h 2012-05-21 11:33:37.615929840 +0200
++++ linux-3.4-pax/include/linux/init_task.h 2012-05-21 12:10:11.456048993 +0200
+@@ -134,6 +134,12 @@ extern struct cred init_cred;
+
+ #define INIT_TASK_COMM "swapper"
+
++#ifdef CONFIG_X86
++#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
++#else
++#define INIT_TASK_THREAD_INFO
++#endif
++
+ /*
+ * INIT_TASK is used to set up the first task table, touch at
+ * your own risk!. Base=0, limit=0x1fffff (=2MB)
+@@ -172,6 +178,7 @@ extern struct cred init_cred;
+ RCU_INIT_POINTER(.cred, &init_cred), \
+ .comm = INIT_TASK_COMM, \
+ .thread = INIT_THREAD, \
++ INIT_TASK_THREAD_INFO \
+ .fs = &init_fs, \
+ .files = &init_files, \
+ .signal = &init_signals, \
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/intel-iommu.h linux-3.4-pax/include/linux/intel-iommu.h
+--- linux-3.4/include/linux/intel-iommu.h 2012-01-08 19:48:27.059470980 +0100
++++ linux-3.4-pax/include/linux/intel-iommu.h 2012-05-21 12:10:11.460048993 +0200
+@@ -296,7 +296,7 @@ struct iommu_flush {
+ u8 fm, u64 type);
+ void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type);
+-};
++} __no_const;
+
+ enum {
+ SR_DMAR_FECTL_REG,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/interrupt.h linux-3.4-pax/include/linux/interrupt.h
+--- linux-3.4/include/linux/interrupt.h 2012-05-21 11:33:37.643929841 +0200
++++ linux-3.4-pax/include/linux/interrupt.h 2012-05-21 12:10:11.460048993 +0200
+@@ -439,7 +439,7 @@ enum
+ /* map softirq index to softirq name. update 'softirq_to_name' in
+ * kernel/softirq.c when adding a new softirq.
+ */
+-extern char *softirq_to_name[NR_SOFTIRQS];
++extern const char * const softirq_to_name[NR_SOFTIRQS];
+
+ /* softirq mask and active fields moved to irq_cpustat_t in
+ * asm/hardirq.h to get better cache usage. KAO
+@@ -447,12 +447,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
+
+ struct softirq_action
+ {
+- void (*action)(struct softirq_action *);
++ void (*action)(void);
+ };
+
+ asmlinkage void do_softirq(void);
+ asmlinkage void __do_softirq(void);
+-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
++extern void open_softirq(int nr, void (*action)(void));
+ extern void softirq_init(void);
+ extern void __raise_softirq_irqoff(unsigned int nr);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/Kbuild linux-3.4-pax/include/linux/Kbuild
+--- linux-3.4/include/linux/Kbuild 2012-05-21 11:33:37.123929813 +0200
++++ linux-3.4-pax/include/linux/Kbuild 2012-05-30 18:38:29.984802844 +0200
+@@ -20,7 +20,7 @@ header-y += netfilter_ipv6/
+ header-y += usb/
+ header-y += wimax/
+
+-objhdr-y += version.h
++genhdr-y += version.h
+
+ ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h \
+ $(srctree)/include/asm-$(SRCARCH)/a.out.h \
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/kgdb.h linux-3.4-pax/include/linux/kgdb.h
+--- linux-3.4/include/linux/kgdb.h 2012-05-21 11:33:37.723929846 +0200
++++ linux-3.4-pax/include/linux/kgdb.h 2012-05-21 12:10:11.464048994 +0200
+@@ -53,7 +53,7 @@ extern int kgdb_connected;
+ extern int kgdb_io_module_registered;
+
+ extern atomic_t kgdb_setting_breakpoint;
+-extern atomic_t kgdb_cpu_doing_single_step;
++extern atomic_unchecked_t kgdb_cpu_doing_single_step;
+
+ extern struct task_struct *kgdb_usethread;
+ extern struct task_struct *kgdb_contthread;
+@@ -252,7 +252,7 @@ struct kgdb_arch {
+ void (*disable_hw_break)(struct pt_regs *regs);
+ void (*remove_all_hw_break)(void);
+ void (*correct_hw_break)(void);
+-};
++} __do_const;
+
+ /**
+ * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
+@@ -277,7 +277,7 @@ struct kgdb_io {
+ void (*pre_exception) (void);
+ void (*post_exception) (void);
+ int is_console;
+-};
++} __do_const;
+
+ extern struct kgdb_arch arch_kgdb_ops;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/kref.h linux-3.4-pax/include/linux/kref.h
+--- linux-3.4/include/linux/kref.h 2012-03-19 10:39:11.000049230 +0100
++++ linux-3.4-pax/include/linux/kref.h 2012-05-21 12:10:11.468048994 +0200
+@@ -63,7 +63,7 @@ static inline void kref_get(struct kref
+ static inline int kref_sub(struct kref *kref, unsigned int count,
+ void (*release)(struct kref *kref))
+ {
+- WARN_ON(release == NULL);
++ BUG_ON(release == NULL);
+
+ if (atomic_sub_and_test((int) count, &kref->refcount)) {
+ release(kref);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/kvm_host.h linux-3.4-pax/include/linux/kvm_host.h
+--- linux-3.4/include/linux/kvm_host.h 2012-05-21 11:33:37.735929846 +0200
++++ linux-3.4-pax/include/linux/kvm_host.h 2012-05-22 15:28:31.499384610 +0200
+@@ -322,7 +322,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
+ void vcpu_load(struct kvm_vcpu *vcpu);
+ void vcpu_put(struct kvm_vcpu *vcpu);
+
+-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
++int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+ struct module *module);
+ void kvm_exit(void);
+
+@@ -486,7 +486,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
+ struct kvm_guest_debug *dbg);
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
+
+-int kvm_arch_init(void *opaque);
++int kvm_arch_init(const void *opaque);
+ void kvm_arch_exit(void);
+
+ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/libata.h linux-3.4-pax/include/linux/libata.h
+--- linux-3.4/include/linux/libata.h 2012-05-21 11:33:37.755929848 +0200
++++ linux-3.4-pax/include/linux/libata.h 2012-05-21 12:10:11.472048994 +0200
+@@ -909,7 +909,7 @@ struct ata_port_operations {
+ * fields must be pointers.
+ */
+ const struct ata_port_operations *inherits;
+-};
++} __do_const;
+
+ struct ata_port_info {
+ unsigned long flags;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/mca.h linux-3.4-pax/include/linux/mca.h
+--- linux-3.4/include/linux/mca.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/linux/mca.h 2012-05-21 12:10:11.476048994 +0200
+@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
+ int region);
+ void * (*mca_transform_memory)(struct mca_device *,
+ void *memory);
+-};
++} __no_const;
+
+ struct mca_bus {
+ u64 default_dma_mask;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/memory.h linux-3.4-pax/include/linux/memory.h
+--- linux-3.4/include/linux/memory.h 2012-03-19 10:39:11.012049227 +0100
++++ linux-3.4-pax/include/linux/memory.h 2012-05-21 12:10:11.476048994 +0200
+@@ -143,7 +143,7 @@ struct memory_accessor {
+ size_t count);
+ ssize_t (*write)(struct memory_accessor *, const char *buf,
+ off_t offset, size_t count);
+-};
++} __no_const;
+
+ /*
+ * Kernel text modification mutex, used for code patching. Users of this lock
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/mfd/abx500/ux500_chargalg.h linux-3.4-pax/include/linux/mfd/abx500/ux500_chargalg.h
+--- linux-3.4/include/linux/mfd/abx500/ux500_chargalg.h 2012-05-21 11:33:37.803929850 +0200
++++ linux-3.4-pax/include/linux/mfd/abx500/ux500_chargalg.h 2012-05-30 03:09:09.990871235 +0200
+@@ -19,7 +19,7 @@ struct ux500_charger_ops {
+ int (*enable) (struct ux500_charger *, int, int, int);
+ int (*kick_wd) (struct ux500_charger *);
+ int (*update_curr) (struct ux500_charger *, int);
+-};
++} __no_const;
+
+ /**
+ * struct ux500_charger - power supply ux500 charger sub class
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/mfd/abx500.h linux-3.4-pax/include/linux/mfd/abx500.h
+--- linux-3.4/include/linux/mfd/abx500.h 2012-05-21 11:33:37.791929850 +0200
++++ linux-3.4-pax/include/linux/mfd/abx500.h 2012-05-21 12:10:11.480048995 +0200
+@@ -455,6 +455,7 @@ struct abx500_ops {
+ int (*event_registers_startup_state_get) (struct device *, u8 *);
+ int (*startup_irq_enabled) (struct device *, unsigned int);
+ };
++typedef struct abx500_ops __no_const abx500_ops_no_const;
+
+ int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
+ void abx500_remove_ops(struct device *dev);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/mm.h linux-3.4-pax/include/linux/mm.h
+--- linux-3.4/include/linux/mm.h 2012-05-21 11:33:37.895929855 +0200
++++ linux-3.4-pax/include/linux/mm.h 2012-05-21 12:10:11.484048995 +0200
+@@ -116,7 +116,14 @@ extern unsigned int kobjsize(const void
+
+ #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
+ #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
++#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
++#else
+ #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
++#endif
++
+ #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
+ #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
+
+@@ -1013,34 +1020,6 @@ int set_page_dirty(struct page *page);
+ int set_page_dirty_lock(struct page *page);
+ int clear_page_dirty_for_io(struct page *page);
+
+-/* Is the vma a continuation of the stack vma above it? */
+-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
+-{
+- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+-}
+-
+-static inline int stack_guard_page_start(struct vm_area_struct *vma,
+- unsigned long addr)
+-{
+- return (vma->vm_flags & VM_GROWSDOWN) &&
+- (vma->vm_start == addr) &&
+- !vma_growsdown(vma->vm_prev, addr);
+-}
+-
+-/* Is the vma a continuation of the stack vma below it? */
+-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
+-{
+- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
+-}
+-
+-static inline int stack_guard_page_end(struct vm_area_struct *vma,
+- unsigned long addr)
+-{
+- return (vma->vm_flags & VM_GROWSUP) &&
+- (vma->vm_end == addr) &&
+- !vma_growsup(vma->vm_next, addr);
+-}
+-
+ extern pid_t
+ vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
+
+@@ -1139,6 +1118,15 @@ static inline void sync_mm_rss(struct mm
+ }
+ #endif
+
++#ifdef CONFIG_MMU
++pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
++#else
++static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
++{
++ return __pgprot(0);
++}
++#endif
++
+ int vma_wants_writenotify(struct vm_area_struct *vma);
+
+ extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
+@@ -1157,8 +1145,15 @@ static inline int __pud_alloc(struct mm_
+ {
+ return 0;
+ }
++
++static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
++ unsigned long address)
++{
++ return 0;
++}
+ #else
+ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
++int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
+ #endif
+
+ #ifdef __PAGETABLE_PMD_FOLDED
+@@ -1167,8 +1162,15 @@ static inline int __pmd_alloc(struct mm_
+ {
+ return 0;
+ }
++
++static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
++ unsigned long address)
++{
++ return 0;
++}
+ #else
+ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
++int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
+ #endif
+
+ int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -1186,11 +1188,23 @@ static inline pud_t *pud_alloc(struct mm
+ NULL: pud_offset(pgd, address);
+ }
+
++static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
++{
++ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
++ NULL: pud_offset(pgd, address);
++}
++
+ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+ {
+ return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
+ NULL: pmd_offset(pud, address);
+ }
++
++static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
++{
++ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
++ NULL: pmd_offset(pud, address);
++}
+ #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
+
+ #if USE_SPLIT_PTLOCKS
+@@ -1400,6 +1414,7 @@ extern unsigned long do_mmap(struct file
+ unsigned long, unsigned long,
+ unsigned long, unsigned long);
+ extern int do_munmap(struct mm_struct *, unsigned long, size_t);
++extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
+
+ /* These take the mm semaphore themselves */
+ extern unsigned long vm_brk(unsigned long, unsigned long);
+@@ -1462,6 +1477,10 @@ extern struct vm_area_struct * find_vma(
+ extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
+ struct vm_area_struct **pprev);
+
++extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
++extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
++extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
++
+ /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
+ NULL if none. Assume start_addr < end_addr. */
+ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+@@ -1490,15 +1509,6 @@ static inline struct vm_area_struct *fin
+ return vma;
+ }
+
+-#ifdef CONFIG_MMU
+-pgprot_t vm_get_page_prot(unsigned long vm_flags);
+-#else
+-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+-{
+- return __pgprot(0);
+-}
+-#endif
+-
+ struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
+ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t);
+@@ -1602,7 +1612,7 @@ extern int unpoison_memory(unsigned long
+ extern int sysctl_memory_failure_early_kill;
+ extern int sysctl_memory_failure_recovery;
+ extern void shake_page(struct page *p, int access);
+-extern atomic_long_t mce_bad_pages;
++extern atomic_long_unchecked_t mce_bad_pages;
+ extern int soft_offline_page(struct page *page, int flags);
+
+ extern void dump_page(struct page *page);
+@@ -1633,5 +1643,11 @@ static inline unsigned int debug_guardpa
+ static inline bool page_is_guard(struct page *page) { return false; }
+ #endif /* CONFIG_DEBUG_PAGEALLOC */
+
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
++#else
++static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
++#endif
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_MM_H */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/mm_types.h linux-3.4-pax/include/linux/mm_types.h
+--- linux-3.4/include/linux/mm_types.h 2012-03-19 10:39:11.048049224 +0100
++++ linux-3.4-pax/include/linux/mm_types.h 2012-05-21 12:10:11.488048995 +0200
+@@ -252,6 +252,8 @@ struct vm_area_struct {
+ #ifdef CONFIG_NUMA
+ struct mempolicy *vm_policy; /* NUMA policy for the VMA */
+ #endif
++
++ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
+ };
+
+ struct core_thread {
+@@ -326,7 +328,7 @@ struct mm_struct {
+ unsigned long def_flags;
+ unsigned long nr_ptes; /* Page table pages */
+ unsigned long start_code, end_code, start_data, end_data;
+- unsigned long start_brk, brk, start_stack;
++ unsigned long brk_gap, start_brk, brk, start_stack;
+ unsigned long arg_start, arg_end, env_start, env_end;
+
+ unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
+@@ -388,6 +390,24 @@ struct mm_struct {
+ #ifdef CONFIG_CPUMASK_OFFSTACK
+ struct cpumask cpumask_allocation;
+ #endif
++
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) || defined(CONFIG_PAX_HAVE_ACL_FLAGS) || defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++ unsigned long pax_flags;
++#endif
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ unsigned long call_dl_resolve;
++#endif
++
++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
++ unsigned long call_syscall;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ unsigned long delta_mmap; /* randomized offset */
++ unsigned long delta_stack; /* randomized offset */
++#endif
++
+ };
+
+ static inline void mm_init_cpumask(struct mm_struct *mm)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/mmu_notifier.h linux-3.4-pax/include/linux/mmu_notifier.h
+--- linux-3.4/include/linux/mmu_notifier.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/linux/mmu_notifier.h 2012-05-21 12:10:11.488048995 +0200
+@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
+ */
+ #define ptep_clear_flush_notify(__vma, __address, __ptep) \
+ ({ \
+- pte_t __pte; \
++ pte_t ___pte; \
+ struct vm_area_struct *___vma = __vma; \
+ unsigned long ___address = __address; \
+- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
++ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
+ mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
+- __pte; \
++ ___pte; \
+ })
+
+ #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/mmzone.h linux-3.4-pax/include/linux/mmzone.h
+--- linux-3.4/include/linux/mmzone.h 2012-05-21 11:33:37.951929858 +0200
++++ linux-3.4-pax/include/linux/mmzone.h 2012-05-21 12:10:11.492048995 +0200
+@@ -380,7 +380,7 @@ struct zone {
+ unsigned long flags; /* zone flags, see below */
+
+ /* Zone statistics */
+- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
++ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+
+ /*
+ * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/mod_devicetable.h linux-3.4-pax/include/linux/mod_devicetable.h
+--- linux-3.4/include/linux/mod_devicetable.h 2012-05-21 11:33:37.963929859 +0200
++++ linux-3.4-pax/include/linux/mod_devicetable.h 2012-05-21 12:10:11.496048995 +0200
+@@ -12,7 +12,7 @@
+ typedef unsigned long kernel_ulong_t;
+ #endif
+
+-#define PCI_ANY_ID (~0)
++#define PCI_ANY_ID ((__u16)~0)
+
+ struct pci_device_id {
+ __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
+@@ -131,7 +131,7 @@ struct usb_device_id {
+ #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
+ #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
+
+-#define HID_ANY_ID (~0)
++#define HID_ANY_ID (~0U)
+
+ struct hid_device_id {
+ __u16 bus;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/module.h linux-3.4-pax/include/linux/module.h
+--- linux-3.4/include/linux/module.h 2012-05-21 11:33:37.963929859 +0200
++++ linux-3.4-pax/include/linux/module.h 2012-05-21 12:10:11.496048995 +0200
+@@ -17,6 +17,7 @@
+ #include <linux/moduleparam.h>
+ #include <linux/tracepoint.h>
+ #include <linux/export.h>
++#include <linux/fs.h>
+
+ #include <linux/percpu.h>
+ #include <asm/module.h>
+@@ -273,19 +274,16 @@ struct module
+ int (*init)(void);
+
+ /* If this is non-NULL, vfree after init() returns */
+- void *module_init;
++ void *module_init_rx, *module_init_rw;
+
+ /* Here is the actual code + data, vfree'd on unload. */
+- void *module_core;
++ void *module_core_rx, *module_core_rw;
+
+ /* Here are the sizes of the init and core sections */
+- unsigned int init_size, core_size;
++ unsigned int init_size_rw, core_size_rw;
+
+ /* The size of the executable code in each section. */
+- unsigned int init_text_size, core_text_size;
+-
+- /* Size of RO sections of the module (text+rodata) */
+- unsigned int init_ro_size, core_ro_size;
++ unsigned int init_size_rx, core_size_rx;
+
+ /* Arch-specific module values */
+ struct mod_arch_specific arch;
+@@ -341,6 +339,10 @@ struct module
+ #ifdef CONFIG_EVENT_TRACING
+ struct ftrace_event_call **trace_events;
+ unsigned int num_trace_events;
++ struct file_operations trace_id;
++ struct file_operations trace_enable;
++ struct file_operations trace_format;
++ struct file_operations trace_filter;
+ #endif
+ #ifdef CONFIG_FTRACE_MCOUNT_RECORD
+ unsigned int num_ftrace_callsites;
+@@ -388,16 +390,46 @@ bool is_module_address(unsigned long add
+ bool is_module_percpu_address(unsigned long addr);
+ bool is_module_text_address(unsigned long addr);
+
++static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++ if (ktla_ktva(addr) >= (unsigned long)start &&
++ ktla_ktva(addr) < (unsigned long)start + size)
++ return 1;
++#endif
++
++ return ((void *)addr >= start && (void *)addr < start + size);
++}
++
++static inline int within_module_core_rx(unsigned long addr, struct module *mod)
++{
++ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
++}
++
++static inline int within_module_core_rw(unsigned long addr, struct module *mod)
++{
++ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
++}
++
++static inline int within_module_init_rx(unsigned long addr, struct module *mod)
++{
++ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
++}
++
++static inline int within_module_init_rw(unsigned long addr, struct module *mod)
++{
++ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
++}
++
+ static inline int within_module_core(unsigned long addr, struct module *mod)
+ {
+- return (unsigned long)mod->module_core <= addr &&
+- addr < (unsigned long)mod->module_core + mod->core_size;
++ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
+ }
+
+ static inline int within_module_init(unsigned long addr, struct module *mod)
+ {
+- return (unsigned long)mod->module_init <= addr &&
+- addr < (unsigned long)mod->module_init + mod->init_size;
++ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
+ }
+
+ /* Search for module by name: must hold module_mutex. */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/moduleloader.h linux-3.4-pax/include/linux/moduleloader.h
+--- linux-3.4/include/linux/moduleloader.h 2011-10-24 12:48:41.295090970 +0200
++++ linux-3.4-pax/include/linux/moduleloader.h 2012-05-21 12:10:11.500048996 +0200
+@@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(st
+
+ /* Allocator used for allocating struct module, core sections and init
+ sections. Returns NULL on failure. */
+-void *module_alloc(unsigned long size);
++void *module_alloc(unsigned long size) __size_overflow(1);
++
++#ifdef CONFIG_PAX_KERNEXEC
++void *module_alloc_exec(unsigned long size) __size_overflow(1);
++#else
++#define module_alloc_exec(x) module_alloc(x)
++#endif
+
+ /* Free memory returned from module_alloc. */
+ void module_free(struct module *mod, void *module_region);
+
++#ifdef CONFIG_PAX_KERNEXEC
++void module_free_exec(struct module *mod, void *module_region);
++#else
++#define module_free_exec(x, y) module_free((x), (y))
++#endif
++
+ /* Apply the given relocation to the (simplified) ELF. Return -error
+ or 0. */
+ int apply_relocate(Elf_Shdr *sechdrs,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/moduleparam.h linux-3.4-pax/include/linux/moduleparam.h
+--- linux-3.4/include/linux/moduleparam.h 2012-05-21 11:33:37.967929859 +0200
++++ linux-3.4-pax/include/linux/moduleparam.h 2012-05-21 12:10:11.500048996 +0200
+@@ -286,7 +286,7 @@ static inline void __kernel_param_unlock
+ * @len is usually just sizeof(string).
+ */
+ #define module_param_string(name, string, len, perm) \
+- static const struct kparam_string __param_string_##name \
++ static const struct kparam_string __param_string_##name __used \
+ = { len, string }; \
+ __module_param_call(MODULE_PARAM_PREFIX, name, \
+ &param_ops_string, \
+@@ -424,7 +424,7 @@ extern int param_set_bint(const char *va
+ */
+ #define module_param_array_named(name, array, type, nump, perm) \
+ param_check_##type(name, &(array)[0]); \
+- static const struct kparam_array __param_arr_##name \
++ static const struct kparam_array __param_arr_##name __used \
+ = { .max = ARRAY_SIZE(array), .num = nump, \
+ .ops = &param_ops_##type, \
+ .elemsize = sizeof(array[0]), .elem = array }; \
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/namei.h linux-3.4-pax/include/linux/namei.h
+--- linux-3.4/include/linux/namei.h 2012-01-08 19:48:27.327470966 +0100
++++ linux-3.4-pax/include/linux/namei.h 2012-05-21 12:10:11.504048996 +0200
+@@ -24,7 +24,7 @@ struct nameidata {
+ unsigned seq;
+ int last_type;
+ unsigned depth;
+- char *saved_names[MAX_NESTED_LINKS + 1];
++ const char *saved_names[MAX_NESTED_LINKS + 1];
+
+ /* Intent data */
+ union {
+@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
+ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
+ extern void unlock_rename(struct dentry *, struct dentry *);
+
+-static inline void nd_set_link(struct nameidata *nd, char *path)
++static inline void nd_set_link(struct nameidata *nd, const char *path)
+ {
+ nd->saved_names[nd->depth] = path;
+ }
+
+-static inline char *nd_get_link(struct nameidata *nd)
++static inline const char *nd_get_link(const struct nameidata *nd)
+ {
+ return nd->saved_names[nd->depth];
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/netdevice.h linux-3.4-pax/include/linux/netdevice.h
+--- linux-3.4/include/linux/netdevice.h 2012-05-21 11:33:38.023929862 +0200
++++ linux-3.4-pax/include/linux/netdevice.h 2012-05-21 12:10:11.508048996 +0200
+@@ -1003,6 +1003,7 @@ struct net_device_ops {
+ int (*ndo_neigh_construct)(struct neighbour *n);
+ void (*ndo_neigh_destroy)(struct neighbour *n);
+ };
++typedef struct net_device_ops __no_const net_device_ops_no_const;
+
+ /*
+ * The DEVICE structure.
+@@ -1064,7 +1065,7 @@ struct net_device {
+ int iflink;
+
+ struct net_device_stats stats;
+- atomic_long_t rx_dropped; /* dropped packets by core network
++ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
+ * Do not use this in drivers.
+ */
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/of_pdt.h linux-3.4-pax/include/linux/of_pdt.h
+--- linux-3.4/include/linux/of_pdt.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/linux/of_pdt.h 2012-05-21 12:10:11.512048996 +0200
+@@ -32,7 +32,7 @@ struct of_pdt_ops {
+
+ /* return 0 on success; fill in 'len' with number of bytes in path */
+ int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
+-};
++} __no_const;
+
+ extern void *prom_early_alloc(unsigned long size);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/oprofile.h linux-3.4-pax/include/linux/oprofile.h
+--- linux-3.4/include/linux/oprofile.h 2012-01-08 19:48:27.415470961 +0100
++++ linux-3.4-pax/include/linux/oprofile.h 2012-05-22 15:28:31.515384609 +0200
+@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
+ int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
+ char const * name, ulong * val);
+
+-/** Create a file for read-only access to an atomic_t. */
++/** Create a file for read-only access to an atomic_unchecked_t. */
+ int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
+- char const * name, atomic_t * val);
++ char const * name, atomic_unchecked_t * val);
+
+ /** create a directory */
+ struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/perf_event.h linux-3.4-pax/include/linux/perf_event.h
+--- linux-3.4/include/linux/perf_event.h 2012-05-21 11:33:38.151929869 +0200
++++ linux-3.4-pax/include/linux/perf_event.h 2012-05-21 12:10:11.516048997 +0200
+@@ -879,8 +879,8 @@ struct perf_event {
+
+ enum perf_event_active_state state;
+ unsigned int attach_state;
+- local64_t count;
+- atomic64_t child_count;
++ local64_t count; /* PaX: fix it one day */
++ atomic64_unchecked_t child_count;
+
+ /*
+ * These are the total time in nanoseconds that the event
+@@ -931,8 +931,8 @@ struct perf_event {
+ * These accumulate total time (in nanoseconds) that children
+ * events have been enabled and running, respectively.
+ */
+- atomic64_t child_total_time_enabled;
+- atomic64_t child_total_time_running;
++ atomic64_unchecked_t child_total_time_enabled;
++ atomic64_unchecked_t child_total_time_running;
+
+ /*
+ * Protect attach/detach and child_list:
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/pipe_fs_i.h linux-3.4-pax/include/linux/pipe_fs_i.h
+--- linux-3.4/include/linux/pipe_fs_i.h 2012-05-21 11:33:38.191929871 +0200
++++ linux-3.4-pax/include/linux/pipe_fs_i.h 2012-05-21 12:10:11.520048997 +0200
+@@ -45,9 +45,9 @@ struct pipe_buffer {
+ struct pipe_inode_info {
+ wait_queue_head_t wait;
+ unsigned int nrbufs, curbuf, buffers;
+- unsigned int readers;
+- unsigned int writers;
+- unsigned int waiting_writers;
++ atomic_t readers;
++ atomic_t writers;
++ atomic_t waiting_writers;
+ unsigned int r_counter;
+ unsigned int w_counter;
+ struct page *tmp_page;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/pm_runtime.h linux-3.4-pax/include/linux/pm_runtime.h
+--- linux-3.4/include/linux/pm_runtime.h 2012-03-19 10:39:11.284049214 +0100
++++ linux-3.4-pax/include/linux/pm_runtime.h 2012-05-21 12:10:11.520048997 +0200
+@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_
+
+ static inline void pm_runtime_mark_last_busy(struct device *dev)
+ {
+- ACCESS_ONCE(dev->power.last_busy) = jiffies;
++ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
+ }
+
+ #else /* !CONFIG_PM_RUNTIME */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/poison.h linux-3.4-pax/include/linux/poison.h
+--- linux-3.4/include/linux/poison.h 2012-03-19 10:39:11.288049213 +0100
++++ linux-3.4-pax/include/linux/poison.h 2012-05-21 12:10:11.524048997 +0200
+@@ -19,8 +19,8 @@
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
+-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
++#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
++#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
+
+ /********** include/linux/timer.h **********/
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/preempt.h linux-3.4-pax/include/linux/preempt.h
+--- linux-3.4/include/linux/preempt.h 2012-05-21 11:33:38.243929874 +0200
++++ linux-3.4-pax/include/linux/preempt.h 2012-05-21 12:10:11.524048997 +0200
+@@ -126,7 +126,7 @@ struct preempt_ops {
+ void (*sched_in)(struct preempt_notifier *notifier, int cpu);
+ void (*sched_out)(struct preempt_notifier *notifier,
+ struct task_struct *next);
+-};
++} __no_const;
+
+ /**
+ * preempt_notifier - key for installing preemption notifiers
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/proc_fs.h linux-3.4-pax/include/linux/proc_fs.h
+--- linux-3.4/include/linux/proc_fs.h 2012-03-19 10:39:11.292049211 +0100
++++ linux-3.4-pax/include/linux/proc_fs.h 2012-05-21 12:10:11.528048997 +0200
+@@ -258,7 +258,7 @@ union proc_op {
+ int (*proc_show)(struct seq_file *m,
+ struct pid_namespace *ns, struct pid *pid,
+ struct task_struct *task);
+-};
++} __no_const;
+
+ struct ctl_table_header;
+ struct ctl_table;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/random.h linux-3.4-pax/include/linux/random.h
+--- linux-3.4/include/linux/random.h 2012-01-08 19:48:27.503470957 +0100
++++ linux-3.4-pax/include/linux/random.h 2012-05-21 12:10:11.528048997 +0200
+@@ -69,12 +69,17 @@ void srandom32(u32 seed);
+
+ u32 prandom32(struct rnd_state *);
+
++static inline unsigned long pax_get_random_long(void)
++{
++ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
++}
++
+ /*
+ * Handle minimum values for seeds
+ */
+ static inline u32 __seed(u32 x, u32 m)
+ {
+- return (x < m) ? x + m : x;
++ return (x <= m) ? x + m + 1 : x;
+ }
+
+ /**
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/reboot.h linux-3.4-pax/include/linux/reboot.h
+--- linux-3.4/include/linux/reboot.h 2011-10-24 12:48:41.455090964 +0200
++++ linux-3.4-pax/include/linux/reboot.h 2012-05-21 12:10:11.532048997 +0200
+@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(st
+ * Architecture-specific implementations of sys_reboot commands.
+ */
+
+-extern void machine_restart(char *cmd);
+-extern void machine_halt(void);
+-extern void machine_power_off(void);
++extern void machine_restart(char *cmd) __noreturn;
++extern void machine_halt(void) __noreturn;
++extern void machine_power_off(void) __noreturn;
+
+ extern void machine_shutdown(void);
+ struct pt_regs;
+@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struc
+ */
+
+ extern void kernel_restart_prepare(char *cmd);
+-extern void kernel_restart(char *cmd);
+-extern void kernel_halt(void);
+-extern void kernel_power_off(void);
++extern void kernel_restart(char *cmd) __noreturn;
++extern void kernel_halt(void) __noreturn;
++extern void kernel_power_off(void) __noreturn;
+
+ extern int C_A_D; /* for sysctl */
+ void ctrl_alt_del(void);
+@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
+ * Emergency restart, callable from an interrupt handler.
+ */
+
+-extern void emergency_restart(void);
++extern void emergency_restart(void) __noreturn;
+ #include <asm/emergency-restart.h>
+
+ #endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/relay.h linux-3.4-pax/include/linux/relay.h
+--- linux-3.4/include/linux/relay.h 2012-05-21 11:33:38.327929879 +0200
++++ linux-3.4-pax/include/linux/relay.h 2012-05-21 12:10:11.532048997 +0200
+@@ -160,7 +160,7 @@ struct rchan_callbacks
+ * The callback should return 0 if successful, negative if not.
+ */
+ int (*remove_buf_file)(struct dentry *dentry);
+-};
++} __no_const;
+
+ /*
+ * CONFIG_RELAY kernel API, kernel/relay.c
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/rfkill.h linux-3.4-pax/include/linux/rfkill.h
+--- linux-3.4/include/linux/rfkill.h 2012-05-21 11:33:38.331929879 +0200
++++ linux-3.4-pax/include/linux/rfkill.h 2012-05-21 12:10:11.536048998 +0200
+@@ -147,6 +147,7 @@ struct rfkill_ops {
+ void (*query)(struct rfkill *rfkill, void *data);
+ int (*set_block)(void *data, bool blocked);
+ };
++typedef struct rfkill_ops __no_const rfkill_ops_no_const;
+
+ #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
+ /**
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/rio.h linux-3.4-pax/include/linux/rio.h
+--- linux-3.4/include/linux/rio.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/linux/rio.h 2012-05-21 12:10:11.536048998 +0200
+@@ -315,7 +315,7 @@ struct rio_ops {
+ int mbox, void *buffer, size_t len);
+ int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
+ void *(*get_inb_message)(struct rio_mport *mport, int mbox);
+-};
++} __no_const;
+
+ #define RIO_RESOURCE_MEM 0x00000100
+ #define RIO_RESOURCE_DOORBELL 0x00000200
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/rmap.h linux-3.4-pax/include/linux/rmap.h
+--- linux-3.4/include/linux/rmap.h 2012-05-21 11:33:38.335929879 +0200
++++ linux-3.4-pax/include/linux/rmap.h 2012-05-21 12:10:11.540048998 +0200
+@@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struc
+ void anon_vma_init(void); /* create anon_vma_cachep */
+ int anon_vma_prepare(struct vm_area_struct *);
+ void unlink_anon_vmas(struct vm_area_struct *);
+-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
++int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
+ void anon_vma_moveto_tail(struct vm_area_struct *);
+-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
++int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
+
+ static inline void anon_vma_merge(struct vm_area_struct *vma,
+ struct vm_area_struct *next)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/sched.h linux-3.4-pax/include/linux/sched.h
+--- linux-3.4/include/linux/sched.h 2012-05-21 11:33:38.359929881 +0200
++++ linux-3.4-pax/include/linux/sched.h 2012-05-21 12:10:11.544048998 +0200
+@@ -100,6 +100,7 @@ struct bio_list;
+ struct fs_struct;
+ struct perf_event_context;
+ struct blk_plug;
++struct linux_binprm;
+
+ /*
+ * List of flags we want to share for kernel threads,
+@@ -382,10 +383,13 @@ struct user_namespace;
+ #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+
+ extern int sysctl_max_map_count;
++extern unsigned long sysctl_heap_stack_gap;
+
+ #include <linux/aio.h>
+
+ #ifdef CONFIG_MMU
++extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
++extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
+ extern void arch_pick_mmap_layout(struct mm_struct *mm);
+ extern unsigned long
+ arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
+@@ -1386,8 +1390,8 @@ struct task_struct {
+ struct list_head thread_group;
+
+ struct completion *vfork_done; /* for vfork() */
+- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
+- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
++ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
++ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
+
+ cputime_t utime, stime, utimescaled, stimescaled;
+ cputime_t gtime;
+@@ -1426,6 +1430,10 @@ struct task_struct {
+ #endif
+ /* CPU-specific state of this task */
+ struct thread_struct thread;
++/* thread_info moved to task_struct */
++#ifdef CONFIG_X86
++ struct thread_info tinfo;
++#endif
+ /* filesystem information */
+ struct fs_struct *fs;
+ /* open file information */
+@@ -1619,6 +1627,51 @@ struct task_struct {
+ #endif
+ };
+
++#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
++#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
++#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
++#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
++/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
++#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
++
++#ifdef CONFIG_PAX_SOFTMODE
++extern int pax_softmode;
++#endif
++
++extern int pax_check_flags(unsigned long *);
++
++/* if tsk != current then task_lock must be held on it */
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++static inline unsigned long pax_get_flags(struct task_struct *tsk)
++{
++ if (likely(tsk->mm))
++ return tsk->mm->pax_flags;
++ else
++ return 0UL;
++}
++
++/* if tsk != current then task_lock must be held on it */
++static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
++{
++ if (likely(tsk->mm)) {
++ tsk->mm->pax_flags = flags;
++ return 0;
++ }
++ return -EINVAL;
++}
++#endif
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++extern void pax_set_initial_flags(struct linux_binprm *bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
++#endif
++
++extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
++extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
++extern void pax_report_refcount_overflow(struct pt_regs *regs);
++extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
++
+ /* Future-safe accessor for struct task_struct's cpus_allowed. */
+ #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+
+@@ -2138,7 +2191,9 @@ void yield(void);
+ extern struct exec_domain default_exec_domain;
+
+ union thread_union {
++#ifndef CONFIG_X86
+ struct thread_info thread_info;
++#endif
+ unsigned long stack[THREAD_SIZE/sizeof(long)];
+ };
+
+@@ -2314,7 +2369,7 @@ extern void __cleanup_sighand(struct sig
+ extern void exit_itimers(struct signal_struct *);
+ extern void flush_itimer_signals(void);
+
+-extern void do_group_exit(int);
++extern __noreturn void do_group_exit(int);
+
+ extern void daemonize(const char *, ...);
+ extern int allow_signal(int);
+@@ -2515,13 +2570,17 @@ static inline unsigned long *end_of_stac
+
+ #endif
+
+-static inline int object_is_on_stack(void *obj)
++static inline int object_starts_on_stack(void *obj)
+ {
+- void *stack = task_stack_page(current);
++ const void *stack = task_stack_page(current);
+
+ return (obj >= stack) && (obj < (stack + THREAD_SIZE));
+ }
+
++#ifdef CONFIG_PAX_USERCOPY
++extern int object_is_on_stack(const void *obj, unsigned long len);
++#endif
++
+ extern void thread_info_cache_init(void);
+
+ #ifdef CONFIG_DEBUG_STACK_USAGE
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/screen_info.h linux-3.4-pax/include/linux/screen_info.h
+--- linux-3.4/include/linux/screen_info.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/linux/screen_info.h 2012-05-21 12:10:11.548048998 +0200
+@@ -43,7 +43,8 @@ struct screen_info {
+ __u16 pages; /* 0x32 */
+ __u16 vesa_attributes; /* 0x34 */
+ __u32 capabilities; /* 0x36 */
+- __u8 _reserved[6]; /* 0x3a */
++ __u16 vesapm_size; /* 0x3a */
++ __u8 _reserved[4]; /* 0x3c */
+ } __attribute__((packed));
+
+ #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/seq_file.h linux-3.4-pax/include/linux/seq_file.h
+--- linux-3.4/include/linux/seq_file.h 2012-05-21 11:33:38.363929882 +0200
++++ linux-3.4-pax/include/linux/seq_file.h 2012-05-21 12:10:11.548048998 +0200
+@@ -34,6 +34,7 @@ struct seq_operations {
+ void * (*next) (struct seq_file *m, void *v, loff_t *pos);
+ int (*show) (struct seq_file *m, void *v);
+ };
++typedef struct seq_operations __no_const seq_operations_no_const;
+
+ #define SEQ_SKIP 1
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/skbuff.h linux-3.4-pax/include/linux/skbuff.h
+--- linux-3.4/include/linux/skbuff.h 2012-05-21 11:33:38.395929882 +0200
++++ linux-3.4-pax/include/linux/skbuff.h 2012-05-21 12:10:11.552048998 +0200
+@@ -666,7 +666,7 @@ static inline struct skb_shared_hwtstamp
+ */
+ static inline int skb_queue_empty(const struct sk_buff_head *list)
+ {
+- return list->next == (struct sk_buff *)list;
++ return list->next == (const struct sk_buff *)list;
+ }
+
+ /**
+@@ -679,7 +679,7 @@ static inline int skb_queue_empty(const
+ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+ {
+- return skb->next == (struct sk_buff *)list;
++ return skb->next == (const struct sk_buff *)list;
+ }
+
+ /**
+@@ -692,7 +692,7 @@ static inline bool skb_queue_is_last(con
+ static inline bool skb_queue_is_first(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+ {
+- return skb->prev == (struct sk_buff *)list;
++ return skb->prev == (const struct sk_buff *)list;
+ }
+
+ /**
+@@ -1587,7 +1587,7 @@ static inline int pskb_network_may_pull(
+ * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
+ */
+ #ifndef NET_SKB_PAD
+-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
++#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
+ #endif
+
+ extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/slab_def.h linux-3.4-pax/include/linux/slab_def.h
+--- linux-3.4/include/linux/slab_def.h 2012-03-19 10:39:11.332049207 +0100
++++ linux-3.4-pax/include/linux/slab_def.h 2012-05-22 15:28:31.523384609 +0200
+@@ -66,10 +66,10 @@ struct kmem_cache {
+ unsigned long node_allocs;
+ unsigned long node_frees;
+ unsigned long node_overflow;
+- atomic_t allochit;
+- atomic_t allocmiss;
+- atomic_t freehit;
+- atomic_t freemiss;
++ atomic_unchecked_t allochit;
++ atomic_unchecked_t allocmiss;
++ atomic_unchecked_t freehit;
++ atomic_unchecked_t freemiss;
+
+ /*
+ * If debugging is enabled, then the allocator can add additional
+@@ -107,7 +107,7 @@ struct cache_sizes {
+ extern struct cache_sizes malloc_sizes[];
+
+ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+-void *__kmalloc(size_t size, gfp_t flags);
++void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
+
+ #ifdef CONFIG_TRACING
+ extern void *kmem_cache_alloc_trace(size_t size,
+@@ -160,7 +160,7 @@ found:
+ }
+
+ #ifdef CONFIG_NUMA
+-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
++extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
+ extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+
+ #ifdef CONFIG_TRACING
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/slab.h linux-3.4-pax/include/linux/slab.h
+--- linux-3.4/include/linux/slab.h 2012-05-21 11:33:38.407929883 +0200
++++ linux-3.4-pax/include/linux/slab.h 2012-05-22 15:28:31.515384609 +0200
+@@ -11,12 +11,20 @@
+
+ #include <linux/gfp.h>
+ #include <linux/types.h>
++#include <linux/err.h>
+
+ /*
+ * Flags to pass to kmem_cache_create().
+ * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
+ */
+ #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
++
++#ifdef CONFIG_PAX_USERCOPY
++#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
++#else
++#define SLAB_USERCOPY 0x00000000UL
++#endif
++
+ #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
+ #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
+ #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
+@@ -87,10 +95,13 @@
+ * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
+ * Both make kfree a no-op.
+ */
+-#define ZERO_SIZE_PTR ((void *)16)
++#define ZERO_SIZE_PTR \
++({ \
++ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
++ (void *)(-MAX_ERRNO-1L); \
++})
+
+-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
+- (unsigned long)ZERO_SIZE_PTR)
++#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
+
+ /*
+ * struct kmem_cache related prototypes
+@@ -161,6 +172,7 @@ void * __must_check krealloc(const void
+ void kfree(const void *);
+ void kzfree(const void *);
+ size_t ksize(const void *);
++void check_object_size(const void *ptr, unsigned long n, bool to);
+
+ /*
+ * Allocator specific definitions. These are mainly used to establish optimized
+@@ -240,6 +252,7 @@ size_t ksize(const void *);
+ * for general use, and so are not documented here. For a full list of
+ * potential flags, always refer to linux/gfp.h.
+ */
++static void *kmalloc_array(size_t n, size_t size, gfp_t flags) __size_overflow(1, 2);
+ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+ {
+ if (size != 0 && n > ULONG_MAX / size)
+@@ -298,7 +311,7 @@ static inline void *kmem_cache_alloc_nod
+ */
+ #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
+ (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
+-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
++extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
+ #define kmalloc_track_caller(size, flags) \
+ __kmalloc_track_caller(size, flags, _RET_IP_)
+ #else
+@@ -317,7 +330,7 @@ extern void *__kmalloc_track_caller(size
+ */
+ #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
+ (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
+-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
++extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
+ #define kmalloc_node_track_caller(size, flags, node) \
+ __kmalloc_node_track_caller(size, flags, node, \
+ _RET_IP_)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/slob_def.h linux-3.4-pax/include/linux/slob_def.h
+--- linux-3.4/include/linux/slob_def.h 2011-10-24 12:48:41.499090961 +0200
++++ linux-3.4-pax/include/linux/slob_def.h 2012-05-22 15:28:31.523384609 +0200
+@@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_
+ return kmem_cache_alloc_node(cachep, flags, -1);
+ }
+
+-void *__kmalloc_node(size_t size, gfp_t flags, int node);
++void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
+
+ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+ {
+@@ -29,6 +29,7 @@ static __always_inline void *kmalloc(siz
+ return __kmalloc_node(size, flags, -1);
+ }
+
++static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
+ static __always_inline void *__kmalloc(size_t size, gfp_t flags)
+ {
+ return kmalloc(size, flags);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/slub_def.h linux-3.4-pax/include/linux/slub_def.h
+--- linux-3.4/include/linux/slub_def.h 2012-05-21 11:33:38.411929883 +0200
++++ linux-3.4-pax/include/linux/slub_def.h 2012-05-22 15:30:13.015379190 +0200
+@@ -92,7 +92,7 @@ struct kmem_cache {
+ struct kmem_cache_order_objects max;
+ struct kmem_cache_order_objects min;
+ gfp_t allocflags; /* gfp flags to use on each alloc */
+- int refcount; /* Refcount for slab cache destroy */
++ atomic_t refcount; /* Refcount for slab cache destroy */
+ void (*ctor)(void *);
+ int inuse; /* Offset to metadata */
+ int align; /* Alignment */
+@@ -153,6 +153,7 @@ extern struct kmem_cache *kmalloc_caches
+ * Sorry that the following has to be that ugly but some versions of GCC
+ * have trouble with constant propagation and loops.
+ */
++static __always_inline int kmalloc_index(size_t size) __size_overflow(1);
+ static __always_inline int kmalloc_index(size_t size)
+ {
+ if (!size)
+@@ -218,7 +219,7 @@ static __always_inline struct kmem_cache
+ }
+
+ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+-void *__kmalloc(size_t size, gfp_t flags);
++void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
+
+ static __always_inline void *
+ kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+@@ -259,6 +260,7 @@ kmalloc_order_trace(size_t size, gfp_t f
+ }
+ #endif
+
++static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
+ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+ {
+ unsigned int order = get_order(size);
+@@ -284,7 +286,7 @@ static __always_inline void *kmalloc(siz
+ }
+
+ #ifdef CONFIG_NUMA
+-void *__kmalloc_node(size_t size, gfp_t flags, int node);
++void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
+ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+
+ #ifdef CONFIG_TRACING
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/sonet.h linux-3.4-pax/include/linux/sonet.h
+--- linux-3.4/include/linux/sonet.h 2011-10-24 12:48:41.503090958 +0200
++++ linux-3.4-pax/include/linux/sonet.h 2012-05-21 12:10:11.564048999 +0200
+@@ -61,7 +61,7 @@ struct sonet_stats {
+ #include <linux/atomic.h>
+
+ struct k_sonet_stats {
+-#define __HANDLE_ITEM(i) atomic_t i
++#define __HANDLE_ITEM(i) atomic_unchecked_t i
+ __SONET_ITEMS
+ #undef __HANDLE_ITEM
+ };
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/sunrpc/clnt.h linux-3.4-pax/include/linux/sunrpc/clnt.h
+--- linux-3.4/include/linux/sunrpc/clnt.h 2012-05-21 11:33:38.687929898 +0200
++++ linux-3.4-pax/include/linux/sunrpc/clnt.h 2012-05-21 12:10:11.568048999 +0200
+@@ -174,9 +174,9 @@ static inline unsigned short rpc_get_por
+ {
+ switch (sap->sa_family) {
+ case AF_INET:
+- return ntohs(((struct sockaddr_in *)sap)->sin_port);
++ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
+ case AF_INET6:
+- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
++ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
+ }
+ return 0;
+ }
+@@ -209,7 +209,7 @@ static inline bool __rpc_cmp_addr4(const
+ static inline bool __rpc_copy_addr4(struct sockaddr *dst,
+ const struct sockaddr *src)
+ {
+- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
++ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
+ struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
+
+ dsin->sin_family = ssin->sin_family;
+@@ -312,7 +312,7 @@ static inline u32 rpc_get_scope_id(const
+ if (sa->sa_family != AF_INET6)
+ return 0;
+
+- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
++ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
+ }
+
+ #endif /* __KERNEL__ */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/sunrpc/sched.h linux-3.4-pax/include/linux/sunrpc/sched.h
+--- linux-3.4/include/linux/sunrpc/sched.h 2012-05-21 11:33:38.703929899 +0200
++++ linux-3.4-pax/include/linux/sunrpc/sched.h 2012-05-21 12:10:11.568048999 +0200
+@@ -106,6 +106,7 @@ struct rpc_call_ops {
+ void (*rpc_count_stats)(struct rpc_task *, void *);
+ void (*rpc_release)(void *);
+ };
++typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
+
+ struct rpc_task_setup {
+ struct rpc_task *task;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/sunrpc/svc_rdma.h linux-3.4-pax/include/linux/sunrpc/svc_rdma.h
+--- linux-3.4/include/linux/sunrpc/svc_rdma.h 2012-05-21 11:33:38.711929899 +0200
++++ linux-3.4-pax/include/linux/sunrpc/svc_rdma.h 2012-05-21 12:10:11.572049000 +0200
+@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
+ extern unsigned int svcrdma_max_requests;
+ extern unsigned int svcrdma_max_req_size;
+
+-extern atomic_t rdma_stat_recv;
+-extern atomic_t rdma_stat_read;
+-extern atomic_t rdma_stat_write;
+-extern atomic_t rdma_stat_sq_starve;
+-extern atomic_t rdma_stat_rq_starve;
+-extern atomic_t rdma_stat_rq_poll;
+-extern atomic_t rdma_stat_rq_prod;
+-extern atomic_t rdma_stat_sq_poll;
+-extern atomic_t rdma_stat_sq_prod;
++extern atomic_unchecked_t rdma_stat_recv;
++extern atomic_unchecked_t rdma_stat_read;
++extern atomic_unchecked_t rdma_stat_write;
++extern atomic_unchecked_t rdma_stat_sq_starve;
++extern atomic_unchecked_t rdma_stat_rq_starve;
++extern atomic_unchecked_t rdma_stat_rq_poll;
++extern atomic_unchecked_t rdma_stat_rq_prod;
++extern atomic_unchecked_t rdma_stat_sq_poll;
++extern atomic_unchecked_t rdma_stat_sq_prod;
+
+ #define RPCRDMA_VERSION 1
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/sysctl.h linux-3.4-pax/include/linux/sysctl.h
+--- linux-3.4/include/linux/sysctl.h 2012-05-21 11:33:38.735929901 +0200
++++ linux-3.4-pax/include/linux/sysctl.h 2012-05-21 12:10:11.572049000 +0200
+@@ -155,7 +155,11 @@ enum
+ KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
+ };
+
+-
++#ifdef CONFIG_PAX_SOFTMODE
++enum {
++ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
++};
++#endif
+
+ /* CTL_VM names: */
+ enum
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/tty_ldisc.h linux-3.4-pax/include/linux/tty_ldisc.h
+--- linux-3.4/include/linux/tty_ldisc.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/linux/tty_ldisc.h 2012-05-21 12:10:11.576049000 +0200
+@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
+
+ struct module *owner;
+
+- int refcount;
++ atomic_t refcount;
+ };
+
+ struct tty_ldisc {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/types.h linux-3.4-pax/include/linux/types.h
+--- linux-3.4/include/linux/types.h 2012-05-21 11:33:38.751929902 +0200
++++ linux-3.4-pax/include/linux/types.h 2012-05-21 12:10:11.580049000 +0200
+@@ -220,10 +220,26 @@ typedef struct {
+ int counter;
+ } atomic_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ int counter;
++} atomic_unchecked_t;
++#else
++typedef atomic_t atomic_unchecked_t;
++#endif
++
+ #ifdef CONFIG_64BIT
+ typedef struct {
+ long counter;
+ } atomic64_t;
++
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ long counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
+ #endif
+
+ struct list_head {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/uaccess.h linux-3.4-pax/include/linux/uaccess.h
+--- linux-3.4/include/linux/uaccess.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/linux/uaccess.h 2012-05-22 15:28:31.531384607 +0200
+@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
+ long ret; \
+ mm_segment_t old_fs = get_fs(); \
+ \
+- set_fs(KERNEL_DS); \
+ pagefault_disable(); \
+- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
+- pagefault_enable(); \
++ set_fs(KERNEL_DS); \
++ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
+ set_fs(old_fs); \
++ pagefault_enable(); \
+ ret; \
+ })
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/unaligned/access_ok.h linux-3.4-pax/include/linux/unaligned/access_ok.h
+--- linux-3.4/include/linux/unaligned/access_ok.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/linux/unaligned/access_ok.h 2012-05-21 12:10:11.584049000 +0200
+@@ -6,32 +6,32 @@
+
+ static inline u16 get_unaligned_le16(const void *p)
+ {
+- return le16_to_cpup((__le16 *)p);
++ return le16_to_cpup((const __le16 *)p);
+ }
+
+ static inline u32 get_unaligned_le32(const void *p)
+ {
+- return le32_to_cpup((__le32 *)p);
++ return le32_to_cpup((const __le32 *)p);
+ }
+
+ static inline u64 get_unaligned_le64(const void *p)
+ {
+- return le64_to_cpup((__le64 *)p);
++ return le64_to_cpup((const __le64 *)p);
+ }
+
+ static inline u16 get_unaligned_be16(const void *p)
+ {
+- return be16_to_cpup((__be16 *)p);
++ return be16_to_cpup((const __be16 *)p);
+ }
+
+ static inline u32 get_unaligned_be32(const void *p)
+ {
+- return be32_to_cpup((__be32 *)p);
++ return be32_to_cpup((const __be32 *)p);
+ }
+
+ static inline u64 get_unaligned_be64(const void *p)
+ {
+- return be64_to_cpup((__be64 *)p);
++ return be64_to_cpup((const __be64 *)p);
+ }
+
+ static inline void put_unaligned_le16(u16 val, void *p)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/usb/renesas_usbhs.h linux-3.4-pax/include/linux/usb/renesas_usbhs.h
+--- linux-3.4/include/linux/usb/renesas_usbhs.h 2012-05-21 11:33:38.791929904 +0200
++++ linux-3.4-pax/include/linux/usb/renesas_usbhs.h 2012-05-21 12:10:11.584049000 +0200
+@@ -39,7 +39,7 @@ enum {
+ */
+ struct renesas_usbhs_driver_callback {
+ int (*notify_hotplug)(struct platform_device *pdev);
+-};
++} __no_const;
+
+ /*
+ * callback functions for platform
+@@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
+ * VBUS control is needed for Host
+ */
+ int (*set_vbus)(struct platform_device *pdev, int enable);
+-};
++} __no_const;
+
+ /*
+ * parameters for renesas usbhs
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/vermagic.h linux-3.4-pax/include/linux/vermagic.h
+--- linux-3.4/include/linux/vermagic.h 2012-01-08 19:48:27.795470941 +0100
++++ linux-3.4-pax/include/linux/vermagic.h 2012-05-21 12:10:11.588049000 +0200
+@@ -25,9 +25,28 @@
+ #define MODULE_ARCH_VERMAGIC ""
+ #endif
+
++#ifdef CONFIG_PAX_REFCOUNT
++#define MODULE_PAX_REFCOUNT "REFCOUNT "
++#else
++#define MODULE_PAX_REFCOUNT ""
++#endif
++
++#ifdef CONSTIFY_PLUGIN
++#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
++#else
++#define MODULE_CONSTIFY_PLUGIN ""
++#endif
++
++#ifdef STACKLEAK_PLUGIN
++#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
++#else
++#define MODULE_STACKLEAK_PLUGIN ""
++#endif
++
+ #define VERMAGIC_STRING \
+ UTS_RELEASE " " \
+ MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
+ MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
+- MODULE_ARCH_VERMAGIC
++ MODULE_ARCH_VERMAGIC \
++ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/vmalloc.h linux-3.4-pax/include/linux/vmalloc.h
+--- linux-3.4/include/linux/vmalloc.h 2012-03-19 10:39:11.388049209 +0100
++++ linux-3.4-pax/include/linux/vmalloc.h 2012-05-22 15:28:31.531384607 +0200
+@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining
+ #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
+ #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
+ #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
++#endif
++
+ /* bits [20..32] reserved for arch specific ioremap internals */
+
+ /*
+@@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned lo
+ extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
+ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+ unsigned long start, unsigned long end, gfp_t gfp_mask,
+- pgprot_t prot, int node, void *caller);
++ pgprot_t prot, int node, void *caller) __size_overflow(1);
+ extern void vfree(const void *addr);
+
+ extern void *vmap(struct page **pages, unsigned int count,
+@@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(s
+ extern void free_vm_area(struct vm_struct *area);
+
+ /* for /dev/kmem */
+-extern long vread(char *buf, char *addr, unsigned long count);
+-extern long vwrite(char *buf, char *addr, unsigned long count);
++extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
++extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
+
+ /*
+ * Internals. Dont't use..
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/vmstat.h linux-3.4-pax/include/linux/vmstat.h
+--- linux-3.4/include/linux/vmstat.h 2011-10-24 12:48:41.579090956 +0200
++++ linux-3.4-pax/include/linux/vmstat.h 2012-05-21 12:10:11.592049001 +0200
+@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
+ /*
+ * Zone based page accounting with per cpu differentials.
+ */
+-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
++extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+
+ static inline void zone_page_state_add(long x, struct zone *zone,
+ enum zone_stat_item item)
+ {
+- atomic_long_add(x, &zone->vm_stat[item]);
+- atomic_long_add(x, &vm_stat[item]);
++ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
++ atomic_long_add_unchecked(x, &vm_stat[item]);
+ }
+
+ static inline unsigned long global_page_state(enum zone_stat_item item)
+ {
+- long x = atomic_long_read(&vm_stat[item]);
++ long x = atomic_long_read_unchecked(&vm_stat[item]);
+ #ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+@@ -109,7 +109,7 @@ static inline unsigned long global_page_
+ static inline unsigned long zone_page_state(struct zone *zone,
+ enum zone_stat_item item)
+ {
+- long x = atomic_long_read(&zone->vm_stat[item]);
++ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
+ #ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+@@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
+ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+ enum zone_stat_item item)
+ {
+- long x = atomic_long_read(&zone->vm_stat[item]);
++ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
+
+ #ifdef CONFIG_SMP
+ int cpu;
+@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
+
+ static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+ {
+- atomic_long_inc(&zone->vm_stat[item]);
+- atomic_long_inc(&vm_stat[item]);
++ atomic_long_inc_unchecked(&zone->vm_stat[item]);
++ atomic_long_inc_unchecked(&vm_stat[item]);
+ }
+
+ static inline void __inc_zone_page_state(struct page *page,
+@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
+
+ static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+ {
+- atomic_long_dec(&zone->vm_stat[item]);
+- atomic_long_dec(&vm_stat[item]);
++ atomic_long_dec_unchecked(&zone->vm_stat[item]);
++ atomic_long_dec_unchecked(&vm_stat[item]);
+ }
+
+ static inline void __dec_zone_page_state(struct page *page,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/linux/xattr.h linux-3.4-pax/include/linux/xattr.h
+--- linux-3.4/include/linux/xattr.h 2012-01-08 19:48:27.851470938 +0100
++++ linux-3.4-pax/include/linux/xattr.h 2012-05-21 12:10:11.592049001 +0200
+@@ -57,6 +57,11 @@
+ #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
+ #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
+
++/* User namespace */
++#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
++#define XATTR_PAX_FLAGS_SUFFIX "flags"
++#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
++
+ #ifdef __KERNEL__
+
+ #include <linux/types.h>
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/media/saa7146_vv.h linux-3.4-pax/include/media/saa7146_vv.h
+--- linux-3.4/include/media/saa7146_vv.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/media/saa7146_vv.h 2012-05-21 12:10:11.596049001 +0200
+@@ -163,7 +163,7 @@ struct saa7146_ext_vv
+ int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
+
+ /* the extension can override this */
+- struct v4l2_ioctl_ops ops;
++ v4l2_ioctl_ops_no_const ops;
+ /* pointer to the saa7146 core ops */
+ const struct v4l2_ioctl_ops *core_ops;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/media/v4l2-dev.h linux-3.4-pax/include/media/v4l2-dev.h
+--- linux-3.4/include/media/v4l2-dev.h 2012-05-21 11:33:38.883929909 +0200
++++ linux-3.4-pax/include/media/v4l2-dev.h 2012-05-21 12:10:11.596049001 +0200
+@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
+
+
+ struct v4l2_file_operations {
+- struct module *owner;
++ struct module * const owner;
+ ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
+ ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
+ unsigned int (*poll) (struct file *, struct poll_table_struct *);
+@@ -71,6 +71,7 @@ struct v4l2_file_operations {
+ int (*open) (struct file *);
+ int (*release) (struct file *);
+ };
++typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
+
+ /*
+ * Newer version of video_device, handled by videodev2.c
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/media/v4l2-ioctl.h linux-3.4-pax/include/media/v4l2-ioctl.h
+--- linux-3.4/include/media/v4l2-ioctl.h 2012-05-21 11:33:38.895929909 +0200
++++ linux-3.4-pax/include/media/v4l2-ioctl.h 2012-05-21 12:10:11.600049001 +0200
+@@ -281,7 +281,7 @@ struct v4l2_ioctl_ops {
+ long (*vidioc_default) (struct file *file, void *fh,
+ bool valid_prio, int cmd, void *arg);
+ };
+-
++typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
+
+ /* v4l debugging and diagnostics */
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/net/caif/caif_hsi.h linux-3.4-pax/include/net/caif/caif_hsi.h
+--- linux-3.4/include/net/caif/caif_hsi.h 2012-05-21 11:33:38.935929912 +0200
++++ linux-3.4-pax/include/net/caif/caif_hsi.h 2012-05-21 12:10:11.600049001 +0200
+@@ -98,7 +98,7 @@ struct cfhsi_drv {
+ void (*rx_done_cb) (struct cfhsi_drv *drv);
+ void (*wake_up_cb) (struct cfhsi_drv *drv);
+ void (*wake_down_cb) (struct cfhsi_drv *drv);
+-};
++} __no_const;
+
+ /* Structure implemented by HSI device. */
+ struct cfhsi_dev {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/net/caif/cfctrl.h linux-3.4-pax/include/net/caif/cfctrl.h
+--- linux-3.4/include/net/caif/cfctrl.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/net/caif/cfctrl.h 2012-05-21 12:10:11.604049001 +0200
+@@ -52,7 +52,7 @@ struct cfctrl_rsp {
+ void (*radioset_rsp)(void);
+ void (*reject_rsp)(struct cflayer *layer, u8 linkid,
+ struct cflayer *client_layer);
+-};
++} __no_const;
+
+ /* Link Setup Parameters for CAIF-Links. */
+ struct cfctrl_link_param {
+@@ -101,8 +101,8 @@ struct cfctrl_request_info {
+ struct cfctrl {
+ struct cfsrvl serv;
+ struct cfctrl_rsp res;
+- atomic_t req_seq_no;
+- atomic_t rsp_seq_no;
++ atomic_unchecked_t req_seq_no;
++ atomic_unchecked_t rsp_seq_no;
+ struct list_head list;
+ /* Protects from simultaneous access to first_req list */
+ spinlock_t info_list_lock;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/net/flow.h linux-3.4-pax/include/net/flow.h
+--- linux-3.4/include/net/flow.h 2012-03-19 10:39:11.432049206 +0100
++++ linux-3.4-pax/include/net/flow.h 2012-05-21 12:10:11.604049001 +0200
+@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_ca
+
+ extern void flow_cache_flush(void);
+ extern void flow_cache_flush_deferred(void);
+-extern atomic_t flow_cache_genid;
++extern atomic_unchecked_t flow_cache_genid;
+
+ #endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/net/inetpeer.h linux-3.4-pax/include/net/inetpeer.h
+--- linux-3.4/include/net/inetpeer.h 2012-03-19 10:39:11.440049201 +0100
++++ linux-3.4-pax/include/net/inetpeer.h 2012-05-21 12:10:11.608049002 +0200
+@@ -48,8 +48,8 @@ struct inet_peer {
+ */
+ union {
+ struct {
+- atomic_t rid; /* Frag reception counter */
+- atomic_t ip_id_count; /* IP ID for the next packet */
++ atomic_unchecked_t rid; /* Frag reception counter */
++ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
+ __u32 tcp_ts;
+ __u32 tcp_ts_stamp;
+ };
+@@ -115,11 +115,11 @@ static inline int inet_getid(struct inet
+ more++;
+ inet_peer_refcheck(p);
+ do {
+- old = atomic_read(&p->ip_id_count);
++ old = atomic_read_unchecked(&p->ip_id_count);
+ new = old + more;
+ if (!new)
+ new = 1;
+- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
++ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
+ return new;
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/net/ip_fib.h linux-3.4-pax/include/net/ip_fib.h
+--- linux-3.4/include/net/ip_fib.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/net/ip_fib.h 2012-05-21 12:10:11.608049002 +0200
+@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
+
+ #define FIB_RES_SADDR(net, res) \
+ ((FIB_RES_NH(res).nh_saddr_genid == \
+- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
++ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
+ FIB_RES_NH(res).nh_saddr : \
+ fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
+ #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/net/ip_vs.h linux-3.4-pax/include/net/ip_vs.h
+--- linux-3.4/include/net/ip_vs.h 2012-05-21 11:33:38.967929913 +0200
++++ linux-3.4-pax/include/net/ip_vs.h 2012-05-21 12:10:11.612049002 +0200
+@@ -510,7 +510,7 @@ struct ip_vs_conn {
+ struct ip_vs_conn *control; /* Master control connection */
+ atomic_t n_control; /* Number of controlled ones */
+ struct ip_vs_dest *dest; /* real server */
+- atomic_t in_pkts; /* incoming packet counter */
++ atomic_unchecked_t in_pkts; /* incoming packet counter */
+
+ /* packet transmitter for different forwarding methods. If it
+ mangles the packet, it must return NF_DROP or better NF_STOLEN,
+@@ -648,7 +648,7 @@ struct ip_vs_dest {
+ __be16 port; /* port number of the server */
+ union nf_inet_addr addr; /* IP address of the server */
+ volatile unsigned flags; /* dest status flags */
+- atomic_t conn_flags; /* flags to copy to conn */
++ atomic_unchecked_t conn_flags; /* flags to copy to conn */
+ atomic_t weight; /* server weight */
+
+ atomic_t refcnt; /* reference counter */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/net/irda/ircomm_core.h linux-3.4-pax/include/net/irda/ircomm_core.h
+--- linux-3.4/include/net/irda/ircomm_core.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/net/irda/ircomm_core.h 2012-05-21 12:10:11.616049001 +0200
+@@ -51,7 +51,7 @@ typedef struct {
+ int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
+ int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
+ struct ircomm_info *);
+-} call_t;
++} __no_const call_t;
+
+ struct ircomm_cb {
+ irda_queue_t queue;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/net/irda/ircomm_tty.h linux-3.4-pax/include/net/irda/ircomm_tty.h
+--- linux-3.4/include/net/irda/ircomm_tty.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/net/irda/ircomm_tty.h 2012-05-21 12:10:11.616049001 +0200
+@@ -35,6 +35,7 @@
+ #include <linux/termios.h>
+ #include <linux/timer.h>
+ #include <linux/tty.h> /* struct tty_struct */
++#include <asm/local.h>
+
+ #include <net/irda/irias_object.h>
+ #include <net/irda/ircomm_core.h>
+@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
+ unsigned short close_delay;
+ unsigned short closing_wait; /* time to wait before closing */
+
+- int open_count;
+- int blocked_open; /* # of blocked opens */
++ local_t open_count;
++ local_t blocked_open; /* # of blocked opens */
+
+ /* Protect concurent access to :
+ * o self->open_count
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/net/iucv/af_iucv.h linux-3.4-pax/include/net/iucv/af_iucv.h
+--- linux-3.4/include/net/iucv/af_iucv.h 2012-05-21 11:33:38.979929914 +0200
++++ linux-3.4-pax/include/net/iucv/af_iucv.h 2012-05-21 12:10:11.620049001 +0200
+@@ -141,7 +141,7 @@ struct iucv_sock {
+ struct iucv_sock_list {
+ struct hlist_head head;
+ rwlock_t lock;
+- atomic_t autobind_name;
++ atomic_unchecked_t autobind_name;
+ };
+
+ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/net/neighbour.h linux-3.4-pax/include/net/neighbour.h
+--- linux-3.4/include/net/neighbour.h 2012-03-19 10:39:11.448049201 +0100
++++ linux-3.4-pax/include/net/neighbour.h 2012-05-21 12:10:11.620049001 +0200
+@@ -123,7 +123,7 @@ struct neigh_ops {
+ void (*error_report)(struct neighbour *, struct sk_buff *);
+ int (*output)(struct neighbour *, struct sk_buff *);
+ int (*connected_output)(struct neighbour *, struct sk_buff *);
+-};
++} __do_const;
+
+ struct pneigh_entry {
+ struct pneigh_entry *next;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/net/netlink.h linux-3.4-pax/include/net/netlink.h
+--- linux-3.4/include/net/netlink.h 2012-05-21 11:33:39.003929915 +0200
++++ linux-3.4-pax/include/net/netlink.h 2012-05-21 12:10:11.624049003 +0200
+@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct
+ static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
+ {
+ if (mark)
+- skb_trim(skb, (unsigned char *) mark - skb->data);
++ skb_trim(skb, (const unsigned char *) mark - skb->data);
+ }
+
+ /**
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/net/netns/ipv4.h linux-3.4-pax/include/net/netns/ipv4.h
+--- linux-3.4/include/net/netns/ipv4.h 2012-03-19 10:39:11.456049205 +0100
++++ linux-3.4-pax/include/net/netns/ipv4.h 2012-05-21 12:10:11.624049003 +0200
+@@ -57,8 +57,8 @@ struct netns_ipv4 {
+ unsigned int sysctl_ping_group_range[2];
+ long sysctl_tcp_mem[3];
+
+- atomic_t rt_genid;
+- atomic_t dev_addr_genid;
++ atomic_unchecked_t rt_genid;
++ atomic_unchecked_t dev_addr_genid;
+
+ #ifdef CONFIG_IP_MROUTE
+ #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/net/sctp/sctp.h linux-3.4-pax/include/net/sctp/sctp.h
+--- linux-3.4/include/net/sctp/sctp.h 2012-05-21 11:33:39.015929916 +0200
++++ linux-3.4-pax/include/net/sctp/sctp.h 2012-05-21 12:10:11.628049004 +0200
+@@ -318,9 +318,9 @@ do { \
+
+ #else /* SCTP_DEBUG */
+
+-#define SCTP_DEBUG_PRINTK(whatever...)
+-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
+-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
++#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
++#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
++#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
+ #define SCTP_ENABLE_DEBUG
+ #define SCTP_DISABLE_DEBUG
+ #define SCTP_ASSERT(expr, str, func)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/net/sock.h linux-3.4-pax/include/net/sock.h
+--- linux-3.4/include/net/sock.h 2012-05-21 11:33:39.019929916 +0200
++++ linux-3.4-pax/include/net/sock.h 2012-05-21 12:10:11.632049004 +0200
+@@ -302,7 +302,7 @@ struct sock {
+ #ifdef CONFIG_RPS
+ __u32 sk_rxhash;
+ #endif
+- atomic_t sk_drops;
++ atomic_unchecked_t sk_drops;
+ int sk_rcvbuf;
+
+ struct sk_filter __rcu *sk_filter;
+@@ -1691,7 +1691,7 @@ static inline void sk_nocaps_add(struct
+ }
+
+ static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
+- char __user *from, char *to,
++ char __user *from, unsigned char *to,
+ int copy, int offset)
+ {
+ if (skb->ip_summed == CHECKSUM_NONE) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/net/tcp.h linux-3.4-pax/include/net/tcp.h
+--- linux-3.4/include/net/tcp.h 2012-05-21 11:33:39.027929917 +0200
++++ linux-3.4-pax/include/net/tcp.h 2012-05-21 12:10:11.636049004 +0200
+@@ -1425,7 +1425,7 @@ struct tcp_seq_afinfo {
+ char *name;
+ sa_family_t family;
+ const struct file_operations *seq_fops;
+- struct seq_operations seq_ops;
++ seq_operations_no_const seq_ops;
+ };
+
+ struct tcp_iter_state {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/net/udp.h linux-3.4-pax/include/net/udp.h
+--- linux-3.4/include/net/udp.h 2012-05-21 11:33:39.031929917 +0200
++++ linux-3.4-pax/include/net/udp.h 2012-05-21 12:10:11.636049004 +0200
+@@ -244,7 +244,7 @@ struct udp_seq_afinfo {
+ sa_family_t family;
+ struct udp_table *udp_table;
+ const struct file_operations *seq_fops;
+- struct seq_operations seq_ops;
++ seq_operations_no_const seq_ops;
+ };
+
+ struct udp_iter_state {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/net/xfrm.h linux-3.4-pax/include/net/xfrm.h
+--- linux-3.4/include/net/xfrm.h 2012-05-21 11:33:39.055929918 +0200
++++ linux-3.4-pax/include/net/xfrm.h 2012-05-21 12:10:11.640049003 +0200
+@@ -505,7 +505,7 @@ struct xfrm_policy {
+ struct timer_list timer;
+
+ struct flow_cache_object flo;
+- atomic_t genid;
++ atomic_unchecked_t genid;
+ u32 priority;
+ u32 index;
+ struct xfrm_mark mark;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/rdma/iw_cm.h linux-3.4-pax/include/rdma/iw_cm.h
+--- linux-3.4/include/rdma/iw_cm.h 2012-01-08 19:48:28.211470919 +0100
++++ linux-3.4-pax/include/rdma/iw_cm.h 2012-05-21 12:10:11.644049003 +0200
+@@ -122,7 +122,7 @@ struct iw_cm_verbs {
+ int backlog);
+
+ int (*destroy_listen)(struct iw_cm_id *cm_id);
+-};
++} __no_const;
+
+ /**
+ * iw_create_cm_id - Create an IW CM identifier.
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/scsi/libfc.h linux-3.4-pax/include/scsi/libfc.h
+--- linux-3.4/include/scsi/libfc.h 2012-05-21 11:33:39.075929919 +0200
++++ linux-3.4-pax/include/scsi/libfc.h 2012-05-21 12:10:11.644049003 +0200
+@@ -756,6 +756,7 @@ struct libfc_function_template {
+ */
+ void (*disc_stop_final) (struct fc_lport *);
+ };
++typedef struct libfc_function_template __no_const libfc_function_template_no_const;
+
+ /**
+ * struct fc_disc - Discovery context
+@@ -861,7 +862,7 @@ struct fc_lport {
+ struct fc_vport *vport;
+
+ /* Operational Information */
+- struct libfc_function_template tt;
++ libfc_function_template_no_const tt;
+ u8 link_up;
+ u8 qfull;
+ enum fc_lport_state state;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/scsi/scsi_device.h linux-3.4-pax/include/scsi/scsi_device.h
+--- linux-3.4/include/scsi/scsi_device.h 2012-05-21 11:33:39.115929921 +0200
++++ linux-3.4-pax/include/scsi/scsi_device.h 2012-05-21 12:10:11.648049004 +0200
+@@ -162,9 +162,9 @@ struct scsi_device {
+ unsigned int max_device_blocked; /* what device_blocked counts down from */
+ #define SCSI_DEFAULT_DEVICE_BLOCKED 3
+
+- atomic_t iorequest_cnt;
+- atomic_t iodone_cnt;
+- atomic_t ioerr_cnt;
++ atomic_unchecked_t iorequest_cnt;
++ atomic_unchecked_t iodone_cnt;
++ atomic_unchecked_t ioerr_cnt;
+
+ struct device sdev_gendev,
+ sdev_dev;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/scsi/scsi_transport_fc.h linux-3.4-pax/include/scsi/scsi_transport_fc.h
+--- linux-3.4/include/scsi/scsi_transport_fc.h 2012-05-21 11:33:39.123929922 +0200
++++ linux-3.4-pax/include/scsi/scsi_transport_fc.h 2012-05-21 12:10:11.652049004 +0200
+@@ -739,7 +739,7 @@ struct fc_function_template {
+ unsigned long show_host_system_hostname:1;
+
+ unsigned long disable_target_scan:1;
+-};
++} __do_const;
+
+
+ /**
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/sound/ak4xxx-adda.h linux-3.4-pax/include/sound/ak4xxx-adda.h
+--- linux-3.4/include/sound/ak4xxx-adda.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/sound/ak4xxx-adda.h 2012-05-21 12:10:11.652049004 +0200
+@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
+ void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
+ unsigned char val);
+ void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
+-};
++} __no_const;
+
+ #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/sound/hwdep.h linux-3.4-pax/include/sound/hwdep.h
+--- linux-3.4/include/sound/hwdep.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/sound/hwdep.h 2012-05-21 12:10:11.656049004 +0200
+@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
+ struct snd_hwdep_dsp_status *status);
+ int (*dsp_load)(struct snd_hwdep *hw,
+ struct snd_hwdep_dsp_image *image);
+-};
++} __no_const;
+
+ struct snd_hwdep {
+ struct snd_card *card;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/sound/info.h linux-3.4-pax/include/sound/info.h
+--- linux-3.4/include/sound/info.h 2012-03-19 10:39:11.748049215 +0100
++++ linux-3.4-pax/include/sound/info.h 2012-05-21 12:10:11.656049004 +0200
+@@ -44,7 +44,7 @@ struct snd_info_entry_text {
+ struct snd_info_buffer *buffer);
+ void (*write)(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer);
+-};
++} __no_const;
+
+ struct snd_info_entry_ops {
+ int (*open)(struct snd_info_entry *entry,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/sound/pcm.h linux-3.4-pax/include/sound/pcm.h
+--- linux-3.4/include/sound/pcm.h 2012-05-21 11:33:39.159929924 +0200
++++ linux-3.4-pax/include/sound/pcm.h 2012-05-21 12:10:11.660049004 +0200
+@@ -81,6 +81,7 @@ struct snd_pcm_ops {
+ int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
+ int (*ack)(struct snd_pcm_substream *substream);
+ };
++typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
+
+ /*
+ *
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/sound/sb16_csp.h linux-3.4-pax/include/sound/sb16_csp.h
+--- linux-3.4/include/sound/sb16_csp.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/sound/sb16_csp.h 2012-05-21 12:10:11.660049004 +0200
+@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
+ int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
+ int (*csp_stop) (struct snd_sb_csp * p);
+ int (*csp_qsound_transfer) (struct snd_sb_csp * p);
+-};
++} __no_const;
+
+ /*
+ * CSP private data
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/sound/soc.h linux-3.4-pax/include/sound/soc.h
+--- linux-3.4/include/sound/soc.h 2012-05-21 11:33:39.179929925 +0200
++++ linux-3.4-pax/include/sound/soc.h 2012-05-21 12:10:11.664049005 +0200
+@@ -711,7 +711,7 @@ struct snd_soc_platform_driver {
+ /* platform IO - used for platform DAPM */
+ unsigned int (*read)(struct snd_soc_platform *, unsigned int);
+ int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
+-};
++} __do_const;
+
+ struct snd_soc_platform {
+ const char *name;
+@@ -887,7 +887,7 @@ struct snd_soc_pcm_runtime {
+ struct snd_soc_dai_link *dai_link;
+ struct mutex pcm_mutex;
+ enum snd_soc_pcm_subclass pcm_subclass;
+- struct snd_pcm_ops ops;
++ snd_pcm_ops_no_const ops;
+
+ unsigned int complete:1;
+ unsigned int dev_registered:1;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/sound/ymfpci.h linux-3.4-pax/include/sound/ymfpci.h
+--- linux-3.4/include/sound/ymfpci.h 2012-05-21 11:33:39.203929926 +0200
++++ linux-3.4-pax/include/sound/ymfpci.h 2012-05-21 12:10:11.668049005 +0200
+@@ -358,7 +358,7 @@ struct snd_ymfpci {
+ spinlock_t reg_lock;
+ spinlock_t voice_lock;
+ wait_queue_head_t interrupt_sleep;
+- atomic_t interrupt_sleep_count;
++ atomic_unchecked_t interrupt_sleep_count;
+ struct snd_info_entry *proc_entry;
+ const struct firmware *dsp_microcode;
+ const struct firmware *controller_microcode;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/target/target_core_base.h linux-3.4-pax/include/target/target_core_base.h
+--- linux-3.4/include/target/target_core_base.h 2012-05-21 11:33:39.215929927 +0200
++++ linux-3.4-pax/include/target/target_core_base.h 2012-05-21 12:10:11.668049005 +0200
+@@ -447,7 +447,7 @@ struct t10_reservation_ops {
+ int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
+ int (*t10_pr_register)(struct se_cmd *);
+ int (*t10_pr_clear)(struct se_cmd *);
+-};
++} __no_const;
+
+ struct t10_reservation {
+ /* Reservation effects all target ports */
+@@ -576,7 +576,7 @@ struct se_cmd {
+ atomic_t t_se_count;
+ atomic_t t_task_cdbs_left;
+ atomic_t t_task_cdbs_ex_left;
+- atomic_t t_task_cdbs_sent;
++ atomic_unchecked_t t_task_cdbs_sent;
+ unsigned int transport_state;
+ #define CMD_T_ABORTED (1 << 0)
+ #define CMD_T_ACTIVE (1 << 1)
+@@ -802,7 +802,7 @@ struct se_device {
+ spinlock_t stats_lock;
+ /* Active commands on this virtual SE device */
+ atomic_t simple_cmds;
+- atomic_t dev_ordered_id;
++ atomic_unchecked_t dev_ordered_id;
+ atomic_t execute_tasks;
+ atomic_t dev_ordered_sync;
+ atomic_t dev_qf_count;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/trace/events/irq.h linux-3.4-pax/include/trace/events/irq.h
+--- linux-3.4/include/trace/events/irq.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/trace/events/irq.h 2012-05-21 12:10:11.672049005 +0200
+@@ -36,7 +36,7 @@ struct softirq_action;
+ */
+ TRACE_EVENT(irq_handler_entry,
+
+- TP_PROTO(int irq, struct irqaction *action),
++ TP_PROTO(int irq, const struct irqaction *action),
+
+ TP_ARGS(irq, action),
+
+@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
+ */
+ TRACE_EVENT(irq_handler_exit,
+
+- TP_PROTO(int irq, struct irqaction *action, int ret),
++ TP_PROTO(int irq, const struct irqaction *action, int ret),
+
+ TP_ARGS(irq, action, ret),
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/video/udlfb.h linux-3.4-pax/include/video/udlfb.h
+--- linux-3.4/include/video/udlfb.h 2012-05-21 11:33:39.279929930 +0200
++++ linux-3.4-pax/include/video/udlfb.h 2012-05-21 12:10:11.672049005 +0200
+@@ -53,10 +53,10 @@ struct dlfb_data {
+ u32 pseudo_palette[256];
+ int blank_mode; /*one of FB_BLANK_ */
+ /* blit-only rendering path metrics, exposed through sysfs */
+- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
+- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
+- atomic_t bytes_sent; /* to usb, after compression including overhead */
+- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
++ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
++ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
++ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
++ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
+ };
+
+ #define NR_USB_REQUEST_I2C_SUB_IO 0x02
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/include/video/uvesafb.h linux-3.4-pax/include/video/uvesafb.h
+--- linux-3.4/include/video/uvesafb.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/include/video/uvesafb.h 2012-05-21 12:10:11.676049005 +0200
+@@ -177,6 +177,7 @@ struct uvesafb_par {
+ u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
+ u8 pmi_setpal; /* PMI for palette changes */
+ u16 *pmi_base; /* protected mode interface location */
++ u8 *pmi_code; /* protected mode code location */
+ void *pmi_start;
+ void *pmi_pal;
+ u8 *vbe_state_orig; /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/init/do_mounts.c linux-3.4-pax/init/do_mounts.c
+--- linux-3.4/init/do_mounts.c 2012-05-21 11:33:39.323929933 +0200
++++ linux-3.4-pax/init/do_mounts.c 2012-05-21 12:10:11.676049005 +0200
+@@ -326,11 +326,11 @@ static void __init get_fs_names(char *pa
+ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
+ {
+ struct super_block *s;
+- int err = sys_mount(name, "/root", fs, flags, data);
++ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
+ if (err)
+ return err;
+
+- sys_chdir((const char __user __force *)"/root");
++ sys_chdir((const char __force_user *)"/root");
+ s = current->fs->pwd.dentry->d_sb;
+ ROOT_DEV = s->s_dev;
+ printk(KERN_INFO
+@@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...
+ va_start(args, fmt);
+ vsprintf(buf, fmt, args);
+ va_end(args);
+- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
++ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
+ if (fd >= 0) {
+ sys_ioctl(fd, FDEJECT, 0);
+ sys_close(fd);
+ }
+ printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
+- fd = sys_open("/dev/console", O_RDWR, 0);
++ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
+ if (fd >= 0) {
+ sys_ioctl(fd, TCGETS, (long)&termios);
+ termios.c_lflag &= ~ICANON;
+ sys_ioctl(fd, TCSETSF, (long)&termios);
+- sys_read(fd, &c, 1);
++ sys_read(fd, (char __user *)&c, 1);
+ termios.c_lflag |= ICANON;
+ sys_ioctl(fd, TCSETSF, (long)&termios);
+ sys_close(fd);
+@@ -555,6 +555,6 @@ void __init prepare_namespace(void)
+ mount_root();
+ out:
+ devtmpfs_mount("dev");
+- sys_mount(".", "/", NULL, MS_MOVE, NULL);
+- sys_chroot((const char __user __force *)".");
++ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
++ sys_chroot((const char __force_user *)".");
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/init/do_mounts.h linux-3.4-pax/init/do_mounts.h
+--- linux-3.4/init/do_mounts.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/init/do_mounts.h 2012-05-21 12:10:11.680049005 +0200
+@@ -15,15 +15,15 @@ extern int root_mountflags;
+
+ static inline int create_dev(char *name, dev_t dev)
+ {
+- sys_unlink(name);
+- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
++ sys_unlink((char __force_user *)name);
++ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
+ }
+
+ #if BITS_PER_LONG == 32
+ static inline u32 bstat(char *name)
+ {
+ struct stat64 stat;
+- if (sys_stat64(name, &stat) != 0)
++ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
+ return 0;
+ if (!S_ISBLK(stat.st_mode))
+ return 0;
+@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
+ static inline u32 bstat(char *name)
+ {
+ struct stat stat;
+- if (sys_newstat(name, &stat) != 0)
++ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
+ return 0;
+ if (!S_ISBLK(stat.st_mode))
+ return 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/init/do_mounts_initrd.c linux-3.4-pax/init/do_mounts_initrd.c
+--- linux-3.4/init/do_mounts_initrd.c 2012-05-21 11:33:39.323929933 +0200
++++ linux-3.4-pax/init/do_mounts_initrd.c 2012-05-21 12:10:11.680049005 +0200
+@@ -43,13 +43,13 @@ static void __init handle_initrd(void)
+ create_dev("/dev/root.old", Root_RAM0);
+ /* mount initrd on rootfs' /root */
+ mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
+- sys_mkdir("/old", 0700);
+- root_fd = sys_open("/", 0, 0);
+- old_fd = sys_open("/old", 0, 0);
++ sys_mkdir((const char __force_user *)"/old", 0700);
++ root_fd = sys_open((const char __force_user *)"/", 0, 0);
++ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
+ /* move initrd over / and chdir/chroot in initrd root */
+- sys_chdir("/root");
+- sys_mount(".", "/", NULL, MS_MOVE, NULL);
+- sys_chroot(".");
++ sys_chdir((const char __force_user *)"/root");
++ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
++ sys_chroot((const char __force_user *)".");
+
+ /*
+ * In case that a resume from disk is carried out by linuxrc or one of
+@@ -66,15 +66,15 @@ static void __init handle_initrd(void)
+
+ /* move initrd to rootfs' /old */
+ sys_fchdir(old_fd);
+- sys_mount("/", ".", NULL, MS_MOVE, NULL);
++ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
+ /* switch root and cwd back to / of rootfs */
+ sys_fchdir(root_fd);
+- sys_chroot(".");
++ sys_chroot((const char __force_user *)".");
+ sys_close(old_fd);
+ sys_close(root_fd);
+
+ if (new_decode_dev(real_root_dev) == Root_RAM0) {
+- sys_chdir("/old");
++ sys_chdir((const char __force_user *)"/old");
+ return;
+ }
+
+@@ -82,17 +82,17 @@ static void __init handle_initrd(void)
+ mount_root();
+
+ printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
+- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
++ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
+ if (!error)
+ printk("okay\n");
+ else {
+- int fd = sys_open("/dev/root.old", O_RDWR, 0);
++ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
+ if (error == -ENOENT)
+ printk("/initrd does not exist. Ignored.\n");
+ else
+ printk("failed\n");
+ printk(KERN_NOTICE "Unmounting old root\n");
+- sys_umount("/old", MNT_DETACH);
++ sys_umount((char __force_user *)"/old", MNT_DETACH);
+ printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
+ if (fd < 0) {
+ error = fd;
+@@ -115,11 +115,11 @@ int __init initrd_load(void)
+ * mounted in the normal path.
+ */
+ if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
+- sys_unlink("/initrd.image");
++ sys_unlink((const char __force_user *)"/initrd.image");
+ handle_initrd();
+ return 1;
+ }
+ }
+- sys_unlink("/initrd.image");
++ sys_unlink((const char __force_user *)"/initrd.image");
+ return 0;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/init/do_mounts_md.c linux-3.4-pax/init/do_mounts_md.c
+--- linux-3.4/init/do_mounts_md.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/init/do_mounts_md.c 2012-05-21 12:10:11.684049006 +0200
+@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
+ partitioned ? "_d" : "", minor,
+ md_setup_args[ent].device_names);
+
+- fd = sys_open(name, 0, 0);
++ fd = sys_open((char __force_user *)name, 0, 0);
+ if (fd < 0) {
+ printk(KERN_ERR "md: open failed - cannot start "
+ "array %s\n", name);
+@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
+ * array without it
+ */
+ sys_close(fd);
+- fd = sys_open(name, 0, 0);
++ fd = sys_open((char __force_user *)name, 0, 0);
+ sys_ioctl(fd, BLKRRPART, 0);
+ }
+ sys_close(fd);
+@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
+
+ wait_for_device_probe();
+
+- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
++ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
+ if (fd >= 0) {
+ sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
+ sys_close(fd);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/init/initramfs.c linux-3.4-pax/init/initramfs.c
+--- linux-3.4/init/initramfs.c 2012-03-19 10:39:11.784049196 +0100
++++ linux-3.4-pax/init/initramfs.c 2012-05-21 12:10:11.684049006 +0200
+@@ -74,7 +74,7 @@ static void __init free_hash(void)
+ }
+ }
+
+-static long __init do_utime(char __user *filename, time_t mtime)
++static long __init do_utime(__force char __user *filename, time_t mtime)
+ {
+ struct timespec t[2];
+
+@@ -109,7 +109,7 @@ static void __init dir_utime(void)
+ struct dir_entry *de, *tmp;
+ list_for_each_entry_safe(de, tmp, &dir_list, list) {
+ list_del(&de->list);
+- do_utime(de->name, de->mtime);
++ do_utime((char __force_user *)de->name, de->mtime);
+ kfree(de->name);
+ kfree(de);
+ }
+@@ -271,7 +271,7 @@ static int __init maybe_link(void)
+ if (nlink >= 2) {
+ char *old = find_link(major, minor, ino, mode, collected);
+ if (old)
+- return (sys_link(old, collected) < 0) ? -1 : 1;
++ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
+ }
+ return 0;
+ }
+@@ -280,11 +280,11 @@ static void __init clean_path(char *path
+ {
+ struct stat st;
+
+- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
++ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
+ if (S_ISDIR(st.st_mode))
+- sys_rmdir(path);
++ sys_rmdir((char __force_user *)path);
+ else
+- sys_unlink(path);
++ sys_unlink((char __force_user *)path);
+ }
+ }
+
+@@ -305,7 +305,7 @@ static int __init do_name(void)
+ int openflags = O_WRONLY|O_CREAT;
+ if (ml != 1)
+ openflags |= O_TRUNC;
+- wfd = sys_open(collected, openflags, mode);
++ wfd = sys_open((char __force_user *)collected, openflags, mode);
+
+ if (wfd >= 0) {
+ sys_fchown(wfd, uid, gid);
+@@ -317,17 +317,17 @@ static int __init do_name(void)
+ }
+ }
+ } else if (S_ISDIR(mode)) {
+- sys_mkdir(collected, mode);
+- sys_chown(collected, uid, gid);
+- sys_chmod(collected, mode);
++ sys_mkdir((char __force_user *)collected, mode);
++ sys_chown((char __force_user *)collected, uid, gid);
++ sys_chmod((char __force_user *)collected, mode);
+ dir_add(collected, mtime);
+ } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
+ S_ISFIFO(mode) || S_ISSOCK(mode)) {
+ if (maybe_link() == 0) {
+- sys_mknod(collected, mode, rdev);
+- sys_chown(collected, uid, gid);
+- sys_chmod(collected, mode);
+- do_utime(collected, mtime);
++ sys_mknod((char __force_user *)collected, mode, rdev);
++ sys_chown((char __force_user *)collected, uid, gid);
++ sys_chmod((char __force_user *)collected, mode);
++ do_utime((char __force_user *)collected, mtime);
+ }
+ }
+ return 0;
+@@ -336,15 +336,15 @@ static int __init do_name(void)
+ static int __init do_copy(void)
+ {
+ if (count >= body_len) {
+- sys_write(wfd, victim, body_len);
++ sys_write(wfd, (char __force_user *)victim, body_len);
+ sys_close(wfd);
+- do_utime(vcollected, mtime);
++ do_utime((char __force_user *)vcollected, mtime);
+ kfree(vcollected);
+ eat(body_len);
+ state = SkipIt;
+ return 0;
+ } else {
+- sys_write(wfd, victim, count);
++ sys_write(wfd, (char __force_user *)victim, count);
+ body_len -= count;
+ eat(count);
+ return 1;
+@@ -355,9 +355,9 @@ static int __init do_symlink(void)
+ {
+ collected[N_ALIGN(name_len) + body_len] = '\0';
+ clean_path(collected, 0);
+- sys_symlink(collected + N_ALIGN(name_len), collected);
+- sys_lchown(collected, uid, gid);
+- do_utime(collected, mtime);
++ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
++ sys_lchown((char __force_user *)collected, uid, gid);
++ do_utime((char __force_user *)collected, mtime);
+ state = SkipIt;
+ next_state = Reset;
+ return 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/init/Kconfig linux-3.4-pax/init/Kconfig
+--- linux-3.4/init/Kconfig 2012-05-21 11:33:39.311929932 +0200
++++ linux-3.4-pax/init/Kconfig 2012-05-21 12:10:11.688049006 +0200
+@@ -1240,7 +1240,7 @@ config SLUB_DEBUG
+
+ config COMPAT_BRK
+ bool "Disable heap randomization"
+- default y
++ default n
+ help
+ Randomizing heap placement makes heap exploits harder, but it
+ also breaks ancient binaries (including anything libc5 based).
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/init/main.c linux-3.4-pax/init/main.c
+--- linux-3.4/init/main.c 2012-05-21 11:33:39.331929933 +0200
++++ linux-3.4-pax/init/main.c 2012-05-21 12:10:11.692049006 +0200
+@@ -148,6 +148,49 @@ static int __init set_reset_devices(char
+
+ __setup("reset_devices", set_reset_devices);
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++extern char pax_enter_kernel_user[];
++extern char pax_exit_kernel_user[];
++extern pgdval_t clone_pgd_mask;
++#endif
++
++#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
++static int __init setup_pax_nouderef(char *str)
++{
++#ifdef CONFIG_X86_32
++ unsigned int cpu;
++ struct desc_struct *gdt;
++
++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
++ gdt = get_cpu_gdt_table(cpu);
++ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
++ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
++ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
++ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
++ }
++ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
++#else
++ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
++ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
++ clone_pgd_mask = ~(pgdval_t)0UL;
++#endif
++
++ return 0;
++}
++early_param("pax_nouderef", setup_pax_nouderef);
++#endif
++
++#ifdef CONFIG_PAX_SOFTMODE
++int pax_softmode;
++
++static int __init setup_pax_softmode(char *str)
++{
++ get_option(&str, &pax_softmode);
++ return 1;
++}
++__setup("pax_softmode=", setup_pax_softmode);
++#endif
++
+ static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
+ const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
+ static const char *panic_later, *panic_param;
+@@ -677,6 +720,7 @@ int __init_or_module do_one_initcall(ini
+ {
+ int count = preempt_count();
+ int ret;
++ const char *msg1 = "", *msg2 = "";
+
+ if (initcall_debug)
+ ret = do_one_initcall_debug(fn);
+@@ -689,15 +733,15 @@ int __init_or_module do_one_initcall(ini
+ sprintf(msgbuf, "error code %d ", ret);
+
+ if (preempt_count() != count) {
+- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
++ msg1 = " preemption imbalance";
+ preempt_count() = count;
+ }
+ if (irqs_disabled()) {
+- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
++ msg2 = " disabled interrupts";
+ local_irq_enable();
+ }
+- if (msgbuf[0]) {
+- printk("initcall %pF returned with %s\n", fn, msgbuf);
++ if (msgbuf[0] || *msg1 || *msg2) {
++ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
+ }
+
+ return ret;
+@@ -864,7 +908,7 @@ static int __init kernel_init(void * unu
+ do_basic_setup();
+
+ /* Open the /dev/console on the rootfs, this should never fail */
+- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
++ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
+ printk(KERN_WARNING "Warning: unable to open an initial console.\n");
+
+ (void) sys_dup(0);
+@@ -877,7 +921,7 @@ static int __init kernel_init(void * unu
+ if (!ramdisk_execute_command)
+ ramdisk_execute_command = "/init";
+
+- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
++ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
+ ramdisk_execute_command = NULL;
+ prepare_namespace();
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/ipc/msg.c linux-3.4-pax/ipc/msg.c
+--- linux-3.4/ipc/msg.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/ipc/msg.c 2012-05-21 12:10:11.692049006 +0200
+@@ -309,18 +309,19 @@ static inline int msg_security(struct ke
+ return security_msg_queue_associate(msq, msgflg);
+ }
+
++static struct ipc_ops msg_ops = {
++ .getnew = newque,
++ .associate = msg_security,
++ .more_checks = NULL
++};
++
+ SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
+ {
+ struct ipc_namespace *ns;
+- struct ipc_ops msg_ops;
+ struct ipc_params msg_params;
+
+ ns = current->nsproxy->ipc_ns;
+
+- msg_ops.getnew = newque;
+- msg_ops.associate = msg_security;
+- msg_ops.more_checks = NULL;
+-
+ msg_params.key = key;
+ msg_params.flg = msgflg;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/ipc/sem.c linux-3.4-pax/ipc/sem.c
+--- linux-3.4/ipc/sem.c 2012-01-08 19:48:28.579470899 +0100
++++ linux-3.4-pax/ipc/sem.c 2012-05-21 12:10:11.696049006 +0200
+@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct
+ return 0;
+ }
+
++static struct ipc_ops sem_ops = {
++ .getnew = newary,
++ .associate = sem_security,
++ .more_checks = sem_more_checks
++};
++
+ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
+ {
+ struct ipc_namespace *ns;
+- struct ipc_ops sem_ops;
+ struct ipc_params sem_params;
+
+ ns = current->nsproxy->ipc_ns;
+@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
+ if (nsems < 0 || nsems > ns->sc_semmsl)
+ return -EINVAL;
+
+- sem_ops.getnew = newary;
+- sem_ops.associate = sem_security;
+- sem_ops.more_checks = sem_more_checks;
+-
+ sem_params.key = key;
+ sem_params.flg = semflg;
+ sem_params.u.nsems = nsems;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/ipc/shm.c linux-3.4-pax/ipc/shm.c
+--- linux-3.4/ipc/shm.c 2012-05-21 11:33:39.339929934 +0200
++++ linux-3.4-pax/ipc/shm.c 2012-05-21 12:10:11.700049007 +0200
+@@ -559,18 +559,19 @@ static inline int shm_more_checks(struct
+ return 0;
+ }
+
++static struct ipc_ops shm_ops = {
++ .getnew = newseg,
++ .associate = shm_security,
++ .more_checks = shm_more_checks
++};
++
+ SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
+ {
+ struct ipc_namespace *ns;
+- struct ipc_ops shm_ops;
+ struct ipc_params shm_params;
+
+ ns = current->nsproxy->ipc_ns;
+
+- shm_ops.getnew = newseg;
+- shm_ops.associate = shm_security;
+- shm_ops.more_checks = shm_more_checks;
+-
+ shm_params.key = key;
+ shm_params.flg = shmflg;
+ shm_params.u.size = size;
+@@ -988,6 +989,12 @@ long do_shmat(int shmid, char __user *sh
+ f_mode = FMODE_READ | FMODE_WRITE;
+ }
+ if (shmflg & SHM_EXEC) {
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->mm->pax_flags & MF_PAX_MPROTECT)
++ goto out;
++#endif
++
+ prot |= PROT_EXEC;
+ acc_mode |= S_IXUGO;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/acct.c linux-3.4-pax/kernel/acct.c
+--- linux-3.4/kernel/acct.c 2012-03-19 10:39:11.804049197 +0100
++++ linux-3.4-pax/kernel/acct.c 2012-05-21 12:10:11.704049007 +0200
+@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_a
+ */
+ flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+ current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+- file->f_op->write(file, (char *)&ac,
++ file->f_op->write(file, (char __force_user *)&ac,
+ sizeof(acct_t), &file->f_pos);
+ current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
+ set_fs(fs);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/audit.c linux-3.4-pax/kernel/audit.c
+--- linux-3.4/kernel/audit.c 2012-05-21 11:33:39.347929934 +0200
++++ linux-3.4-pax/kernel/audit.c 2012-05-21 12:10:11.704049007 +0200
+@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
+ 3) suppressed due to audit_rate_limit
+ 4) suppressed due to audit_backlog_limit
+ */
+-static atomic_t audit_lost = ATOMIC_INIT(0);
++static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
+
+ /* The netlink socket. */
+ static struct sock *audit_sock;
+@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
+ unsigned long now;
+ int print;
+
+- atomic_inc(&audit_lost);
++ atomic_inc_unchecked(&audit_lost);
+
+ print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
+
+@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
+ printk(KERN_WARNING
+ "audit: audit_lost=%d audit_rate_limit=%d "
+ "audit_backlog_limit=%d\n",
+- atomic_read(&audit_lost),
++ atomic_read_unchecked(&audit_lost),
+ audit_rate_limit,
+ audit_backlog_limit);
+ audit_panic(message);
+@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_b
+ status_set.pid = audit_pid;
+ status_set.rate_limit = audit_rate_limit;
+ status_set.backlog_limit = audit_backlog_limit;
+- status_set.lost = atomic_read(&audit_lost);
++ status_set.lost = atomic_read_unchecked(&audit_lost);
+ status_set.backlog = skb_queue_len(&audit_skb_queue);
+ audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
+ &status_set, sizeof(status_set));
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/auditsc.c linux-3.4-pax/kernel/auditsc.c
+--- linux-3.4/kernel/auditsc.c 2012-03-19 10:39:11.812049196 +0100
++++ linux-3.4-pax/kernel/auditsc.c 2012-05-21 12:10:11.708049006 +0200
+@@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_conte
+ }
+
+ /* global counter which is incremented every time something logs in */
+-static atomic_t session_id = ATOMIC_INIT(0);
++static atomic_unchecked_t session_id = ATOMIC_INIT(0);
+
+ /**
+ * audit_set_loginuid - set current task's audit_context loginuid
+@@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
+ return -EPERM;
+ #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
+
+- sessionid = atomic_inc_return(&session_id);
++ sessionid = atomic_inc_return_unchecked(&session_id);
+ if (context && context->in_syscall) {
+ struct audit_buffer *ab;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/capability.c linux-3.4-pax/kernel/capability.c
+--- linux-3.4/kernel/capability.c 2012-03-19 10:39:11.812049196 +0100
++++ linux-3.4-pax/kernel/capability.c 2012-05-21 12:10:11.712049006 +0200
+@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
+ * before modification is attempted and the application
+ * fails.
+ */
++ if (tocopy > ARRAY_SIZE(kdata))
++ return -EFAULT;
++
+ if (copy_to_user(dataptr, kdata, tocopy
+ * sizeof(struct __user_cap_data_struct))) {
+ return -EFAULT;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/compat.c linux-3.4-pax/kernel/compat.c
+--- linux-3.4/kernel/compat.c 2012-05-21 11:33:39.367929935 +0200
++++ linux-3.4-pax/kernel/compat.c 2012-05-21 12:10:11.716049008 +0200
+@@ -220,7 +220,7 @@ static long compat_nanosleep_restart(str
+ mm_segment_t oldfs;
+ long ret;
+
+- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
++ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = hrtimer_nanosleep_restart(restart);
+@@ -252,7 +252,7 @@ asmlinkage long compat_sys_nanosleep(str
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = hrtimer_nanosleep(&tu,
+- rmtp ? (struct timespec __user *)&rmt : NULL,
++ rmtp ? (struct timespec __force_user *)&rmt : NULL,
+ HRTIMER_MODE_REL, CLOCK_MONOTONIC);
+ set_fs(oldfs);
+
+@@ -361,7 +361,7 @@ asmlinkage long compat_sys_sigpending(co
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+- ret = sys_sigpending((old_sigset_t __user *) &s);
++ ret = sys_sigpending((old_sigset_t __force_user *) &s);
+ set_fs(old_fs);
+ if (ret == 0)
+ ret = put_user(s, set);
+@@ -451,7 +451,7 @@ asmlinkage long compat_sys_old_getrlimit
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+- ret = sys_old_getrlimit(resource, &r);
++ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
+ set_fs(old_fs);
+
+ if (!ret) {
+@@ -523,7 +523,7 @@ asmlinkage long compat_sys_getrusage(int
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+- ret = sys_getrusage(who, (struct rusage __user *) &r);
++ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
+ set_fs(old_fs);
+
+ if (ret)
+@@ -550,8 +550,8 @@ compat_sys_wait4(compat_pid_t pid, compa
+ set_fs (KERNEL_DS);
+ ret = sys_wait4(pid,
+ (stat_addr ?
+- (unsigned int __user *) &status : NULL),
+- options, (struct rusage __user *) &r);
++ (unsigned int __force_user *) &status : NULL),
++ options, (struct rusage __force_user *) &r);
+ set_fs (old_fs);
+
+ if (ret > 0) {
+@@ -576,8 +576,8 @@ asmlinkage long compat_sys_waitid(int wh
+ memset(&info, 0, sizeof(info));
+
+ set_fs(KERNEL_DS);
+- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
+- uru ? (struct rusage __user *)&ru : NULL);
++ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
++ uru ? (struct rusage __force_user *)&ru : NULL);
+ set_fs(old_fs);
+
+ if ((ret < 0) || (info.si_signo == 0))
+@@ -707,8 +707,8 @@ long compat_sys_timer_settime(timer_t ti
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_timer_settime(timer_id, flags,
+- (struct itimerspec __user *) &newts,
+- (struct itimerspec __user *) &oldts);
++ (struct itimerspec __force_user *) &newts,
++ (struct itimerspec __force_user *) &oldts);
+ set_fs(oldfs);
+ if (!err && old && put_compat_itimerspec(old, &oldts))
+ return -EFAULT;
+@@ -725,7 +725,7 @@ long compat_sys_timer_gettime(timer_t ti
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_timer_gettime(timer_id,
+- (struct itimerspec __user *) &ts);
++ (struct itimerspec __force_user *) &ts);
+ set_fs(oldfs);
+ if (!err && put_compat_itimerspec(setting, &ts))
+ return -EFAULT;
+@@ -744,7 +744,7 @@ long compat_sys_clock_settime(clockid_t
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_clock_settime(which_clock,
+- (struct timespec __user *) &ts);
++ (struct timespec __force_user *) &ts);
+ set_fs(oldfs);
+ return err;
+ }
+@@ -759,7 +759,7 @@ long compat_sys_clock_gettime(clockid_t
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_clock_gettime(which_clock,
+- (struct timespec __user *) &ts);
++ (struct timespec __force_user *) &ts);
+ set_fs(oldfs);
+ if (!err && put_compat_timespec(&ts, tp))
+ return -EFAULT;
+@@ -779,7 +779,7 @@ long compat_sys_clock_adjtime(clockid_t
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
++ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
+ set_fs(oldfs);
+
+ err = compat_put_timex(utp, &txc);
+@@ -799,7 +799,7 @@ long compat_sys_clock_getres(clockid_t w
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_clock_getres(which_clock,
+- (struct timespec __user *) &ts);
++ (struct timespec __force_user *) &ts);
+ set_fs(oldfs);
+ if (!err && tp && put_compat_timespec(&ts, tp))
+ return -EFAULT;
+@@ -811,9 +811,9 @@ static long compat_clock_nanosleep_resta
+ long err;
+ mm_segment_t oldfs;
+ struct timespec tu;
+- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
++ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
+
+- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
++ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = clock_nanosleep_restart(restart);
+@@ -845,8 +845,8 @@ long compat_sys_clock_nanosleep(clockid_
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_clock_nanosleep(which_clock, flags,
+- (struct timespec __user *) &in,
+- (struct timespec __user *) &out);
++ (struct timespec __force_user *) &in,
++ (struct timespec __force_user *) &out);
+ set_fs(oldfs);
+
+ if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/debug/debug_core.c linux-3.4-pax/kernel/debug/debug_core.c
+--- linux-3.4/kernel/debug/debug_core.c 2012-05-21 11:33:39.407929937 +0200
++++ linux-3.4-pax/kernel/debug/debug_core.c 2012-05-21 12:10:11.720049009 +0200
+@@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
+ */
+ static atomic_t masters_in_kgdb;
+ static atomic_t slaves_in_kgdb;
+-static atomic_t kgdb_break_tasklet_var;
++static atomic_unchecked_t kgdb_break_tasklet_var;
+ atomic_t kgdb_setting_breakpoint;
+
+ struct task_struct *kgdb_usethread;
+@@ -132,7 +132,7 @@ int kgdb_single_step;
+ static pid_t kgdb_sstep_pid;
+
+ /* to keep track of the CPU which is doing the single stepping*/
+-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
++atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
+
+ /*
+ * If you are debugging a problem where roundup (the collection of
+@@ -540,7 +540,7 @@ return_normal:
+ * kernel will only try for the value of sstep_tries before
+ * giving up and continuing on.
+ */
+- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
+ (kgdb_info[cpu].task &&
+ kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
+ atomic_set(&kgdb_active, -1);
+@@ -634,8 +634,8 @@ cpu_master_loop:
+ }
+
+ kgdb_restore:
+- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
+- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
++ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
+ if (kgdb_info[sstep_cpu].task)
+ kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
+ else
+@@ -861,18 +861,18 @@ static void kgdb_unregister_callbacks(vo
+ static void kgdb_tasklet_bpt(unsigned long ing)
+ {
+ kgdb_breakpoint();
+- atomic_set(&kgdb_break_tasklet_var, 0);
++ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
+ }
+
+ static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
+
+ void kgdb_schedule_breakpoint(void)
+ {
+- if (atomic_read(&kgdb_break_tasklet_var) ||
++ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
+ atomic_read(&kgdb_active) != -1 ||
+ atomic_read(&kgdb_setting_breakpoint))
+ return;
+- atomic_inc(&kgdb_break_tasklet_var);
++ atomic_inc_unchecked(&kgdb_break_tasklet_var);
+ tasklet_schedule(&kgdb_tasklet_breakpoint);
+ }
+ EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/debug/kdb/kdb_main.c linux-3.4-pax/kernel/debug/kdb/kdb_main.c
+--- linux-3.4/kernel/debug/kdb/kdb_main.c 2012-05-21 11:33:39.423929937 +0200
++++ linux-3.4-pax/kernel/debug/kdb/kdb_main.c 2012-05-21 12:10:11.720049009 +0200
+@@ -1983,7 +1983,7 @@ static int kdb_lsmod(int argc, const cha
+ list_for_each_entry(mod, kdb_modules, list) {
+
+ kdb_printf("%-20s%8u 0x%p ", mod->name,
+- mod->core_size, (void *)mod);
++ mod->core_size_rx + mod->core_size_rw, (void *)mod);
+ #ifdef CONFIG_MODULE_UNLOAD
+ kdb_printf("%4ld ", module_refcount(mod));
+ #endif
+@@ -1993,7 +1993,7 @@ static int kdb_lsmod(int argc, const cha
+ kdb_printf(" (Loading)");
+ else
+ kdb_printf(" (Live)");
+- kdb_printf(" 0x%p", mod->module_core);
++ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
+
+ #ifdef CONFIG_MODULE_UNLOAD
+ {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/events/core.c linux-3.4-pax/kernel/events/core.c
+--- linux-3.4/kernel/events/core.c 2012-05-21 11:33:39.439929940 +0200
++++ linux-3.4-pax/kernel/events/core.c 2012-05-21 12:10:11.728049009 +0200
+@@ -181,7 +181,7 @@ int perf_proc_update_handler(struct ctl_
+ return 0;
+ }
+
+-static atomic64_t perf_event_id;
++static atomic64_unchecked_t perf_event_id;
+
+ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+ enum event_type_t event_type);
+@@ -2659,7 +2659,7 @@ static void __perf_event_read(void *info
+
+ static inline u64 perf_event_count(struct perf_event *event)
+ {
+- return local64_read(&event->count) + atomic64_read(&event->child_count);
++ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
+ }
+
+ static u64 perf_event_read(struct perf_event *event)
+@@ -2983,9 +2983,9 @@ u64 perf_event_read_value(struct perf_ev
+ mutex_lock(&event->child_mutex);
+ total += perf_event_read(event);
+ *enabled += event->total_time_enabled +
+- atomic64_read(&event->child_total_time_enabled);
++ atomic64_read_unchecked(&event->child_total_time_enabled);
+ *running += event->total_time_running +
+- atomic64_read(&event->child_total_time_running);
++ atomic64_read_unchecked(&event->child_total_time_running);
+
+ list_for_each_entry(child, &event->child_list, child_list) {
+ total += perf_event_read(child);
+@@ -3393,10 +3393,10 @@ void perf_event_update_userpage(struct p
+ userpg->offset -= local64_read(&event->hw.prev_count);
+
+ userpg->time_enabled = enabled +
+- atomic64_read(&event->child_total_time_enabled);
++ atomic64_read_unchecked(&event->child_total_time_enabled);
+
+ userpg->time_running = running +
+- atomic64_read(&event->child_total_time_running);
++ atomic64_read_unchecked(&event->child_total_time_running);
+
+ arch_perf_update_userpage(userpg, now);
+
+@@ -3829,11 +3829,11 @@ static void perf_output_read_one(struct
+ values[n++] = perf_event_count(event);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ values[n++] = enabled +
+- atomic64_read(&event->child_total_time_enabled);
++ atomic64_read_unchecked(&event->child_total_time_enabled);
+ }
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ values[n++] = running +
+- atomic64_read(&event->child_total_time_running);
++ atomic64_read_unchecked(&event->child_total_time_running);
+ }
+ if (read_format & PERF_FORMAT_ID)
+ values[n++] = primary_event_id(event);
+@@ -4511,12 +4511,12 @@ static void perf_event_mmap_event(struct
+ * need to add enough zero bytes after the string to handle
+ * the 64bit alignment we do later.
+ */
+- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
++ buf = kzalloc(PATH_MAX, GFP_KERNEL);
+ if (!buf) {
+ name = strncpy(tmp, "//enomem", sizeof(tmp));
+ goto got_name;
+ }
+- name = d_path(&file->f_path, buf, PATH_MAX);
++ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
+ if (IS_ERR(name)) {
+ name = strncpy(tmp, "//toolong", sizeof(tmp));
+ goto got_name;
+@@ -5929,7 +5929,7 @@ perf_event_alloc(struct perf_event_attr
+ event->parent = parent_event;
+
+ event->ns = get_pid_ns(current->nsproxy->pid_ns);
+- event->id = atomic64_inc_return(&perf_event_id);
++ event->id = atomic64_inc_return_unchecked(&perf_event_id);
+
+ event->state = PERF_EVENT_STATE_INACTIVE;
+
+@@ -6491,10 +6491,10 @@ static void sync_child_event(struct perf
+ /*
+ * Add back the child's count to the parent's count:
+ */
+- atomic64_add(child_val, &parent_event->child_count);
+- atomic64_add(child_event->total_time_enabled,
++ atomic64_add_unchecked(child_val, &parent_event->child_count);
++ atomic64_add_unchecked(child_event->total_time_enabled,
+ &parent_event->child_total_time_enabled);
+- atomic64_add(child_event->total_time_running,
++ atomic64_add_unchecked(child_event->total_time_running,
+ &parent_event->child_total_time_running);
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/exit.c linux-3.4-pax/kernel/exit.c
+--- linux-3.4/kernel/exit.c 2012-05-21 11:33:39.443929939 +0200
++++ linux-3.4-pax/kernel/exit.c 2012-05-21 12:10:11.732049008 +0200
+@@ -382,7 +382,7 @@ int allow_signal(int sig)
+ * know it'll be handled, so that they don't get converted to
+ * SIGKILL or just silently dropped.
+ */
+- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
++ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ return 0;
+@@ -1093,7 +1093,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
+ * Take down every thread in the group. This is called by fatal signals
+ * as well as by sys_exit_group (below).
+ */
+-void
++__noreturn void
+ do_group_exit(int exit_code)
+ {
+ struct signal_struct *sig = current->signal;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/fork.c linux-3.4-pax/kernel/fork.c
+--- linux-3.4/kernel/fork.c 2012-05-21 11:33:39.451929940 +0200
++++ linux-3.4-pax/kernel/fork.c 2012-05-30 17:47:26.612966042 +0200
+@@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
+ *stackend = STACK_END_MAGIC; /* for overflow detection */
+
+ #ifdef CONFIG_CC_STACKPROTECTOR
+- tsk->stack_canary = get_random_int();
++ tsk->stack_canary = pax_get_random_long();
+ #endif
+
+ /*
+@@ -310,13 +310,78 @@ out:
+ }
+
+ #ifdef CONFIG_MMU
++static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
++{
++ struct vm_area_struct *tmp;
++ unsigned long charge;
++ struct mempolicy *pol;
++ struct file *file;
++
++ charge = 0;
++ if (mpnt->vm_flags & VM_ACCOUNT) {
++ unsigned long len;
++ len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
++ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
++ goto fail_nomem;
++ charge = len;
++ }
++ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
++ if (!tmp)
++ goto fail_nomem;
++ *tmp = *mpnt;
++ tmp->vm_mm = mm;
++ INIT_LIST_HEAD(&tmp->anon_vma_chain);
++ pol = mpol_dup(vma_policy(mpnt));
++ if (IS_ERR(pol))
++ goto fail_nomem_policy;
++ vma_set_policy(tmp, pol);
++ if (anon_vma_fork(tmp, mpnt))
++ goto fail_nomem_anon_vma_fork;
++ tmp->vm_flags &= ~VM_LOCKED;
++ tmp->vm_next = tmp->vm_prev = NULL;
++ tmp->vm_mirror = NULL;
++ file = tmp->vm_file;
++ if (file) {
++ struct inode *inode = file->f_path.dentry->d_inode;
++ struct address_space *mapping = file->f_mapping;
++
++ get_file(file);
++ if (tmp->vm_flags & VM_DENYWRITE)
++ atomic_dec(&inode->i_writecount);
++ mutex_lock(&mapping->i_mmap_mutex);
++ if (tmp->vm_flags & VM_SHARED)
++ mapping->i_mmap_writable++;
++ flush_dcache_mmap_lock(mapping);
++ /* insert tmp into the share list, just after mpnt */
++ vma_prio_tree_add(tmp, mpnt);
++ flush_dcache_mmap_unlock(mapping);
++ mutex_unlock(&mapping->i_mmap_mutex);
++ }
++
++ /*
++ * Clear hugetlb-related page reserves for children. This only
++ * affects MAP_PRIVATE mappings. Faults generated by the child
++ * are not guaranteed to succeed, even if read-only
++ */
++ if (is_vm_hugetlb_page(tmp))
++ reset_vma_resv_huge_pages(tmp);
++
++ return tmp;
++
++fail_nomem_anon_vma_fork:
++ mpol_put(pol);
++fail_nomem_policy:
++ kmem_cache_free(vm_area_cachep, tmp);
++fail_nomem:
++ vm_unacct_memory(charge);
++ return NULL;
++}
++
+ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+ {
+ struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
+ struct rb_node **rb_link, *rb_parent;
+ int retval;
+- unsigned long charge;
+- struct mempolicy *pol;
+
+ down_write(&oldmm->mmap_sem);
+ flush_cache_dup_mm(oldmm);
+@@ -328,8 +393,8 @@ static int dup_mmap(struct mm_struct *mm
+ mm->locked_vm = 0;
+ mm->mmap = NULL;
+ mm->mmap_cache = NULL;
+- mm->free_area_cache = oldmm->mmap_base;
+- mm->cached_hole_size = ~0UL;
++ mm->free_area_cache = oldmm->free_area_cache;
++ mm->cached_hole_size = oldmm->cached_hole_size;
+ mm->map_count = 0;
+ cpumask_clear(mm_cpumask(mm));
+ mm->mm_rb = RB_ROOT;
+@@ -345,8 +410,6 @@ static int dup_mmap(struct mm_struct *mm
+
+ prev = NULL;
+ for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
+- struct file *file;
+-
+ if (mpnt->vm_flags & VM_DONTCOPY) {
+ long pages = vma_pages(mpnt);
+ mm->total_vm -= pages;
+@@ -354,55 +417,13 @@ static int dup_mmap(struct mm_struct *mm
+ -pages);
+ continue;
+ }
+- charge = 0;
+- if (mpnt->vm_flags & VM_ACCOUNT) {
+- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
+- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
+- goto fail_nomem;
+- charge = len;
+- }
+- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+- if (!tmp)
+- goto fail_nomem;
+- *tmp = *mpnt;
+- INIT_LIST_HEAD(&tmp->anon_vma_chain);
+- pol = mpol_dup(vma_policy(mpnt));
+- retval = PTR_ERR(pol);
+- if (IS_ERR(pol))
+- goto fail_nomem_policy;
+- vma_set_policy(tmp, pol);
+- tmp->vm_mm = mm;
+- if (anon_vma_fork(tmp, mpnt))
+- goto fail_nomem_anon_vma_fork;
+- tmp->vm_flags &= ~VM_LOCKED;
+- tmp->vm_next = tmp->vm_prev = NULL;
+- file = tmp->vm_file;
+- if (file) {
+- struct inode *inode = file->f_path.dentry->d_inode;
+- struct address_space *mapping = file->f_mapping;
+-
+- get_file(file);
+- if (tmp->vm_flags & VM_DENYWRITE)
+- atomic_dec(&inode->i_writecount);
+- mutex_lock(&mapping->i_mmap_mutex);
+- if (tmp->vm_flags & VM_SHARED)
+- mapping->i_mmap_writable++;
+- flush_dcache_mmap_lock(mapping);
+- /* insert tmp into the share list, just after mpnt */
+- vma_prio_tree_add(tmp, mpnt);
+- flush_dcache_mmap_unlock(mapping);
+- mutex_unlock(&mapping->i_mmap_mutex);
++ tmp = dup_vma(mm, oldmm, mpnt);
++ if (!tmp) {
++ retval = -ENOMEM;
++ goto out;
+ }
+
+ /*
+- * Clear hugetlb-related page reserves for children. This only
+- * affects MAP_PRIVATE mappings. Faults generated by the child
+- * are not guaranteed to succeed, even if read-only
+- */
+- if (is_vm_hugetlb_page(tmp))
+- reset_vma_resv_huge_pages(tmp);
+-
+- /*
+ * Link in the new vma and copy the page table entries.
+ */
+ *pprev = tmp;
+@@ -423,6 +444,31 @@ static int dup_mmap(struct mm_struct *mm
+ if (retval)
+ goto out;
+ }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
++ struct vm_area_struct *mpnt_m;
++
++ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
++ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
++
++ if (!mpnt->vm_mirror)
++ continue;
++
++ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
++ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
++ mpnt->vm_mirror = mpnt_m;
++ } else {
++ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
++ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
++ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
++ mpnt->vm_mirror->vm_mirror = mpnt;
++ }
++ }
++ BUG_ON(mpnt_m);
++ }
++#endif
++
+ /* a new mm has just been created */
+ arch_dup_mmap(oldmm, mm);
+ retval = 0;
+@@ -431,14 +477,6 @@ out:
+ flush_tlb_mm(oldmm);
+ up_write(&oldmm->mmap_sem);
+ return retval;
+-fail_nomem_anon_vma_fork:
+- mpol_put(pol);
+-fail_nomem_policy:
+- kmem_cache_free(vm_area_cachep, tmp);
+-fail_nomem:
+- retval = -ENOMEM;
+- vm_unacct_memory(charge);
+- goto out;
+ }
+
+ static inline int mm_alloc_pgd(struct mm_struct *mm)
+@@ -898,7 +936,7 @@ static int copy_fs(unsigned long clone_f
+ spin_unlock(&fs->lock);
+ return -EAGAIN;
+ }
+- fs->users++;
++ atomic_inc(&fs->users);
+ spin_unlock(&fs->lock);
+ return 0;
+ }
+@@ -1699,7 +1737,7 @@ static int unshare_fs(unsigned long unsh
+ return 0;
+
+ /* don't need lock here; in the worst case we'll do useless copy */
+- if (fs->users == 1)
++ if (atomic_read(&fs->users) == 1)
+ return 0;
+
+ *new_fsp = copy_fs_struct(fs);
+@@ -1788,7 +1826,7 @@ SYSCALL_DEFINE1(unshare, unsigned long,
+ fs = current->fs;
+ spin_lock(&fs->lock);
+ current->fs = new_fs;
+- if (--fs->users)
++ if (atomic_dec_return(&fs->users))
+ new_fs = NULL;
+ else
+ new_fs = fs;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/futex.c linux-3.4-pax/kernel/futex.c
+--- linux-3.4/kernel/futex.c 2012-05-21 11:33:39.455929940 +0200
++++ linux-3.4-pax/kernel/futex.c 2012-05-21 12:10:11.740049009 +0200
+@@ -239,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
+ struct page *page, *page_head;
+ int err, ro = 0;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
++ return -EFAULT;
++#endif
++
+ /*
+ * The futex address must be "naturally" aligned.
+ */
+@@ -2711,6 +2716,7 @@ static int __init futex_init(void)
+ {
+ u32 curval;
+ int i;
++ mm_segment_t oldfs;
+
+ /*
+ * This will fail and we want it. Some arch implementations do
+@@ -2722,8 +2728,11 @@ static int __init futex_init(void)
+ * implementation, the non-functional ones will return
+ * -ENOSYS.
+ */
++ oldfs = get_fs();
++ set_fs(USER_DS);
+ if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
+ futex_cmpxchg_enabled = 1;
++ set_fs(oldfs);
+
+ for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
+ plist_head_init(&futex_queues[i].chain);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/gcov/base.c linux-3.4-pax/kernel/gcov/base.c
+--- linux-3.4/kernel/gcov/base.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/kernel/gcov/base.c 2012-05-21 12:10:11.744049009 +0200
+@@ -102,11 +102,6 @@ void gcov_enable_events(void)
+ }
+
+ #ifdef CONFIG_MODULES
+-static inline int within(void *addr, void *start, unsigned long size)
+-{
+- return ((addr >= start) && (addr < start + size));
+-}
+-
+ /* Update list and generate events when modules are unloaded. */
+ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
+ void *data)
+@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
+ prev = NULL;
+ /* Remove entries located in module from linked list. */
+ for (info = gcov_info_head; info; info = info->next) {
+- if (within(info, mod->module_core, mod->core_size)) {
++ if (within_module_core_rw((unsigned long)info, mod)) {
+ if (prev)
+ prev->next = info->next;
+ else
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/hrtimer.c linux-3.4-pax/kernel/hrtimer.c
+--- linux-3.4/kernel/hrtimer.c 2012-01-08 19:48:28.683470894 +0100
++++ linux-3.4-pax/kernel/hrtimer.c 2012-05-21 12:10:11.748049009 +0200
+@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
+ local_irq_restore(flags);
+ }
+
+-static void run_hrtimer_softirq(struct softirq_action *h)
++static void run_hrtimer_softirq(void)
+ {
+ hrtimer_peek_ahead_timers();
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/jump_label.c linux-3.4-pax/kernel/jump_label.c
+--- linux-3.4/kernel/jump_label.c 2012-05-21 11:33:39.491929942 +0200
++++ linux-3.4-pax/kernel/jump_label.c 2012-05-21 12:10:11.748049009 +0200
+@@ -50,7 +50,9 @@ jump_label_sort_entries(struct jump_entr
+
+ size = (((unsigned long)stop - (unsigned long)start)
+ / sizeof(struct jump_entry));
++ pax_open_kernel();
+ sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
++ pax_close_kernel();
+ }
+
+ static void jump_label_update(struct static_key *key, int enable);
+@@ -356,10 +358,12 @@ static void jump_label_invalidate_module
+ struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
+ struct jump_entry *iter;
+
++ pax_open_kernel();
+ for (iter = iter_start; iter < iter_stop; iter++) {
+ if (within_module_init(iter->code, mod))
+ iter->code = 0;
+ }
++ pax_close_kernel();
+ }
+
+ static int
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/kallsyms.c linux-3.4-pax/kernel/kallsyms.c
+--- linux-3.4/kernel/kallsyms.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/kernel/kallsyms.c 2012-05-21 12:10:11.752049009 +0200
+@@ -53,12 +53,33 @@ extern const unsigned long kallsyms_mark
+
+ static inline int is_kernel_inittext(unsigned long addr)
+ {
++ if (system_state != SYSTEM_BOOTING)
++ return 0;
++
+ if (addr >= (unsigned long)_sinittext
+ && addr <= (unsigned long)_einittext)
+ return 1;
+ return 0;
+ }
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#ifdef CONFIG_MODULES
++static inline int is_module_text(unsigned long addr)
++{
++ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
++ return 1;
++
++ addr = ktla_ktva(addr);
++ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
++}
++#else
++static inline int is_module_text(unsigned long addr)
++{
++ return 0;
++}
++#endif
++#endif
++
+ static inline int is_kernel_text(unsigned long addr)
+ {
+ if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
+@@ -69,13 +90,28 @@ static inline int is_kernel_text(unsigne
+
+ static inline int is_kernel(unsigned long addr)
+ {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (is_kernel_text(addr) || is_kernel_inittext(addr))
++ return 1;
++
++ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
++#else
+ if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
++#endif
++
+ return 1;
+ return in_gate_area_no_mm(addr);
+ }
+
+ static int is_ksym_addr(unsigned long addr)
+ {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (is_module_text(addr))
++ return 0;
++#endif
++
+ if (all_var)
+ return is_kernel(addr);
+
+@@ -454,7 +490,6 @@ static unsigned long get_ksymbol_core(st
+
+ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
+ {
+- iter->name[0] = '\0';
+ iter->nameoff = get_symbol_offset(new_pos);
+ iter->pos = new_pos;
+ }
+@@ -540,7 +575,7 @@ static int kallsyms_open(struct inode *i
+ struct kallsym_iter *iter;
+ int ret;
+
+- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
++ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return -ENOMEM;
+ reset_iter(iter, 0);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/kexec.c linux-3.4-pax/kernel/kexec.c
+--- linux-3.4/kernel/kexec.c 2012-05-21 11:33:39.495929942 +0200
++++ linux-3.4-pax/kernel/kexec.c 2012-05-21 12:10:11.756049010 +0200
+@@ -1046,7 +1046,8 @@ asmlinkage long compat_sys_kexec_load(un
+ unsigned long flags)
+ {
+ struct compat_kexec_segment in;
+- struct kexec_segment out, __user *ksegments;
++ struct kexec_segment out;
++ struct kexec_segment __user *ksegments;
+ unsigned long i, result;
+
+ /* Don't allow clients that don't understand the native
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/kmod.c linux-3.4-pax/kernel/kmod.c
+--- linux-3.4/kernel/kmod.c 2012-05-21 11:33:39.499929942 +0200
++++ linux-3.4-pax/kernel/kmod.c 2012-05-21 12:10:11.756049010 +0200
+@@ -267,7 +267,7 @@ static int wait_for_helper(void *data)
+ *
+ * Thus the __user pointer cast is valid here.
+ */
+- sys_wait4(pid, (int __user *)&ret, 0, NULL);
++ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
+
+ /*
+ * If ret is 0, either ____call_usermodehelper failed and the
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/kprobes.c linux-3.4-pax/kernel/kprobes.c
+--- linux-3.4/kernel/kprobes.c 2012-03-19 10:39:11.868049182 +0100
++++ linux-3.4-pax/kernel/kprobes.c 2012-05-21 12:10:11.760049010 +0200
+@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
+ * kernel image and loaded module images reside. This is required
+ * so x86_64 can correctly handle the %rip-relative fixups.
+ */
+- kip->insns = module_alloc(PAGE_SIZE);
++ kip->insns = module_alloc_exec(PAGE_SIZE);
+ if (!kip->insns) {
+ kfree(kip);
+ return NULL;
+@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
+ */
+ if (!list_is_singular(&kip->list)) {
+ list_del(&kip->list);
+- module_free(NULL, kip->insns);
++ module_free_exec(NULL, kip->insns);
+ kfree(kip);
+ }
+ return 1;
+@@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
+ {
+ int i, err = 0;
+ unsigned long offset = 0, size = 0;
+- char *modname, namebuf[128];
++ char *modname, namebuf[KSYM_NAME_LEN];
+ const char *symbol_name;
+ void *addr;
+ struct kprobe_blackpoint *kb;
+@@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(st
+ const char *sym = NULL;
+ unsigned int i = *(loff_t *) v;
+ unsigned long offset = 0;
+- char *modname, namebuf[128];
++ char *modname, namebuf[KSYM_NAME_LEN];
+
+ head = &kprobe_table[i];
+ preempt_disable();
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/lockdep.c linux-3.4-pax/kernel/lockdep.c
+--- linux-3.4/kernel/lockdep.c 2012-05-21 11:33:39.507929943 +0200
++++ linux-3.4-pax/kernel/lockdep.c 2012-05-21 12:10:11.768049010 +0200
+@@ -590,6 +590,10 @@ static int static_obj(void *obj)
+ end = (unsigned long) &_end,
+ addr = (unsigned long) obj;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ start = ktla_ktva(start);
++#endif
++
+ /*
+ * static variable?
+ */
+@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *
+ if (!static_obj(lock->key)) {
+ debug_locks_off();
+ printk("INFO: trying to register non-static key.\n");
++ printk("lock:%pS key:%pS.\n", lock, lock->key);
+ printk("the code is fine but needs lockdep annotation.\n");
+ printk("turning off the locking correctness validator.\n");
+ dump_stack();
+@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep
+ if (!class)
+ return 0;
+ }
+- atomic_inc((atomic_t *)&class->ops);
++ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
+ if (very_verbose(class)) {
+ printk("\nacquire class [%p] %s", class->key, class->name);
+ if (class->name_version > 1)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/lockdep_proc.c linux-3.4-pax/kernel/lockdep_proc.c
+--- linux-3.4/kernel/lockdep_proc.c 2012-01-08 19:48:28.755470890 +0100
++++ linux-3.4-pax/kernel/lockdep_proc.c 2012-05-21 12:10:11.768049010 +0200
+@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
+
+ static void print_name(struct seq_file *m, struct lock_class *class)
+ {
+- char str[128];
++ char str[KSYM_NAME_LEN];
+ const char *name = class->name;
+
+ if (!name) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/module.c linux-3.4-pax/kernel/module.c
+--- linux-3.4/kernel/module.c 2012-05-21 11:33:39.511929942 +0200
++++ linux-3.4-pax/kernel/module.c 2012-05-21 12:10:11.776049011 +0200
+@@ -114,7 +114,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
+
+ /* Bounds of module allocation, for speeding __module_address.
+ * Protected by module_mutex. */
+-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
++static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
++static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
+
+ int register_module_notifier(struct notifier_block * nb)
+ {
+@@ -278,7 +279,7 @@ bool each_symbol_section(bool (*fn)(cons
+ return true;
+
+ list_for_each_entry_rcu(mod, &modules, list) {
+- struct symsearch arr[] = {
++ struct symsearch modarr[] = {
+ { mod->syms, mod->syms + mod->num_syms, mod->crcs,
+ NOT_GPL_ONLY, false },
+ { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
+@@ -300,7 +301,7 @@ bool each_symbol_section(bool (*fn)(cons
+ #endif
+ };
+
+- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
++ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
+ return true;
+ }
+ return false;
+@@ -432,7 +433,7 @@ static inline void __percpu *mod_percpu(
+ static int percpu_modalloc(struct module *mod,
+ unsigned long size, unsigned long align)
+ {
+- if (align > PAGE_SIZE) {
++ if (align-1 >= PAGE_SIZE) {
+ printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
+ mod->name, align, PAGE_SIZE);
+ align = PAGE_SIZE;
+@@ -1032,7 +1033,7 @@ struct module_attribute module_uevent =
+ static ssize_t show_coresize(struct module_attribute *mattr,
+ struct module_kobject *mk, char *buffer)
+ {
+- return sprintf(buffer, "%u\n", mk->mod->core_size);
++ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
+ }
+
+ static struct module_attribute modinfo_coresize =
+@@ -1041,7 +1042,7 @@ static struct module_attribute modinfo_c
+ static ssize_t show_initsize(struct module_attribute *mattr,
+ struct module_kobject *mk, char *buffer)
+ {
+- return sprintf(buffer, "%u\n", mk->mod->init_size);
++ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
+ }
+
+ static struct module_attribute modinfo_initsize =
+@@ -1721,21 +1722,21 @@ static void set_section_ro_nx(void *base
+
+ static void unset_module_core_ro_nx(struct module *mod)
+ {
+- set_page_attributes(mod->module_core + mod->core_text_size,
+- mod->module_core + mod->core_size,
++ set_page_attributes(mod->module_core_rw,
++ mod->module_core_rw + mod->core_size_rw,
+ set_memory_x);
+- set_page_attributes(mod->module_core,
+- mod->module_core + mod->core_ro_size,
++ set_page_attributes(mod->module_core_rx,
++ mod->module_core_rx + mod->core_size_rx,
+ set_memory_rw);
+ }
+
+ static void unset_module_init_ro_nx(struct module *mod)
+ {
+- set_page_attributes(mod->module_init + mod->init_text_size,
+- mod->module_init + mod->init_size,
++ set_page_attributes(mod->module_init_rw,
++ mod->module_init_rw + mod->init_size_rw,
+ set_memory_x);
+- set_page_attributes(mod->module_init,
+- mod->module_init + mod->init_ro_size,
++ set_page_attributes(mod->module_init_rx,
++ mod->module_init_rx + mod->init_size_rx,
+ set_memory_rw);
+ }
+
+@@ -1746,14 +1747,14 @@ void set_all_modules_text_rw(void)
+
+ mutex_lock(&module_mutex);
+ list_for_each_entry_rcu(mod, &modules, list) {
+- if ((mod->module_core) && (mod->core_text_size)) {
+- set_page_attributes(mod->module_core,
+- mod->module_core + mod->core_text_size,
++ if ((mod->module_core_rx) && (mod->core_size_rx)) {
++ set_page_attributes(mod->module_core_rx,
++ mod->module_core_rx + mod->core_size_rx,
+ set_memory_rw);
+ }
+- if ((mod->module_init) && (mod->init_text_size)) {
+- set_page_attributes(mod->module_init,
+- mod->module_init + mod->init_text_size,
++ if ((mod->module_init_rx) && (mod->init_size_rx)) {
++ set_page_attributes(mod->module_init_rx,
++ mod->module_init_rx + mod->init_size_rx,
+ set_memory_rw);
+ }
+ }
+@@ -1767,14 +1768,14 @@ void set_all_modules_text_ro(void)
+
+ mutex_lock(&module_mutex);
+ list_for_each_entry_rcu(mod, &modules, list) {
+- if ((mod->module_core) && (mod->core_text_size)) {
+- set_page_attributes(mod->module_core,
+- mod->module_core + mod->core_text_size,
++ if ((mod->module_core_rx) && (mod->core_size_rx)) {
++ set_page_attributes(mod->module_core_rx,
++ mod->module_core_rx + mod->core_size_rx,
+ set_memory_ro);
+ }
+- if ((mod->module_init) && (mod->init_text_size)) {
+- set_page_attributes(mod->module_init,
+- mod->module_init + mod->init_text_size,
++ if ((mod->module_init_rx) && (mod->init_size_rx)) {
++ set_page_attributes(mod->module_init_rx,
++ mod->module_init_rx + mod->init_size_rx,
+ set_memory_ro);
+ }
+ }
+@@ -1820,16 +1821,19 @@ static void free_module(struct module *m
+
+ /* This may be NULL, but that's OK */
+ unset_module_init_ro_nx(mod);
+- module_free(mod, mod->module_init);
++ module_free(mod, mod->module_init_rw);
++ module_free_exec(mod, mod->module_init_rx);
+ kfree(mod->args);
+ percpu_modfree(mod);
+
+ /* Free lock-classes: */
+- lockdep_free_key_range(mod->module_core, mod->core_size);
++ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
++ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
+
+ /* Finally, free the core (containing the module structure) */
+ unset_module_core_ro_nx(mod);
+- module_free(mod, mod->module_core);
++ module_free_exec(mod, mod->module_core_rx);
++ module_free(mod, mod->module_core_rw);
+
+ #ifdef CONFIG_MPU
+ update_protections(current->mm);
+@@ -1922,7 +1926,9 @@ static int simplify_symbols(struct modul
+ ksym = resolve_symbol_wait(mod, info, name);
+ /* Ok if resolved. */
+ if (ksym && !IS_ERR(ksym)) {
++ pax_open_kernel();
+ sym[i].st_value = ksym->value;
++ pax_close_kernel();
+ break;
+ }
+
+@@ -1941,7 +1947,9 @@ static int simplify_symbols(struct modul
+ secbase = (unsigned long)mod_percpu(mod);
+ else
+ secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
++ pax_open_kernel();
+ sym[i].st_value += secbase;
++ pax_close_kernel();
+ break;
+ }
+ }
+@@ -2049,22 +2057,12 @@ static void layout_sections(struct modul
+ || s->sh_entsize != ~0UL
+ || strstarts(sname, ".init"))
+ continue;
+- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
++ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
++ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
++ else
++ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
+ pr_debug("\t%s\n", sname);
+ }
+- switch (m) {
+- case 0: /* executable */
+- mod->core_size = debug_align(mod->core_size);
+- mod->core_text_size = mod->core_size;
+- break;
+- case 1: /* RO: text and ro-data */
+- mod->core_size = debug_align(mod->core_size);
+- mod->core_ro_size = mod->core_size;
+- break;
+- case 3: /* whole core */
+- mod->core_size = debug_align(mod->core_size);
+- break;
+- }
+ }
+
+ pr_debug("Init section allocation order:\n");
+@@ -2078,23 +2076,13 @@ static void layout_sections(struct modul
+ || s->sh_entsize != ~0UL
+ || !strstarts(sname, ".init"))
+ continue;
+- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
+- | INIT_OFFSET_MASK);
++ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
++ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
++ else
++ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
++ s->sh_entsize |= INIT_OFFSET_MASK;
+ pr_debug("\t%s\n", sname);
+ }
+- switch (m) {
+- case 0: /* executable */
+- mod->init_size = debug_align(mod->init_size);
+- mod->init_text_size = mod->init_size;
+- break;
+- case 1: /* RO: text and ro-data */
+- mod->init_size = debug_align(mod->init_size);
+- mod->init_ro_size = mod->init_size;
+- break;
+- case 3: /* whole init */
+- mod->init_size = debug_align(mod->init_size);
+- break;
+- }
+ }
+ }
+
+@@ -2266,7 +2254,7 @@ static void layout_symtab(struct module
+
+ /* Put symbol section at end of init part of module. */
+ symsect->sh_flags |= SHF_ALLOC;
+- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
++ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
+ info->index.sym) | INIT_OFFSET_MASK;
+ pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
+
+@@ -2281,13 +2269,13 @@ static void layout_symtab(struct module
+ }
+
+ /* Append room for core symbols at end of core part. */
+- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
+- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
+- mod->core_size += strtab_size;
++ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
++ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
++ mod->core_size_rx += strtab_size;
+
+ /* Put string table section at end of init part of module. */
+ strsect->sh_flags |= SHF_ALLOC;
+- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
++ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
+ info->index.str) | INIT_OFFSET_MASK;
+ pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
+ }
+@@ -2305,12 +2293,14 @@ static void add_kallsyms(struct module *
+ /* Make sure we get permanent strtab: don't use info->strtab. */
+ mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
+
++ pax_open_kernel();
++
+ /* Set types up while we still have access to sections. */
+ for (i = 0; i < mod->num_symtab; i++)
+ mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
+
+- mod->core_symtab = dst = mod->module_core + info->symoffs;
+- mod->core_strtab = s = mod->module_core + info->stroffs;
++ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
++ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
+ src = mod->symtab;
+ *dst = *src;
+ *s++ = 0;
+@@ -2323,6 +2313,8 @@ static void add_kallsyms(struct module *
+ s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
+ }
+ mod->core_num_syms = ndst;
++
++ pax_close_kernel();
+ }
+ #else
+ static inline void layout_symtab(struct module *mod, struct load_info *info)
+@@ -2356,17 +2348,33 @@ void * __weak module_alloc(unsigned long
+ return size == 0 ? NULL : vmalloc_exec(size);
+ }
+
+-static void *module_alloc_update_bounds(unsigned long size)
++static void *module_alloc_update_bounds_rw(unsigned long size)
+ {
+ void *ret = module_alloc(size);
+
+ if (ret) {
+ mutex_lock(&module_mutex);
+ /* Update module bounds. */
+- if ((unsigned long)ret < module_addr_min)
+- module_addr_min = (unsigned long)ret;
+- if ((unsigned long)ret + size > module_addr_max)
+- module_addr_max = (unsigned long)ret + size;
++ if ((unsigned long)ret < module_addr_min_rw)
++ module_addr_min_rw = (unsigned long)ret;
++ if ((unsigned long)ret + size > module_addr_max_rw)
++ module_addr_max_rw = (unsigned long)ret + size;
++ mutex_unlock(&module_mutex);
++ }
++ return ret;
++}
++
++static void *module_alloc_update_bounds_rx(unsigned long size)
++{
++ void *ret = module_alloc_exec(size);
++
++ if (ret) {
++ mutex_lock(&module_mutex);
++ /* Update module bounds. */
++ if ((unsigned long)ret < module_addr_min_rx)
++ module_addr_min_rx = (unsigned long)ret;
++ if ((unsigned long)ret + size > module_addr_max_rx)
++ module_addr_max_rx = (unsigned long)ret + size;
+ mutex_unlock(&module_mutex);
+ }
+ return ret;
+@@ -2543,8 +2551,14 @@ static struct module *setup_load_info(st
+ static int check_modinfo(struct module *mod, struct load_info *info)
+ {
+ const char *modmagic = get_modinfo(info, "vermagic");
++ const char *license = get_modinfo(info, "license");
+ int err;
+
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ if (!license || !license_is_gpl_compatible(license))
++ return -ENOEXEC;
++#endif
++
+ /* This is allowed: modprobe --force will invalidate it. */
+ if (!modmagic) {
+ err = try_to_force_load(mod, "bad vermagic");
+@@ -2567,7 +2581,7 @@ static int check_modinfo(struct module *
+ }
+
+ /* Set up license info based on the info section */
+- set_license(mod, get_modinfo(info, "license"));
++ set_license(mod, license);
+
+ return 0;
+ }
+@@ -2661,7 +2675,7 @@ static int move_module(struct module *mo
+ void *ptr;
+
+ /* Do the allocs. */
+- ptr = module_alloc_update_bounds(mod->core_size);
++ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
+ /*
+ * The pointer to this block is stored in the module structure
+ * which is inside the block. Just mark it as not being a
+@@ -2671,23 +2685,50 @@ static int move_module(struct module *mo
+ if (!ptr)
+ return -ENOMEM;
+
+- memset(ptr, 0, mod->core_size);
+- mod->module_core = ptr;
++ memset(ptr, 0, mod->core_size_rw);
++ mod->module_core_rw = ptr;
+
+- ptr = module_alloc_update_bounds(mod->init_size);
++ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
+ /*
+ * The pointer to this block is stored in the module structure
+ * which is inside the block. This block doesn't need to be
+ * scanned as it contains data and code that will be freed
+ * after the module is initialized.
+ */
+- kmemleak_ignore(ptr);
+- if (!ptr && mod->init_size) {
+- module_free(mod, mod->module_core);
++ kmemleak_not_leak(ptr);
++ if (!ptr && mod->init_size_rw) {
++ module_free(mod, mod->module_core_rw);
+ return -ENOMEM;
+ }
+- memset(ptr, 0, mod->init_size);
+- mod->module_init = ptr;
++ memset(ptr, 0, mod->init_size_rw);
++ mod->module_init_rw = ptr;
++
++ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
++ kmemleak_not_leak(ptr);
++ if (!ptr) {
++ module_free(mod, mod->module_init_rw);
++ module_free(mod, mod->module_core_rw);
++ return -ENOMEM;
++ }
++
++ pax_open_kernel();
++ memset(ptr, 0, mod->core_size_rx);
++ pax_close_kernel();
++ mod->module_core_rx = ptr;
++
++ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
++ kmemleak_not_leak(ptr);
++ if (!ptr && mod->init_size_rx) {
++ module_free_exec(mod, mod->module_core_rx);
++ module_free(mod, mod->module_init_rw);
++ module_free(mod, mod->module_core_rw);
++ return -ENOMEM;
++ }
++
++ pax_open_kernel();
++ memset(ptr, 0, mod->init_size_rx);
++ pax_close_kernel();
++ mod->module_init_rx = ptr;
+
+ /* Transfer each section which specifies SHF_ALLOC */
+ pr_debug("final section addresses:\n");
+@@ -2698,16 +2739,45 @@ static int move_module(struct module *mo
+ if (!(shdr->sh_flags & SHF_ALLOC))
+ continue;
+
+- if (shdr->sh_entsize & INIT_OFFSET_MASK)
+- dest = mod->module_init
+- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
+- else
+- dest = mod->module_core + shdr->sh_entsize;
++ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
++ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
++ dest = mod->module_init_rw
++ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
++ else
++ dest = mod->module_init_rx
++ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
++ } else {
++ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
++ dest = mod->module_core_rw + shdr->sh_entsize;
++ else
++ dest = mod->module_core_rx + shdr->sh_entsize;
++ }
++
++ if (shdr->sh_type != SHT_NOBITS) {
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_64
++ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
++ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
++#endif
++ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
++ pax_open_kernel();
++ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
++ pax_close_kernel();
++ } else
++#endif
+
+- if (shdr->sh_type != SHT_NOBITS)
+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
++ }
+ /* Update sh_addr to point to copy in image. */
+- shdr->sh_addr = (unsigned long)dest;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ if (shdr->sh_flags & SHF_EXECINSTR)
++ shdr->sh_addr = ktva_ktla((unsigned long)dest);
++ else
++#endif
++
++ shdr->sh_addr = (unsigned long)dest;
+ pr_debug("\t0x%lx %s\n",
+ (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
+ }
+@@ -2758,12 +2828,12 @@ static void flush_module_icache(const st
+ * Do it before processing of module parameters, so the module
+ * can provide parameter accessor functions of its own.
+ */
+- if (mod->module_init)
+- flush_icache_range((unsigned long)mod->module_init,
+- (unsigned long)mod->module_init
+- + mod->init_size);
+- flush_icache_range((unsigned long)mod->module_core,
+- (unsigned long)mod->module_core + mod->core_size);
++ if (mod->module_init_rx)
++ flush_icache_range((unsigned long)mod->module_init_rx,
++ (unsigned long)mod->module_init_rx
++ + mod->init_size_rx);
++ flush_icache_range((unsigned long)mod->module_core_rx,
++ (unsigned long)mod->module_core_rx + mod->core_size_rx);
+
+ set_fs(old_fs);
+ }
+@@ -2833,8 +2903,10 @@ out:
+ static void module_deallocate(struct module *mod, struct load_info *info)
+ {
+ percpu_modfree(mod);
+- module_free(mod, mod->module_init);
+- module_free(mod, mod->module_core);
++ module_free_exec(mod, mod->module_init_rx);
++ module_free_exec(mod, mod->module_core_rx);
++ module_free(mod, mod->module_init_rw);
++ module_free(mod, mod->module_core_rw);
+ }
+
+ int __weak module_finalize(const Elf_Ehdr *hdr,
+@@ -3025,16 +3097,16 @@ SYSCALL_DEFINE3(init_module, void __user
+ MODULE_STATE_COMING, mod);
+
+ /* Set RO and NX regions for core */
+- set_section_ro_nx(mod->module_core,
+- mod->core_text_size,
+- mod->core_ro_size,
+- mod->core_size);
++ set_section_ro_nx(mod->module_core_rx,
++ mod->core_size_rx,
++ mod->core_size_rx,
++ mod->core_size_rx);
+
+ /* Set RO and NX regions for init */
+- set_section_ro_nx(mod->module_init,
+- mod->init_text_size,
+- mod->init_ro_size,
+- mod->init_size);
++ set_section_ro_nx(mod->module_init_rx,
++ mod->init_size_rx,
++ mod->init_size_rx,
++ mod->init_size_rx);
+
+ do_mod_ctors(mod);
+ /* Start the module */
+@@ -3080,11 +3152,12 @@ SYSCALL_DEFINE3(init_module, void __user
+ mod->strtab = mod->core_strtab;
+ #endif
+ unset_module_init_ro_nx(mod);
+- module_free(mod, mod->module_init);
+- mod->module_init = NULL;
+- mod->init_size = 0;
+- mod->init_ro_size = 0;
+- mod->init_text_size = 0;
++ module_free(mod, mod->module_init_rw);
++ module_free_exec(mod, mod->module_init_rx);
++ mod->module_init_rw = NULL;
++ mod->module_init_rx = NULL;
++ mod->init_size_rw = 0;
++ mod->init_size_rx = 0;
+ mutex_unlock(&module_mutex);
+
+ return 0;
+@@ -3115,10 +3188,16 @@ static const char *get_ksymbol(struct mo
+ unsigned long nextval;
+
+ /* At worse, next value is at end of module */
+- if (within_module_init(addr, mod))
+- nextval = (unsigned long)mod->module_init+mod->init_text_size;
++ if (within_module_init_rx(addr, mod))
++ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
++ else if (within_module_init_rw(addr, mod))
++ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
++ else if (within_module_core_rx(addr, mod))
++ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
++ else if (within_module_core_rw(addr, mod))
++ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
+ else
+- nextval = (unsigned long)mod->module_core+mod->core_text_size;
++ return NULL;
+
+ /* Scan for closest preceding symbol, and next symbol. (ELF
+ starts real symbols at 1). */
+@@ -3353,7 +3432,7 @@ static int m_show(struct seq_file *m, vo
+ char buf[8];
+
+ seq_printf(m, "%s %u",
+- mod->name, mod->init_size + mod->core_size);
++ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
+ print_unload_info(m, mod);
+
+ /* Informative for users. */
+@@ -3362,7 +3441,7 @@ static int m_show(struct seq_file *m, vo
+ mod->state == MODULE_STATE_COMING ? "Loading":
+ "Live");
+ /* Used by oprofile and other similar tools. */
+- seq_printf(m, " 0x%pK", mod->module_core);
++ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
+
+ /* Taints info */
+ if (mod->taints)
+@@ -3457,12 +3536,12 @@ struct module *__module_address(unsigned
+ {
+ struct module *mod;
+
+- if (addr < module_addr_min || addr > module_addr_max)
++ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
++ (addr < module_addr_min_rw || addr > module_addr_max_rw))
+ return NULL;
+
+ list_for_each_entry_rcu(mod, &modules, list)
+- if (within_module_core(addr, mod)
+- || within_module_init(addr, mod))
++ if (within_module_init(addr, mod) || within_module_core(addr, mod))
+ return mod;
+ return NULL;
+ }
+@@ -3496,11 +3575,20 @@ bool is_module_text_address(unsigned lon
+ */
+ struct module *__module_text_address(unsigned long addr)
+ {
+- struct module *mod = __module_address(addr);
++ struct module *mod;
++
++#ifdef CONFIG_X86_32
++ addr = ktla_ktva(addr);
++#endif
++
++ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
++ return NULL;
++
++ mod = __module_address(addr);
++
+ if (mod) {
+ /* Make sure it's within the text section. */
+- if (!within(addr, mod->module_init, mod->init_text_size)
+- && !within(addr, mod->module_core, mod->core_text_size))
++ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
+ mod = NULL;
+ }
+ return mod;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/mutex.c linux-3.4-pax/kernel/mutex.c
+--- linux-3.4/kernel/mutex.c 2012-05-21 11:33:39.539929944 +0200
++++ linux-3.4-pax/kernel/mutex.c 2012-05-21 12:10:11.776049011 +0200
+@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
+ spin_lock_mutex(&lock->wait_lock, flags);
+
+ debug_mutex_lock_common(lock, &waiter);
+- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
++ debug_mutex_add_waiter(lock, &waiter, task);
+
+ /* add waiting tasks to the end of the waitqueue (FIFO): */
+ list_add_tail(&waiter.list, &lock->wait_list);
+@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
+ * TASK_UNINTERRUPTIBLE case.)
+ */
+ if (unlikely(signal_pending_state(state, task))) {
+- mutex_remove_waiter(lock, &waiter,
+- task_thread_info(task));
++ mutex_remove_waiter(lock, &waiter, task);
+ mutex_release(&lock->dep_map, 1, ip);
+ spin_unlock_mutex(&lock->wait_lock, flags);
+
+@@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock,
+ done:
+ lock_acquired(&lock->dep_map, ip);
+ /* got the lock - rejoice! */
+- mutex_remove_waiter(lock, &waiter, current_thread_info());
++ mutex_remove_waiter(lock, &waiter, task);
+ mutex_set_owner(lock);
+
+ /* set it to 0 if there are no waiters left: */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/mutex-debug.c linux-3.4-pax/kernel/mutex-debug.c
+--- linux-3.4/kernel/mutex-debug.c 2012-01-08 19:48:28.763470889 +0100
++++ linux-3.4-pax/kernel/mutex-debug.c 2012-05-21 12:10:11.780049011 +0200
+@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
+ }
+
+ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+- struct thread_info *ti)
++ struct task_struct *task)
+ {
+ SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
+
+ /* Mark the current thread as blocked on the lock: */
+- ti->task->blocked_on = waiter;
++ task->blocked_on = waiter;
+ }
+
+ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+- struct thread_info *ti)
++ struct task_struct *task)
+ {
+ DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
+- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
+- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
+- ti->task->blocked_on = NULL;
++ DEBUG_LOCKS_WARN_ON(waiter->task != task);
++ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
++ task->blocked_on = NULL;
+
+ list_del_init(&waiter->list);
+ waiter->task = NULL;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/mutex-debug.h linux-3.4-pax/kernel/mutex-debug.h
+--- linux-3.4/kernel/mutex-debug.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/kernel/mutex-debug.h 2012-05-21 12:10:11.780049011 +0200
+@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
+ extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
+ extern void debug_mutex_add_waiter(struct mutex *lock,
+ struct mutex_waiter *waiter,
+- struct thread_info *ti);
++ struct task_struct *task);
+ extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+- struct thread_info *ti);
++ struct task_struct *task);
+ extern void debug_mutex_unlock(struct mutex *lock);
+ extern void debug_mutex_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/panic.c linux-3.4-pax/kernel/panic.c
+--- linux-3.4/kernel/panic.c 2012-05-21 11:33:39.543929945 +0200
++++ linux-3.4-pax/kernel/panic.c 2012-05-21 12:10:11.784049011 +0200
+@@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
+ */
+ void __stack_chk_fail(void)
+ {
+- panic("stack-protector: Kernel stack is corrupted in: %p\n",
++ dump_stack();
++ panic("stack-protector: Kernel stack is corrupted in: %pS\n",
+ __builtin_return_address(0));
+ }
+ EXPORT_SYMBOL(__stack_chk_fail);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/pid.c linux-3.4-pax/kernel/pid.c
+--- linux-3.4/kernel/pid.c 2012-03-19 10:39:11.880049184 +0100
++++ linux-3.4-pax/kernel/pid.c 2012-05-21 12:10:11.788049011 +0200
+@@ -45,7 +45,7 @@ struct pid init_struct_pid = INIT_STRUCT
+
+ int pid_max = PID_MAX_DEFAULT;
+
+-#define RESERVED_PIDS 300
++#define RESERVED_PIDS 500
+
+ int pid_max_min = RESERVED_PIDS + 1;
+ int pid_max_max = PID_MAX_LIMIT;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/posix-cpu-timers.c linux-3.4-pax/kernel/posix-cpu-timers.c
+--- linux-3.4/kernel/posix-cpu-timers.c 2012-03-19 10:39:11.880049184 +0100
++++ linux-3.4-pax/kernel/posix-cpu-timers.c 2012-05-21 12:10:11.788049011 +0200
+@@ -1578,14 +1578,14 @@ struct k_clock clock_posix_cpu = {
+
+ static __init int init_posix_cpu_timers(void)
+ {
+- struct k_clock process = {
++ static struct k_clock process = {
+ .clock_getres = process_cpu_clock_getres,
+ .clock_get = process_cpu_clock_get,
+ .timer_create = process_cpu_timer_create,
+ .nsleep = process_cpu_nsleep,
+ .nsleep_restart = process_cpu_nsleep_restart,
+ };
+- struct k_clock thread = {
++ static struct k_clock thread = {
+ .clock_getres = thread_cpu_clock_getres,
+ .clock_get = thread_cpu_clock_get,
+ .timer_create = thread_cpu_timer_create,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/posix-timers.c linux-3.4-pax/kernel/posix-timers.c
+--- linux-3.4/kernel/posix-timers.c 2012-01-08 19:48:28.791470888 +0100
++++ linux-3.4-pax/kernel/posix-timers.c 2012-05-21 12:10:11.792049012 +0200
+@@ -129,7 +129,7 @@ static DEFINE_SPINLOCK(idr_lock);
+ * which we beg off on and pass to do_sys_settimeofday().
+ */
+
+-static struct k_clock posix_clocks[MAX_CLOCKS];
++static struct k_clock *posix_clocks[MAX_CLOCKS];
+
+ /*
+ * These ones are defined below.
+@@ -227,7 +227,7 @@ static int posix_get_boottime(const cloc
+ */
+ static __init int init_posix_timers(void)
+ {
+- struct k_clock clock_realtime = {
++ static struct k_clock clock_realtime = {
+ .clock_getres = hrtimer_get_res,
+ .clock_get = posix_clock_realtime_get,
+ .clock_set = posix_clock_realtime_set,
+@@ -239,7 +239,7 @@ static __init int init_posix_timers(void
+ .timer_get = common_timer_get,
+ .timer_del = common_timer_del,
+ };
+- struct k_clock clock_monotonic = {
++ static struct k_clock clock_monotonic = {
+ .clock_getres = hrtimer_get_res,
+ .clock_get = posix_ktime_get_ts,
+ .nsleep = common_nsleep,
+@@ -249,19 +249,19 @@ static __init int init_posix_timers(void
+ .timer_get = common_timer_get,
+ .timer_del = common_timer_del,
+ };
+- struct k_clock clock_monotonic_raw = {
++ static struct k_clock clock_monotonic_raw = {
+ .clock_getres = hrtimer_get_res,
+ .clock_get = posix_get_monotonic_raw,
+ };
+- struct k_clock clock_realtime_coarse = {
++ static struct k_clock clock_realtime_coarse = {
+ .clock_getres = posix_get_coarse_res,
+ .clock_get = posix_get_realtime_coarse,
+ };
+- struct k_clock clock_monotonic_coarse = {
++ static struct k_clock clock_monotonic_coarse = {
+ .clock_getres = posix_get_coarse_res,
+ .clock_get = posix_get_monotonic_coarse,
+ };
+- struct k_clock clock_boottime = {
++ static struct k_clock clock_boottime = {
+ .clock_getres = hrtimer_get_res,
+ .clock_get = posix_get_boottime,
+ .nsleep = common_nsleep,
+@@ -473,7 +473,7 @@ void posix_timers_register_clock(const c
+ return;
+ }
+
+- posix_clocks[clock_id] = *new_clock;
++ posix_clocks[clock_id] = new_clock;
+ }
+ EXPORT_SYMBOL_GPL(posix_timers_register_clock);
+
+@@ -519,9 +519,9 @@ static struct k_clock *clockid_to_kclock
+ return (id & CLOCKFD_MASK) == CLOCKFD ?
+ &clock_posix_dynamic : &clock_posix_cpu;
+
+- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
++ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
+ return NULL;
+- return &posix_clocks[id];
++ return posix_clocks[id];
+ }
+
+ static int common_timer_create(struct k_itimer *new_timer)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/power/poweroff.c linux-3.4-pax/kernel/power/poweroff.c
+--- linux-3.4/kernel/power/poweroff.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/kernel/power/poweroff.c 2012-05-21 12:10:11.796049012 +0200
+@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
+ .enable_mask = SYSRQ_ENABLE_BOOT,
+ };
+
+-static int pm_sysrq_init(void)
++static int __init pm_sysrq_init(void)
+ {
+ register_sysrq_key('o', &sysrq_poweroff_op);
+ return 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/power/process.c linux-3.4-pax/kernel/power/process.c
+--- linux-3.4/kernel/power/process.c 2012-05-21 11:33:39.551929945 +0200
++++ linux-3.4-pax/kernel/power/process.c 2012-05-21 12:10:11.796049012 +0200
+@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user
+ u64 elapsed_csecs64;
+ unsigned int elapsed_csecs;
+ bool wakeup = false;
++ bool timedout = false;
+
+ do_gettimeofday(&start);
+
+@@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user
+
+ while (true) {
+ todo = 0;
++ if (time_after(jiffies, end_time))
++ timedout = true;
+ read_lock(&tasklist_lock);
+ do_each_thread(g, p) {
+ if (p == current || !freeze_task(p))
+@@ -58,9 +61,13 @@ static int try_to_freeze_tasks(bool user
+ * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
+ * transition can't race with task state testing here.
+ */
+- if (!task_is_stopped_or_traced(p) &&
+- !freezer_should_skip(p))
++ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
+ todo++;
++ if (timedout) {
++ printk(KERN_ERR "Task refusing to freeze:\n");
++ sched_show_task(p);
++ }
++ }
+ } while_each_thread(g, p);
+ read_unlock(&tasklist_lock);
+
+@@ -69,7 +76,7 @@ static int try_to_freeze_tasks(bool user
+ todo += wq_busy;
+ }
+
+- if (!todo || time_after(jiffies, end_time))
++ if (!todo || timedout)
+ break;
+
+ if (pm_wakeup_pending()) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/profile.c linux-3.4-pax/kernel/profile.c
+--- linux-3.4/kernel/profile.c 2012-01-08 19:48:28.831470886 +0100
++++ linux-3.4-pax/kernel/profile.c 2012-05-21 12:10:11.800049011 +0200
+@@ -39,7 +39,7 @@ struct profile_hit {
+ /* Oprofile timer tick hook */
+ static int (*timer_hook)(struct pt_regs *) __read_mostly;
+
+-static atomic_t *prof_buffer;
++static atomic_unchecked_t *prof_buffer;
+ static unsigned long prof_len, prof_shift;
+
+ int prof_on __read_mostly;
+@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
+ hits[i].pc = 0;
+ continue;
+ }
+- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
++ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
+ hits[i].hits = hits[i].pc = 0;
+ }
+ }
+@@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
+ * Add the current hit(s) and flush the write-queue out
+ * to the global buffer:
+ */
+- atomic_add(nr_hits, &prof_buffer[pc]);
++ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
+ for (i = 0; i < NR_PROFILE_HIT; ++i) {
+- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
++ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
+ hits[i].pc = hits[i].hits = 0;
+ }
+ out:
+@@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
+ {
+ unsigned long pc;
+ pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
+- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
++ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
+ }
+ #endif /* !CONFIG_SMP */
+
+@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
+ return -EFAULT;
+ buf++; p++; count--; read++;
+ }
+- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
++ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
+ if (copy_to_user(buf, (void *)pnt, count))
+ return -EFAULT;
+ read += count;
+@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
+ }
+ #endif
+ profile_discard_flip_buffers();
+- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
++ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
+ return count;
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/ptrace.c linux-3.4-pax/kernel/ptrace.c
+--- linux-3.4/kernel/ptrace.c 2012-05-21 11:33:39.567929946 +0200
++++ linux-3.4-pax/kernel/ptrace.c 2012-05-21 12:10:11.804049011 +0200
+@@ -487,7 +487,7 @@ int ptrace_readdata(struct task_struct *
+ break;
+ return -EIO;
+ }
+- if (copy_to_user(dst, buf, retval))
++ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
+ return -EFAULT;
+ copied += retval;
+ src += retval;
+@@ -672,7 +672,7 @@ int ptrace_request(struct task_struct *c
+ bool seized = child->ptrace & PT_SEIZED;
+ int ret = -EIO;
+ siginfo_t siginfo, *si;
+- void __user *datavp = (void __user *) data;
++ void __user *datavp = (__force void __user *) data;
+ unsigned long __user *datalp = datavp;
+ unsigned long flags;
+
+@@ -907,7 +907,7 @@ int generic_ptrace_peekdata(struct task_
+ copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
+ if (copied != sizeof(tmp))
+ return -EIO;
+- return put_user(tmp, (unsigned long __user *)data);
++ return put_user(tmp, (__force unsigned long __user *)data);
+ }
+
+ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/rcutiny.c linux-3.4-pax/kernel/rcutiny.c
+--- linux-3.4/kernel/rcutiny.c 2012-05-21 11:33:39.571929946 +0200
++++ linux-3.4-pax/kernel/rcutiny.c 2012-05-21 12:10:11.804049011 +0200
+@@ -46,7 +46,7 @@
+ struct rcu_ctrlblk;
+ static void invoke_rcu_callbacks(void);
+ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
+-static void rcu_process_callbacks(struct softirq_action *unused);
++static void rcu_process_callbacks(void);
+ static void __call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu),
+ struct rcu_ctrlblk *rcp);
+@@ -307,7 +307,7 @@ static void __rcu_process_callbacks(stru
+ rcu_is_callbacks_kthread()));
+ }
+
+-static void rcu_process_callbacks(struct softirq_action *unused)
++static void rcu_process_callbacks(void)
+ {
+ __rcu_process_callbacks(&rcu_sched_ctrlblk);
+ __rcu_process_callbacks(&rcu_bh_ctrlblk);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/rcutiny_plugin.h linux-3.4-pax/kernel/rcutiny_plugin.h
+--- linux-3.4/kernel/rcutiny_plugin.h 2012-05-21 11:33:39.575929946 +0200
++++ linux-3.4-pax/kernel/rcutiny_plugin.h 2012-05-21 12:10:11.808049013 +0200
+@@ -955,7 +955,7 @@ static int rcu_kthread(void *arg)
+ have_rcu_kthread_work = morework;
+ local_irq_restore(flags);
+ if (work)
+- rcu_process_callbacks(NULL);
++ rcu_process_callbacks();
+ schedule_timeout_interruptible(1); /* Leave CPU for others. */
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/rcutorture.c linux-3.4-pax/kernel/rcutorture.c
+--- linux-3.4/kernel/rcutorture.c 2012-05-21 11:33:39.607929947 +0200
++++ linux-3.4-pax/kernel/rcutorture.c 2012-05-21 12:10:11.812049014 +0200
+@@ -158,12 +158,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
+ { 0 };
+ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
+ { 0 };
+-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
+-static atomic_t n_rcu_torture_alloc;
+-static atomic_t n_rcu_torture_alloc_fail;
+-static atomic_t n_rcu_torture_free;
+-static atomic_t n_rcu_torture_mberror;
+-static atomic_t n_rcu_torture_error;
++static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
++static atomic_unchecked_t n_rcu_torture_alloc;
++static atomic_unchecked_t n_rcu_torture_alloc_fail;
++static atomic_unchecked_t n_rcu_torture_free;
++static atomic_unchecked_t n_rcu_torture_mberror;
++static atomic_unchecked_t n_rcu_torture_error;
+ static long n_rcu_torture_boost_ktrerror;
+ static long n_rcu_torture_boost_rterror;
+ static long n_rcu_torture_boost_failure;
+@@ -253,11 +253,11 @@ rcu_torture_alloc(void)
+
+ spin_lock_bh(&rcu_torture_lock);
+ if (list_empty(&rcu_torture_freelist)) {
+- atomic_inc(&n_rcu_torture_alloc_fail);
++ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
+ spin_unlock_bh(&rcu_torture_lock);
+ return NULL;
+ }
+- atomic_inc(&n_rcu_torture_alloc);
++ atomic_inc_unchecked(&n_rcu_torture_alloc);
+ p = rcu_torture_freelist.next;
+ list_del_init(p);
+ spin_unlock_bh(&rcu_torture_lock);
+@@ -270,7 +270,7 @@ rcu_torture_alloc(void)
+ static void
+ rcu_torture_free(struct rcu_torture *p)
+ {
+- atomic_inc(&n_rcu_torture_free);
++ atomic_inc_unchecked(&n_rcu_torture_free);
+ spin_lock_bh(&rcu_torture_lock);
+ list_add_tail(&p->rtort_free, &rcu_torture_freelist);
+ spin_unlock_bh(&rcu_torture_lock);
+@@ -390,7 +390,7 @@ rcu_torture_cb(struct rcu_head *p)
+ i = rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+- atomic_inc(&rcu_torture_wcount[i]);
++ atomic_inc_unchecked(&rcu_torture_wcount[i]);
+ if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
+ rp->rtort_mbtest = 0;
+ rcu_torture_free(rp);
+@@ -437,7 +437,7 @@ static void rcu_sync_torture_deferred_fr
+ i = rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+- atomic_inc(&rcu_torture_wcount[i]);
++ atomic_inc_unchecked(&rcu_torture_wcount[i]);
+ if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
+ rp->rtort_mbtest = 0;
+ list_del(&rp->rtort_free);
+@@ -926,7 +926,7 @@ rcu_torture_writer(void *arg)
+ i = old_rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+- atomic_inc(&rcu_torture_wcount[i]);
++ atomic_inc_unchecked(&rcu_torture_wcount[i]);
+ old_rp->rtort_pipe_count++;
+ cur_ops->deferred_free(old_rp);
+ }
+@@ -1007,7 +1007,7 @@ static void rcu_torture_timer(unsigned l
+ }
+ do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
+ if (p->rtort_mbtest == 0)
+- atomic_inc(&n_rcu_torture_mberror);
++ atomic_inc_unchecked(&n_rcu_torture_mberror);
+ spin_lock(&rand_lock);
+ cur_ops->read_delay(&rand);
+ n_rcu_torture_timers++;
+@@ -1071,7 +1071,7 @@ rcu_torture_reader(void *arg)
+ }
+ do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
+ if (p->rtort_mbtest == 0)
+- atomic_inc(&n_rcu_torture_mberror);
++ atomic_inc_unchecked(&n_rcu_torture_mberror);
+ cur_ops->read_delay(&rand);
+ preempt_disable();
+ pipe_count = p->rtort_pipe_count;
+@@ -1133,10 +1133,10 @@ rcu_torture_printk(char *page)
+ rcu_torture_current,
+ rcu_torture_current_version,
+ list_empty(&rcu_torture_freelist),
+- atomic_read(&n_rcu_torture_alloc),
+- atomic_read(&n_rcu_torture_alloc_fail),
+- atomic_read(&n_rcu_torture_free),
+- atomic_read(&n_rcu_torture_mberror),
++ atomic_read_unchecked(&n_rcu_torture_alloc),
++ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
++ atomic_read_unchecked(&n_rcu_torture_free),
++ atomic_read_unchecked(&n_rcu_torture_mberror),
+ n_rcu_torture_boost_ktrerror,
+ n_rcu_torture_boost_rterror,
+ n_rcu_torture_boost_failure,
+@@ -1146,7 +1146,7 @@ rcu_torture_printk(char *page)
+ n_online_attempts,
+ n_offline_successes,
+ n_offline_attempts);
+- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
++ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
+ n_rcu_torture_boost_ktrerror != 0 ||
+ n_rcu_torture_boost_rterror != 0 ||
+ n_rcu_torture_boost_failure != 0)
+@@ -1154,7 +1154,7 @@ rcu_torture_printk(char *page)
+ cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
+ if (i > 1) {
+ cnt += sprintf(&page[cnt], "!!! ");
+- atomic_inc(&n_rcu_torture_error);
++ atomic_inc_unchecked(&n_rcu_torture_error);
+ WARN_ON_ONCE(1);
+ }
+ cnt += sprintf(&page[cnt], "Reader Pipe: ");
+@@ -1168,7 +1168,7 @@ rcu_torture_printk(char *page)
+ cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
+ cnt += sprintf(&page[cnt], " %d",
+- atomic_read(&rcu_torture_wcount[i]));
++ atomic_read_unchecked(&rcu_torture_wcount[i]));
+ }
+ cnt += sprintf(&page[cnt], "\n");
+ if (cur_ops->stats)
+@@ -1676,7 +1676,7 @@ rcu_torture_cleanup(void)
+
+ if (cur_ops->cleanup)
+ cur_ops->cleanup();
+- if (atomic_read(&n_rcu_torture_error))
++ if (atomic_read_unchecked(&n_rcu_torture_error))
+ rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
+ else if (n_online_successes != n_online_attempts ||
+ n_offline_successes != n_offline_attempts)
+@@ -1744,17 +1744,17 @@ rcu_torture_init(void)
+
+ rcu_torture_current = NULL;
+ rcu_torture_current_version = 0;
+- atomic_set(&n_rcu_torture_alloc, 0);
+- atomic_set(&n_rcu_torture_alloc_fail, 0);
+- atomic_set(&n_rcu_torture_free, 0);
+- atomic_set(&n_rcu_torture_mberror, 0);
+- atomic_set(&n_rcu_torture_error, 0);
++ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
++ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
++ atomic_set_unchecked(&n_rcu_torture_free, 0);
++ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
++ atomic_set_unchecked(&n_rcu_torture_error, 0);
+ n_rcu_torture_boost_ktrerror = 0;
+ n_rcu_torture_boost_rterror = 0;
+ n_rcu_torture_boost_failure = 0;
+ n_rcu_torture_boosts = 0;
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
+- atomic_set(&rcu_torture_wcount[i], 0);
++ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
+ per_cpu(rcu_torture_count, cpu)[i] = 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/rcutree.c linux-3.4-pax/kernel/rcutree.c
+--- linux-3.4/kernel/rcutree.c 2012-05-21 11:33:39.611929949 +0200
++++ linux-3.4-pax/kernel/rcutree.c 2012-05-21 12:10:11.816049014 +0200
+@@ -357,9 +357,9 @@ static void rcu_idle_enter_common(struct
+ rcu_prepare_for_idle(smp_processor_id());
+ /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+ smp_mb__before_atomic_inc(); /* See above. */
+- atomic_inc(&rdtp->dynticks);
++ atomic_inc_unchecked(&rdtp->dynticks);
+ smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
+- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
++ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
+
+ /*
+ * The idle task is not permitted to enter the idle loop while
+@@ -448,10 +448,10 @@ void rcu_irq_exit(void)
+ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
+ {
+ smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
+- atomic_inc(&rdtp->dynticks);
++ atomic_inc_unchecked(&rdtp->dynticks);
+ /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+ smp_mb__after_atomic_inc(); /* See above. */
+- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
++ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
+ rcu_cleanup_after_idle(smp_processor_id());
+ trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
+ if (!is_idle_task(current)) {
+@@ -545,14 +545,14 @@ void rcu_nmi_enter(void)
+ struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+ if (rdtp->dynticks_nmi_nesting == 0 &&
+- (atomic_read(&rdtp->dynticks) & 0x1))
++ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
+ return;
+ rdtp->dynticks_nmi_nesting++;
+ smp_mb__before_atomic_inc(); /* Force delay from prior write. */
+- atomic_inc(&rdtp->dynticks);
++ atomic_inc_unchecked(&rdtp->dynticks);
+ /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+ smp_mb__after_atomic_inc(); /* See above. */
+- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
++ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
+ }
+
+ /**
+@@ -571,9 +571,9 @@ void rcu_nmi_exit(void)
+ return;
+ /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+ smp_mb__before_atomic_inc(); /* See above. */
+- atomic_inc(&rdtp->dynticks);
++ atomic_inc_unchecked(&rdtp->dynticks);
+ smp_mb__after_atomic_inc(); /* Force delay to next write. */
+- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
++ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
+ }
+
+ #ifdef CONFIG_PROVE_RCU
+@@ -589,7 +589,7 @@ int rcu_is_cpu_idle(void)
+ int ret;
+
+ preempt_disable();
+- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
++ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+ preempt_enable();
+ return ret;
+ }
+@@ -659,7 +659,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
+ */
+ static int dyntick_save_progress_counter(struct rcu_data *rdp)
+ {
+- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
++ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
+ return (rdp->dynticks_snap & 0x1) == 0;
+ }
+
+@@ -674,7 +674,7 @@ static int rcu_implicit_dynticks_qs(stru
+ unsigned int curr;
+ unsigned int snap;
+
+- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
++ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
+ snap = (unsigned int)rdp->dynticks_snap;
+
+ /*
+@@ -704,10 +704,10 @@ static int jiffies_till_stall_check(void
+ * for CONFIG_RCU_CPU_STALL_TIMEOUT.
+ */
+ if (till_stall_check < 3) {
+- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
++ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
+ till_stall_check = 3;
+ } else if (till_stall_check > 300) {
+- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
++ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
+ till_stall_check = 300;
+ }
+ return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
+@@ -1766,7 +1766,7 @@ __rcu_process_callbacks(struct rcu_state
+ /*
+ * Do RCU core processing for the current CPU.
+ */
+-static void rcu_process_callbacks(struct softirq_action *unused)
++static void rcu_process_callbacks(void)
+ {
+ trace_rcu_utilization("Start RCU core");
+ __rcu_process_callbacks(&rcu_sched_state,
+@@ -1949,8 +1949,8 @@ void synchronize_rcu_bh(void)
+ }
+ EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+
+-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
+-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
++static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
++static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
+
+ static int synchronize_sched_expedited_cpu_stop(void *data)
+ {
+@@ -2011,7 +2011,7 @@ void synchronize_sched_expedited(void)
+ int firstsnap, s, snap, trycount = 0;
+
+ /* Note that atomic_inc_return() implies full memory barrier. */
+- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
++ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
+ get_online_cpus();
+ WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
+
+@@ -2033,7 +2033,7 @@ void synchronize_sched_expedited(void)
+ }
+
+ /* Check to see if someone else did our work for us. */
+- s = atomic_read(&sync_sched_expedited_done);
++ s = atomic_read_unchecked(&sync_sched_expedited_done);
+ if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
+ smp_mb(); /* ensure test happens before caller kfree */
+ return;
+@@ -2048,7 +2048,7 @@ void synchronize_sched_expedited(void)
+ * grace period works for us.
+ */
+ get_online_cpus();
+- snap = atomic_read(&sync_sched_expedited_started);
++ snap = atomic_read_unchecked(&sync_sched_expedited_started);
+ smp_mb(); /* ensure read is before try_stop_cpus(). */
+ }
+
+@@ -2059,12 +2059,12 @@ void synchronize_sched_expedited(void)
+ * than we did beat us to the punch.
+ */
+ do {
+- s = atomic_read(&sync_sched_expedited_done);
++ s = atomic_read_unchecked(&sync_sched_expedited_done);
+ if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
+ smp_mb(); /* ensure test happens before caller kfree */
+ break;
+ }
+- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
++ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
+
+ put_online_cpus();
+ }
+@@ -2262,7 +2262,7 @@ rcu_boot_init_percpu_data(int cpu, struc
+ rdp->qlen = 0;
+ rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
+ WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
+- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
++ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
+ rdp->cpu = cpu;
+ rdp->rsp = rsp;
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
+@@ -2290,8 +2290,8 @@ rcu_init_percpu_data(int cpu, struct rcu
+ rdp->n_force_qs_snap = rsp->n_force_qs;
+ rdp->blimit = blimit;
+ rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+- atomic_set(&rdp->dynticks->dynticks,
+- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
++ atomic_set_unchecked(&rdp->dynticks->dynticks,
++ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
+ rcu_prepare_for_idle_init(cpu);
+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/rcutree.h linux-3.4-pax/kernel/rcutree.h
+--- linux-3.4/kernel/rcutree.h 2012-05-21 11:33:39.615929950 +0200
++++ linux-3.4-pax/kernel/rcutree.h 2012-05-21 12:10:11.820049014 +0200
+@@ -87,7 +87,7 @@ struct rcu_dynticks {
+ long long dynticks_nesting; /* Track irq/process nesting level. */
+ /* Process level is worth LLONG_MAX/2. */
+ int dynticks_nmi_nesting; /* Track NMI nesting level. */
+- atomic_t dynticks; /* Even value for idle, else odd. */
++ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
+ };
+
+ /* RCU's kthread states for tracing. */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/rcutree_plugin.h linux-3.4-pax/kernel/rcutree_plugin.h
+--- linux-3.4/kernel/rcutree_plugin.h 2012-05-21 11:33:39.623929950 +0200
++++ linux-3.4-pax/kernel/rcutree_plugin.h 2012-05-21 12:10:11.824049013 +0200
+@@ -909,7 +909,7 @@ void synchronize_rcu_expedited(void)
+
+ /* Clean up and exit. */
+ smp_mb(); /* ensure expedited GP seen before counter increment. */
+- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
++ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
+ unlock_mb_ret:
+ mutex_unlock(&sync_rcu_preempt_exp_mutex);
+ mb_ret:
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/rcutree_trace.c linux-3.4-pax/kernel/rcutree_trace.c
+--- linux-3.4/kernel/rcutree_trace.c 2012-05-21 11:33:39.623929950 +0200
++++ linux-3.4-pax/kernel/rcutree_trace.c 2012-05-21 12:10:11.824049013 +0200
+@@ -68,7 +68,7 @@ static void print_one_rcu_data(struct se
+ rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
+ rdp->qs_pending);
+ seq_printf(m, " dt=%d/%llx/%d df=%lu",
+- atomic_read(&rdp->dynticks->dynticks),
++ atomic_read_unchecked(&rdp->dynticks->dynticks),
+ rdp->dynticks->dynticks_nesting,
+ rdp->dynticks->dynticks_nmi_nesting,
+ rdp->dynticks_fqs);
+@@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struc
+ rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
+ rdp->qs_pending);
+ seq_printf(m, ",%d,%llx,%d,%lu",
+- atomic_read(&rdp->dynticks->dynticks),
++ atomic_read_unchecked(&rdp->dynticks->dynticks),
+ rdp->dynticks->dynticks_nesting,
+ rdp->dynticks->dynticks_nmi_nesting,
+ rdp->dynticks_fqs);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/rtmutex-tester.c linux-3.4-pax/kernel/rtmutex-tester.c
+--- linux-3.4/kernel/rtmutex-tester.c 2012-03-19 10:39:11.908049183 +0100
++++ linux-3.4-pax/kernel/rtmutex-tester.c 2012-05-21 12:10:11.828049013 +0200
+@@ -20,7 +20,7 @@
+ #define MAX_RT_TEST_MUTEXES 8
+
+ static spinlock_t rttest_lock;
+-static atomic_t rttest_event;
++static atomic_unchecked_t rttest_event;
+
+ struct test_thread_data {
+ int opcode;
+@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
+
+ case RTTEST_LOCKCONT:
+ td->mutexes[td->opdata] = 1;
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ return 0;
+
+ case RTTEST_RESET:
+@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
+ return 0;
+
+ case RTTEST_RESETEVENT:
+- atomic_set(&rttest_event, 0);
++ atomic_set_unchecked(&rttest_event, 0);
+ return 0;
+
+ default:
+@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
+ return ret;
+
+ td->mutexes[id] = 1;
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ rt_mutex_lock(&mutexes[id]);
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ td->mutexes[id] = 4;
+ return 0;
+
+@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
+ return ret;
+
+ td->mutexes[id] = 1;
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ td->mutexes[id] = ret ? 0 : 4;
+ return ret ? -EINTR : 0;
+
+@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
+ if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
+ return ret;
+
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ rt_mutex_unlock(&mutexes[id]);
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ td->mutexes[id] = 0;
+ return 0;
+
+@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
+ break;
+
+ td->mutexes[dat] = 2;
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ break;
+
+ default:
+@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
+ return;
+
+ td->mutexes[dat] = 3;
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ break;
+
+ case RTTEST_LOCKNOWAIT:
+@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
+ return;
+
+ td->mutexes[dat] = 1;
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ return;
+
+ default:
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/sched/auto_group.c linux-3.4-pax/kernel/sched/auto_group.c
+--- linux-3.4/kernel/sched/auto_group.c 2012-05-21 11:33:39.639929950 +0200
++++ linux-3.4-pax/kernel/sched/auto_group.c 2012-05-21 12:10:11.828049013 +0200
+@@ -11,7 +11,7 @@
+
+ unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
+ static struct autogroup autogroup_default;
+-static atomic_t autogroup_seq_nr;
++static atomic_unchecked_t autogroup_seq_nr;
+
+ void __init autogroup_init(struct task_struct *init_task)
+ {
+@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
+
+ kref_init(&ag->kref);
+ init_rwsem(&ag->lock);
+- ag->id = atomic_inc_return(&autogroup_seq_nr);
++ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
+ ag->tg = tg;
+ #ifdef CONFIG_RT_GROUP_SCHED
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/sched/fair.c linux-3.4-pax/kernel/sched/fair.c
+--- linux-3.4/kernel/sched/fair.c 2012-05-21 11:33:39.659929951 +0200
++++ linux-3.4-pax/kernel/sched/fair.c 2012-05-21 12:10:11.836049014 +0200
+@@ -5107,7 +5107,7 @@ static void nohz_idle_balance(int this_c
+ * run_rebalance_domains is triggered when needed from the scheduler tick.
+ * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
+ */
+-static void run_rebalance_domains(struct softirq_action *h)
++static void run_rebalance_domains(void)
+ {
+ int this_cpu = smp_processor_id();
+ struct rq *this_rq = cpu_rq(this_cpu);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/signal.c linux-3.4-pax/kernel/signal.c
+--- linux-3.4/kernel/signal.c 2012-05-21 11:33:39.667929951 +0200
++++ linux-3.4-pax/kernel/signal.c 2012-05-21 12:10:11.840049014 +0200
+@@ -47,12 +47,12 @@ static struct kmem_cache *sigqueue_cache
+
+ int print_fatal_signals __read_mostly;
+
+-static void __user *sig_handler(struct task_struct *t, int sig)
++static __sighandler_t sig_handler(struct task_struct *t, int sig)
+ {
+ return t->sighand->action[sig - 1].sa.sa_handler;
+ }
+
+-static int sig_handler_ignored(void __user *handler, int sig)
++static int sig_handler_ignored(__sighandler_t handler, int sig)
+ {
+ /* Is it explicitly or implicitly ignored? */
+ return handler == SIG_IGN ||
+@@ -61,7 +61,7 @@ static int sig_handler_ignored(void __us
+
+ static int sig_task_ignored(struct task_struct *t, int sig, bool force)
+ {
+- void __user *handler;
++ __sighandler_t handler;
+
+ handler = sig_handler(t, sig);
+
+@@ -489,7 +489,7 @@ flush_signal_handlers(struct task_struct
+
+ int unhandled_signal(struct task_struct *tsk, int sig)
+ {
+- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
++ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
+ if (is_global_init(tsk))
+ return 1;
+ if (handler != SIG_IGN && handler != SIG_DFL)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/smp.c linux-3.4-pax/kernel/smp.c
+--- linux-3.4/kernel/smp.c 2012-05-21 11:33:39.675929952 +0200
++++ linux-3.4-pax/kernel/smp.c 2012-05-21 12:10:11.844049014 +0200
+@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
+ }
+ EXPORT_SYMBOL(smp_call_function);
+
+-void ipi_call_lock(void)
++void ipi_call_lock(void) __acquires(call_function.lock)
+ {
+ raw_spin_lock(&call_function.lock);
+ }
+
+-void ipi_call_unlock(void)
++void ipi_call_unlock(void) __releases(call_function.lock)
+ {
+ raw_spin_unlock(&call_function.lock);
+ }
+
+-void ipi_call_lock_irq(void)
++void ipi_call_lock_irq(void) __acquires(call_function.lock)
+ {
+ raw_spin_lock_irq(&call_function.lock);
+ }
+
+-void ipi_call_unlock_irq(void)
++void ipi_call_unlock_irq(void) __releases(call_function.lock)
+ {
+ raw_spin_unlock_irq(&call_function.lock);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/softirq.c linux-3.4-pax/kernel/softirq.c
+--- linux-3.4/kernel/softirq.c 2012-05-21 11:33:39.679929952 +0200
++++ linux-3.4-pax/kernel/softirq.c 2012-05-21 12:10:11.848049015 +0200
+@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
+
+ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+
+-char *softirq_to_name[NR_SOFTIRQS] = {
++const char * const softirq_to_name[NR_SOFTIRQS] = {
+ "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
+ "TASKLET", "SCHED", "HRTIMER", "RCU"
+ };
+@@ -235,7 +235,7 @@ restart:
+ kstat_incr_softirqs_this_cpu(vec_nr);
+
+ trace_softirq_entry(vec_nr);
+- h->action(h);
++ h->action();
+ trace_softirq_exit(vec_nr);
+ if (unlikely(prev_count != preempt_count())) {
+ printk(KERN_ERR "huh, entered softirq %u %s %p"
+@@ -381,9 +381,11 @@ void __raise_softirq_irqoff(unsigned int
+ or_softirq_pending(1UL << nr);
+ }
+
+-void open_softirq(int nr, void (*action)(struct softirq_action *))
++void open_softirq(int nr, void (*action)(void))
+ {
+- softirq_vec[nr].action = action;
++ pax_open_kernel();
++ *(void **)&softirq_vec[nr].action = action;
++ pax_close_kernel();
+ }
+
+ /*
+@@ -437,7 +439,7 @@ void __tasklet_hi_schedule_first(struct
+
+ EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
+-static void tasklet_action(struct softirq_action *a)
++static void tasklet_action(void)
+ {
+ struct tasklet_struct *list;
+
+@@ -472,7 +474,7 @@ static void tasklet_action(struct softir
+ }
+ }
+
+-static void tasklet_hi_action(struct softirq_action *a)
++static void tasklet_hi_action(void)
+ {
+ struct tasklet_struct *list;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/sys.c linux-3.4-pax/kernel/sys.c
+--- linux-3.4/kernel/sys.c 2012-05-21 11:33:39.695929952 +0200
++++ linux-3.4-pax/kernel/sys.c 2012-05-21 12:10:11.848049015 +0200
+@@ -1252,19 +1252,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_
+ return -EFAULT;
+
+ down_read(&uts_sem);
+- error = __copy_to_user(&name->sysname, &utsname()->sysname,
++ error = __copy_to_user(name->sysname, &utsname()->sysname,
+ __OLD_UTS_LEN);
+ error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
+- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
++ error |= __copy_to_user(name->nodename, &utsname()->nodename,
+ __OLD_UTS_LEN);
+ error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
+- error |= __copy_to_user(&name->release, &utsname()->release,
++ error |= __copy_to_user(name->release, &utsname()->release,
+ __OLD_UTS_LEN);
+ error |= __put_user(0, name->release + __OLD_UTS_LEN);
+- error |= __copy_to_user(&name->version, &utsname()->version,
++ error |= __copy_to_user(name->version, &utsname()->version,
+ __OLD_UTS_LEN);
+ error |= __put_user(0, name->version + __OLD_UTS_LEN);
+- error |= __copy_to_user(&name->machine, &utsname()->machine,
++ error |= __copy_to_user(name->machine, &utsname()->machine,
+ __OLD_UTS_LEN);
+ error |= __put_user(0, name->machine + __OLD_UTS_LEN);
+ up_read(&uts_sem);
+@@ -1847,7 +1847,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
+ error = get_dumpable(me->mm);
+ break;
+ case PR_SET_DUMPABLE:
+- if (arg2 < 0 || arg2 > 1) {
++ if (arg2 > 1) {
+ error = -EINVAL;
+ break;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/sysctl_binary.c linux-3.4-pax/kernel/sysctl_binary.c
+--- linux-3.4/kernel/sysctl_binary.c 2012-01-08 19:48:29.027470875 +0100
++++ linux-3.4-pax/kernel/sysctl_binary.c 2012-05-21 12:10:11.852049015 +0200
+@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *f
+ int i;
+
+ set_fs(KERNEL_DS);
+- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
++ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out_kfree;
+@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *f
+ }
+
+ set_fs(KERNEL_DS);
+- result = vfs_write(file, buffer, str - buffer, &pos);
++ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out_kfree;
+@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file
+ int i;
+
+ set_fs(KERNEL_DS);
+- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
++ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out_kfree;
+@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file
+ }
+
+ set_fs(KERNEL_DS);
+- result = vfs_write(file, buffer, str - buffer, &pos);
++ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out_kfree;
+@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *fil
+ int i;
+
+ set_fs(KERNEL_DS);
+- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
++ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out;
+@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struc
+ __le16 dnaddr;
+
+ set_fs(KERNEL_DS);
+- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
++ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out;
+@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struc
+ le16_to_cpu(dnaddr) & 0x3ff);
+
+ set_fs(KERNEL_DS);
+- result = vfs_write(file, buf, len, &pos);
++ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/sysctl.c linux-3.4-pax/kernel/sysctl.c
+--- linux-3.4/kernel/sysctl.c 2012-05-21 11:33:39.695929952 +0200
++++ linux-3.4-pax/kernel/sysctl.c 2012-05-21 12:10:11.856049015 +0200
+@@ -210,6 +210,20 @@ extern struct ctl_table epoll_table[];
+ int sysctl_legacy_va_layout;
+ #endif
+
++#ifdef CONFIG_PAX_SOFTMODE
++static ctl_table pax_table[] = {
++ {
++ .procname = "softmode",
++ .data = &pax_softmode,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++
++ { }
++};
++#endif
++
+ /* The default sysctl tables: */
+
+ static struct ctl_table sysctl_base_table[] = {
+@@ -256,6 +270,15 @@ static int max_extfrag_threshold = 1000;
+ #endif
+
+ static struct ctl_table kern_table[] = {
++
++#ifdef CONFIG_PAX_SOFTMODE
++ {
++ .procname = "pax",
++ .mode = 0500,
++ .child = pax_table,
++ },
++#endif
++
+ {
+ .procname = "sched_child_runs_first",
+ .data = &sysctl_sched_child_runs_first,
+@@ -1215,6 +1238,13 @@ static struct ctl_table vm_table[] = {
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ },
++ {
++ .procname = "heap_stack_gap",
++ .data = &sysctl_heap_stack_gap,
++ .maxlen = sizeof(sysctl_heap_stack_gap),
++ .mode = 0644,
++ .proc_handler = proc_doulongvec_minmax,
++ },
+ #else
+ {
+ .procname = "nr_trim_pages",
+@@ -1750,6 +1780,8 @@ static int proc_put_long(void __user **b
+ len = strlen(tmp);
+ if (len > *size)
+ len = *size;
++ if (len > sizeof(tmp))
++ len = sizeof(tmp);
+ if (copy_to_user(*buf, tmp, len))
+ return -EFAULT;
+ *size -= len;
+@@ -2066,8 +2098,11 @@ static int __do_proc_doulongvec_minmax(v
+ *i = val;
+ } else {
+ val = convdiv * (*i) / convmul;
+- if (!first)
++ if (!first) {
+ err = proc_put_char(&buffer, &left, '\t');
++ if (err)
++ break;
++ }
+ err = proc_put_long(&buffer, &left, val, false);
+ if (err)
+ break;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/time/alarmtimer.c linux-3.4-pax/kernel/time/alarmtimer.c
+--- linux-3.4/kernel/time/alarmtimer.c 2012-05-21 11:33:39.703929954 +0200
++++ linux-3.4-pax/kernel/time/alarmtimer.c 2012-05-21 12:10:11.860049015 +0200
+@@ -779,7 +779,7 @@ static int __init alarmtimer_init(void)
+ struct platform_device *pdev;
+ int error = 0;
+ int i;
+- struct k_clock alarm_clock = {
++ static struct k_clock alarm_clock = {
+ .clock_getres = alarm_clock_getres,
+ .clock_get = alarm_clock_get,
+ .timer_create = alarm_timer_create,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/time/tick-broadcast.c linux-3.4-pax/kernel/time/tick-broadcast.c
+--- linux-3.4/kernel/time/tick-broadcast.c 2012-05-21 11:33:39.707929955 +0200
++++ linux-3.4-pax/kernel/time/tick-broadcast.c 2012-05-21 12:10:11.864049015 +0200
+@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
+ * then clear the broadcast bit.
+ */
+ if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
+- int cpu = smp_processor_id();
++ cpu = smp_processor_id();
+
+ cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
+ tick_broadcast_clear_oneshot(cpu);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/time/timer_stats.c linux-3.4-pax/kernel/time/timer_stats.c
+--- linux-3.4/kernel/time/timer_stats.c 2012-01-08 19:48:29.095470872 +0100
++++ linux-3.4-pax/kernel/time/timer_stats.c 2012-05-21 12:10:11.864049015 +0200
+@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
+ static unsigned long nr_entries;
+ static struct entry entries[MAX_ENTRIES];
+
+-static atomic_t overflow_count;
++static atomic_unchecked_t overflow_count;
+
+ /*
+ * The entries are in a hash-table, for fast lookup:
+@@ -140,7 +140,7 @@ static void reset_entries(void)
+ nr_entries = 0;
+ memset(entries, 0, sizeof(entries));
+ memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
+- atomic_set(&overflow_count, 0);
++ atomic_set_unchecked(&overflow_count, 0);
+ }
+
+ static struct entry *alloc_entry(void)
+@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
+ if (likely(entry))
+ entry->count++;
+ else
+- atomic_inc(&overflow_count);
++ atomic_inc_unchecked(&overflow_count);
+
+ out_unlock:
+ raw_spin_unlock_irqrestore(lock, flags);
+@@ -300,9 +300,9 @@ static int tstats_show(struct seq_file *
+
+ seq_puts(m, "Timer Stats Version: v0.2\n");
+ seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
+- if (atomic_read(&overflow_count))
++ if (atomic_read_unchecked(&overflow_count))
+ seq_printf(m, "Overflow: %d entries\n",
+- atomic_read(&overflow_count));
++ atomic_read_unchecked(&overflow_count));
+
+ for (i = 0; i < nr_entries; i++) {
+ entry = entries + i;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/timer.c linux-3.4-pax/kernel/timer.c
+--- linux-3.4/kernel/timer.c 2012-03-19 10:39:11.968049177 +0100
++++ linux-3.4-pax/kernel/timer.c 2012-05-21 12:10:11.868049016 +0200
+@@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
+ /*
+ * This function runs timers and the timer-tq in bottom half context.
+ */
+-static void run_timer_softirq(struct softirq_action *h)
++static void run_timer_softirq(void)
+ {
+ struct tvec_base *base = __this_cpu_read(tvec_bases);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/trace/blktrace.c linux-3.4-pax/kernel/trace/blktrace.c
+--- linux-3.4/kernel/trace/blktrace.c 2012-05-21 11:33:39.739929955 +0200
++++ linux-3.4-pax/kernel/trace/blktrace.c 2012-05-21 12:10:11.872049016 +0200
+@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct f
+ struct blk_trace *bt = filp->private_data;
+ char buf[16];
+
+- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
++ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
+
+ return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+ }
+@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(str
+ return 1;
+
+ bt = buf->chan->private_data;
+- atomic_inc(&bt->dropped);
++ atomic_inc_unchecked(&bt->dropped);
+ return 0;
+ }
+
+@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_qu
+
+ bt->dir = dir;
+ bt->dev = dev;
+- atomic_set(&bt->dropped, 0);
++ atomic_set_unchecked(&bt->dropped, 0);
+
+ ret = -EIO;
+ bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/trace/ftrace.c linux-3.4-pax/kernel/trace/ftrace.c
+--- linux-3.4/kernel/trace/ftrace.c 2012-05-21 11:33:39.755929956 +0200
++++ linux-3.4-pax/kernel/trace/ftrace.c 2012-05-21 12:10:11.876049016 +0200
+@@ -1800,12 +1800,17 @@ ftrace_code_disable(struct module *mod,
+ if (unlikely(ftrace_disabled))
+ return 0;
+
++ ret = ftrace_arch_code_modify_prepare();
++ FTRACE_WARN_ON(ret);
++ if (ret)
++ return 0;
++
+ ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
++ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
+ if (ret) {
+ ftrace_bug(ret, ip);
+- return 0;
+ }
+- return 1;
++ return ret ? 0 : 1;
+ }
+
+ /*
+@@ -2917,7 +2922,7 @@ static void ftrace_free_entry_rcu(struct
+
+ int
+ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+- void *data)
++ void *data)
+ {
+ struct ftrace_func_probe *entry;
+ struct ftrace_page *pg;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/trace/trace.c linux-3.4-pax/kernel/trace/trace.c
+--- linux-3.4/kernel/trace/trace.c 2012-05-21 11:33:39.767929957 +0200
++++ linux-3.4-pax/kernel/trace/trace.c 2012-05-21 12:10:11.880049016 +0200
+@@ -4312,10 +4312,9 @@ static const struct file_operations trac
+ };
+ #endif
+
+-static struct dentry *d_tracer;
+-
+ struct dentry *tracing_init_dentry(void)
+ {
++ static struct dentry *d_tracer;
+ static int once;
+
+ if (d_tracer)
+@@ -4335,10 +4334,9 @@ struct dentry *tracing_init_dentry(void)
+ return d_tracer;
+ }
+
+-static struct dentry *d_percpu;
+-
+ struct dentry *tracing_dentry_percpu(void)
+ {
++ static struct dentry *d_percpu;
+ static int once;
+ struct dentry *d_tracer;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/trace/trace_events.c linux-3.4-pax/kernel/trace/trace_events.c
+--- linux-3.4/kernel/trace/trace_events.c 2012-05-21 11:33:39.799929960 +0200
++++ linux-3.4-pax/kernel/trace/trace_events.c 2012-05-21 12:10:11.892049016 +0200
+@@ -1308,10 +1308,6 @@ static LIST_HEAD(ftrace_module_file_list
+ struct ftrace_module_file_ops {
+ struct list_head list;
+ struct module *mod;
+- struct file_operations id;
+- struct file_operations enable;
+- struct file_operations format;
+- struct file_operations filter;
+ };
+
+ static struct ftrace_module_file_ops *
+@@ -1332,17 +1328,12 @@ trace_create_file_ops(struct module *mod
+
+ file_ops->mod = mod;
+
+- file_ops->id = ftrace_event_id_fops;
+- file_ops->id.owner = mod;
+-
+- file_ops->enable = ftrace_enable_fops;
+- file_ops->enable.owner = mod;
+-
+- file_ops->filter = ftrace_event_filter_fops;
+- file_ops->filter.owner = mod;
+-
+- file_ops->format = ftrace_event_format_fops;
+- file_ops->format.owner = mod;
++ pax_open_kernel();
++ *(void **)&mod->trace_id.owner = mod;
++ *(void **)&mod->trace_enable.owner = mod;
++ *(void **)&mod->trace_filter.owner = mod;
++ *(void **)&mod->trace_format.owner = mod;
++ pax_close_kernel();
+
+ list_add(&file_ops->list, &ftrace_module_file_list);
+
+@@ -1366,8 +1357,8 @@ static void trace_module_add_events(stru
+
+ for_each_event(call, start, end) {
+ __trace_add_event_call(*call, mod,
+- &file_ops->id, &file_ops->enable,
+- &file_ops->filter, &file_ops->format);
++ &mod->trace_id, &mod->trace_enable,
++ &mod->trace_filter, &mod->trace_format);
+ }
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/trace/trace_kprobe.c linux-3.4-pax/kernel/trace/trace_kprobe.c
+--- linux-3.4/kernel/trace/trace_kprobe.c 2012-05-21 11:33:39.823929960 +0200
++++ linux-3.4-pax/kernel/trace/trace_kprobe.c 2012-05-21 12:10:11.896049016 +0200
+@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(me
+ long ret;
+ int maxlen = get_rloc_len(*(u32 *)dest);
+ u8 *dst = get_rloc_data(dest);
+- u8 *src = addr;
++ const u8 __user *src = (const u8 __force_user *)addr;
+ mm_segment_t old_fs = get_fs();
+ if (!maxlen)
+ return;
+@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(me
+ pagefault_disable();
+ do
+ ret = __copy_from_user_inatomic(dst++, src++, 1);
+- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
++ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
+ dst[-1] = '\0';
+ pagefault_enable();
+ set_fs(old_fs);
+@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(me
+ ((u8 *)get_rloc_data(dest))[0] = '\0';
+ *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
+ } else
+- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
++ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
+ get_rloc_offs(*(u32 *)dest));
+ }
+ /* Return the length of string -- including null terminal byte */
+@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(me
+ set_fs(KERNEL_DS);
+ pagefault_disable();
+ do {
+- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
++ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
+ len++;
+ } while (c && ret == 0 && len < MAX_STRING_SIZE);
+ pagefault_enable();
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/trace/trace_mmiotrace.c linux-3.4-pax/kernel/trace/trace_mmiotrace.c
+--- linux-3.4/kernel/trace/trace_mmiotrace.c 2011-10-24 12:48:42.103090928 +0200
++++ linux-3.4-pax/kernel/trace/trace_mmiotrace.c 2012-05-21 12:10:11.896049016 +0200
+@@ -24,7 +24,7 @@ struct header_iter {
+ static struct trace_array *mmio_trace_array;
+ static bool overrun_detected;
+ static unsigned long prev_overruns;
+-static atomic_t dropped_count;
++static atomic_unchecked_t dropped_count;
+
+ static void mmio_reset_data(struct trace_array *tr)
+ {
+@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
+
+ static unsigned long count_overruns(struct trace_iterator *iter)
+ {
+- unsigned long cnt = atomic_xchg(&dropped_count, 0);
++ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
+ unsigned long over = ring_buffer_overruns(iter->tr->buffer);
+
+ if (over > prev_overruns)
+@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
+ event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
+ sizeof(*entry), 0, pc);
+ if (!event) {
+- atomic_inc(&dropped_count);
++ atomic_inc_unchecked(&dropped_count);
+ return;
+ }
+ entry = ring_buffer_event_data(event);
+@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
+ event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
+ sizeof(*entry), 0, pc);
+ if (!event) {
+- atomic_inc(&dropped_count);
++ atomic_inc_unchecked(&dropped_count);
+ return;
+ }
+ entry = ring_buffer_event_data(event);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/trace/trace_output.c linux-3.4-pax/kernel/trace/trace_output.c
+--- linux-3.4/kernel/trace/trace_output.c 2012-05-21 11:33:39.823929960 +0200
++++ linux-3.4-pax/kernel/trace/trace_output.c 2012-05-21 12:10:11.900049018 +0200
+@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
+
+ p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
+ if (!IS_ERR(p)) {
+- p = mangle_path(s->buffer + s->len, p, "\n");
++ p = mangle_path(s->buffer + s->len, p, "\n\\");
+ if (p) {
+ s->len = p - s->buffer;
+ return 1;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/trace/trace_stack.c linux-3.4-pax/kernel/trace/trace_stack.c
+--- linux-3.4/kernel/trace/trace_stack.c 2012-03-19 10:39:11.988049175 +0100
++++ linux-3.4-pax/kernel/trace/trace_stack.c 2012-05-21 12:10:11.900049018 +0200
+@@ -53,7 +53,7 @@ static inline void check_stack(void)
+ return;
+
+ /* we do not handle interrupt stacks yet */
+- if (!object_is_on_stack(&this_size))
++ if (!object_starts_on_stack(&this_size))
+ return;
+
+ local_irq_save(flags);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/kernel/trace/trace_workqueue.c linux-3.4-pax/kernel/trace/trace_workqueue.c
+--- linux-3.4/kernel/trace/trace_workqueue.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/kernel/trace/trace_workqueue.c 2012-05-21 12:10:11.904049019 +0200
+@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
+ int cpu;
+ pid_t pid;
+ /* Can be inserted from interrupt or user context, need to be atomic */
+- atomic_t inserted;
++ atomic_unchecked_t inserted;
+ /*
+ * Don't need to be atomic, works are serialized in a single workqueue thread
+ * on a single CPU.
+@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
+ spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+ list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
+ if (node->pid == wq_thread->pid) {
+- atomic_inc(&node->inserted);
++ atomic_inc_unchecked(&node->inserted);
+ goto found;
+ }
+ }
+@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
+ tsk = get_pid_task(pid, PIDTYPE_PID);
+ if (tsk) {
+ seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
+- atomic_read(&cws->inserted), cws->executed,
++ atomic_read_unchecked(&cws->inserted), cws->executed,
+ tsk->comm);
+ put_task_struct(tsk);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/lib/bitmap.c linux-3.4-pax/lib/bitmap.c
+--- linux-3.4/lib/bitmap.c 2012-05-21 11:33:39.867929962 +0200
++++ linux-3.4-pax/lib/bitmap.c 2012-05-21 12:10:11.908049019 +0200
+@@ -421,7 +421,7 @@ int __bitmap_parse(const char *buf, unsi
+ {
+ int c, old_c, totaldigits, ndigits, nchunks, nbits;
+ u32 chunk;
+- const char __user __force *ubuf = (const char __user __force *)buf;
++ const char __user *ubuf = (const char __force_user *)buf;
+
+ bitmap_zero(maskp, nmaskbits);
+
+@@ -506,7 +506,7 @@ int bitmap_parse_user(const char __user
+ {
+ if (!access_ok(VERIFY_READ, ubuf, ulen))
+ return -EFAULT;
+- return __bitmap_parse((const char __force *)ubuf,
++ return __bitmap_parse((const char __force_kernel *)ubuf,
+ ulen, 1, maskp, nmaskbits);
+
+ }
+@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char
+ {
+ unsigned a, b;
+ int c, old_c, totaldigits;
+- const char __user __force *ubuf = (const char __user __force *)buf;
++ const char __user *ubuf = (const char __force_user *)buf;
+ int exp_digit, in_range;
+
+ totaldigits = c = 0;
+@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __u
+ {
+ if (!access_ok(VERIFY_READ, ubuf, ulen))
+ return -EFAULT;
+- return __bitmap_parselist((const char __force *)ubuf,
++ return __bitmap_parselist((const char __force_kernel *)ubuf,
+ ulen, 1, maskp, nmaskbits);
+ }
+ EXPORT_SYMBOL(bitmap_parselist_user);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/lib/bug.c linux-3.4-pax/lib/bug.c
+--- linux-3.4/lib/bug.c 2012-03-19 10:39:12.028049173 +0100
++++ linux-3.4-pax/lib/bug.c 2012-05-21 12:10:11.908049019 +0200
+@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
+ return BUG_TRAP_TYPE_NONE;
+
+ bug = find_bug(bugaddr);
++ if (!bug)
++ return BUG_TRAP_TYPE_NONE;
+
+ file = NULL;
+ line = 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/lib/debugobjects.c linux-3.4-pax/lib/debugobjects.c
+--- linux-3.4/lib/debugobjects.c 2012-03-19 10:39:12.032049172 +0100
++++ linux-3.4-pax/lib/debugobjects.c 2012-05-21 12:10:11.912049019 +0200
+@@ -288,7 +288,7 @@ static void debug_object_is_on_stack(voi
+ if (limit > 4)
+ return;
+
+- is_on_stack = object_is_on_stack(addr);
++ is_on_stack = object_starts_on_stack(addr);
+ if (is_on_stack == onstack)
+ return;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/lib/devres.c linux-3.4-pax/lib/devres.c
+--- linux-3.4/lib/devres.c 2012-05-21 11:33:39.891929965 +0200
++++ linux-3.4-pax/lib/devres.c 2012-05-21 12:10:11.912049019 +0200
+@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
+ void devm_iounmap(struct device *dev, void __iomem *addr)
+ {
+ WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
+- (void *)addr));
++ (void __force *)addr));
+ iounmap(addr);
+ }
+ EXPORT_SYMBOL(devm_iounmap);
+@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *de
+ {
+ ioport_unmap(addr);
+ WARN_ON(devres_destroy(dev, devm_ioport_map_release,
+- devm_ioport_map_match, (void *)addr));
++ devm_ioport_map_match, (void __force *)addr));
+ }
+ EXPORT_SYMBOL(devm_ioport_unmap);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/lib/dma-debug.c linux-3.4-pax/lib/dma-debug.c
+--- linux-3.4/lib/dma-debug.c 2012-05-21 11:33:39.895929965 +0200
++++ linux-3.4-pax/lib/dma-debug.c 2012-05-21 12:10:11.916049018 +0200
+@@ -924,7 +924,7 @@ out:
+
+ static void check_for_stack(struct device *dev, void *addr)
+ {
+- if (object_is_on_stack(addr))
++ if (object_starts_on_stack(addr))
+ err_printk(dev, NULL, "DMA-API: device driver maps memory from"
+ "stack [addr=%p]\n", addr);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/lib/extable.c linux-3.4-pax/lib/extable.c
+--- linux-3.4/lib/extable.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/lib/extable.c 2012-05-21 12:10:11.920049018 +0200
+@@ -13,6 +13,7 @@
+ #include <linux/init.h>
+ #include <linux/sort.h>
+ #include <asm/uaccess.h>
++#include <asm/pgtable.h>
+
+ #ifndef ARCH_HAS_SORT_EXTABLE
+ /*
+@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
+ void sort_extable(struct exception_table_entry *start,
+ struct exception_table_entry *finish)
+ {
++ pax_open_kernel();
+ sort(start, finish - start, sizeof(struct exception_table_entry),
+ cmp_ex, NULL);
++ pax_close_kernel();
+ }
+
+ #ifdef CONFIG_MODULES
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/lib/inflate.c linux-3.4-pax/lib/inflate.c
+--- linux-3.4/lib/inflate.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/lib/inflate.c 2012-05-21 12:10:11.920049018 +0200
+@@ -269,7 +269,7 @@ static void free(void *where)
+ malloc_ptr = free_mem_ptr;
+ }
+ #else
+-#define malloc(a) kmalloc(a, GFP_KERNEL)
++#define malloc(a) kmalloc((a), GFP_KERNEL)
+ #define free(a) kfree(a)
+ #endif
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/lib/ioremap.c linux-3.4-pax/lib/ioremap.c
+--- linux-3.4/lib/ioremap.c 2012-05-21 11:33:39.951929967 +0200
++++ linux-3.4-pax/lib/ioremap.c 2012-05-21 12:10:11.924049019 +0200
+@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_
+ unsigned long next;
+
+ phys_addr -= addr;
+- pmd = pmd_alloc(&init_mm, pud, addr);
++ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
+ if (!pmd)
+ return -ENOMEM;
+ do {
+@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_
+ unsigned long next;
+
+ phys_addr -= addr;
+- pud = pud_alloc(&init_mm, pgd, addr);
++ pud = pud_alloc_kernel(&init_mm, pgd, addr);
+ if (!pud)
+ return -ENOMEM;
+ do {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/lib/radix-tree.c linux-3.4-pax/lib/radix-tree.c
+--- linux-3.4/lib/radix-tree.c 2012-05-21 11:33:39.999929969 +0200
++++ linux-3.4-pax/lib/radix-tree.c 2012-05-21 12:10:11.928049019 +0200
+@@ -79,7 +79,7 @@ struct radix_tree_preload {
+ int nr;
+ struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
+ };
+-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
++static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
+
+ static inline void *ptr_to_indirect(void *ptr)
+ {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/lib/vsprintf.c linux-3.4-pax/lib/vsprintf.c
+--- linux-3.4/lib/vsprintf.c 2012-05-21 11:33:40.027929971 +0200
++++ linux-3.4-pax/lib/vsprintf.c 2012-05-21 12:10:11.932049019 +0200
+@@ -868,12 +868,12 @@ char *pointer(const char *fmt, char *buf
+ {
+ if (!ptr && *fmt != 'K') {
+ /*
+- * Print (null) with the same width as a pointer so it makes
++ * Print (nil) with the same width as a pointer so it makes
+ * tabular output look nice.
+ */
+ if (spec.field_width == -1)
+ spec.field_width = 2 * sizeof(void *);
+- return string(buf, end, "(null)", spec);
++ return string(buf, end, "(nil)", spec);
+ }
+
+ switch (*fmt) {
+@@ -1653,11 +1653,11 @@ int bstr_printf(char *buf, size_t size,
+ typeof(type) value; \
+ if (sizeof(type) == 8) { \
+ args = PTR_ALIGN(args, sizeof(u32)); \
+- *(u32 *)&value = *(u32 *)args; \
+- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
++ *(u32 *)&value = *(const u32 *)args; \
++ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
+ } else { \
+ args = PTR_ALIGN(args, sizeof(type)); \
+- value = *(typeof(type) *)args; \
++ value = *(const typeof(type) *)args; \
+ } \
+ args += sizeof(type); \
+ value; \
+@@ -1720,7 +1720,7 @@ int bstr_printf(char *buf, size_t size,
+ case FORMAT_TYPE_STR: {
+ const char *str_arg = args;
+ args += strlen(str_arg) + 1;
+- str = string(str, end, (char *)str_arg, spec);
++ str = string(str, end, str_arg, spec);
+ break;
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/Makefile linux-3.4-pax/Makefile
+--- linux-3.4/Makefile 2012-05-21 11:32:30.559926198 +0200
++++ linux-3.4-pax/Makefile 2012-05-28 00:32:47.711160421 +0200
+@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
+
+ HOSTCC = gcc
+ HOSTCXX = g++
+-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
+-HOSTCXXFLAGS = -O2
++HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
++HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
++HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
+
+ # Decide whether to build built-in, modular, or both.
+ # Normally, just do built-in.
+@@ -357,9 +358,9 @@ CFLAGS_GCOV = -fprofile-arcs -ftest-cove
+
+ # Use LINUXINCLUDE when you must reference the include/ directory.
+ # Needed to be compatible with the O= option
+-LINUXINCLUDE := -I$(srctree)/arch/$(hdr-arch)/include \
+- -Iarch/$(hdr-arch)/include/generated -Iinclude \
+- $(if $(KBUILD_SRC), -I$(srctree)/include) \
++LINUXINCLUDE := -isystem arch/$(hdr-arch)/include \
++ -isystem arch/$(hdr-arch)/include/generated -isystem include \
++ -isystem include/generated \
+ -include $(srctree)/include/linux/kconfig.h
+
+ KBUILD_CPPFLAGS := -D__KERNEL__
+@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
+ # Rules shared between *config targets and build targets
+
+ # Basic helpers built in scripts/
+-PHONY += scripts_basic
+-scripts_basic:
++PHONY += scripts_basic gcc-plugins
++scripts_basic: gcc-plugins
+ $(Q)$(MAKE) $(build)=scripts/basic
+ $(Q)rm -f .tmp_quiet_recordmcount
+
+@@ -442,7 +443,7 @@ asm-generic:
+
+ no-dot-config-targets := clean mrproper distclean \
+ cscope gtags TAGS tags help %docs check% coccicheck \
+- include/linux/version.h headers_% archheaders archscripts \
++ include/generated/linux/version.h headers_% archheaders archscripts \
+ kernelversion %src-pkg
+
+ config-targets := 0
+@@ -485,11 +486,11 @@ include $(srctree)/arch/$(SRCARCH)/Makef
+ export KBUILD_DEFCONFIG KBUILD_KCONFIG
+
+ config: scripts_basic outputmakefile FORCE
+- $(Q)mkdir -p include/linux include/config
++ $(Q)mkdir -p include/generated/linux include/config
+ $(Q)$(MAKE) $(build)=scripts/kconfig $@
+
+ %config: scripts_basic outputmakefile FORCE
+- $(Q)mkdir -p include/linux include/config
++ $(Q)mkdir -p include/generated/linux include/config
+ $(Q)$(MAKE) $(build)=scripts/kconfig $@
+
+ else
+@@ -564,6 +565,51 @@ else
+ KBUILD_CFLAGS += -O2
+ endif
+
++ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
++ifdef CONFIG_PAX_CONSTIFY_PLUGIN
++CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
++endif
++ifdef CONFIG_PAX_MEMORY_STACKLEAK
++STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
++STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
++endif
++ifdef CONFIG_KALLOCSTAT_PLUGIN
++KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
++endif
++ifdef CONFIG_PAX_KERNEXEC_PLUGIN
++KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
++KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
++KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
++endif
++ifdef CONFIG_CHECKER_PLUGIN
++ifeq ($(call cc-ifversion, -ge, 0406, y), y)
++CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
++endif
++endif
++COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
++ifdef CONFIG_PAX_SIZE_OVERFLOW
++SIZE_OVERFLOW_PLUGIN := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
++endif
++GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN)
++GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
++export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
++ifeq ($(KBUILD_EXTMOD),)
++gcc-plugins:
++ $(Q)$(MAKE) $(build)=tools/gcc
++else
++gcc-plugins: ;
++endif
++else
++gcc-plugins:
++ifeq ($(call cc-ifversion, -ge, 0405, y), y)
++ $(Q)echo "warning, your gcc installation does not support plugins, perhaps the necessary headers are missing?"
++else
++ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
++endif
++ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
++endif
++
+ include $(srctree)/arch/$(SRCARCH)/Makefile
+
+ ifneq ($(CONFIG_FRAME_WARN),0)
+@@ -932,6 +978,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-mai
+
+ # The actual objects are generated when descending,
+ # make sure no implicit rule kicks in
++$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
+
+ # Handle descending into subdirectories listed in $(vmlinux-dirs)
+@@ -941,7 +989,7 @@ $(sort $(vmlinux-init) $(vmlinux-main))
+ # Error messages still appears in the original language
+
+ PHONY += $(vmlinux-dirs)
+-$(vmlinux-dirs): prepare scripts
++$(vmlinux-dirs): gcc-plugins prepare scripts
+ $(Q)$(MAKE) $(build)=$@
+
+ # Store (new) KERNELRELASE string in include/config/kernel.release
+@@ -970,12 +1018,14 @@ ifneq ($(KBUILD_SRC),)
+ echo " in the '$(srctree)' directory.";\
+ /bin/false; \
+ fi;
++ $(Q)for dir in $(srctree)/include/* ; do ln -fsn $$dir include/`basename $$dir` ; done
++ $(Q)ln -fsn $(srctree)/arch/$(SRCARCH)/include/asm arch/$(SRCARCH)/include;
+ endif
+
+ # prepare2 creates a makefile if using a separate output directory
+ prepare2: prepare3 outputmakefile asm-generic
+
+-prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
++prepare1: prepare2 include/generated/linux/version.h include/generated/utsrelease.h \
+ include/config/auto.conf
+ $(cmd_crmodverdir)
+
+@@ -985,6 +1035,7 @@ prepare0: archprepare FORCE
+ $(Q)$(MAKE) $(build)=.
+
+ # All the preparing..
++prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
+ prepare: prepare0
+
+ # Generate some files
+@@ -1008,7 +1059,7 @@ define filechk_version.h
+ echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';)
+ endef
+
+-include/linux/version.h: $(srctree)/Makefile FORCE
++include/generated/linux/version.h: $(srctree)/Makefile FORCE
+ $(call filechk,version.h)
+
+ include/generated/utsrelease.h: include/config/kernel.release FORCE
+@@ -1053,7 +1104,7 @@ PHONY += archscripts
+ archscripts:
+
+ PHONY += __headers
+-__headers: include/linux/version.h scripts_basic asm-generic archheaders archscripts FORCE
++__headers: include/generated/linux/version.h scripts_basic asm-generic archheaders archscripts FORCE
+ $(Q)$(MAKE) $(build)=scripts build_unifdef
+
+ PHONY += headers_install_all
+@@ -1092,6 +1143,8 @@ all: modules
+ # using awk while concatenating to the final file.
+
+ PHONY += modules
++modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
+ $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
+ @$(kecho) ' Building modules, stage 2.';
+@@ -1107,7 +1160,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modu
+
+ # Target to prepare building external modules
+ PHONY += modules_prepare
+-modules_prepare: prepare scripts
++modules_prepare: gcc-plugins prepare scripts
+
+ # Target to install modules
+ PHONY += modules_install
+@@ -1166,7 +1219,7 @@ CLEAN_FILES += vmlinux System.map \
+ MRPROPER_DIRS += include/config usr/include include/generated \
+ arch/*/include/generated
+ MRPROPER_FILES += .config .config.old .version .old_version \
+- include/linux/version.h \
++ include/generated/linux/version.h \
+ Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
+
+ # clean - Delete most, but leave enough to build external modules
+@@ -1204,6 +1257,7 @@ distclean: mrproper
+ \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
+ -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
+ -o -name '.*.rej' \
++ -o -name '.*.rej' -o -name '*.so' \
+ -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
+ -type f -print | xargs rm -f
+
+@@ -1364,6 +1418,8 @@ PHONY += $(module-dirs) modules
+ $(module-dirs): crmodverdir $(objtree)/Module.symvers
+ $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
+
++modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ modules: $(module-dirs)
+ @$(kecho) ' Building modules, stage 2.';
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
+@@ -1490,17 +1546,21 @@ else
+ target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
+ endif
+
+-%.s: %.c prepare scripts FORCE
++%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%.s: %.c gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.i: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.o: %.c prepare scripts FORCE
++%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%.o: %.c gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.lst: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.s: %.S prepare scripts FORCE
++%.s: %.S gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.o: %.S prepare scripts FORCE
++%.o: %.S gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.symtypes: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+@@ -1510,11 +1570,15 @@ endif
+ $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir)
+-%/: prepare scripts FORCE
++%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%/: gcc-plugins prepare scripts FORCE
+ $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir)
+-%.ko: prepare scripts FORCE
++%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%.ko: gcc-plugins prepare scripts FORCE
+ $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir) $(@:.ko=.o)
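Note on the Makefile hunks above: the PaX patch wires its gcc plugins into kbuild by making scripts_basic depend on a new phony gcc-plugins target, collecting the per-plugin -fplugin= switches into GCC_PLUGINS_CFLAGS / GCC_PLUGINS_AFLAGS, applying those to the vmlinux objects, modules and single-object targets, and filtering them back out of the "prepare" stage; if the compiler cannot load plugins, only a warning is printed. For orientation, a plugin .so built under tools/gcc needs little more than the standard GCC plugin entry point. The following is a minimal, hypothetical skeleton (illustrative file name, not the actual constify/stackleak source):

    /* minimal_plugin.c - smallest useful GCC plugin, for illustration only.
     * Build (assuming a plugin-capable gcc):
     *   gcc -shared -fPIC -I"$(gcc -print-file-name=plugin)/include" \
     *       -o minimal_plugin.so minimal_plugin.c
     */
    #include "gcc-plugin.h"
    #include "plugin-version.h"

    int plugin_is_GPL_compatible;            /* symbol gcc checks before loading */

    int plugin_init(struct plugin_name_args *plugin_info,
                    struct plugin_gcc_version *version)
    {
            if (!plugin_default_version_check(version, &gcc_version))
                    return 1;                /* refuse to load on a mismatched gcc */
            /* a real plugin (constify, stackleak, kernexec, ...) would register
             * its compiler passes here via register_callback() */
            return 0;
    }
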
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/filemap.c linux-3.4-pax/mm/filemap.c
+--- linux-3.4/mm/filemap.c 2012-05-21 11:33:40.035929971 +0200
++++ linux-3.4-pax/mm/filemap.c 2012-05-21 12:10:11.940049020 +0200
+@@ -1762,7 +1762,7 @@ int generic_file_mmap(struct file * file
+ struct address_space *mapping = file->f_mapping;
+
+ if (!mapping->a_ops->readpage)
+- return -ENOEXEC;
++ return -ENODEV;
+ file_accessed(file);
+ vma->vm_ops = &generic_file_vm_ops;
+ vma->vm_flags |= VM_CAN_NONLINEAR;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/fremap.c linux-3.4-pax/mm/fremap.c
+--- linux-3.4/mm/fremap.c 2012-01-08 19:48:29.299470861 +0100
++++ linux-3.4-pax/mm/fremap.c 2012-05-21 12:10:11.940049020 +0200
+@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
+ retry:
+ vma = find_vma(mm, start);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
++ goto out;
++#endif
++
+ /*
+ * Make sure the vma is shared, that it supports prefaulting,
+ * and that the remapped range is valid and fully within
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/highmem.c linux-3.4-pax/mm/highmem.c
+--- linux-3.4/mm/highmem.c 2012-01-08 19:48:29.303470861 +0100
++++ linux-3.4-pax/mm/highmem.c 2012-05-21 12:10:11.944049020 +0200
+@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
+ * So no dangers, even with speculative execution.
+ */
+ page = pte_page(pkmap_page_table[i]);
++ pax_open_kernel();
+ pte_clear(&init_mm, (unsigned long)page_address(page),
+ &pkmap_page_table[i]);
+-
++ pax_close_kernel();
+ set_page_address(page, NULL);
+ need_flush = 1;
+ }
+@@ -186,9 +187,11 @@ start:
+ }
+ }
+ vaddr = PKMAP_ADDR(last_pkmap_nr);
++
++ pax_open_kernel();
+ set_pte_at(&init_mm, vaddr,
+ &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
+-
++ pax_close_kernel();
+ pkmap_count[last_pkmap_nr] = 1;
+ set_page_address(page, (void *)vaddr);
+
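The mm/highmem.c hunk wraps the pkmap page-table updates in pax_open_kernel()/pax_close_kernel(). With PaX KERNEXEC the kernel's own page tables are normally kept read-only, so a legitimate writer has to open a short write window around the update and close it again (on x86 this is typically done by briefly clearing the CR0 write-protect bit). A loose userspace analogy of that pattern, not the kernel mechanism itself (hypothetical test program):

    /* ro_window.c - "open a write window, update, close it", in userspace terms. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long pagesz = sysconf(_SC_PAGESIZE);
            char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;
            strcpy(p, "initial");
            mprotect(p, pagesz, PROT_READ);               /* normally read-only  */

            mprotect(p, pagesz, PROT_READ | PROT_WRITE);  /* "pax_open_kernel"   */
            strcpy(p, "updated");                         /* the actual change   */
            mprotect(p, pagesz, PROT_READ);               /* "pax_close_kernel"  */

            printf("%s\n", p);
            return 0;
    }
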
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/huge_memory.c linux-3.4-pax/mm/huge_memory.c
+--- linux-3.4/mm/huge_memory.c 2012-05-21 11:33:40.039929972 +0200
++++ linux-3.4-pax/mm/huge_memory.c 2012-05-21 12:10:11.948049020 +0200
+@@ -733,7 +733,7 @@ out:
+ * run pte_offset_map on the pmd, if an huge pmd could
+ * materialize from under us from a different thread.
+ */
+- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
++ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
+ return VM_FAULT_OOM;
+ /* if an huge pmd materialized from under us just retry later */
+ if (unlikely(pmd_trans_huge(*pmd)))
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/hugetlb.c linux-3.4-pax/mm/hugetlb.c
+--- linux-3.4/mm/hugetlb.c 2012-05-21 11:33:40.043929972 +0200
++++ linux-3.4-pax/mm/hugetlb.c 2012-05-21 12:10:11.952049020 +0200
+@@ -2437,6 +2437,27 @@ static int unmap_ref_private(struct mm_s
+ return 1;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ struct vm_area_struct *vma_m;
++ unsigned long address_m;
++ pte_t *ptep_m;
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (!vma_m)
++ return;
++
++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++ address_m = address + SEGMEXEC_TASK_SIZE;
++ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
++ get_page(page_m);
++ hugepage_add_anon_rmap(page_m, vma_m, address_m);
++ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
++}
++#endif
++
+ /*
+ * Hugetlb_cow() should be called with page lock of the original hugepage held.
+ * Called with hugetlb_instantiation_mutex held and pte_page locked so we
+@@ -2549,6 +2570,11 @@ retry_avoidcopy:
+ make_huge_pte(vma, new_page, 1));
+ page_remove_rmap(old_page);
+ hugepage_add_new_anon_rmap(new_page, vma, address);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_huge_pte(vma, address, new_page);
++#endif
++
+ /* Make the old page be freed below */
+ new_page = old_page;
+ mmu_notifier_invalidate_range_end(mm,
+@@ -2703,6 +2729,10 @@ retry:
+ && (vma->vm_flags & VM_SHARED)));
+ set_huge_pte_at(mm, address, ptep, new_pte);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_huge_pte(vma, address, page);
++#endif
++
+ if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
+ /* Optimization, do the COW without a second fault */
+ ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
+@@ -2732,6 +2762,10 @@ int hugetlb_fault(struct mm_struct *mm,
+ static DEFINE_MUTEX(hugetlb_instantiation_mutex);
+ struct hstate *h = hstate_vma(vma);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ address &= huge_page_mask(h);
+
+ ptep = huge_pte_offset(mm, address);
+@@ -2745,6 +2779,26 @@ int hugetlb_fault(struct mm_struct *mm,
+ VM_FAULT_SET_HINDEX(h - hstates);
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m) {
++ unsigned long address_m;
++
++ if (vma->vm_start > vma_m->vm_start) {
++ address_m = address;
++ address -= SEGMEXEC_TASK_SIZE;
++ vma = vma_m;
++ h = hstate_vma(vma);
++ } else
++ address_m = address + SEGMEXEC_TASK_SIZE;
++
++ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
++ return VM_FAULT_OOM;
++ address_m &= HPAGE_MASK;
++ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
++ }
++#endif
++
+ ptep = huge_pte_alloc(mm, address, huge_page_size(h));
+ if (!ptep)
+ return VM_FAULT_OOM;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/internal.h linux-3.4-pax/mm/internal.h
+--- linux-3.4/mm/internal.h 2012-01-08 19:48:29.311470860 +0100
++++ linux-3.4-pax/mm/internal.h 2012-05-21 12:10:11.952049020 +0200
+@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page
+ * in mm/page_alloc.c
+ */
+ extern void __free_pages_bootmem(struct page *page, unsigned int order);
++extern void free_compound_page(struct page *page);
+ extern void prep_compound_page(struct page *page, unsigned long order);
+ #ifdef CONFIG_MEMORY_FAILURE
+ extern bool is_free_buddy_page(struct page *page);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/Kconfig linux-3.4-pax/mm/Kconfig
+--- linux-3.4/mm/Kconfig 2012-03-19 10:39:12.120049167 +0100
++++ linux-3.4-pax/mm/Kconfig 2012-05-21 12:10:11.956049020 +0200
+@@ -247,10 +247,10 @@ config KSM
+ root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
+
+ config DEFAULT_MMAP_MIN_ADDR
+- int "Low address space to protect from user allocation"
++ int "Low address space to protect from user allocation"
+ depends on MMU
+- default 4096
+- help
++ default 32768
++ help
+ This is the portion of low virtual memory which should be protected
+ from userspace allocation. Keeping a user from writing to low pages
+ can help reduce the impact of kernel NULL pointer bugs.
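Besides the whitespace churn on the surrounding lines, the mm/Kconfig hunk raises the default of DEFAULT_MMAP_MIN_ADDR from 4096 to 32768, so a larger slice of low virtual memory is off limits to unprivileged mappings (the runtime value remains the vm.mmap_min_addr sysctl). The effect is easy to check from userspace; a small, hypothetical test, assuming the sysctl on the running system is above 0x1000:

    /* low_map.c - MAP_FIXED mappings below vm.mmap_min_addr are refused.
     * The exact errno depends on kernel/LSM configuration (commonly EPERM);
     * run as an unprivileged user.
     */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (p == MAP_FAILED)
                    printf("low mapping refused: %s\n", strerror(errno));
            else
                    printf("low mapping allowed at %p\n", p);
            return 0;
    }
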
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/maccess.c linux-3.4-pax/mm/maccess.c
+--- linux-3.4/mm/maccess.c 2012-01-08 19:48:29.327470859 +0100
++++ linux-3.4-pax/mm/maccess.c 2012-05-21 12:10:11.956049020 +0200
+@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, cons
+ set_fs(KERNEL_DS);
+ pagefault_disable();
+ ret = __copy_from_user_inatomic(dst,
+- (__force const void __user *)src, size);
++ (const void __force_user *)src, size);
+ pagefault_enable();
+ set_fs(old_fs);
+
+@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, con
+
+ set_fs(KERNEL_DS);
+ pagefault_disable();
+- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
++ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
+ pagefault_enable();
+ set_fs(old_fs);
+
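The mm/maccess.c hunk changes only sparse annotations: the casts inside __probe_kernel_read()/__probe_kernel_write() become (const void __force_user *) and (void __force_user *). Generated code is identical; __force_user appears to be a PaX shorthand combining the __force and __user address-space markers so the static checker treats the kernel pointer as a user pointer for this one access. For reference, the stock annotations are defined along these lines (a sketch in the spirit of include/linux/compiler.h; the __force_user definition is an assumption about the PaX patch, not shown in this hunk):

    /* Only active when compiling under sparse (__CHECKER__); otherwise empty. */
    #ifdef __CHECKER__
    # define __user   __attribute__((noderef, address_space(1)))
    # define __kernel __attribute__((address_space(0)))
    # define __force  __attribute__((force))
    #else
    # define __user
    # define __kernel
    # define __force
    #endif

    /* PaX then presumably adds combined helpers such as:
     *   #define __force_user   __force __user
     *   #define __force_kernel __force __kernel
     */
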
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/madvise.c linux-3.4-pax/mm/madvise.c
+--- linux-3.4/mm/madvise.c 2012-05-21 11:33:40.071929973 +0200
++++ linux-3.4-pax/mm/madvise.c 2012-05-21 12:10:11.960049021 +0200
+@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
+ pgoff_t pgoff;
+ unsigned long new_flags = vma->vm_flags;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ switch (behavior) {
+ case MADV_NORMAL:
+ new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
+@@ -116,6 +120,13 @@ success:
+ /*
+ * vm_flags is protected by the mmap_sem held in write mode.
+ */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m)
++ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
++#endif
++
+ vma->vm_flags = new_flags;
+
+ out:
+@@ -174,6 +185,11 @@ static long madvise_dontneed(struct vm_a
+ struct vm_area_struct ** prev,
+ unsigned long start, unsigned long end)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ *prev = vma;
+ if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
+ return -EINVAL;
+@@ -186,6 +202,21 @@ static long madvise_dontneed(struct vm_a
+ zap_page_range(vma, start, end - start, &details);
+ } else
+ zap_page_range(vma, start, end - start, NULL);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m) {
++ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
++ struct zap_details details = {
++ .nonlinear_vma = vma_m,
++ .last_index = ULONG_MAX,
++ };
++ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
++ } else
++ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
++ }
++#endif
++
+ return 0;
+ }
+
+@@ -384,6 +415,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
+ if (end < start)
+ goto out;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ goto out;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ goto out;
++
+ error = 0;
+ if (end == start)
+ goto out;
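The mm/madvise.c hunks propagate flag changes and zap operations to the SEGMEXEC mirror VMA and reject ranges that end above TASK_SIZE (or SEGMEXEC_TASK_SIZE for SEGMEXEC tasks). Ordinary in-range use of the syscall is unaffected, for example (hypothetical test program):

    /* dontneed.c - routine madvise() use; untouched by the added range checks. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 1 << 20;
            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;
            memset(p, 0xaa, len);
            if (madvise(p, len, MADV_DONTNEED) != 0)      /* drop the dirty pages */
                    perror("madvise");
            /* anonymous private memory refaults as zero after MADV_DONTNEED */
            printf("first byte afterwards: %d\n", p[0]);
            return 0;
    }
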
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/memory.c linux-3.4-pax/mm/memory.c
+--- linux-3.4/mm/memory.c 2012-05-21 11:33:40.095929975 +0200
++++ linux-3.4-pax/mm/memory.c 2012-05-21 12:10:11.964049021 +0200
+@@ -434,8 +434,12 @@ static inline void free_pmd_range(struct
+ return;
+
+ pmd = pmd_offset(pud, start);
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
+ pud_clear(pud);
+ pmd_free_tlb(tlb, pmd, start);
++#endif
++
+ }
+
+ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+@@ -466,9 +470,12 @@ static inline void free_pud_range(struct
+ if (end - 1 > ceiling - 1)
+ return;
+
++#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
+ pud = pud_offset(pgd, start);
+ pgd_clear(pgd);
+ pud_free_tlb(tlb, pud, start);
++#endif
++
+ }
+
+ /*
+@@ -1597,12 +1604,6 @@ no_page_table:
+ return page;
+ }
+
+-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+-{
+- return stack_guard_page_start(vma, addr) ||
+- stack_guard_page_end(vma, addr+PAGE_SIZE);
+-}
+-
+ /**
+ * __get_user_pages() - pin user pages in memory
+ * @tsk: task_struct of target task
+@@ -1675,10 +1676,10 @@ int __get_user_pages(struct task_struct
+ (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+ i = 0;
+
+- do {
++ while (nr_pages) {
+ struct vm_area_struct *vma;
+
+- vma = find_extend_vma(mm, start);
++ vma = find_vma(mm, start);
+ if (!vma && in_gate_area(mm, start)) {
+ unsigned long pg = start & PAGE_MASK;
+ pgd_t *pgd;
+@@ -1726,7 +1727,7 @@ int __get_user_pages(struct task_struct
+ goto next_page;
+ }
+
+- if (!vma ||
++ if (!vma || start < vma->vm_start ||
+ (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+ !(vm_flags & vma->vm_flags))
+ return i ? : -EFAULT;
+@@ -1753,11 +1754,6 @@ int __get_user_pages(struct task_struct
+ int ret;
+ unsigned int fault_flags = 0;
+
+- /* For mlock, just skip the stack guard page. */
+- if (foll_flags & FOLL_MLOCK) {
+- if (stack_guard_page(vma, start))
+- goto next_page;
+- }
+ if (foll_flags & FOLL_WRITE)
+ fault_flags |= FAULT_FLAG_WRITE;
+ if (nonblocking)
+@@ -1831,7 +1827,7 @@ next_page:
+ start += PAGE_SIZE;
+ nr_pages--;
+ } while (nr_pages && start < vma->vm_end);
+- } while (nr_pages);
++ }
+ return i;
+ }
+ EXPORT_SYMBOL(__get_user_pages);
+@@ -2038,6 +2034,10 @@ static int insert_page(struct vm_area_st
+ page_add_file_rmap(page);
+ set_pte_at(mm, addr, pte, mk_pte(page, prot));
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_file_pte(vma, addr, page, ptl);
++#endif
++
+ retval = 0;
+ pte_unmap_unlock(pte, ptl);
+ return retval;
+@@ -2072,10 +2072,22 @@ out:
+ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+ struct page *page)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return -EFAULT;
+ if (!page_count(page))
+ return -EINVAL;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m)
++ vma_m->vm_flags |= VM_INSERTPAGE;
++#endif
++
+ vma->vm_flags |= VM_INSERTPAGE;
+ return insert_page(vma, addr, page, vma->vm_page_prot);
+ }
+@@ -2161,6 +2173,7 @@ int vm_insert_mixed(struct vm_area_struc
+ unsigned long pfn)
+ {
+ BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
++ BUG_ON(vma->vm_mirror);
+
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return -EFAULT;
+@@ -2368,7 +2381,9 @@ static int apply_to_pmd_range(struct mm_
+
+ BUG_ON(pud_huge(*pud));
+
+- pmd = pmd_alloc(mm, pud, addr);
++ pmd = (mm == &init_mm) ?
++ pmd_alloc_kernel(mm, pud, addr) :
++ pmd_alloc(mm, pud, addr);
+ if (!pmd)
+ return -ENOMEM;
+ do {
+@@ -2388,7 +2403,9 @@ static int apply_to_pud_range(struct mm_
+ unsigned long next;
+ int err;
+
+- pud = pud_alloc(mm, pgd, addr);
++ pud = (mm == &init_mm) ?
++ pud_alloc_kernel(mm, pgd, addr) :
++ pud_alloc(mm, pgd, addr);
+ if (!pud)
+ return -ENOMEM;
+ do {
+@@ -2476,6 +2493,186 @@ static inline void cow_user_page(struct
+ copy_user_highpage(dst, src, va, vma);
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ spinlock_t *ptl;
++ pte_t *pte, entry;
++
++ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
++ entry = *pte;
++ if (!pte_present(entry)) {
++ if (!pte_none(entry)) {
++ BUG_ON(pte_file(entry));
++ free_swap_and_cache(pte_to_swp_entry(entry));
++ pte_clear_not_present_full(mm, address, pte, 0);
++ }
++ } else {
++ struct page *page;
++
++ flush_cache_page(vma, address, pte_pfn(entry));
++ entry = ptep_clear_flush(vma, address, pte);
++ BUG_ON(pte_dirty(entry));
++ page = vm_normal_page(vma, address, entry);
++ if (page) {
++ update_hiwater_rss(mm);
++ if (PageAnon(page))
++ dec_mm_counter_fast(mm, MM_ANONPAGES);
++ else
++ dec_mm_counter_fast(mm, MM_FILEPAGES);
++ page_remove_rmap(page);
++ page_cache_release(page);
++ }
++ }
++ pte_unmap_unlock(pte, ptl);
++}
++
++/* PaX: if vma is mirrored, synchronize the mirror's PTE
++ *
++ * the ptl of the lower mapped page is held on entry and is not released on exit
++ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
++ */
++static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ unsigned long address_m;
++ spinlock_t *ptl_m;
++ struct vm_area_struct *vma_m;
++ pmd_t *pmd_m;
++ pte_t *pte_m, entry_m;
++
++ BUG_ON(!page_m || !PageAnon(page_m));
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (!vma_m)
++ return;
++
++ BUG_ON(!PageLocked(page_m));
++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++ address_m = address + SEGMEXEC_TASK_SIZE;
++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
++ pte_m = pte_offset_map(pmd_m, address_m);
++ ptl_m = pte_lockptr(mm, pmd_m);
++ if (ptl != ptl_m) {
++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
++ if (!pte_none(*pte_m))
++ goto out;
++ }
++
++ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
++ page_cache_get(page_m);
++ page_add_anon_rmap(page_m, vma_m, address_m);
++ inc_mm_counter_fast(mm, MM_ANONPAGES);
++ set_pte_at(mm, address_m, pte_m, entry_m);
++ update_mmu_cache(vma_m, address_m, entry_m);
++out:
++ if (ptl != ptl_m)
++ spin_unlock(ptl_m);
++ pte_unmap(pte_m);
++ unlock_page(page_m);
++}
++
++void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ unsigned long address_m;
++ spinlock_t *ptl_m;
++ struct vm_area_struct *vma_m;
++ pmd_t *pmd_m;
++ pte_t *pte_m, entry_m;
++
++ BUG_ON(!page_m || PageAnon(page_m));
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (!vma_m)
++ return;
++
++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++ address_m = address + SEGMEXEC_TASK_SIZE;
++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
++ pte_m = pte_offset_map(pmd_m, address_m);
++ ptl_m = pte_lockptr(mm, pmd_m);
++ if (ptl != ptl_m) {
++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
++ if (!pte_none(*pte_m))
++ goto out;
++ }
++
++ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
++ page_cache_get(page_m);
++ page_add_file_rmap(page_m);
++ inc_mm_counter_fast(mm, MM_FILEPAGES);
++ set_pte_at(mm, address_m, pte_m, entry_m);
++ update_mmu_cache(vma_m, address_m, entry_m);
++out:
++ if (ptl != ptl_m)
++ spin_unlock(ptl_m);
++ pte_unmap(pte_m);
++}
++
++static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ unsigned long address_m;
++ spinlock_t *ptl_m;
++ struct vm_area_struct *vma_m;
++ pmd_t *pmd_m;
++ pte_t *pte_m, entry_m;
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (!vma_m)
++ return;
++
++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++ address_m = address + SEGMEXEC_TASK_SIZE;
++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
++ pte_m = pte_offset_map(pmd_m, address_m);
++ ptl_m = pte_lockptr(mm, pmd_m);
++ if (ptl != ptl_m) {
++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
++ if (!pte_none(*pte_m))
++ goto out;
++ }
++
++ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
++ set_pte_at(mm, address_m, pte_m, entry_m);
++out:
++ if (ptl != ptl_m)
++ spin_unlock(ptl_m);
++ pte_unmap(pte_m);
++}
++
++static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
++{
++ struct page *page_m;
++ pte_t entry;
++
++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
++ goto out;
++
++ entry = *pte;
++ page_m = vm_normal_page(vma, address, entry);
++ if (!page_m)
++ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
++ else if (PageAnon(page_m)) {
++ if (pax_find_mirror_vma(vma)) {
++ pte_unmap_unlock(pte, ptl);
++ lock_page(page_m);
++ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
++ if (pte_same(entry, *pte))
++ pax_mirror_anon_pte(vma, address, page_m, ptl);
++ else
++ unlock_page(page_m);
++ }
++ } else
++ pax_mirror_file_pte(vma, address, page_m, ptl);
++
++out:
++ pte_unmap_unlock(pte, ptl);
++}
++#endif
++
+ /*
+ * This routine handles present pages, when users try to write
+ * to a shared page. It is done by copying the page to a new address
+@@ -2687,6 +2884,12 @@ gotten:
+ */
+ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+ if (likely(pte_same(*page_table, orig_pte))) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (pax_find_mirror_vma(vma))
++ BUG_ON(!trylock_page(new_page));
++#endif
++
+ if (old_page) {
+ if (!PageAnon(old_page)) {
+ dec_mm_counter_fast(mm, MM_FILEPAGES);
+@@ -2738,6 +2941,10 @@ gotten:
+ page_remove_rmap(old_page);
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_anon_pte(vma, address, new_page, ptl);
++#endif
++
+ /* Free the old page.. */
+ new_page = old_page;
+ ret |= VM_FAULT_WRITE;
+@@ -3017,6 +3224,11 @@ static int do_swap_page(struct mm_struct
+ swap_free(entry);
+ if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
+ try_to_free_swap(page);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
++#endif
++
+ unlock_page(page);
+ if (swapcache) {
+ /*
+@@ -3040,6 +3252,11 @@ static int do_swap_page(struct mm_struct
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, address, page_table);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_anon_pte(vma, address, page, ptl);
++#endif
++
+ unlock:
+ pte_unmap_unlock(page_table, ptl);
+ out:
+@@ -3059,40 +3276,6 @@ out_release:
+ }
+
+ /*
+- * This is like a special single-page "expand_{down|up}wards()",
+- * except we must first make sure that 'address{-|+}PAGE_SIZE'
+- * doesn't hit another vma.
+- */
+-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+-{
+- address &= PAGE_MASK;
+- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+- struct vm_area_struct *prev = vma->vm_prev;
+-
+- /*
+- * Is there a mapping abutting this one below?
+- *
+- * That's only ok if it's the same stack mapping
+- * that has gotten split..
+- */
+- if (prev && prev->vm_end == address)
+- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+-
+- expand_downwards(vma, address - PAGE_SIZE);
+- }
+- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+- struct vm_area_struct *next = vma->vm_next;
+-
+- /* As VM_GROWSDOWN but s/below/above/ */
+- if (next && next->vm_start == address + PAGE_SIZE)
+- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+-
+- expand_upwards(vma, address + PAGE_SIZE);
+- }
+- return 0;
+-}
+-
+-/*
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
+@@ -3101,27 +3284,23 @@ static int do_anonymous_page(struct mm_s
+ unsigned long address, pte_t *page_table, pmd_t *pmd,
+ unsigned int flags)
+ {
+- struct page *page;
++ struct page *page = NULL;
+ spinlock_t *ptl;
+ pte_t entry;
+
+- pte_unmap(page_table);
+-
+- /* Check if we need to add a guard page to the stack */
+- if (check_stack_guard_page(vma, address) < 0)
+- return VM_FAULT_SIGBUS;
+-
+- /* Use the zero-page for reads */
+ if (!(flags & FAULT_FLAG_WRITE)) {
+ entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+ vma->vm_page_prot));
+- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++ ptl = pte_lockptr(mm, pmd);
++ spin_lock(ptl);
+ if (!pte_none(*page_table))
+ goto unlock;
+ goto setpte;
+ }
+
+ /* Allocate our own private page. */
++ pte_unmap(page_table);
++
+ if (unlikely(anon_vma_prepare(vma)))
+ goto oom;
+ page = alloc_zeroed_user_highpage_movable(vma, address);
+@@ -3140,6 +3319,11 @@ static int do_anonymous_page(struct mm_s
+ if (!pte_none(*page_table))
+ goto release;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (pax_find_mirror_vma(vma))
++ BUG_ON(!trylock_page(page));
++#endif
++
+ inc_mm_counter_fast(mm, MM_ANONPAGES);
+ page_add_new_anon_rmap(page, vma, address);
+ setpte:
+@@ -3147,6 +3331,12 @@ setpte:
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, address, page_table);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (page)
++ pax_mirror_anon_pte(vma, address, page, ptl);
++#endif
++
+ unlock:
+ pte_unmap_unlock(page_table, ptl);
+ return 0;
+@@ -3290,6 +3480,12 @@ static int __do_fault(struct mm_struct *
+ */
+ /* Only go through if we didn't race with anybody else... */
+ if (likely(pte_same(*page_table, orig_pte))) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (anon && pax_find_mirror_vma(vma))
++ BUG_ON(!trylock_page(page));
++#endif
++
+ flush_icache_page(vma, page);
+ entry = mk_pte(page, vma->vm_page_prot);
+ if (flags & FAULT_FLAG_WRITE)
+@@ -3309,6 +3505,14 @@ static int __do_fault(struct mm_struct *
+
+ /* no need to invalidate: a not-present page won't be cached */
+ update_mmu_cache(vma, address, page_table);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (anon)
++ pax_mirror_anon_pte(vma, address, page, ptl);
++ else
++ pax_mirror_file_pte(vma, address, page, ptl);
++#endif
++
+ } else {
+ if (cow_page)
+ mem_cgroup_uncharge_page(cow_page);
+@@ -3462,6 +3666,12 @@ int handle_pte_fault(struct mm_struct *m
+ if (flags & FAULT_FLAG_WRITE)
+ flush_tlb_fix_spurious_fault(vma, address);
+ }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_pte(vma, address, pte, pmd, ptl);
++ return 0;
++#endif
++
+ unlock:
+ pte_unmap_unlock(pte, ptl);
+ return 0;
+@@ -3478,6 +3688,10 @@ int handle_mm_fault(struct mm_struct *mm
+ pmd_t *pmd;
+ pte_t *pte;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ __set_current_state(TASK_RUNNING);
+
+ count_vm_event(PGFAULT);
+@@ -3489,6 +3703,34 @@ int handle_mm_fault(struct mm_struct *mm
+ if (unlikely(is_vm_hugetlb_page(vma)))
+ return hugetlb_fault(mm, vma, address, flags);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m) {
++ unsigned long address_m;
++ pgd_t *pgd_m;
++ pud_t *pud_m;
++ pmd_t *pmd_m;
++
++ if (vma->vm_start > vma_m->vm_start) {
++ address_m = address;
++ address -= SEGMEXEC_TASK_SIZE;
++ vma = vma_m;
++ } else
++ address_m = address + SEGMEXEC_TASK_SIZE;
++
++ pgd_m = pgd_offset(mm, address_m);
++ pud_m = pud_alloc(mm, pgd_m, address_m);
++ if (!pud_m)
++ return VM_FAULT_OOM;
++ pmd_m = pmd_alloc(mm, pud_m, address_m);
++ if (!pmd_m)
++ return VM_FAULT_OOM;
++ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
++ return VM_FAULT_OOM;
++ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
++ }
++#endif
++
+ pgd = pgd_offset(mm, address);
+ pud = pud_alloc(mm, pgd, address);
+ if (!pud)
+@@ -3518,7 +3760,7 @@ int handle_mm_fault(struct mm_struct *mm
+ * run pte_offset_map on the pmd, if an huge pmd could
+ * materialize from under us from a different thread.
+ */
+- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
++ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
+ return VM_FAULT_OOM;
+ /* if an huge pmd materialized from under us just retry later */
+ if (unlikely(pmd_trans_huge(*pmd)))
+@@ -3555,6 +3797,23 @@ int __pud_alloc(struct mm_struct *mm, pg
+ spin_unlock(&mm->page_table_lock);
+ return 0;
+ }
++
++int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
++{
++ pud_t *new = pud_alloc_one(mm, address);
++ if (!new)
++ return -ENOMEM;
++
++ smp_wmb(); /* See comment in __pte_alloc */
++
++ spin_lock(&mm->page_table_lock);
++ if (pgd_present(*pgd)) /* Another has populated it */
++ pud_free(mm, new);
++ else
++ pgd_populate_kernel(mm, pgd, new);
++ spin_unlock(&mm->page_table_lock);
++ return 0;
++}
+ #endif /* __PAGETABLE_PUD_FOLDED */
+
+ #ifndef __PAGETABLE_PMD_FOLDED
+@@ -3585,6 +3844,30 @@ int __pmd_alloc(struct mm_struct *mm, pu
+ spin_unlock(&mm->page_table_lock);
+ return 0;
+ }
++
++int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
++{
++ pmd_t *new = pmd_alloc_one(mm, address);
++ if (!new)
++ return -ENOMEM;
++
++ smp_wmb(); /* See comment in __pte_alloc */
++
++ spin_lock(&mm->page_table_lock);
++#ifndef __ARCH_HAS_4LEVEL_HACK
++ if (pud_present(*pud)) /* Another has populated it */
++ pmd_free(mm, new);
++ else
++ pud_populate_kernel(mm, pud, new);
++#else
++ if (pgd_present(*pud)) /* Another has populated it */
++ pmd_free(mm, new);
++ else
++ pgd_populate_kernel(mm, pud, new);
++#endif /* __ARCH_HAS_4LEVEL_HACK */
++ spin_unlock(&mm->page_table_lock);
++ return 0;
++}
+ #endif /* __PAGETABLE_PMD_FOLDED */
+
+ int make_pages_present(unsigned long addr, unsigned long end)
+@@ -3622,7 +3905,7 @@ static int __init gate_vma_init(void)
+ gate_vma.vm_start = FIXADDR_USER_START;
+ gate_vma.vm_end = FIXADDR_USER_END;
+ gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+- gate_vma.vm_page_prot = __P101;
++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+
+ return 0;
+ }
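The mm/memory.c changes carry the fault-handling side of PaX SEGMEXEC: executable mappings below SEGMEXEC_TASK_SIZE have a mirror VMA offset by SEGMEXEC_TASK_SIZE, and pax_mirror_anon_pte(), pax_mirror_file_pte() and pax_mirror_pfn_pte() keep the mirror's PTE in step whenever the primary mapping faults a page in, while the upstream single-page stack guard checks are removed (a larger, configurable heap/stack gap is added in mm/mmap.c below). The underlying idea of one object visible through two mappings can be shown from userspace; this is only an analogy, since the kernel creates the second view itself and keeps the two views a fixed distance apart (hypothetical test program):

    /* mirror_map.c - one file, two read-only views of the same data. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/bin/sh", O_RDONLY);     /* any existing non-empty file */
            if (fd < 0)
                    return 1;
            char *a = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
            char *b = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
            if (a == MAP_FAILED || b == MAP_FAILED)
                    return 1;
            printf("view 1 %p, view 2 %p, contents equal: %d\n",
                   (void *)a, (void *)b, memcmp(a, b, 16) == 0);
            close(fd);
            return 0;
    }
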
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/memory-failure.c linux-3.4-pax/mm/memory-failure.c
+--- linux-3.4/mm/memory-failure.c 2012-05-21 11:33:40.091929974 +0200
++++ linux-3.4-pax/mm/memory-failure.c 2012-05-21 12:10:11.968049021 +0200
+@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __r
+
+ int sysctl_memory_failure_recovery __read_mostly = 1;
+
+-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
++atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
+
+ #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
+
+@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct
+ pfn, t->comm, t->pid);
+ si.si_signo = SIGBUS;
+ si.si_errno = 0;
+- si.si_addr = (void *)addr;
++ si.si_addr = (void __user *)addr;
+ #ifdef __ARCH_SI_TRAPNO
+ si.si_trapno = trapno;
+ #endif
+@@ -1036,7 +1036,7 @@ int memory_failure(unsigned long pfn, in
+ }
+
+ nr_pages = 1 << compound_trans_order(hpage);
+- atomic_long_add(nr_pages, &mce_bad_pages);
++ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
+
+ /*
+ * We need/can do nothing about count=0 pages.
+@@ -1066,7 +1066,7 @@ int memory_failure(unsigned long pfn, in
+ if (!PageHWPoison(hpage)
+ || (hwpoison_filter(p) && TestClearPageHWPoison(p))
+ || (p != hpage && TestSetPageHWPoison(hpage))) {
+- atomic_long_sub(nr_pages, &mce_bad_pages);
++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
+ return 0;
+ }
+ set_page_hwpoison_huge_page(hpage);
+@@ -1124,7 +1124,7 @@ int memory_failure(unsigned long pfn, in
+ }
+ if (hwpoison_filter(p)) {
+ if (TestClearPageHWPoison(p))
+- atomic_long_sub(nr_pages, &mce_bad_pages);
++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
+ unlock_page(hpage);
+ put_page(hpage);
+ return 0;
+@@ -1319,7 +1319,7 @@ int unpoison_memory(unsigned long pfn)
+ return 0;
+ }
+ if (TestClearPageHWPoison(p))
+- atomic_long_sub(nr_pages, &mce_bad_pages);
++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
+ pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
+ return 0;
+ }
+@@ -1333,7 +1333,7 @@ int unpoison_memory(unsigned long pfn)
+ */
+ if (TestClearPageHWPoison(page)) {
+ pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
+- atomic_long_sub(nr_pages, &mce_bad_pages);
++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
+ freeit = 1;
+ if (PageHuge(page))
+ clear_page_hwpoison_huge_page(page);
+@@ -1446,7 +1446,7 @@ static int soft_offline_huge_page(struct
+ }
+ done:
+ if (!PageHWPoison(hpage))
+- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
++ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
+ set_page_hwpoison_huge_page(hpage);
+ dequeue_hwpoisoned_huge_page(hpage);
+ /* keep elevated page count for bad page */
+@@ -1577,7 +1577,7 @@ int soft_offline_page(struct page *page,
+ return ret;
+
+ done:
+- atomic_long_add(1, &mce_bad_pages);
++ atomic_long_add_unchecked(1, &mce_bad_pages);
+ SetPageHWPoison(page);
+ /* keep elevated page count for bad page */
+ return ret;
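In mm/memory-failure.c the mce_bad_pages statistics counter becomes atomic_long_unchecked_t with the matching *_unchecked operations. Under the PaX REFCOUNT hardening the normal atomic types detect and report overflow to catch reference-count wraps; counters that are pure statistics are marked unchecked so they may wrap without triggering the detector. The distinction, sketched in plain userspace C with compiler builtins rather than the PaX atomics (recent gcc/clang assumed, illustrative names):

    /* checked_vs_unchecked.c - a wrapping counter versus an overflow-checked one. */
    #include <limits.h>
    #include <stdio.h>

    static unsigned long unchecked_add(unsigned long v, unsigned long d)
    {
            return v + d;                                 /* silently wraps */
    }

    static int checked_add(unsigned long v, unsigned long d, unsigned long *out)
    {
            return __builtin_add_overflow(v, d, out) ? -1 : 0;
    }

    int main(void)
    {
            unsigned long out;
            printf("unchecked: %lu\n", unchecked_add(ULONG_MAX, 1));
            printf("checked:   %s\n",
                   checked_add(ULONG_MAX, 1, &out) ? "overflow detected" : "ok");
            return 0;
    }
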
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/mempolicy.c linux-3.4-pax/mm/mempolicy.c
+--- linux-3.4/mm/mempolicy.c 2012-05-21 11:33:40.099929975 +0200
++++ linux-3.4-pax/mm/mempolicy.c 2012-05-21 12:10:11.972049021 +0200
+@@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct
+ unsigned long vmstart;
+ unsigned long vmend;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ vma = find_vma(mm, start);
+ if (!vma || vma->vm_start > start)
+ return -EFAULT;
+@@ -679,6 +683,16 @@ static int mbind_range(struct mm_struct
+ err = policy_vma(vma, new_pol);
+ if (err)
+ goto out;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m) {
++ err = policy_vma(vma_m, new_pol);
++ if (err)
++ goto out;
++ }
++#endif
++
+ }
+
+ out:
+@@ -1112,6 +1126,17 @@ static long do_mbind(unsigned long start
+
+ if (end < start)
+ return -EINVAL;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ return -EINVAL;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ return -EINVAL;
++
+ if (end == start)
+ return 0;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/mlock.c linux-3.4-pax/mm/mlock.c
+--- linux-3.4/mm/mlock.c 2012-03-19 10:39:12.600049141 +0100
++++ linux-3.4-pax/mm/mlock.c 2012-05-21 12:10:11.976049022 +0200
+@@ -385,6 +385,9 @@ static int do_mlock(unsigned long start,
+ return -EINVAL;
+ if (end == start)
+ return 0;
++ if (end > TASK_SIZE)
++ return -EINVAL;
++
+ vma = find_vma(current->mm, start);
+ if (!vma || vma->vm_start > start)
+ return -ENOMEM;
+@@ -396,6 +399,11 @@ static int do_mlock(unsigned long start,
+ for (nstart = start ; ; ) {
+ vm_flags_t newflags;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
++ break;
++#endif
++
+ /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
+
+ newflags = vma->vm_flags | VM_LOCKED;
+@@ -524,17 +532,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
+ static int do_mlockall(int flags)
+ {
+ struct vm_area_struct * vma, * prev = NULL;
+- unsigned int def_flags = 0;
+
+ if (flags & MCL_FUTURE)
+- def_flags = VM_LOCKED;
+- current->mm->def_flags = def_flags;
++ current->mm->def_flags |= VM_LOCKED;
++ else
++ current->mm->def_flags &= ~VM_LOCKED;
+ if (flags == MCL_FUTURE)
+ goto out;
+
+ for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
+ vm_flags_t newflags;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
++ break;
++#endif
++
++ BUG_ON(vma->vm_end > TASK_SIZE);
+ newflags = vma->vm_flags | VM_LOCKED;
+ if (!(flags & MCL_CURRENT))
+ newflags &= ~VM_LOCKED;
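The mm/mlock.c hunks add upper-bound checks against TASK_SIZE / SEGMEXEC_TASK_SIZE and make do_mlockall() set or clear only the VM_LOCKED bit in mm->def_flags instead of overwriting the whole field, so toggling MCL_FUTURE no longer clobbers other default VMA flags. Typical userspace use of the affected path looks like this (hypothetical example; needs RLIMIT_MEMLOCK headroom or CAP_IPC_LOCK):

    /* lock_future.c - exercise the MCL_FUTURE path adjusted above. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
                    perror("mlockall");
                    return 1;
            }
            char *buf = malloc(1 << 20);   /* new mappings are locked via MCL_FUTURE */
            if (buf)
                    memset(buf, 0, 1 << 20);
            munlockall();                  /* drops VM_LOCKED and the def_flags bit */
            free(buf);
            return 0;
    }
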
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/mmap.c linux-3.4-pax/mm/mmap.c
+--- linux-3.4/mm/mmap.c 2012-05-21 11:33:40.107929975 +0200
++++ linux-3.4-pax/mm/mmap.c 2012-05-22 16:53:56.195111150 +0200
+@@ -46,6 +46,16 @@
+ #define arch_rebalance_pgtables(addr, len) (addr)
+ #endif
+
++static inline void verify_mm_writelocked(struct mm_struct *mm)
++{
++#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
++ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
++ up_read(&mm->mmap_sem);
++ BUG();
++ }
++#endif
++}
++
+ static void unmap_region(struct mm_struct *mm,
+ struct vm_area_struct *vma, struct vm_area_struct *prev,
+ unsigned long start, unsigned long end);
+@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ */
+-pgprot_t protection_map[16] = {
++pgprot_t protection_map[16] __read_only = {
+ __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
+ __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+ };
+
+-pgprot_t vm_get_page_prot(unsigned long vm_flags)
++pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
+ {
+- return __pgprot(pgprot_val(protection_map[vm_flags &
++ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
+ pgprot_val(arch_vm_get_page_prot(vm_flags)));
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++ if (!(__supported_pte_mask & _PAGE_NX) &&
++ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
++ (vm_flags & (VM_READ | VM_WRITE)))
++ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
++#endif
++
++ return prot;
+ }
+ EXPORT_SYMBOL(vm_get_page_prot);
+
+ int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
+ int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
+ int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
++unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
+ /*
+ * Make sure vm_committed_as in one cacheline and not cacheline shared with
+ * other variables. It can be updated by several CPUs frequently.
+@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma
+ struct vm_area_struct *next = vma->vm_next;
+
+ might_sleep();
++ BUG_ON(vma->vm_mirror);
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+ if (vma->vm_file) {
+@@ -690,6 +711,12 @@ static int
+ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
+ struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
++ return 0;
++#endif
++
+ if (is_mergeable_vma(vma, file, vm_flags) &&
+ is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
+ if (vma->vm_pgoff == vm_pgoff)
+@@ -709,6 +736,12 @@ static int
+ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
+ struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
++ return 0;
++#endif
++
+ if (is_mergeable_vma(vma, file, vm_flags) &&
+ is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
+ pgoff_t vm_pglen;
+@@ -751,13 +784,20 @@ can_vma_merge_after(struct vm_area_struc
+ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+ struct vm_area_struct *prev, unsigned long addr,
+ unsigned long end, unsigned long vm_flags,
+- struct anon_vma *anon_vma, struct file *file,
++ struct anon_vma *anon_vma, struct file *file,
+ pgoff_t pgoff, struct mempolicy *policy)
+ {
+ pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
+ struct vm_area_struct *area, *next;
+ int err;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
++ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
++
++ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
++#endif
++
+ /*
+ * We later require that vma->vm_flags == vm_flags,
+ * so this tests vma->vm_flags & VM_SPECIAL, too.
+@@ -773,6 +813,15 @@ struct vm_area_struct *vma_merge(struct
+ if (next && next->vm_end == end) /* cases 6, 7, 8 */
+ next = next->vm_next;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (prev)
++ prev_m = pax_find_mirror_vma(prev);
++ if (area)
++ area_m = pax_find_mirror_vma(area);
++ if (next)
++ next_m = pax_find_mirror_vma(next);
++#endif
++
+ /*
+ * Can it merge with the predecessor?
+ */
+@@ -792,9 +841,24 @@ struct vm_area_struct *vma_merge(struct
+ /* cases 1, 6 */
+ err = vma_adjust(prev, prev->vm_start,
+ next->vm_end, prev->vm_pgoff, NULL);
+- } else /* cases 2, 5, 7 */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!err && prev_m)
++ err = vma_adjust(prev_m, prev_m->vm_start,
++ next_m->vm_end, prev_m->vm_pgoff, NULL);
++#endif
++
++ } else { /* cases 2, 5, 7 */
+ err = vma_adjust(prev, prev->vm_start,
+ end, prev->vm_pgoff, NULL);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!err && prev_m)
++ err = vma_adjust(prev_m, prev_m->vm_start,
++ end_m, prev_m->vm_pgoff, NULL);
++#endif
++
++ }
+ if (err)
+ return NULL;
+ khugepaged_enter_vma_merge(prev);
+@@ -808,12 +872,27 @@ struct vm_area_struct *vma_merge(struct
+ mpol_equal(policy, vma_policy(next)) &&
+ can_vma_merge_before(next, vm_flags,
+ anon_vma, file, pgoff+pglen)) {
+- if (prev && addr < prev->vm_end) /* case 4 */
++ if (prev && addr < prev->vm_end) { /* case 4 */
+ err = vma_adjust(prev, prev->vm_start,
+ addr, prev->vm_pgoff, NULL);
+- else /* cases 3, 8 */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!err && prev_m)
++ err = vma_adjust(prev_m, prev_m->vm_start,
++ addr_m, prev_m->vm_pgoff, NULL);
++#endif
++
++ } else { /* cases 3, 8 */
+ err = vma_adjust(area, addr, next->vm_end,
+ next->vm_pgoff - pglen, NULL);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!err && area_m)
++ err = vma_adjust(area_m, addr_m, next_m->vm_end,
++ next_m->vm_pgoff - pglen, NULL);
++#endif
++
++ }
+ if (err)
+ return NULL;
+ khugepaged_enter_vma_merge(area);
+@@ -922,14 +1001,11 @@ none:
+ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
+ struct file *file, long pages)
+ {
+- const unsigned long stack_flags
+- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
+-
+ if (file) {
+ mm->shared_vm += pages;
+ if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
+ mm->exec_vm += pages;
+- } else if (flags & stack_flags)
++ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
+ mm->stack_vm += pages;
+ if (flags & (VM_RESERVED|VM_IO))
+ mm->reserved_vm += pages;
+@@ -969,7 +1045,7 @@ static unsigned long do_mmap_pgoff(struc
+ * (the exception is when the underlying filesystem is noexec
+ * mounted, in which case we dont add PROT_EXEC.)
+ */
+- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
++ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
+ if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
+ prot |= PROT_EXEC;
+
+@@ -995,7 +1071,7 @@ static unsigned long do_mmap_pgoff(struc
+ /* Obtain the address to map to. we verify (or select) it and ensure
+ * that it represents a valid section of the address space.
+ */
+- addr = get_unmapped_area(file, addr, len, pgoff, flags);
++ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
+ if (addr & ~PAGE_MASK)
+ return addr;
+
+@@ -1006,6 +1082,28 @@ static unsigned long do_mmap_pgoff(struc
+ vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
+ mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->pax_flags & MF_PAX_MPROTECT) {
++ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
++
++#ifdef CONFIG_PAX_EMUPLT
++ vm_flags &= ~VM_EXEC;
++#else
++ return -EPERM;
++#endif
++
++ if (!(vm_flags & VM_EXEC))
++ vm_flags &= ~VM_MAYEXEC;
++ else
++ vm_flags &= ~VM_MAYWRITE;
++ }
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
++ vm_flags &= ~VM_PAGEEXEC;
++#endif
++
+ if (flags & MAP_LOCKED)
+ if (!can_do_mlock())
+ return -EPERM;
+@@ -1192,7 +1290,7 @@ int vma_wants_writenotify(struct vm_area
+ vm_flags_t vm_flags = vma->vm_flags;
+
+ /* If it was private or non-writable, the write bit is already clear */
+- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
++ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
+ return 0;
+
+ /* The backer wishes to know when pages are first written to? */
+@@ -1241,14 +1339,24 @@ unsigned long mmap_region(struct file *f
+ unsigned long charged = 0;
+ struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m = NULL;
++#endif
++
++ /*
++ * mm->mmap_sem is required to protect against another thread
++ * changing the mappings in case we sleep.
++ */
++ verify_mm_writelocked(mm);
++
+ /* Clear old maps */
+ error = -ENOMEM;
+-munmap_back:
+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
+ if (vma && vma->vm_start < addr + len) {
+ if (do_munmap(mm, addr, len))
+ return -ENOMEM;
+- goto munmap_back;
++ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
++ BUG_ON(vma && vma->vm_start < addr + len);
+ }
+
+ /* Check against address space limit. */
+@@ -1297,6 +1405,16 @@ munmap_back:
+ goto unacct_error;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++ if (!vma_m) {
++ error = -ENOMEM;
++ goto free_vma;
++ }
++ }
++#endif
++
+ vma->vm_mm = mm;
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+@@ -1321,6 +1439,19 @@ munmap_back:
+ error = file->f_op->mmap(file, vma);
+ if (error)
+ goto unmap_and_free_vma;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m && (vm_flags & VM_EXECUTABLE))
++ added_exe_file_vma(mm);
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
++ vma->vm_flags |= VM_PAGEEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ }
++#endif
++
+ if (vm_flags & VM_EXECUTABLE)
+ added_exe_file_vma(mm);
+
+@@ -1358,6 +1489,11 @@ munmap_back:
+ vma_link(mm, vma, prev, rb_link, rb_parent);
+ file = vma->vm_file;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m)
++ BUG_ON(pax_mirror_vma(vma_m, vma));
++#endif
++
+ /* Once vma denies write, undo our temporary denial count */
+ if (correct_wcount)
+ atomic_inc(&inode->i_writecount);
+@@ -1366,6 +1502,7 @@ out:
+
+ mm->total_vm += len >> PAGE_SHIFT;
+ vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
++ track_exec_limit(mm, addr, addr + len, vm_flags);
+ if (vm_flags & VM_LOCKED) {
+ if (!mlock_vma_pages_range(vma, addr, addr + len))
+ mm->locked_vm += (len >> PAGE_SHIFT);
+@@ -1383,6 +1520,12 @@ unmap_and_free_vma:
+ unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
+ charged = 0;
+ free_vma:
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m)
++ kmem_cache_free(vm_area_cachep, vma_m);
++#endif
++
+ kmem_cache_free(vm_area_cachep, vma);
+ unacct_error:
+ if (charged)
+@@ -1390,6 +1533,44 @@ unacct_error:
+ return error;
+ }
+
++bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
++{
++ if (!vma) {
++#ifdef CONFIG_STACK_GROWSUP
++ if (addr > sysctl_heap_stack_gap)
++ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
++ else
++ vma = find_vma(current->mm, 0);
++ if (vma && (vma->vm_flags & VM_GROWSUP))
++ return false;
++#endif
++ return true;
++ }
++
++ if (addr + len > vma->vm_start)
++ return false;
++
++ if (vma->vm_flags & VM_GROWSDOWN)
++ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
++#ifdef CONFIG_STACK_GROWSUP
++ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
++ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
++#endif
++
++ return true;
++}
++
++unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
++{
++ if (vma->vm_start < len)
++ return -ENOMEM;
++ if (!(vma->vm_flags & VM_GROWSDOWN))
++ return vma->vm_start - len;
++ if (sysctl_heap_stack_gap <= vma->vm_start - len)
++ return vma->vm_start - len - sysctl_heap_stack_gap;
++ return -ENOMEM;
++}
++
+ /* Get an address range which is currently unmapped.
+ * For shmat() with addr=0.
+ *
+@@ -1416,18 +1597,23 @@ arch_get_unmapped_area(struct file *filp
+ if (flags & MAP_FIXED)
+ return addr;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+- vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
+- return addr;
++ if (TASK_SIZE - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
+ }
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
+- mm->cached_hole_size = 0;
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
+ }
+
+ full_search:
+@@ -1438,34 +1624,40 @@ full_search:
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- addr = TASK_UNMAPPED_BASE;
+- start_addr = addr;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
+- /*
+- * Remember the place where we stopped the search:
+- */
+- mm->free_area_cache = addr + len;
+- return addr;
+- }
++ if (check_heap_stack_gap(vma, addr, len))
++ break;
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+ addr = vma->vm_end;
+ }
++
++ /*
++ * Remember the place where we stopped the search:
++ */
++ mm->free_area_cache = addr + len;
++ return addr;
+ }
+ #endif
+
+ void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
++ return;
++#endif
++
+ /*
+ * Is this a new hole at the lowest possible address?
+ */
+- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
++ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
+ mm->free_area_cache = addr;
+ }
+
+@@ -1481,7 +1673,7 @@ arch_get_unmapped_area_topdown(struct fi
+ {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+- unsigned long addr = addr0, start_addr;
++ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE)
+@@ -1490,13 +1682,18 @@ arch_get_unmapped_area_topdown(struct fi
+ if (flags & MAP_FIXED)
+ return addr;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+- vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
+- return addr;
++ if (TASK_SIZE - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
+ }
+
+ /* check if free_area_cache is useful for us */
+@@ -1520,7 +1717,7 @@ try_again:
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || addr+len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+
+@@ -1529,8 +1726,8 @@ try_again:
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start-len;
+- } while (len < vma->vm_start);
++ addr = skip_heap_stack_gap(vma, len);
++ } while (!IS_ERR_VALUE(addr));
+
+ fail:
+ /*
+@@ -1553,13 +1750,21 @@ fail:
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+- mm->free_area_cache = TASK_UNMAPPED_BASE;
+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+ /*
+ * Restore the topdown base:
+ */
+- mm->free_area_cache = mm->mmap_base;
++ mm->mmap_base = base;
++ mm->free_area_cache = base;
+ mm->cached_hole_size = ~0UL;
+
+ return addr;
+@@ -1568,6 +1773,12 @@ fail:
+
+ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
++ return;
++#endif
++
+ /*
+ * Is this a new hole at the highest possible address?
+ */
+@@ -1575,8 +1786,10 @@ void arch_unmap_area_topdown(struct mm_s
+ mm->free_area_cache = addr;
+
+ /* dont allow allocations above current base */
+- if (mm->free_area_cache > mm->mmap_base)
++ if (mm->free_area_cache > mm->mmap_base) {
+ mm->free_area_cache = mm->mmap_base;
++ mm->cached_hole_size = ~0UL;
++ }
+ }
+
+ unsigned long
+@@ -1672,6 +1885,28 @@ find_vma_prev(struct mm_struct *mm, unsi
+ return vma;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
++{
++ struct vm_area_struct *vma_m;
++
++ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
++ BUG_ON(vma->vm_mirror);
++ return NULL;
++ }
++ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
++ vma_m = vma->vm_mirror;
++ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
++ BUG_ON(vma->vm_file != vma_m->vm_file);
++ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
++ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
++ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
++ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
++ return vma_m;
++}
++#endif
++
+ /*
+ * Verify that the stack growth is acceptable and
+ * update accounting. This is shared with both the
+@@ -1731,34 +1966,42 @@ static int acct_stack_growth(struct vm_a
+ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ {
+ int error;
++ bool locknext;
+
+ if (!(vma->vm_flags & VM_GROWSUP))
+ return -EFAULT;
+
++ /* Also guard against wrapping around to address 0. */
++ if (address < PAGE_ALIGN(address+1))
++ address = PAGE_ALIGN(address+1);
++ else
++ return -ENOMEM;
++
+ /*
+ * We must make sure the anon_vma is allocated
+ * so that the anon_vma locking is not a noop.
+ */
+ if (unlikely(anon_vma_prepare(vma)))
+ return -ENOMEM;
++ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
++ if (locknext && anon_vma_prepare(vma->vm_next))
++ return -ENOMEM;
+ vma_lock_anon_vma(vma);
++ if (locknext)
++ vma_lock_anon_vma(vma->vm_next);
+
+ /*
+ * vma->vm_start/vm_end cannot change under us because the caller
+ * is required to hold the mmap_sem in read mode. We need the
+- * anon_vma lock to serialize against concurrent expand_stacks.
+- * Also guard against wrapping around to address 0.
++ * anon_vma locks to serialize against concurrent expand_stacks
++ * and expand_upwards.
+ */
+- if (address < PAGE_ALIGN(address+4))
+- address = PAGE_ALIGN(address+4);
+- else {
+- vma_unlock_anon_vma(vma);
+- return -ENOMEM;
+- }
+ error = 0;
+
+ /* Somebody else might have raced and expanded it already */
+- if (address > vma->vm_end) {
++ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
++ error = -ENOMEM;
++ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
+ unsigned long size, grow;
+
+ size = address - vma->vm_start;
+@@ -1773,6 +2016,8 @@ int expand_upwards(struct vm_area_struct
+ }
+ }
+ }
++ if (locknext)
++ vma_unlock_anon_vma(vma->vm_next);
+ vma_unlock_anon_vma(vma);
+ khugepaged_enter_vma_merge(vma);
+ return error;
+@@ -1786,6 +2031,8 @@ int expand_downwards(struct vm_area_stru
+ unsigned long address)
+ {
+ int error;
++ bool lockprev = false;
++ struct vm_area_struct *prev;
+
+ /*
+ * We must make sure the anon_vma is allocated
+@@ -1799,6 +2046,15 @@ int expand_downwards(struct vm_area_stru
+ if (error)
+ return error;
+
++ prev = vma->vm_prev;
++#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
++ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
++#endif
++ if (lockprev && anon_vma_prepare(prev))
++ return -ENOMEM;
++ if (lockprev)
++ vma_lock_anon_vma(prev);
++
+ vma_lock_anon_vma(vma);
+
+ /*
+@@ -1808,9 +2064,17 @@ int expand_downwards(struct vm_area_stru
+ */
+
+ /* Somebody else might have raced and expanded it already */
+- if (address < vma->vm_start) {
++ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
++ error = -ENOMEM;
++ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
+ unsigned long size, grow;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++
++ vma_m = pax_find_mirror_vma(vma);
++#endif
++
+ size = vma->vm_end - address;
+ grow = (vma->vm_start - address) >> PAGE_SHIFT;
+
+@@ -1820,11 +2084,22 @@ int expand_downwards(struct vm_area_stru
+ if (!error) {
+ vma->vm_start = address;
+ vma->vm_pgoff -= grow;
++ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m) {
++ vma_m->vm_start -= grow << PAGE_SHIFT;
++ vma_m->vm_pgoff -= grow;
++ }
++#endif
++
+ perf_event_mmap(vma);
+ }
+ }
+ }
+ vma_unlock_anon_vma(vma);
++ if (lockprev)
++ vma_unlock_anon_vma(prev);
+ khugepaged_enter_vma_merge(vma);
+ return error;
+ }
+@@ -1894,6 +2169,13 @@ static void remove_vma_list(struct mm_st
+ do {
+ long nrpages = vma_pages(vma);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
++ vma = remove_vma(vma);
++ continue;
++ }
++#endif
++
+ mm->total_vm -= nrpages;
+ vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
+ vma = remove_vma(vma);
+@@ -1939,6 +2221,16 @@ detach_vmas_to_be_unmapped(struct mm_str
+ insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+ vma->vm_prev = NULL;
+ do {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma->vm_mirror) {
++ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
++ vma->vm_mirror->vm_mirror = NULL;
++ vma->vm_mirror->vm_flags &= ~VM_EXEC;
++ vma->vm_mirror = NULL;
++ }
++#endif
++
+ rb_erase(&vma->vm_rb, &mm->mm_rb);
+ mm->map_count--;
+ tail_vma = vma;
+@@ -1967,14 +2259,33 @@ static int __split_vma(struct mm_struct
+ struct vm_area_struct *new;
+ int err = -ENOMEM;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m, *new_m = NULL;
++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
++#endif
++
+ if (is_vm_hugetlb_page(vma) && (addr &
+ ~(huge_page_mask(hstate_vma(vma)))))
+ return -EINVAL;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++#endif
++
+ new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+ if (!new)
+ goto out_err;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m) {
++ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
++ if (!new_m) {
++ kmem_cache_free(vm_area_cachep, new);
++ goto out_err;
++ }
++ }
++#endif
++
+ /* most fields are the same, copy all, and then fixup */
+ *new = *vma;
+
+@@ -1987,6 +2298,22 @@ static int __split_vma(struct mm_struct
+ new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m) {
++ *new_m = *vma_m;
++ INIT_LIST_HEAD(&new_m->anon_vma_chain);
++ new_m->vm_mirror = new;
++ new->vm_mirror = new_m;
++
++ if (new_below)
++ new_m->vm_end = addr_m;
++ else {
++ new_m->vm_start = addr_m;
++ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
++ }
++ }
++#endif
++
+ pol = mpol_dup(vma_policy(vma));
+ if (IS_ERR(pol)) {
+ err = PTR_ERR(pol);
+@@ -2012,6 +2339,42 @@ static int __split_vma(struct mm_struct
+ else
+ err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!err && vma_m) {
++ if (anon_vma_clone(new_m, vma_m))
++ goto out_free_mpol;
++
++ mpol_get(pol);
++ vma_set_policy(new_m, pol);
++
++ if (new_m->vm_file) {
++ get_file(new_m->vm_file);
++ if (vma_m->vm_flags & VM_EXECUTABLE)
++ added_exe_file_vma(mm);
++ }
++
++ if (new_m->vm_ops && new_m->vm_ops->open)
++ new_m->vm_ops->open(new_m);
++
++ if (new_below)
++ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
++ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
++ else
++ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
++
++ if (err) {
++ if (new_m->vm_ops && new_m->vm_ops->close)
++ new_m->vm_ops->close(new_m);
++ if (new_m->vm_file) {
++ if (vma_m->vm_flags & VM_EXECUTABLE)
++ removed_exe_file_vma(mm);
++ fput(new_m->vm_file);
++ }
++ mpol_put(pol);
++ }
++ }
++#endif
++
+ /* Success. */
+ if (!err)
+ return 0;
+@@ -2024,10 +2387,18 @@ static int __split_vma(struct mm_struct
+ removed_exe_file_vma(mm);
+ fput(new->vm_file);
+ }
+- unlink_anon_vmas(new);
+ out_free_mpol:
+ mpol_put(pol);
+ out_free_vma:
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (new_m) {
++ unlink_anon_vmas(new_m);
++ kmem_cache_free(vm_area_cachep, new_m);
++ }
++#endif
++
++ unlink_anon_vmas(new);
+ kmem_cache_free(vm_area_cachep, new);
+ out_err:
+ return err;
+@@ -2040,6 +2411,15 @@ static int __split_vma(struct mm_struct
+ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, int new_below)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
++ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
++ if (mm->map_count >= sysctl_max_map_count-1)
++ return -ENOMEM;
++ } else
++#endif
++
+ if (mm->map_count >= sysctl_max_map_count)
+ return -ENOMEM;
+
+@@ -2051,11 +2431,30 @@ int split_vma(struct mm_struct *mm, stru
+ * work. This now handles partial unmappings.
+ * Jeremy Fitzhardinge <jeremy@goop.org>
+ */
++#ifdef CONFIG_PAX_SEGMEXEC
++int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
++{
++ int ret = __do_munmap(mm, start, len);
++ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
++ return ret;
++
++ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
++}
++
++int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
++#else
+ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
++#endif
+ {
+ unsigned long end;
+ struct vm_area_struct *vma, *prev, *last;
+
++ /*
++ * mm->mmap_sem is required to protect against another thread
++ * changing the mappings in case we sleep.
++ */
++ verify_mm_writelocked(mm);
++
+ if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
+ return -EINVAL;
+
+@@ -2130,6 +2529,8 @@ int do_munmap(struct mm_struct *mm, unsi
+ /* Fix up all other VM information */
+ remove_vma_list(mm, vma);
+
++ track_exec_limit(mm, start, end, 0UL);
++
+ return 0;
+ }
+ EXPORT_SYMBOL(do_munmap);
+@@ -2139,6 +2540,13 @@ int vm_munmap(unsigned long start, size_
+ int ret;
+ struct mm_struct *mm = current->mm;
+
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
++ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
++ return -EINVAL;
++#endif
++
+ down_write(&mm->mmap_sem);
+ ret = do_munmap(mm, start, len);
+ up_write(&mm->mmap_sem);
+@@ -2152,16 +2560,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
+ return vm_munmap(addr, len);
+ }
+
+-static inline void verify_mm_writelocked(struct mm_struct *mm)
+-{
+-#ifdef CONFIG_DEBUG_VM
+- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
+- WARN_ON(1);
+- up_read(&mm->mmap_sem);
+- }
+-#endif
+-}
+-
+ /*
+ * this is really a simplified "do_mmap". it only handles
+ * anonymous maps. eventually we may be able to do some
+@@ -2175,6 +2573,7 @@ static unsigned long do_brk(unsigned lon
+ struct rb_node ** rb_link, * rb_parent;
+ pgoff_t pgoff = addr >> PAGE_SHIFT;
+ int error;
++ unsigned long charged;
+
+ len = PAGE_ALIGN(len);
+ if (!len)
+@@ -2186,16 +2585,30 @@ static unsigned long do_brk(unsigned lon
+
+ flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->pax_flags & MF_PAX_MPROTECT)
++ flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
+ if (error & ~PAGE_MASK)
+ return error;
+
++ charged = len >> PAGE_SHIFT;
++
+ /*
+ * mlock MCL_FUTURE?
+ */
+ if (mm->def_flags & VM_LOCKED) {
+ unsigned long locked, lock_limit;
+- locked = len >> PAGE_SHIFT;
++ locked = charged;
+ locked += mm->locked_vm;
+ lock_limit = rlimit(RLIMIT_MEMLOCK);
+ lock_limit >>= PAGE_SHIFT;
+@@ -2212,22 +2625,22 @@ static unsigned long do_brk(unsigned lon
+ /*
+ * Clear old maps. this also does some error checking for us
+ */
+- munmap_back:
+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
+ if (vma && vma->vm_start < addr + len) {
+ if (do_munmap(mm, addr, len))
+ return -ENOMEM;
+- goto munmap_back;
+- }
++ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
++ BUG_ON(vma && vma->vm_start < addr + len);
++ }
+
+ /* Check against address space limits *after* clearing old maps... */
+- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
++ if (!may_expand_vm(mm, charged))
+ return -ENOMEM;
+
+ if (mm->map_count > sysctl_max_map_count)
+ return -ENOMEM;
+
+- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
++ if (security_vm_enough_memory_mm(mm, charged))
+ return -ENOMEM;
+
+ /* Can we just expand an old private anonymous mapping? */
+@@ -2241,7 +2654,7 @@ static unsigned long do_brk(unsigned lon
+ */
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ if (!vma) {
+- vm_unacct_memory(len >> PAGE_SHIFT);
++ vm_unacct_memory(charged);
+ return -ENOMEM;
+ }
+
+@@ -2255,11 +2668,12 @@ static unsigned long do_brk(unsigned lon
+ vma_link(mm, vma, prev, rb_link, rb_parent);
+ out:
+ perf_event_mmap(vma);
+- mm->total_vm += len >> PAGE_SHIFT;
++ mm->total_vm += charged;
+ if (flags & VM_LOCKED) {
+ if (!mlock_vma_pages_range(vma, addr, addr + len))
+- mm->locked_vm += (len >> PAGE_SHIFT);
++ mm->locked_vm += charged;
+ }
++ track_exec_limit(mm, addr, addr + len, flags);
+ return addr;
+ }
+
+@@ -2315,8 +2729,10 @@ void exit_mmap(struct mm_struct *mm)
+ * Walk the list again, actually closing and freeing it,
+ * with preemption enabled, without holding any MM locks.
+ */
+- while (vma)
++ while (vma) {
++ vma->vm_mirror = NULL;
+ vma = remove_vma(vma);
++ }
+
+ BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
+ }
+@@ -2330,6 +2746,10 @@ int insert_vm_struct(struct mm_struct *
+ struct vm_area_struct * __vma, * prev;
+ struct rb_node ** rb_link, * rb_parent;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m = NULL;
++#endif
++
+ /*
+ * The vm_pgoff of a purely anonymous vma should be irrelevant
+ * until its first write fault, when page's anon_vma and index
+@@ -2352,7 +2772,22 @@ int insert_vm_struct(struct mm_struct *
+ if ((vma->vm_flags & VM_ACCOUNT) &&
+ security_vm_enough_memory_mm(mm, vma_pages(vma)))
+ return -ENOMEM;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++ if (!vma_m)
++ return -ENOMEM;
++ }
++#endif
++
+ vma_link(mm, vma, prev, rb_link, rb_parent);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m)
++ BUG_ON(pax_mirror_vma(vma_m, vma));
++#endif
++
+ return 0;
+ }
+
+@@ -2371,6 +2806,8 @@ struct vm_area_struct *copy_vma(struct v
+ struct mempolicy *pol;
+ bool faulted_in_anon_vma = true;
+
++ BUG_ON(vma->vm_mirror);
++
+ /*
+ * If anonymous vma has not yet been faulted, update new pgoff
+ * to match new location, to increase its chance of merging.
+@@ -2438,6 +2875,39 @@ struct vm_area_struct *copy_vma(struct v
+ return NULL;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
++{
++ struct vm_area_struct *prev_m;
++ struct rb_node **rb_link_m, *rb_parent_m;
++ struct mempolicy *pol_m;
++
++ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
++ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
++ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
++ *vma_m = *vma;
++ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
++ if (anon_vma_clone(vma_m, vma))
++ return -ENOMEM;
++ pol_m = vma_policy(vma_m);
++ mpol_get(pol_m);
++ vma_set_policy(vma_m, pol_m);
++ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
++ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
++ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
++ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
++ if (vma_m->vm_file)
++ get_file(vma_m->vm_file);
++ if (vma_m->vm_ops && vma_m->vm_ops->open)
++ vma_m->vm_ops->open(vma_m);
++ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
++ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
++ vma_m->vm_mirror = vma;
++ vma->vm_mirror = vma_m;
++ return 0;
++}
++#endif
++
+ /*
+ * Return true if the calling process may expand its vm space by the passed
+ * number of pages
+@@ -2449,6 +2919,11 @@ int may_expand_vm(struct mm_struct *mm,
+
+ lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ cur -= mm->brk_gap;
++#endif
++
+ if (cur + npages > lim)
+ return 0;
+ return 1;
+@@ -2519,6 +2994,17 @@ int install_special_mapping(struct mm_st
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->pax_flags & MF_PAX_MPROTECT) {
++ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
++ return -EPERM;
++ if (!(vm_flags & VM_EXEC))
++ vm_flags &= ~VM_MAYEXEC;
++ else
++ vm_flags &= ~VM_MAYWRITE;
++ }
++#endif
++
+ vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/mprotect.c linux-3.4-pax/mm/mprotect.c
+--- linux-3.4/mm/mprotect.c 2012-05-21 11:33:40.111929976 +0200
++++ linux-3.4-pax/mm/mprotect.c 2012-05-21 12:10:11.984049021 +0200
+@@ -23,10 +23,17 @@
+ #include <linux/mmu_notifier.h>
+ #include <linux/migrate.h>
+ #include <linux/perf_event.h>
++
++#ifdef CONFIG_PAX_MPROTECT
++#include <linux/elf.h>
++#include <linux/binfmts.h>
++#endif
++
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
++#include <asm/mmu_context.h>
+
+ #ifndef pgprot_modify
+ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+@@ -141,6 +148,48 @@ static void change_protection(struct vm_
+ flush_tlb_range(vma, start, end);
+ }
+
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++/* called while holding the mmap semaphore for writing, except during stack expansion */
++void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
++{
++ unsigned long oldlimit, newlimit = 0UL;
++
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
++ return;
++
++ spin_lock(&mm->page_table_lock);
++ oldlimit = mm->context.user_cs_limit;
++ if ((prot & VM_EXEC) && oldlimit < end)
++ /* USER_CS limit moved up */
++ newlimit = end;
++ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
++ /* USER_CS limit moved down */
++ newlimit = start;
++
++ if (newlimit) {
++ mm->context.user_cs_limit = newlimit;
++
++#ifdef CONFIG_SMP
++ wmb();
++ cpus_clear(mm->context.cpu_user_cs_mask);
++ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
++#endif
++
++ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
++ }
++ spin_unlock(&mm->page_table_lock);
++ if (newlimit == end) {
++ struct vm_area_struct *vma = find_vma(mm, oldlimit);
++
++ for (; vma && vma->vm_start < end; vma = vma->vm_next)
++ if (is_vm_hugetlb_page(vma))
++ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
++ else
++ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
++ }
++}
++#endif
++
+ int
+ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ unsigned long start, unsigned long end, unsigned long newflags)
+@@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vm
+ int error;
+ int dirty_accountable = 0;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m = NULL;
++ unsigned long start_m, end_m;
++
++ start_m = start + SEGMEXEC_TASK_SIZE;
++ end_m = end + SEGMEXEC_TASK_SIZE;
++#endif
++
+ if (newflags == oldflags) {
+ *pprev = vma;
+ return 0;
+ }
+
++ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
++ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
++
++ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
++ return -ENOMEM;
++
++ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
++ return -ENOMEM;
++ }
++
+ /*
+ * If we make a private mapping writable we increase our commit;
+ * but (without finer accounting) cannot reduce our commit if we
+@@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vm
+ }
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
++ if (start != vma->vm_start) {
++ error = split_vma(mm, vma, start, 1);
++ if (error)
++ goto fail;
++ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
++ *pprev = (*pprev)->vm_next;
++ }
++
++ if (end != vma->vm_end) {
++ error = split_vma(mm, vma, end, 0);
++ if (error)
++ goto fail;
++ }
++
++ if (pax_find_mirror_vma(vma)) {
++ error = __do_munmap(mm, start_m, end_m - start_m);
++ if (error)
++ goto fail;
++ } else {
++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++ if (!vma_m) {
++ error = -ENOMEM;
++ goto fail;
++ }
++ vma->vm_flags = newflags;
++ error = pax_mirror_vma(vma_m, vma);
++ if (error) {
++ vma->vm_flags = oldflags;
++ goto fail;
++ }
++ }
++ }
++#endif
++
+ /*
+ * First try to merge with previous and/or next vma.
+ */
+@@ -204,9 +307,21 @@ success:
+ * vm_flags and vm_page_prot are protected by the mmap_sem
+ * held in write mode.
+ */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
++ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
++#endif
++
+ vma->vm_flags = newflags;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->binfmt && mm->binfmt->handle_mprotect)
++ mm->binfmt->handle_mprotect(vma, newflags);
++#endif
++
+ vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
+- vm_get_page_prot(newflags));
++ vm_get_page_prot(vma->vm_flags));
+
+ if (vma_wants_writenotify(vma)) {
+ vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
+@@ -248,6 +363,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+ end = start + len;
+ if (end <= start)
+ return -ENOMEM;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ return -EINVAL;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ return -EINVAL;
++
+ if (!arch_validate_prot(prot))
+ return -EINVAL;
+
+@@ -255,7 +381,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+ /*
+ * Does the application expect PROT_READ to imply PROT_EXEC:
+ */
+- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
++ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
+ prot |= PROT_EXEC;
+
+ vm_flags = calc_vm_prot_bits(prot);
+@@ -288,6 +414,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+ if (start > vma->vm_start)
+ prev = vma;
+
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
++ current->mm->binfmt->handle_mprotect(vma, vm_flags);
++#endif
++
+ for (nstart = start ; ; ) {
+ unsigned long newflags;
+
+@@ -311,6 +442,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+ error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
+ if (error)
+ goto out;
++
++ track_exec_limit(current->mm, nstart, tmp, vm_flags);
++
+ nstart = tmp;
+
+ if (nstart < prev->vm_end)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/mremap.c linux-3.4-pax/mm/mremap.c
+--- linux-3.4/mm/mremap.c 2012-05-21 11:33:40.111929976 +0200
++++ linux-3.4-pax/mm/mremap.c 2012-05-21 12:10:11.988049021 +0200
+@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_str
+ continue;
+ pte = ptep_get_and_clear(mm, old_addr, old_pte);
+ pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
++ pte = pte_exprotect(pte);
++#endif
++
+ set_pte_at(mm, new_addr, new_pte, pte);
+ }
+
+@@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_res
+ if (is_vm_hugetlb_page(vma))
+ goto Einval;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (pax_find_mirror_vma(vma))
++ goto Einval;
++#endif
++
+ /* We can't remap across vm area boundaries */
+ if (old_len > vma->vm_end - addr)
+ goto Efault;
+@@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned
+ unsigned long ret = -EINVAL;
+ unsigned long charged = 0;
+ unsigned long map_flags;
++ unsigned long pax_task_size = TASK_SIZE;
+
+ if (new_addr & ~PAGE_MASK)
+ goto out;
+
+- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
++
++ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
+ goto out;
+
+ /* Check if the location we're moving into overlaps the
+ * old location at all, and fail if it does.
+ */
+- if ((new_addr <= addr) && (new_addr+new_len) > addr)
+- goto out;
+-
+- if ((addr <= new_addr) && (addr+old_len) > new_addr)
++ if (addr + old_len > new_addr && new_addr + new_len > addr)
+ goto out;
+
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+@@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long ad
+ struct vm_area_struct *vma;
+ unsigned long ret = -EINVAL;
+ unsigned long charged = 0;
++ unsigned long pax_task_size = TASK_SIZE;
+
+ if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
+ goto out;
+@@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long ad
+ if (!new_len)
+ goto out;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
++
++ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
++ old_len > pax_task_size || addr > pax_task_size-old_len)
++ goto out;
++
+ if (flags & MREMAP_FIXED) {
+ if (flags & MREMAP_MAYMOVE)
+ ret = mremap_to(addr, old_len, new_addr, new_len);
+@@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long ad
+ addr + new_len);
+ }
+ ret = addr;
++ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
+ goto out;
+ }
+ }
+@@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long ad
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+ if (ret)
+ goto out;
++
++ map_flags = vma->vm_flags;
+ ret = move_vma(vma, addr, old_len, new_len, new_addr);
++ if (!(ret & ~PAGE_MASK)) {
++ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
++ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
++ }
+ }
+ out:
+ if (ret & ~PAGE_MASK)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/nommu.c linux-3.4-pax/mm/nommu.c
+--- linux-3.4/mm/nommu.c 2012-05-21 11:33:40.123929976 +0200
++++ linux-3.4-pax/mm/nommu.c 2012-05-21 12:10:11.992049023 +0200
+@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMI
+ int sysctl_overcommit_ratio = 50; /* default is 50% */
+ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+ int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
+-int heap_stack_gap = 0;
+
+ atomic_long_t mmap_pages_allocated;
+
+@@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct m
+ EXPORT_SYMBOL(find_vma);
+
+ /*
+- * find a VMA
+- * - we don't extend stack VMAs under NOMMU conditions
+- */
+-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
+-{
+- return find_vma(mm, addr);
+-}
+-
+-/*
+ * expand a stack to a given address
+ * - not supported under NOMMU conditions
+ */
+@@ -1580,6 +1570,7 @@ int split_vma(struct mm_struct *mm, stru
+
+ /* most fields are the same, copy all, and then fixup */
+ *new = *vma;
++ INIT_LIST_HEAD(&new->anon_vma_chain);
+ *region = *vma->vm_region;
+ new->vm_region = region;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/page_alloc.c linux-3.4-pax/mm/page_alloc.c
+--- linux-3.4/mm/page_alloc.c 2012-05-21 11:33:40.131929977 +0200
++++ linux-3.4-pax/mm/page_alloc.c 2012-05-21 12:10:11.996049024 +0200
+@@ -335,7 +335,7 @@ out:
+ * This usage means that zero-order pages may not be compound.
+ */
+
+-static void free_compound_page(struct page *page)
++void free_compound_page(struct page *page)
+ {
+ __free_pages_ok(page, compound_order(page));
+ }
+@@ -692,6 +692,10 @@ static bool free_pages_prepare(struct pa
+ int i;
+ int bad = 0;
+
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ unsigned long index = 1UL << order;
++#endif
++
+ trace_mm_page_free(page, order);
+ kmemcheck_free_shadow(page, order);
+
+@@ -707,6 +711,12 @@ static bool free_pages_prepare(struct pa
+ debug_check_no_obj_freed(page_address(page),
+ PAGE_SIZE << order);
+ }
++
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ for (; index; --index)
++ sanitize_highpage(page + index - 1);
++#endif
++
+ arch_free_page(page, order);
+ kernel_map_pages(page, 1 << order, 0);
+
+@@ -830,8 +840,10 @@ static int prep_new_page(struct page *pa
+ arch_alloc_page(page, order);
+ kernel_map_pages(page, 1 << order, 1);
+
++#ifndef CONFIG_PAX_MEMORY_SANITIZE
+ if (gfp_flags & __GFP_ZERO)
+ prep_zero_page(page, order, gfp_flags);
++#endif
+
+ if (order && (gfp_flags & __GFP_COMP))
+ prep_compound_page(page, order);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/percpu.c linux-3.4-pax/mm/percpu.c
+--- linux-3.4/mm/percpu.c 2012-05-21 11:33:40.139929977 +0200
++++ linux-3.4-pax/mm/percpu.c 2012-05-21 12:10:12.012049023 +0200
+@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __
+ static unsigned int pcpu_high_unit_cpu __read_mostly;
+
+ /* the address of the first chunk which starts with the kernel static area */
+-void *pcpu_base_addr __read_mostly;
++void *pcpu_base_addr __read_only;
+ EXPORT_SYMBOL_GPL(pcpu_base_addr);
+
+ static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/process_vm_access.c linux-3.4-pax/mm/process_vm_access.c
+--- linux-3.4/mm/process_vm_access.c 2012-03-19 10:39:12.620049142 +0100
++++ linux-3.4-pax/mm/process_vm_access.c 2012-05-21 12:10:12.016049024 +0200
+@@ -258,19 +258,19 @@ static ssize_t process_vm_rw_core(pid_t
+ size_t iov_l_curr_offset = 0;
+ ssize_t iov_len;
+
++ return -ENOSYS; // PaX: until properly audited
++
+ /*
+ * Work out how many pages of struct pages we're going to need
+ * when eventually calling get_user_pages
+ */
+ for (i = 0; i < riovcnt; i++) {
+ iov_len = rvec[i].iov_len;
+- if (iov_len > 0) {
+- nr_pages_iov = ((unsigned long)rvec[i].iov_base
+- + iov_len)
+- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
+- / PAGE_SIZE + 1;
+- nr_pages = max(nr_pages, nr_pages_iov);
+- }
++ if (iov_len <= 0)
++ continue;
++ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
++ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
++ nr_pages = max(nr_pages, nr_pages_iov);
+ }
+
+ if (nr_pages == 0)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/rmap.c linux-3.4-pax/mm/rmap.c
+--- linux-3.4/mm/rmap.c 2012-05-21 11:33:40.143929977 +0200
++++ linux-3.4-pax/mm/rmap.c 2012-05-21 12:10:12.020049024 +0200
+@@ -167,6 +167,10 @@ int anon_vma_prepare(struct vm_area_stru
+ struct anon_vma *anon_vma = vma->anon_vma;
+ struct anon_vma_chain *avc;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct anon_vma_chain *avc_m = NULL;
++#endif
++
+ might_sleep();
+ if (unlikely(!anon_vma)) {
+ struct mm_struct *mm = vma->vm_mm;
+@@ -176,6 +180,12 @@ int anon_vma_prepare(struct vm_area_stru
+ if (!avc)
+ goto out_enomem;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
++ if (!avc_m)
++ goto out_enomem_free_avc;
++#endif
++
+ anon_vma = find_mergeable_anon_vma(vma);
+ allocated = NULL;
+ if (!anon_vma) {
+@@ -189,6 +199,18 @@ int anon_vma_prepare(struct vm_area_stru
+ /* page_table_lock to protect against threads */
+ spin_lock(&mm->page_table_lock);
+ if (likely(!vma->anon_vma)) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
++
++ if (vma_m) {
++ BUG_ON(vma_m->anon_vma);
++ vma_m->anon_vma = anon_vma;
++ anon_vma_chain_link(vma_m, avc_m, anon_vma);
++ avc_m = NULL;
++ }
++#endif
++
+ vma->anon_vma = anon_vma;
+ anon_vma_chain_link(vma, avc, anon_vma);
+ allocated = NULL;
+@@ -199,12 +221,24 @@ int anon_vma_prepare(struct vm_area_stru
+
+ if (unlikely(allocated))
+ put_anon_vma(allocated);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (unlikely(avc_m))
++ anon_vma_chain_free(avc_m);
++#endif
++
+ if (unlikely(avc))
+ anon_vma_chain_free(avc);
+ }
+ return 0;
+
+ out_enomem_free_avc:
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (avc_m)
++ anon_vma_chain_free(avc_m);
++#endif
++
+ anon_vma_chain_free(avc);
+ out_enomem:
+ return -ENOMEM;
+@@ -240,7 +274,7 @@ static inline void unlock_anon_vma_root(
+ * Attach the anon_vmas from src to dst.
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
++int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
+ {
+ struct anon_vma_chain *avc, *pavc;
+ struct anon_vma *root = NULL;
+@@ -318,7 +352,7 @@ void anon_vma_moveto_tail(struct vm_area
+ * the corresponding VMA in the parent process is attached to.
+ * Returns 0 on success, non-zero on failure.
+ */
+-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
++int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
+ {
+ struct anon_vma_chain *avc;
+ struct anon_vma *anon_vma;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/shmem.c linux-3.4-pax/mm/shmem.c
+--- linux-3.4/mm/shmem.c 2012-05-21 11:33:40.147929977 +0200
++++ linux-3.4-pax/mm/shmem.c 2012-05-21 12:10:12.020049024 +0200
+@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
+ #define BOGO_DIRENT_SIZE 20
+
+ /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
+-#define SHORT_SYMLINK_LEN 128
++#define SHORT_SYMLINK_LEN 64
+
+ struct shmem_xattr {
+ struct list_head list; /* anchored by shmem_inode_info->xattr_list */
+@@ -2235,8 +2235,7 @@ int shmem_fill_super(struct super_block
+ int err = -ENOMEM;
+
+ /* Round up to L1_CACHE_BYTES to resist false sharing */
+- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
+- L1_CACHE_BYTES), GFP_KERNEL);
++ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
+ if (!sbinfo)
+ return -ENOMEM;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/slab.c linux-3.4-pax/mm/slab.c
+--- linux-3.4/mm/slab.c 2012-05-21 11:33:40.155929978 +0200
++++ linux-3.4-pax/mm/slab.c 2012-05-21 12:10:12.028049024 +0200
+@@ -153,7 +153,7 @@
+
+ /* Legal flag mask for kmem_cache_create(). */
+ #if DEBUG
+-# define CREATE_MASK (SLAB_RED_ZONE | \
++# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
+ SLAB_POISON | SLAB_HWCACHE_ALIGN | \
+ SLAB_CACHE_DMA | \
+ SLAB_STORE_USER | \
+@@ -161,7 +161,7 @@
+ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+ SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
+ #else
+-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
++# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
+ SLAB_CACHE_DMA | \
+ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
+ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+@@ -290,7 +290,7 @@ struct kmem_list3 {
+ * Need this for bootstrapping a per node allocator.
+ */
+ #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
+-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
++static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
+ #define CACHE_CACHE 0
+ #define SIZE_AC MAX_NUMNODES
+ #define SIZE_L3 (2 * MAX_NUMNODES)
+@@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_
+ if ((x)->max_freeable < i) \
+ (x)->max_freeable = i; \
+ } while (0)
+-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
+-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
+-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
+-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
++#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
++#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
++#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
++#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
+ #else
+ #define STATS_INC_ACTIVE(x) do { } while (0)
+ #define STATS_DEC_ACTIVE(x) do { } while (0)
+@@ -542,7 +542,7 @@ static inline void *index_to_obj(struct
+ * reciprocal_divide(offset, cache->reciprocal_buffer_size)
+ */
+ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+- const struct slab *slab, void *obj)
++ const struct slab *slab, const void *obj)
+ {
+ u32 offset = (obj - slab->s_mem);
+ return reciprocal_divide(offset, cache->reciprocal_buffer_size);
+@@ -568,7 +568,7 @@ struct cache_names {
+ static struct cache_names __initdata cache_names[] = {
+ #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
+ #include <linux/kmalloc_sizes.h>
+- {NULL,}
++ {NULL}
+ #undef CACHE
+ };
+
+@@ -1588,7 +1588,7 @@ void __init kmem_cache_init(void)
+ sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
+ sizes[INDEX_AC].cs_size,
+ ARCH_KMALLOC_MINALIGN,
+- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
+ NULL);
+
+ if (INDEX_AC != INDEX_L3) {
+@@ -1596,7 +1596,7 @@ void __init kmem_cache_init(void)
+ kmem_cache_create(names[INDEX_L3].name,
+ sizes[INDEX_L3].cs_size,
+ ARCH_KMALLOC_MINALIGN,
+- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
+ NULL);
+ }
+
+@@ -1614,7 +1614,7 @@ void __init kmem_cache_init(void)
+ sizes->cs_cachep = kmem_cache_create(names->name,
+ sizes->cs_size,
+ ARCH_KMALLOC_MINALIGN,
+- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
+ NULL);
+ }
+ #ifdef CONFIG_ZONE_DMA
+@@ -4390,10 +4390,10 @@ static int s_show(struct seq_file *m, vo
+ }
+ /* cpu stats */
+ {
+- unsigned long allochit = atomic_read(&cachep->allochit);
+- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
+- unsigned long freehit = atomic_read(&cachep->freehit);
+- unsigned long freemiss = atomic_read(&cachep->freemiss);
++ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
++ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
++ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
++ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
+
+ seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
+ allochit, allocmiss, freehit, freemiss);
+@@ -4659,6 +4659,55 @@ static int __init slab_proc_init(void)
+ module_init(slab_proc_init);
+ #endif
+
++void check_object_size(const void *ptr, unsigned long n, bool to)
++{
++
++#ifdef CONFIG_PAX_USERCOPY
++ struct page *page;
++ struct kmem_cache *cachep = NULL;
++ struct slab *slabp;
++ unsigned int objnr;
++ unsigned long offset;
++ const char *type;
++
++ if (!n)
++ return;
++
++ type = "<null>";
++ if (ZERO_OR_NULL_PTR(ptr))
++ goto report;
++
++ if (!virt_addr_valid(ptr))
++ return;
++
++ page = virt_to_head_page(ptr);
++
++ type = "<process stack>";
++ if (!PageSlab(page)) {
++ if (object_is_on_stack(ptr, n) == -1)
++ goto report;
++ return;
++ }
++
++ cachep = page_get_cache(page);
++ type = cachep->name;
++ if (!(cachep->flags & SLAB_USERCOPY))
++ goto report;
++
++ slabp = page_get_slab(page);
++ objnr = obj_to_index(cachep, slabp, ptr);
++ BUG_ON(objnr >= cachep->num);
++ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
++ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
++ return;
++
++report:
++ pax_report_usercopy(ptr, n, to, type);
++#endif
++
++}
++EXPORT_SYMBOL(check_object_size);
++
+ /**
+ * ksize - get the actual amount of memory allocated for a given object
+ * @objp: Pointer to the object
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/slob.c linux-3.4-pax/mm/slob.c
+--- linux-3.4/mm/slob.c 2012-01-08 19:48:29.467470852 +0100
++++ linux-3.4-pax/mm/slob.c 2012-05-21 12:10:12.032049025 +0200
+@@ -29,7 +29,7 @@
+ * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
+ * alloc_pages() directly, allocating compound pages so the page order
+ * does not have to be separately tracked, and also stores the exact
+- * allocation size in page->private so that it can be used to accurately
++ * allocation size in slob_page->size so that it can be used to accurately
+ * provide ksize(). These objects are detected in kfree() because slob_page()
+ * is false for them.
+ *
+@@ -58,6 +58,7 @@
+ */
+
+ #include <linux/kernel.h>
++#include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/mm.h>
+ #include <linux/swap.h> /* struct reclaim_state */
+@@ -102,7 +103,8 @@ struct slob_page {
+ unsigned long flags; /* mandatory */
+ atomic_t _count; /* mandatory */
+ slobidx_t units; /* free units left in page */
+- unsigned long pad[2];
++ unsigned long pad[1];
++ unsigned long size; /* size when >=PAGE_SIZE */
+ slob_t *free; /* first free slob_t in page */
+ struct list_head list; /* linked list of free pages */
+ };
+@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
+ */
+ static inline int is_slob_page(struct slob_page *sp)
+ {
+- return PageSlab((struct page *)sp);
++ return PageSlab((struct page *)sp) && !sp->size;
+ }
+
+ static inline void set_slob_page(struct slob_page *sp)
+@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
+
+ static inline struct slob_page *slob_page(const void *addr)
+ {
+- return (struct slob_page *)virt_to_page(addr);
++ return (struct slob_page *)virt_to_head_page(addr);
+ }
+
+ /*
+@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
+ /*
+ * Return the size of a slob block.
+ */
+-static slobidx_t slob_units(slob_t *s)
++static slobidx_t slob_units(const slob_t *s)
+ {
+ if (s->units > 0)
+ return s->units;
+@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
+ /*
+ * Return the next free slob block pointer after this one.
+ */
+-static slob_t *slob_next(slob_t *s)
++static slob_t *slob_next(const slob_t *s)
+ {
+ slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
+ slobidx_t next;
+@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
+ /*
+ * Returns true if s is the last free block in its page.
+ */
+-static int slob_last(slob_t *s)
++static int slob_last(const slob_t *s)
+ {
+ return !((unsigned long)slob_next(s) & ~PAGE_MASK);
+ }
+@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
+ if (!page)
+ return NULL;
+
++ set_slob_page(page);
+ return page_address(page);
+ }
+
+@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
+ if (!b)
+ return NULL;
+ sp = slob_page(b);
+- set_slob_page(sp);
+
+ spin_lock_irqsave(&slob_lock, flags);
+ sp->units = SLOB_UNITS(PAGE_SIZE);
+ sp->free = b;
++ sp->size = 0;
+ INIT_LIST_HEAD(&sp->list);
+ set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
+ set_slob_page_free(sp, slob_list);
+@@ -476,10 +479,9 @@ out:
+ * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
+ */
+
+-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
++static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
+ {
+- unsigned int *m;
+- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
++ slob_t *m;
+ void *ret;
+
+ gfp &= gfp_allowed_mask;
+@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t
+
+ if (!m)
+ return NULL;
+- *m = size;
++ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
++ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
++ m[0].units = size;
++ m[1].units = align;
+ ret = (void *)m + align;
+
+ trace_kmalloc_node(_RET_IP_, ret,
+@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t
+ gfp |= __GFP_COMP;
+ ret = slob_new_pages(gfp, order, node);
+ if (ret) {
+- struct page *page;
+- page = virt_to_page(ret);
+- page->private = size;
++ struct slob_page *sp;
++ sp = slob_page(ret);
++ sp->size = size;
+ }
+
+ trace_kmalloc_node(_RET_IP_, ret,
+ size, PAGE_SIZE << order, gfp, node);
+ }
+
+- kmemleak_alloc(ret, size, 1, gfp);
++ return ret;
++}
++
++void *__kmalloc_node(size_t size, gfp_t gfp, int node)
++{
++ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
++ void *ret = __kmalloc_node_align(size, gfp, node, align);
++
++ if (!ZERO_OR_NULL_PTR(ret))
++ kmemleak_alloc(ret, size, 1, gfp);
+ return ret;
+ }
+ EXPORT_SYMBOL(__kmalloc_node);
+@@ -533,13 +547,92 @@ void kfree(const void *block)
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+- unsigned int *m = (unsigned int *)(block - align);
+- slob_free(m, *m + align);
+- } else
++ slob_t *m = (slob_t *)(block - align);
++ slob_free(m, m[0].units + align);
++ } else {
++ clear_slob_page(sp);
++ free_slob_page(sp);
++ sp->size = 0;
+ put_page(&sp->page);
++ }
+ }
+ EXPORT_SYMBOL(kfree);
+
++void check_object_size(const void *ptr, unsigned long n, bool to)
++{
++
++#ifdef CONFIG_PAX_USERCOPY
++ struct slob_page *sp;
++ const slob_t *free;
++ const void *base;
++ unsigned long flags;
++ const char *type;
++
++ if (!n)
++ return;
++
++ type = "<null>";
++ if (ZERO_OR_NULL_PTR(ptr))
++ goto report;
++
++ if (!virt_addr_valid(ptr))
++ return;
++
++ type = "<process stack>";
++ sp = slob_page(ptr);
++ if (!PageSlab((struct page *)sp)) {
++ if (object_is_on_stack(ptr, n) == -1)
++ goto report;
++ return;
++ }
++
++ type = "<slob>";
++ if (sp->size) {
++ base = page_address(&sp->page);
++ if (base <= ptr && n <= sp->size - (ptr - base))
++ return;
++ goto report;
++ }
++
++ /* some tricky double walking to find the chunk */
++ spin_lock_irqsave(&slob_lock, flags);
++ base = (void *)((unsigned long)ptr & PAGE_MASK);
++ free = sp->free;
++
++ while (!slob_last(free) && (void *)free <= ptr) {
++ base = free + slob_units(free);
++ free = slob_next(free);
++ }
++
++ while (base < (void *)free) {
++ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
++ int size = SLOB_UNIT * SLOB_UNITS(m + align);
++ int offset;
++
++ if (ptr < base + align)
++ break;
++
++ offset = ptr - base - align;
++ if (offset >= m) {
++ base += size;
++ continue;
++ }
++
++ if (n > m - offset)
++ break;
++
++ spin_unlock_irqrestore(&slob_lock, flags);
++ return;
++ }
++
++ spin_unlock_irqrestore(&slob_lock, flags);
++report:
++ pax_report_usercopy(ptr, n, to, type);
++#endif
++
++}
++EXPORT_SYMBOL(check_object_size);
++
+ /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
+ size_t ksize(const void *block)
+ {
+@@ -552,10 +645,10 @@ size_t ksize(const void *block)
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+- unsigned int *m = (unsigned int *)(block - align);
+- return SLOB_UNITS(*m) * SLOB_UNIT;
++ slob_t *m = (slob_t *)(block - align);
++ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
+ } else
+- return sp->page.private;
++ return sp->size;
+ }
+ EXPORT_SYMBOL(ksize);
+
+@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(con
+ {
+ struct kmem_cache *c;
+
++#ifdef CONFIG_PAX_USERCOPY
++ c = __kmalloc_node_align(sizeof(struct kmem_cache),
++ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
++#else
+ c = slob_alloc(sizeof(struct kmem_cache),
+ GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
++#endif
+
+ if (c) {
+ c->name = name;
+@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_
+
+ lockdep_trace_alloc(flags);
+
++#ifdef CONFIG_PAX_USERCOPY
++ b = __kmalloc_node_align(c->size, flags, node, c->align);
++#else
+ if (c->size < PAGE_SIZE) {
+ b = slob_alloc(c->size, flags, c->align, node);
+ trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+ SLOB_UNITS(c->size) * SLOB_UNIT,
+ flags, node);
+ } else {
++ struct slob_page *sp;
++
+ b = slob_new_pages(flags, get_order(c->size), node);
++ sp = slob_page(b);
++ sp->size = c->size;
+ trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+ PAGE_SIZE << get_order(c->size),
+ flags, node);
+ }
++#endif
+
+ if (c->ctor)
+ c->ctor(b);
+@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
+
+ static void __kmem_cache_free(void *b, int size)
+ {
+- if (size < PAGE_SIZE)
++ struct slob_page *sp = slob_page(b);
++
++ if (is_slob_page(sp))
+ slob_free(b, size);
+- else
++ else {
++ clear_slob_page(sp);
++ free_slob_page(sp);
++ sp->size = 0;
+ slob_free_pages(b, get_order(size));
++ }
+ }
+
+ static void kmem_rcu_free(struct rcu_head *head)
+@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_hea
+
+ void kmem_cache_free(struct kmem_cache *c, void *b)
+ {
++ int size = c->size;
++
++#ifdef CONFIG_PAX_USERCOPY
++ if (size + c->align < PAGE_SIZE) {
++ size += c->align;
++ b -= c->align;
++ }
++#endif
++
+ kmemleak_free_recursive(b, c->flags);
+ if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+ struct slob_rcu *slob_rcu;
+- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
+- slob_rcu->size = c->size;
++ slob_rcu = b + (size - sizeof(struct slob_rcu));
++ slob_rcu->size = size;
+ call_rcu(&slob_rcu->head, kmem_rcu_free);
+ } else {
+- __kmem_cache_free(b, c->size);
++ __kmem_cache_free(b, size);
+ }
+
++#ifdef CONFIG_PAX_USERCOPY
++ trace_kfree(_RET_IP_, b);
++#else
+ trace_kmem_cache_free(_RET_IP_, b);
++#endif
++
+ }
+ EXPORT_SYMBOL(kmem_cache_free);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/slub.c linux-3.4-pax/mm/slub.c
+--- linux-3.4/mm/slub.c 2012-05-21 11:33:40.159929978 +0200
++++ linux-3.4-pax/mm/slub.c 2012-05-21 12:10:12.040049025 +0200
+@@ -2600,6 +2600,8 @@ void kmem_cache_free(struct kmem_cache *
+
+ page = virt_to_head_page(x);
+
++ BUG_ON(!PageSlab(page));
++
+ slab_free(s, page, x, _RET_IP_);
+
+ trace_kmem_cache_free(_RET_IP_, x);
+@@ -2633,7 +2635,7 @@ static int slub_min_objects;
+ * Merge control. If this is set then no merging of slab caches will occur.
+ * (Could be removed. This was introduced to pacify the merge skeptics.)
+ */
+-static int slub_nomerge;
++static int slub_nomerge = 1;
+
+ /*
+ * Calculate the order of allocation given an slab object size.
+@@ -3086,7 +3088,7 @@ static int kmem_cache_open(struct kmem_c
+ else
+ s->cpu_partial = 30;
+
+- s->refcount = 1;
++ atomic_set(&s->refcount, 1);
+ #ifdef CONFIG_NUMA
+ s->remote_node_defrag_ratio = 1000;
+ #endif
+@@ -3190,8 +3192,7 @@ static inline int kmem_cache_close(struc
+ void kmem_cache_destroy(struct kmem_cache *s)
+ {
+ down_write(&slub_lock);
+- s->refcount--;
+- if (!s->refcount) {
++ if (atomic_dec_and_test(&s->refcount)) {
+ list_del(&s->list);
+ up_write(&slub_lock);
+ if (kmem_cache_close(s)) {
+@@ -3402,6 +3403,50 @@ void *__kmalloc_node(size_t size, gfp_t
+ EXPORT_SYMBOL(__kmalloc_node);
+ #endif
+
++void check_object_size(const void *ptr, unsigned long n, bool to)
++{
++
++#ifdef CONFIG_PAX_USERCOPY
++ struct page *page;
++ struct kmem_cache *s = NULL;
++ unsigned long offset;
++ const char *type;
++
++ if (!n)
++ return;
++
++ type = "<null>";
++ if (ZERO_OR_NULL_PTR(ptr))
++ goto report;
++
++ if (!virt_addr_valid(ptr))
++ return;
++
++ page = virt_to_head_page(ptr);
++
++ type = "<process stack>";
++ if (!PageSlab(page)) {
++ if (object_is_on_stack(ptr, n) == -1)
++ goto report;
++ return;
++ }
++
++ s = page->slab;
++ type = s->name;
++ if (!(s->flags & SLAB_USERCOPY))
++ goto report;
++
++ offset = (ptr - page_address(page)) % s->size;
++ if (offset <= s->objsize && n <= s->objsize - offset)
++ return;
++
++report:
++ pax_report_usercopy(ptr, n, to, type);
++#endif
++
++}
++EXPORT_SYMBOL(check_object_size);
++
+ size_t ksize(const void *object)
+ {
+ struct page *page;
+@@ -3676,7 +3721,7 @@ static void __init kmem_cache_bootstrap_
+ int node;
+
+ list_add(&s->list, &slab_caches);
+- s->refcount = -1;
++ atomic_set(&s->refcount, -1);
+
+ for_each_node_state(node, N_NORMAL_MEMORY) {
+ struct kmem_cache_node *n = get_node(s, node);
+@@ -3796,17 +3841,17 @@ void __init kmem_cache_init(void)
+
+ /* Caches that are not of the two-to-the-power-of size */
+ if (KMALLOC_MIN_SIZE <= 32) {
+- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
++ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
+ caches++;
+ }
+
+ if (KMALLOC_MIN_SIZE <= 64) {
+- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
++ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
+ caches++;
+ }
+
+ for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
++ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
+ caches++;
+ }
+
+@@ -3874,7 +3919,7 @@ static int slab_unmergeable(struct kmem_
+ /*
+ * We may have set a slab to be unmergeable during bootstrap.
+ */
+- if (s->refcount < 0)
++ if (atomic_read(&s->refcount) < 0)
+ return 1;
+
+ return 0;
+@@ -3933,7 +3978,7 @@ struct kmem_cache *kmem_cache_create(con
+ down_write(&slub_lock);
+ s = find_mergeable(size, align, flags, name, ctor);
+ if (s) {
+- s->refcount++;
++ atomic_inc(&s->refcount);
+ /*
+ * Adjust the object sizes so that we clear
+ * the complete object on kzalloc.
+@@ -3942,7 +3987,7 @@ struct kmem_cache *kmem_cache_create(con
+ s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
+
+ if (sysfs_slab_alias(s, name)) {
+- s->refcount--;
++ atomic_dec(&s->refcount);
+ goto err;
+ }
+ up_write(&slub_lock);
+@@ -4706,7 +4751,7 @@ SLAB_ATTR_RO(ctor);
+
+ static ssize_t aliases_show(struct kmem_cache *s, char *buf)
+ {
+- return sprintf(buf, "%d\n", s->refcount - 1);
++ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
+ }
+ SLAB_ATTR_RO(aliases);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/sparse-vmemmap.c linux-3.4-pax/mm/sparse-vmemmap.c
+--- linux-3.4/mm/sparse-vmemmap.c 2012-01-08 19:48:29.483470851 +0100
++++ linux-3.4-pax/mm/sparse-vmemmap.c 2012-05-21 12:10:12.040049025 +0200
+@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(p
+ void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+ if (!p)
+ return NULL;
+- pud_populate(&init_mm, pud, p);
++ pud_populate_kernel(&init_mm, pud, p);
+ }
+ return pud;
+ }
+@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(u
+ void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+ if (!p)
+ return NULL;
+- pgd_populate(&init_mm, pgd, p);
++ pgd_populate_kernel(&init_mm, pgd, p);
+ }
+ return pgd;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/swap.c linux-3.4-pax/mm/swap.c
+--- linux-3.4/mm/swap.c 2012-05-21 11:33:40.171929979 +0200
++++ linux-3.4-pax/mm/swap.c 2012-05-21 12:10:12.044049025 +0200
+@@ -30,6 +30,7 @@
+ #include <linux/backing-dev.h>
+ #include <linux/memcontrol.h>
+ #include <linux/gfp.h>
++#include <linux/hugetlb.h>
+
+ #include "internal.h"
+
+@@ -70,6 +71,8 @@ static void __put_compound_page(struct p
+
+ __page_cache_release(page);
+ dtor = get_compound_page_dtor(page);
++ if (!PageHuge(page))
++ BUG_ON(dtor != free_compound_page);
+ (*dtor)(page);
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/swapfile.c linux-3.4-pax/mm/swapfile.c
+--- linux-3.4/mm/swapfile.c 2012-05-21 11:33:40.179929979 +0200
++++ linux-3.4-pax/mm/swapfile.c 2012-05-21 12:10:12.048049025 +0200
+@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
+
+ static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
+ /* Activity counter to indicate that a swapon or swapoff has occurred */
+-static atomic_t proc_poll_event = ATOMIC_INIT(0);
++static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
+
+ static inline unsigned char swap_count(unsigned char ent)
+ {
+@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
+ }
+ filp_close(swap_file, NULL);
+ err = 0;
+- atomic_inc(&proc_poll_event);
++ atomic_inc_unchecked(&proc_poll_event);
+ wake_up_interruptible(&proc_poll_wait);
+
+ out_dput:
+@@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *
+
+ poll_wait(file, &proc_poll_wait, wait);
+
+- if (seq->poll_event != atomic_read(&proc_poll_event)) {
+- seq->poll_event = atomic_read(&proc_poll_event);
++ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
++ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
+ return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
+ }
+
+@@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inod
+ return ret;
+
+ seq = file->private_data;
+- seq->poll_event = atomic_read(&proc_poll_event);
++ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
+ return 0;
+ }
+
+@@ -2127,7 +2127,7 @@ SYSCALL_DEFINE2(swapon, const char __use
+ (p->flags & SWP_DISCARDABLE) ? "D" : "");
+
+ mutex_unlock(&swapon_mutex);
+- atomic_inc(&proc_poll_event);
++ atomic_inc_unchecked(&proc_poll_event);
+ wake_up_interruptible(&proc_poll_wait);
+
+ if (S_ISREG(inode->i_mode))
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/util.c linux-3.4-pax/mm/util.c
+--- linux-3.4/mm/util.c 2012-05-21 11:33:40.191929980 +0200
++++ linux-3.4-pax/mm/util.c 2012-05-21 12:10:12.052049026 +0200
+@@ -284,6 +284,12 @@ done:
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/vmalloc.c linux-3.4-pax/mm/vmalloc.c
+--- linux-3.4/mm/vmalloc.c 2012-05-21 11:33:40.195929980 +0200
++++ linux-3.4-pax/mm/vmalloc.c 2012-05-22 15:28:31.575384606 +0200
+@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
+
+ pte = pte_offset_kernel(pmd, addr);
+ do {
+- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
+- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
++ BUG_ON(!pte_exec(*pte));
++ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
++ continue;
++ }
++#endif
++
++ {
++ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
++ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
++ }
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ }
+
+@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
+ unsigned long end, pgprot_t prot, struct page **pages, int *nr)
+ {
+ pte_t *pte;
++ int ret = -ENOMEM;
+
+ /*
+ * nr is a running index into the array which helps higher level
+@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
+ pte = pte_alloc_kernel(pmd, addr);
+ if (!pte)
+ return -ENOMEM;
++
++ pax_open_kernel();
+ do {
+ struct page *page = pages[*nr];
+
+- if (WARN_ON(!pte_none(*pte)))
+- return -EBUSY;
+- if (WARN_ON(!page))
+- return -ENOMEM;
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (pgprot_val(prot) & _PAGE_NX)
++#endif
++
++ if (WARN_ON(!pte_none(*pte))) {
++ ret = -EBUSY;
++ goto out;
++ }
++ if (WARN_ON(!page)) {
++ ret = -ENOMEM;
++ goto out;
++ }
+ set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
+ (*nr)++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+- return 0;
++ ret = 0;
++out:
++ pax_close_kernel();
++ return ret;
+ }
+
+ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
+@@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, un
+ pmd_t *pmd;
+ unsigned long next;
+
+- pmd = pmd_alloc(&init_mm, pud, addr);
++ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
+ if (!pmd)
+ return -ENOMEM;
+ do {
+@@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, un
+ pud_t *pud;
+ unsigned long next;
+
+- pud = pud_alloc(&init_mm, pgd, addr);
++ pud = pud_alloc_kernel(&init_mm, pgd, addr);
+ if (!pud)
+ return -ENOMEM;
+ do {
+@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
+ * and fall back on vmalloc() if that fails. Others
+ * just put it in the vmalloc space.
+ */
+-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
++#ifdef CONFIG_MODULES
++#ifdef MODULES_VADDR
+ unsigned long addr = (unsigned long)x;
+ if (addr >= MODULES_VADDR && addr < MODULES_END)
+ return 1;
+ #endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
++ return 1;
++#endif
++
++#endif
++
+ return is_vmalloc_addr(x);
+ }
+
+@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
+
+ if (!pgd_none(*pgd)) {
+ pud_t *pud = pud_offset(pgd, addr);
++#ifdef CONFIG_X86
++ if (!pud_large(*pud))
++#endif
+ if (!pud_none(*pud)) {
+ pmd_t *pmd = pmd_offset(pud, addr);
++#ifdef CONFIG_X86
++ if (!pmd_large(*pmd))
++#endif
+ if (!pmd_none(*pmd)) {
+ pte_t *ptep, pte;
+
+@@ -332,6 +372,10 @@ static void purge_vmap_area_lazy(void);
+ static struct vmap_area *alloc_vmap_area(unsigned long size,
+ unsigned long align,
+ unsigned long vstart, unsigned long vend,
++ int node, gfp_t gfp_mask) __size_overflow(1);
++static struct vmap_area *alloc_vmap_area(unsigned long size,
++ unsigned long align,
++ unsigned long vstart, unsigned long vend,
+ int node, gfp_t gfp_mask)
+ {
+ struct vmap_area *va;
+@@ -1319,6 +1363,16 @@ static struct vm_struct *__get_vm_area_n
+ struct vm_struct *area;
+
+ BUG_ON(in_interrupt());
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++ if (flags & VM_KERNEXEC) {
++ if (start != VMALLOC_START || end != VMALLOC_END)
++ return NULL;
++ start = (unsigned long)MODULES_EXEC_VADDR;
++ end = (unsigned long)MODULES_EXEC_END;
++ }
++#endif
++
+ if (flags & VM_IOREMAP) {
+ int bit = fls(size);
+
+@@ -1551,6 +1605,11 @@ void *vmap(struct page **pages, unsigned
+ if (count > totalram_pages)
+ return NULL;
+
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++ if (!(pgprot_val(prot) & _PAGE_NX))
++ flags |= VM_KERNEXEC;
++#endif
++
+ area = get_vm_area_caller((count << PAGE_SHIFT), flags,
+ __builtin_return_address(0));
+ if (!area)
+@@ -1652,6 +1711,13 @@ void *__vmalloc_node_range(unsigned long
+ if (!size || (size >> PAGE_SHIFT) > totalram_pages)
+ goto fail;
+
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++ if (!(pgprot_val(prot) & _PAGE_NX))
++ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
++ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
++ else
++#endif
++
+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
+ start, end, node, gfp_mask, caller);
+ if (!area)
+@@ -1825,10 +1891,9 @@ EXPORT_SYMBOL(vzalloc_node);
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
+-
+ void *vmalloc_exec(unsigned long size)
+ {
+- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
++ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
+ -1, __builtin_return_address(0));
+ }
+
+@@ -2123,6 +2188,8 @@ int remap_vmalloc_range(struct vm_area_s
+ unsigned long uaddr = vma->vm_start;
+ unsigned long usize = vma->vm_end - vma->vm_start;
+
++ BUG_ON(vma->vm_mirror);
++
+ if ((PAGE_SIZE-1) & (unsigned long)addr)
+ return -EINVAL;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/mm/vmstat.c linux-3.4-pax/mm/vmstat.c
+--- linux-3.4/mm/vmstat.c 2012-05-21 11:33:40.215929981 +0200
++++ linux-3.4-pax/mm/vmstat.c 2012-05-21 12:10:12.056049026 +0200
+@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
+ *
+ * vm_stat contains the global counters
+ */
+-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
++atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
+ EXPORT_SYMBOL(vm_stat);
+
+ #ifdef CONFIG_SMP
+@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
+ v = p->vm_stat_diff[i];
+ p->vm_stat_diff[i] = 0;
+ local_irq_restore(flags);
+- atomic_long_add(v, &zone->vm_stat[i]);
++ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
+ global_diff[i] += v;
+ #ifdef CONFIG_NUMA
+ /* 3 seconds idle till flush */
+@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
+
+ for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+ if (global_diff[i])
+- atomic_long_add(global_diff[i], &vm_stat[i]);
++ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
+ }
+
+ #endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/8021q/vlan.c linux-3.4-pax/net/8021q/vlan.c
+--- linux-3.4/net/8021q/vlan.c 2012-03-19 10:39:12.640049139 +0100
++++ linux-3.4-pax/net/8021q/vlan.c 2012-05-21 12:10:12.060049026 +0200
+@@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net
+ err = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ break;
+- if ((args.u.name_type >= 0) &&
+- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
++ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
+ struct vlan_net *vn;
+
+ vn = net_generic(net, vlan_net_id);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/9p/trans_fd.c linux-3.4-pax/net/9p/trans_fd.c
+--- linux-3.4/net/9p/trans_fd.c 2012-03-19 10:39:12.652049139 +0100
++++ linux-3.4-pax/net/9p/trans_fd.c 2012-05-21 12:10:12.064049026 +0200
+@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client
+ oldfs = get_fs();
+ set_fs(get_ds());
+ /* The cast to a user pointer is valid due to the set_fs() */
+- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
++ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
+ set_fs(oldfs);
+
+ if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/atm/atm_misc.c linux-3.4-pax/net/atm/atm_misc.c
+--- linux-3.4/net/atm/atm_misc.c 2012-03-19 10:39:12.884049125 +0100
++++ linux-3.4-pax/net/atm/atm_misc.c 2012-05-21 12:10:12.064049026 +0200
+@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
+ if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
+ return 1;
+ atm_return(vcc, truesize);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ return 0;
+ }
+ EXPORT_SYMBOL(atm_charge);
+@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
+ }
+ }
+ atm_return(vcc, guess);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ return NULL;
+ }
+ EXPORT_SYMBOL(atm_alloc_charge);
+@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
+
+ void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
+ {
+-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
+ __SONET_ITEMS
+ #undef __HANDLE_ITEM
+ }
+@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
+
+ void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
+ {
+-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
+ __SONET_ITEMS
+ #undef __HANDLE_ITEM
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/atm/lec.h linux-3.4-pax/net/atm/lec.h
+--- linux-3.4/net/atm/lec.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/net/atm/lec.h 2012-05-21 12:10:12.068049027 +0200
+@@ -48,7 +48,7 @@ struct lane2_ops {
+ const u8 *tlvs, u32 sizeoftlvs);
+ void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
+ const u8 *tlvs, u32 sizeoftlvs);
+-};
++} __no_const;
+
+ /*
+ * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/atm/mpc.h linux-3.4-pax/net/atm/mpc.h
+--- linux-3.4/net/atm/mpc.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/net/atm/mpc.h 2012-05-21 12:10:12.068049027 +0200
+@@ -33,7 +33,7 @@ struct mpoa_client {
+ struct mpc_parameters parameters; /* parameters for this client */
+
+ const struct net_device_ops *old_ops;
+- struct net_device_ops new_ops;
++ net_device_ops_no_const new_ops;
+ };
+
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/atm/proc.c linux-3.4-pax/net/atm/proc.c
+--- linux-3.4/net/atm/proc.c 2011-10-24 12:48:42.271090918 +0200
++++ linux-3.4-pax/net/atm/proc.c 2012-05-21 12:10:12.072049027 +0200
+@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
+ const struct k_atm_aal_stats *stats)
+ {
+ seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
+- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
+- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
+- atomic_read(&stats->rx_drop));
++ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
++ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
++ atomic_read_unchecked(&stats->rx_drop));
+ }
+
+ static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/atm/resources.c linux-3.4-pax/net/atm/resources.c
+--- linux-3.4/net/atm/resources.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/net/atm/resources.c 2012-05-21 12:10:12.072049027 +0200
+@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
+ static void copy_aal_stats(struct k_atm_aal_stats *from,
+ struct atm_aal_stats *to)
+ {
+-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
+ __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ }
+@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
+ static void subtract_aal_stats(struct k_atm_aal_stats *from,
+ struct atm_aal_stats *to)
+ {
+-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
+ __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/batman-adv/bat_iv_ogm.c linux-3.4-pax/net/batman-adv/bat_iv_ogm.c
+--- linux-3.4/net/batman-adv/bat_iv_ogm.c 2012-05-21 11:33:40.631930004 +0200
++++ linux-3.4-pax/net/batman-adv/bat_iv_ogm.c 2012-05-21 12:10:12.076049026 +0200
+@@ -539,7 +539,7 @@ static void bat_iv_ogm_schedule(struct h
+
+ /* change sequence number to network order */
+ batman_ogm_packet->seqno =
+- htonl((uint32_t)atomic_read(&hard_iface->seqno));
++ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
+
+ batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
+ batman_ogm_packet->tt_crc = htons((uint16_t)
+@@ -559,7 +559,7 @@ static void bat_iv_ogm_schedule(struct h
+ else
+ batman_ogm_packet->gw_flags = NO_FLAGS;
+
+- atomic_inc(&hard_iface->seqno);
++ atomic_inc_unchecked(&hard_iface->seqno);
+
+ slide_own_bcast_window(hard_iface);
+ bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
+@@ -917,7 +917,7 @@ static void bat_iv_ogm_process(const str
+ return;
+
+ /* could be changed by schedule_own_packet() */
+- if_incoming_seqno = atomic_read(&if_incoming->seqno);
++ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
+
+ has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/batman-adv/hard-interface.c linux-3.4-pax/net/batman-adv/hard-interface.c
+--- linux-3.4/net/batman-adv/hard-interface.c 2012-05-21 11:33:40.695930007 +0200
++++ linux-3.4-pax/net/batman-adv/hard-interface.c 2012-05-21 12:10:12.076049026 +0200
+@@ -328,8 +328,8 @@ int hardif_enable_interface(struct hard_
+ hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
+ dev_add_pack(&hard_iface->batman_adv_ptype);
+
+- atomic_set(&hard_iface->seqno, 1);
+- atomic_set(&hard_iface->frag_seqno, 1);
++ atomic_set_unchecked(&hard_iface->seqno, 1);
++ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
+ bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
+ hard_iface->net_dev->name);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/batman-adv/soft-interface.c linux-3.4-pax/net/batman-adv/soft-interface.c
+--- linux-3.4/net/batman-adv/soft-interface.c 2012-05-21 11:33:40.771930012 +0200
++++ linux-3.4-pax/net/batman-adv/soft-interface.c 2012-05-21 12:10:12.080049026 +0200
+@@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *
+
+ /* set broadcast sequence number */
+ bcast_packet->seqno =
+- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
++ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
+
+ add_bcast_packet_to_list(bat_priv, skb, 1);
+
+@@ -841,7 +841,7 @@ struct net_device *softif_create(const c
+ atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
+
+ atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
+- atomic_set(&bat_priv->bcast_seqno, 1);
++ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
+ atomic_set(&bat_priv->ttvn, 0);
+ atomic_set(&bat_priv->tt_local_changes, 0);
+ atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/batman-adv/types.h linux-3.4-pax/net/batman-adv/types.h
+--- linux-3.4/net/batman-adv/types.h 2012-05-21 11:33:40.803930013 +0200
++++ linux-3.4-pax/net/batman-adv/types.h 2012-05-21 12:10:12.084049028 +0200
+@@ -38,8 +38,8 @@ struct hard_iface {
+ int16_t if_num;
+ char if_status;
+ struct net_device *net_dev;
+- atomic_t seqno;
+- atomic_t frag_seqno;
++ atomic_unchecked_t seqno;
++ atomic_unchecked_t frag_seqno;
+ unsigned char *packet_buff;
+ int packet_len;
+ struct kobject *hardif_obj;
+@@ -155,7 +155,7 @@ struct bat_priv {
+ atomic_t orig_interval; /* uint */
+ atomic_t hop_penalty; /* uint */
+ atomic_t log_level; /* uint */
+- atomic_t bcast_seqno;
++ atomic_unchecked_t bcast_seqno;
+ atomic_t bcast_queue_left;
+ atomic_t batman_queue_left;
+ atomic_t ttvn; /* translation table version number */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/batman-adv/unicast.c linux-3.4-pax/net/batman-adv/unicast.c
+--- linux-3.4/net/batman-adv/unicast.c 2012-05-21 11:33:40.807930013 +0200
++++ linux-3.4-pax/net/batman-adv/unicast.c 2012-05-21 12:10:12.084049028 +0200
+@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, s
+ frag1->flags = UNI_FRAG_HEAD | large_tail;
+ frag2->flags = large_tail;
+
+- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
++ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
+ frag1->seqno = htons(seqno - 1);
+ frag2->seqno = htons(seqno);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/bluetooth/hci_conn.c linux-3.4-pax/net/bluetooth/hci_conn.c
+--- linux-3.4/net/bluetooth/hci_conn.c 2012-05-21 11:33:40.823930014 +0200
++++ linux-3.4-pax/net/bluetooth/hci_conn.c 2012-05-21 12:10:12.088049029 +0200
+@@ -233,7 +233,7 @@ void hci_le_ltk_reply(struct hci_conn *c
+ memset(&cp, 0, sizeof(cp));
+
+ cp.handle = cpu_to_le16(conn->handle);
+- memcpy(cp.ltk, ltk, sizeof(ltk));
++ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
+
+ hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/bluetooth/l2cap_core.c linux-3.4-pax/net/bluetooth/l2cap_core.c
+--- linux-3.4/net/bluetooth/l2cap_core.c 2012-05-21 11:33:40.879930017 +0200
++++ linux-3.4-pax/net/bluetooth/l2cap_core.c 2012-05-21 12:10:12.092049029 +0200
+@@ -2466,8 +2466,10 @@ static int l2cap_parse_conf_rsp(struct l
+ break;
+
+ case L2CAP_CONF_RFC:
+- if (olen == sizeof(rfc))
+- memcpy(&rfc, (void *)val, olen);
++ if (olen != sizeof(rfc))
++ break;
++
++ memcpy(&rfc, (void *)val, olen);
+
+ if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
+ rfc.mode != chan->mode)
+@@ -2585,8 +2587,10 @@ static void l2cap_conf_rfc_get(struct l2
+
+ switch (type) {
+ case L2CAP_CONF_RFC:
+- if (olen == sizeof(rfc))
+- memcpy(&rfc, (void *)val, olen);
++ if (olen != sizeof(rfc))
++ break;
++
++ memcpy(&rfc, (void *)val, olen);
+ goto done;
+ }
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/bridge/netfilter/ebtables.c linux-3.4-pax/net/bridge/netfilter/ebtables.c
+--- linux-3.4/net/bridge/netfilter/ebtables.c 2012-03-19 10:39:13.396049117 +0100
++++ linux-3.4-pax/net/bridge/netfilter/ebtables.c 2012-05-21 12:10:12.096049029 +0200
+@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *s
+ tmp.valid_hooks = t->table->valid_hooks;
+ }
+ mutex_unlock(&ebt_mutex);
+- if (copy_to_user(user, &tmp, *len) != 0){
++ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
+ BUGPRINT("c2u Didn't work\n");
+ ret = -EFAULT;
+ break;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/caif/cfctrl.c linux-3.4-pax/net/caif/cfctrl.c
+--- linux-3.4/net/caif/cfctrl.c 2012-01-08 19:48:29.855470831 +0100
++++ linux-3.4-pax/net/caif/cfctrl.c 2012-05-21 12:10:12.100049028 +0200
+@@ -9,6 +9,7 @@
+ #include <linux/stddef.h>
+ #include <linux/spinlock.h>
+ #include <linux/slab.h>
++#include <linux/sched.h>
+ #include <net/caif/caif_layer.h>
+ #include <net/caif/cfpkt.h>
+ #include <net/caif/cfctrl.h>
+@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
+ memset(&dev_info, 0, sizeof(dev_info));
+ dev_info.id = 0xff;
+ cfsrvl_init(&this->serv, 0, &dev_info, false);
+- atomic_set(&this->req_seq_no, 1);
+- atomic_set(&this->rsp_seq_no, 1);
++ atomic_set_unchecked(&this->req_seq_no, 1);
++ atomic_set_unchecked(&this->rsp_seq_no, 1);
+ this->serv.layer.receive = cfctrl_recv;
+ sprintf(this->serv.layer.name, "ctrl");
+ this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
+@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfc
+ struct cfctrl_request_info *req)
+ {
+ spin_lock_bh(&ctrl->info_list_lock);
+- atomic_inc(&ctrl->req_seq_no);
+- req->sequence_no = atomic_read(&ctrl->req_seq_no);
++ atomic_inc_unchecked(&ctrl->req_seq_no);
++ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
+ list_add_tail(&req->list, &ctrl->list);
+ spin_unlock_bh(&ctrl->info_list_lock);
+ }
+@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctr
+ if (p != first)
+ pr_warn("Requests are not received in order\n");
+
+- atomic_set(&ctrl->rsp_seq_no,
++ atomic_set_unchecked(&ctrl->rsp_seq_no,
+ p->sequence_no);
+ list_del(&p->list);
+ goto out;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/can/gw.c linux-3.4-pax/net/can/gw.c
+--- linux-3.4/net/can/gw.c 2012-01-08 19:48:29.891470829 +0100
++++ linux-3.4-pax/net/can/gw.c 2012-05-21 12:10:12.104049028 +0200
+@@ -96,7 +96,7 @@ struct cf_mod {
+ struct {
+ void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
+ void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
+- } csumfunc;
++ } __no_const csumfunc;
+ };
+
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/compat.c linux-3.4-pax/net/compat.c
+--- linux-3.4/net/compat.c 2012-05-21 11:33:40.963930023 +0200
++++ linux-3.4-pax/net/compat.c 2012-05-21 12:10:12.104049028 +0200
+@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kms
+ __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
+ __get_user(kmsg->msg_flags, &umsg->msg_flags))
+ return -EFAULT;
+- kmsg->msg_name = compat_ptr(tmp1);
+- kmsg->msg_iov = compat_ptr(tmp2);
+- kmsg->msg_control = compat_ptr(tmp3);
++ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
++ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
++ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
+ return 0;
+ }
+
+@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *k
+
+ if (kern_msg->msg_namelen) {
+ if (mode == VERIFY_READ) {
+- int err = move_addr_to_kernel(kern_msg->msg_name,
++ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
+ kern_msg->msg_namelen,
+ kern_address);
+ if (err < 0)
+@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *k
+ kern_msg->msg_name = NULL;
+
+ tot_len = iov_from_user_compat_to_kern(kern_iov,
+- (struct compat_iovec __user *)kern_msg->msg_iov,
++ (struct compat_iovec __force_user *)kern_msg->msg_iov,
+ kern_msg->msg_iovlen);
+ if (tot_len >= 0)
+ kern_msg->msg_iov = kern_iov;
+@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *k
+
+ #define CMSG_COMPAT_FIRSTHDR(msg) \
+ (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
+- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
++ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
+ (struct compat_cmsghdr __user *)NULL)
+
+ #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
+ ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
+ (ucmlen) <= (unsigned long) \
+ ((mhdr)->msg_controllen - \
+- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
++ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
+
+ static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
+ struct compat_cmsghdr __user *cmsg, int cmsg_len)
+ {
+ char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
+- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
++ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
+ msg->msg_controllen)
+ return NULL;
+ return (struct compat_cmsghdr __user *)ptr;
+@@ -219,7 +219,7 @@ Efault:
+
+ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
+ {
+- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
++ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
+ struct compat_cmsghdr cmhdr;
+ int cmlen;
+
+@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
+
+ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
+ {
+- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
++ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
+ int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
+ int fdnum = scm->fp->count;
+ struct file **fp = scm->fp->fp;
+@@ -372,7 +372,7 @@ static int do_set_sock_timeout(struct so
+ return -EFAULT;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
++ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
+ set_fs(old_fs);
+
+ return err;
+@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct so
+ len = sizeof(ktime);
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
++ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
+ set_fs(old_fs);
+
+ if (!err) {
+@@ -576,7 +576,7 @@ int compat_mc_setsockopt(struct sock *so
+ case MCAST_JOIN_GROUP:
+ case MCAST_LEAVE_GROUP:
+ {
+- struct compat_group_req __user *gr32 = (void *)optval;
++ struct compat_group_req __user *gr32 = (void __user *)optval;
+ struct group_req __user *kgr =
+ compat_alloc_user_space(sizeof(struct group_req));
+ u32 interface;
+@@ -597,7 +597,7 @@ int compat_mc_setsockopt(struct sock *so
+ case MCAST_BLOCK_SOURCE:
+ case MCAST_UNBLOCK_SOURCE:
+ {
+- struct compat_group_source_req __user *gsr32 = (void *)optval;
++ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
+ struct group_source_req __user *kgsr = compat_alloc_user_space(
+ sizeof(struct group_source_req));
+ u32 interface;
+@@ -618,7 +618,7 @@ int compat_mc_setsockopt(struct sock *so
+ }
+ case MCAST_MSFILTER:
+ {
+- struct compat_group_filter __user *gf32 = (void *)optval;
++ struct compat_group_filter __user *gf32 = (void __user *)optval;
+ struct group_filter __user *kgf;
+ u32 interface, fmode, numsrc;
+
+@@ -656,7 +656,7 @@ int compat_mc_getsockopt(struct sock *so
+ char __user *optval, int __user *optlen,
+ int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
+ {
+- struct compat_group_filter __user *gf32 = (void *)optval;
++ struct compat_group_filter __user *gf32 = (void __user *)optval;
+ struct group_filter __user *kgf;
+ int __user *koptlen;
+ u32 interface, fmode, numsrc;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/core/datagram.c linux-3.4-pax/net/core/datagram.c
+--- linux-3.4/net/core/datagram.c 2012-05-21 11:33:40.967930023 +0200
++++ linux-3.4-pax/net/core/datagram.c 2012-05-21 12:10:12.108049029 +0200
+@@ -290,7 +290,7 @@ int skb_kill_datagram(struct sock *sk, s
+ }
+
+ kfree_skb(skb);
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ sk_mem_reclaim_partial(sk);
+
+ return err;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/core/dev.c linux-3.4-pax/net/core/dev.c
+--- linux-3.4/net/core/dev.c 2012-05-21 11:33:40.971930022 +0200
++++ linux-3.4-pax/net/core/dev.c 2012-05-21 12:10:12.112049029 +0200
+@@ -1602,7 +1602,7 @@ int dev_forward_skb(struct net_device *d
+ {
+ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+ if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
+- atomic_long_inc(&dev->rx_dropped);
++ atomic_long_inc_unchecked(&dev->rx_dropped);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+@@ -1612,7 +1612,7 @@ int dev_forward_skb(struct net_device *d
+ nf_reset(skb);
+
+ if (unlikely(!is_skb_forwardable(dev, skb))) {
+- atomic_long_inc(&dev->rx_dropped);
++ atomic_long_inc_unchecked(&dev->rx_dropped);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+@@ -2042,7 +2042,7 @@ static int illegal_highdma(struct net_de
+
+ struct dev_gso_cb {
+ void (*destructor)(struct sk_buff *skb);
+-};
++} __no_const;
+
+ #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
+
+@@ -2898,7 +2898,7 @@ enqueue:
+
+ local_irq_restore(flags);
+
+- atomic_long_inc(&skb->dev->rx_dropped);
++ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+@@ -2970,7 +2970,7 @@ int netif_rx_ni(struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL(netif_rx_ni);
+
+-static void net_tx_action(struct softirq_action *h)
++static void net_tx_action(void)
+ {
+ struct softnet_data *sd = &__get_cpu_var(softnet_data);
+
+@@ -3258,7 +3258,7 @@ ncls:
+ if (pt_prev) {
+ ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+ } else {
+- atomic_long_inc(&skb->dev->rx_dropped);
++ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
+ kfree_skb(skb);
+ /* Jamal, now you will not able to escape explaining
+ * me how you were going to use this. :-)
+@@ -3818,7 +3818,7 @@ void netif_napi_del(struct napi_struct *
+ }
+ EXPORT_SYMBOL(netif_napi_del);
+
+-static void net_rx_action(struct softirq_action *h)
++static void net_rx_action(void)
+ {
+ struct softnet_data *sd = &__get_cpu_var(softnet_data);
+ unsigned long time_limit = jiffies + 2;
+@@ -5839,7 +5839,7 @@ struct rtnl_link_stats64 *dev_get_stats(
+ } else {
+ netdev_stats_to_stats64(storage, &dev->stats);
+ }
+- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
++ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
+ return storage;
+ }
+ EXPORT_SYMBOL(dev_get_stats);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/core/flow.c linux-3.4-pax/net/core/flow.c
+--- linux-3.4/net/core/flow.c 2012-01-08 19:48:29.983470824 +0100
++++ linux-3.4-pax/net/core/flow.c 2012-05-21 12:10:12.116049029 +0200
+@@ -61,7 +61,7 @@ struct flow_cache {
+ struct timer_list rnd_timer;
+ };
+
+-atomic_t flow_cache_genid = ATOMIC_INIT(0);
++atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
+ EXPORT_SYMBOL(flow_cache_genid);
+ static struct flow_cache flow_cache_global;
+ static struct kmem_cache *flow_cachep __read_mostly;
+@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsig
+
+ static int flow_entry_valid(struct flow_cache_entry *fle)
+ {
+- if (atomic_read(&flow_cache_genid) != fle->genid)
++ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
+ return 0;
+ if (fle->object && !fle->object->ops->check(fle->object))
+ return 0;
+@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const
+ hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
+ fcp->hash_count++;
+ }
+- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
++ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
+ flo = fle->object;
+ if (!flo)
+ goto ret_object;
+@@ -280,7 +280,7 @@ nocache:
+ }
+ flo = resolver(net, key, family, dir, flo, ctx);
+ if (fle) {
+- fle->genid = atomic_read(&flow_cache_genid);
++ fle->genid = atomic_read_unchecked(&flow_cache_genid);
+ if (!IS_ERR(flo))
+ fle->object = flo;
+ else
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/core/iovec.c linux-3.4-pax/net/core/iovec.c
+--- linux-3.4/net/core/iovec.c 2012-05-21 11:33:40.995930024 +0200
++++ linux-3.4-pax/net/core/iovec.c 2012-05-21 12:10:12.120049029 +0200
+@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struc
+ if (m->msg_namelen) {
+ if (mode == VERIFY_READ) {
+ void __user *namep;
+- namep = (void __user __force *) m->msg_name;
++ namep = (void __force_user *) m->msg_name;
+ err = move_addr_to_kernel(namep, m->msg_namelen,
+ address);
+ if (err < 0)
+@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struc
+ }
+
+ size = m->msg_iovlen * sizeof(struct iovec);
+- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
++ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
+ return -EFAULT;
+
+ m->msg_iov = iov;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/core/rtnetlink.c linux-3.4-pax/net/core/rtnetlink.c
+--- linux-3.4/net/core/rtnetlink.c 2012-05-21 11:33:41.031930026 +0200
++++ linux-3.4-pax/net/core/rtnetlink.c 2012-05-21 12:10:12.124049030 +0200
+@@ -56,7 +56,7 @@ struct rtnl_link {
+ rtnl_doit_func doit;
+ rtnl_dumpit_func dumpit;
+ rtnl_calcit_func calcit;
+-};
++} __no_const;
+
+ static DEFINE_MUTEX(rtnl_mutex);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/core/scm.c linux-3.4-pax/net/core/scm.c
+--- linux-3.4/net/core/scm.c 2012-05-21 11:33:41.031930026 +0200
++++ linux-3.4-pax/net/core/scm.c 2012-05-21 12:10:12.124049030 +0200
+@@ -219,7 +219,7 @@ EXPORT_SYMBOL(__scm_send);
+ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
+ {
+ struct cmsghdr __user *cm
+- = (__force struct cmsghdr __user *)msg->msg_control;
++ = (struct cmsghdr __force_user *)msg->msg_control;
+ struct cmsghdr cmhdr;
+ int cmlen = CMSG_LEN(len);
+ int err;
+@@ -242,7 +242,7 @@ int put_cmsg(struct msghdr * msg, int le
+ err = -EFAULT;
+ if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
+ goto out;
+- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
++ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
+ goto out;
+ cmlen = CMSG_SPACE(len);
+ if (msg->msg_controllen < cmlen)
+@@ -258,7 +258,7 @@ EXPORT_SYMBOL(put_cmsg);
+ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
+ {
+ struct cmsghdr __user *cm
+- = (__force struct cmsghdr __user*)msg->msg_control;
++ = (struct cmsghdr __force_user *)msg->msg_control;
+
+ int fdmax = 0;
+ int fdnum = scm->fp->count;
+@@ -278,7 +278,7 @@ void scm_detach_fds(struct msghdr *msg,
+ if (fdnum < fdmax)
+ fdmax = fdnum;
+
+- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
++ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
+ i++, cmfptr++)
+ {
+ int new_fd;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/core/sock.c linux-3.4-pax/net/core/sock.c
+--- linux-3.4/net/core/sock.c 2012-05-21 11:33:41.039930025 +0200
++++ linux-3.4-pax/net/core/sock.c 2012-05-21 12:10:12.128049030 +0200
+@@ -340,7 +340,7 @@ int sock_queue_rcv_skb(struct sock *sk,
+ struct sk_buff_head *list = &sk->sk_receive_queue;
+
+ if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ trace_sock_rcvqueue_full(sk, skb);
+ return -ENOMEM;
+ }
+@@ -350,7 +350,7 @@ int sock_queue_rcv_skb(struct sock *sk,
+ return err;
+
+ if (!sk_rmem_schedule(sk, skb->truesize)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ return -ENOBUFS;
+ }
+
+@@ -370,7 +370,7 @@ int sock_queue_rcv_skb(struct sock *sk,
+ skb_dst_force(skb);
+
+ spin_lock_irqsave(&list->lock, flags);
+- skb->dropcount = atomic_read(&sk->sk_drops);
++ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
+ __skb_queue_tail(list, skb);
+ spin_unlock_irqrestore(&list->lock, flags);
+
+@@ -390,7 +390,7 @@ int sk_receive_skb(struct sock *sk, stru
+ skb->dev = NULL;
+
+ if (sk_rcvqueues_full(sk, skb)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ goto discard_and_relse;
+ }
+ if (nested)
+@@ -408,7 +408,7 @@ int sk_receive_skb(struct sock *sk, stru
+ mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+ } else if (sk_add_backlog(sk, skb)) {
+ bh_unlock_sock(sk);
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ goto discard_and_relse;
+ }
+
+@@ -997,7 +997,7 @@ int sock_getsockopt(struct socket *sock,
+ return -ENOTCONN;
+ if (lv < len)
+ return -EINVAL;
+- if (copy_to_user(optval, address, len))
++ if (len > sizeof(address) || copy_to_user(optval, address, len))
+ return -EFAULT;
+ goto lenout;
+ }
+@@ -1043,7 +1043,7 @@ int sock_getsockopt(struct socket *sock,
+
+ if (len > lv)
+ len = lv;
+- if (copy_to_user(optval, &v, len))
++ if (len > sizeof(v) || copy_to_user(optval, &v, len))
+ return -EFAULT;
+ lenout:
+ if (put_user(len, optlen))
+@@ -2128,7 +2128,7 @@ void sock_init_data(struct socket *sock,
+ */
+ smp_wmb();
+ atomic_set(&sk->sk_refcnt, 1);
+- atomic_set(&sk->sk_drops, 0);
++ atomic_set_unchecked(&sk->sk_drops, 0);
+ }
+ EXPORT_SYMBOL(sock_init_data);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/decnet/sysctl_net_decnet.c linux-3.4-pax/net/decnet/sysctl_net_decnet.c
+--- linux-3.4/net/decnet/sysctl_net_decnet.c 2011-10-24 12:48:42.523090905 +0200
++++ linux-3.4-pax/net/decnet/sysctl_net_decnet.c 2012-05-21 12:10:12.132049030 +0200
+@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_t
+
+ if (len > *lenp) len = *lenp;
+
+- if (copy_to_user(buffer, addr, len))
++ if (len > sizeof addr || copy_to_user(buffer, addr, len))
+ return -EFAULT;
+
+ *lenp = len;
+@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table
+
+ if (len > *lenp) len = *lenp;
+
+- if (copy_to_user(buffer, devname, len))
++ if (len > sizeof devname || copy_to_user(buffer, devname, len))
+ return -EFAULT;
+
+ *lenp = len;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/ipv4/fib_frontend.c linux-3.4-pax/net/ipv4/fib_frontend.c
+--- linux-3.4/net/ipv4/fib_frontend.c 2012-05-21 11:33:41.127930031 +0200
++++ linux-3.4-pax/net/ipv4/fib_frontend.c 2012-05-21 12:10:12.136049030 +0200
+@@ -969,12 +969,12 @@ static int fib_inetaddr_event(struct not
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH
+ fib_sync_up(dev);
+ #endif
+- atomic_inc(&net->ipv4.dev_addr_genid);
++ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
+ rt_cache_flush(dev_net(dev), -1);
+ break;
+ case NETDEV_DOWN:
+ fib_del_ifaddr(ifa, NULL);
+- atomic_inc(&net->ipv4.dev_addr_genid);
++ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
+ if (ifa->ifa_dev->ifa_list == NULL) {
+ /* Last address was deleted from this interface.
+ * Disable IP.
+@@ -1010,7 +1010,7 @@ static int fib_netdev_event(struct notif
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH
+ fib_sync_up(dev);
+ #endif
+- atomic_inc(&net->ipv4.dev_addr_genid);
++ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
+ rt_cache_flush(dev_net(dev), -1);
+ break;
+ case NETDEV_DOWN:
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/ipv4/fib_semantics.c linux-3.4-pax/net/ipv4/fib_semantics.c
+--- linux-3.4/net/ipv4/fib_semantics.c 2012-05-21 11:33:41.131930030 +0200
++++ linux-3.4-pax/net/ipv4/fib_semantics.c 2012-05-21 12:10:12.140049030 +0200
+@@ -698,7 +698,7 @@ __be32 fib_info_update_nh_saddr(struct n
+ nh->nh_saddr = inet_select_addr(nh->nh_dev,
+ nh->nh_gw,
+ nh->nh_parent->fib_scope);
+- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
++ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
+
+ return nh->nh_saddr;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/ipv4/inetpeer.c linux-3.4-pax/net/ipv4/inetpeer.c
+--- linux-3.4/net/ipv4/inetpeer.c 2012-03-19 10:39:13.620049077 +0100
++++ linux-3.4-pax/net/ipv4/inetpeer.c 2012-05-21 12:10:12.140049030 +0200
+@@ -487,8 +487,8 @@ relookup:
+ if (p) {
+ p->daddr = *daddr;
+ atomic_set(&p->refcnt, 1);
+- atomic_set(&p->rid, 0);
+- atomic_set(&p->ip_id_count,
++ atomic_set_unchecked(&p->rid, 0);
++ atomic_set_unchecked(&p->ip_id_count,
+ (daddr->family == AF_INET) ?
+ secure_ip_id(daddr->addr.a4) :
+ secure_ipv6_id(daddr->addr.a6));
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/ipv4/ipconfig.c linux-3.4-pax/net/ipv4/ipconfig.c
+--- linux-3.4/net/ipv4/ipconfig.c 2012-05-21 11:33:41.163930033 +0200
++++ linux-3.4-pax/net/ipv4/ipconfig.c 2012-05-21 12:10:12.144049031 +0200
+@@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsig
+
+ mm_segment_t oldfs = get_fs();
+ set_fs(get_ds());
+- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
++ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
+ set_fs(oldfs);
+ return res;
+ }
+@@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned
+
+ mm_segment_t oldfs = get_fs();
+ set_fs(get_ds());
+- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
++ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
+ set_fs(oldfs);
+ return res;
+ }
+@@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigne
+
+ mm_segment_t oldfs = get_fs();
+ set_fs(get_ds());
+- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
++ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
+ set_fs(oldfs);
+ return res;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/ipv4/ip_fragment.c linux-3.4-pax/net/ipv4/ip_fragment.c
+--- linux-3.4/net/ipv4/ip_fragment.c 2012-05-21 11:33:41.143930033 +0200
++++ linux-3.4-pax/net/ipv4/ip_fragment.c 2012-05-21 12:10:12.148049031 +0200
+@@ -318,7 +318,7 @@ static inline int ip_frag_too_far(struct
+ return 0;
+
+ start = qp->rid;
+- end = atomic_inc_return(&peer->rid);
++ end = atomic_inc_return_unchecked(&peer->rid);
+ qp->rid = end;
+
+ rc = qp->q.fragments && (end - start) > max;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/ipv4/ip_sockglue.c linux-3.4-pax/net/ipv4/ip_sockglue.c
+--- linux-3.4/net/ipv4/ip_sockglue.c 2012-05-21 11:33:41.151930033 +0200
++++ linux-3.4-pax/net/ipv4/ip_sockglue.c 2012-05-21 12:10:12.148049031 +0200
+@@ -1268,7 +1268,7 @@ static int do_ip_getsockopt(struct sock
+ if (sk->sk_type != SOCK_STREAM)
+ return -ENOPROTOOPT;
+
+- msg.msg_control = optval;
++ msg.msg_control = (void __force_kernel *)optval;
+ msg.msg_controllen = len;
+ msg.msg_flags = flags;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/ipv4/ping.c linux-3.4-pax/net/ipv4/ping.c
+--- linux-3.4/net/ipv4/ping.c 2012-05-21 11:33:41.183930034 +0200
++++ linux-3.4-pax/net/ipv4/ping.c 2012-05-21 12:10:12.164049032 +0200
+@@ -838,7 +838,7 @@ static void ping_format_sock(struct sock
+ sk_rmem_alloc_get(sp),
+ 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+ atomic_read(&sp->sk_refcnt), sp,
+- atomic_read(&sp->sk_drops), len);
++ atomic_read_unchecked(&sp->sk_drops), len);
+ }
+
+ static int ping_seq_show(struct seq_file *seq, void *v)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/ipv4/raw.c linux-3.4-pax/net/ipv4/raw.c
+--- linux-3.4/net/ipv4/raw.c 2012-05-21 11:33:41.187930034 +0200
++++ linux-3.4-pax/net/ipv4/raw.c 2012-05-21 12:10:12.168049032 +0200
+@@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk,
+ int raw_rcv(struct sock *sk, struct sk_buff *skb)
+ {
+ if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+@@ -740,16 +740,20 @@ static int raw_init(struct sock *sk)
+
+ static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
+ {
++ struct icmp_filter filter;
++
+ if (optlen > sizeof(struct icmp_filter))
+ optlen = sizeof(struct icmp_filter);
+- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
++ if (copy_from_user(&filter, optval, optlen))
+ return -EFAULT;
++ raw_sk(sk)->filter = filter;
+ return 0;
+ }
+
+ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
+ {
+ int len, ret = -EFAULT;
++ struct icmp_filter filter;
+
+ if (get_user(len, optlen))
+ goto out;
+@@ -759,8 +763,8 @@ static int raw_geticmpfilter(struct sock
+ if (len > sizeof(struct icmp_filter))
+ len = sizeof(struct icmp_filter);
+ ret = -EFAULT;
+- if (put_user(len, optlen) ||
+- copy_to_user(optval, &raw_sk(sk)->filter, len))
++ filter = raw_sk(sk)->filter;
++ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
+ goto out;
+ ret = 0;
+ out: return ret;
+@@ -988,7 +992,7 @@ static void raw_sock_seq_show(struct seq
+ sk_wmem_alloc_get(sp),
+ sk_rmem_alloc_get(sp),
+ 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
++ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
+ }
+
+ static int raw_seq_show(struct seq_file *seq, void *v)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/ipv4/route.c linux-3.4-pax/net/ipv4/route.c
+--- linux-3.4/net/ipv4/route.c 2012-05-21 11:33:41.191930034 +0200
++++ linux-3.4-pax/net/ipv4/route.c 2012-05-21 12:10:12.176049032 +0200
+@@ -312,7 +312,7 @@ static inline unsigned int rt_hash(__be3
+
+ static inline int rt_genid(struct net *net)
+ {
+- return atomic_read(&net->ipv4.rt_genid);
++ return atomic_read_unchecked(&net->ipv4.rt_genid);
+ }
+
+ #ifdef CONFIG_PROC_FS
+@@ -936,7 +936,7 @@ static void rt_cache_invalidate(struct n
+ unsigned char shuffle;
+
+ get_random_bytes(&shuffle, sizeof(shuffle));
+- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
++ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
+ inetpeer_invalidate_tree(AF_INET);
+ }
+
+@@ -3009,7 +3009,7 @@ static int rt_fill_info(struct net *net,
+ error = rt->dst.error;
+ if (peer) {
+ inet_peer_refcheck(rt->peer);
+- id = atomic_read(&peer->ip_id_count) & 0xffff;
++ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
+ if (peer->tcp_ts_stamp) {
+ ts = peer->tcp_ts;
+ tsage = get_seconds() - peer->tcp_ts_stamp;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/ipv4/tcp_probe.c linux-3.4-pax/net/ipv4/tcp_probe.c
+--- linux-3.4/net/ipv4/tcp_probe.c 2012-05-21 11:33:41.243930037 +0200
++++ linux-3.4-pax/net/ipv4/tcp_probe.c 2012-05-21 12:10:12.176049032 +0200
+@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file
+ if (cnt + width >= len)
+ break;
+
+- if (copy_to_user(buf + cnt, tbuf, width))
++ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
+ return -EFAULT;
+ cnt += width;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/ipv4/udp.c linux-3.4-pax/net/ipv4/udp.c
+--- linux-3.4/net/ipv4/udp.c 2012-05-21 11:33:41.259930038 +0200
++++ linux-3.4-pax/net/ipv4/udp.c 2012-05-21 12:10:12.180049033 +0200
+@@ -1102,7 +1102,7 @@ static unsigned int first_packet_length(
+ udp_lib_checksum_complete(skb)) {
+ UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+ IS_UDPLITE(sk));
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ __skb_unlink(skb, rcvq);
+ __skb_queue_tail(&list_kill, skb);
+ }
+@@ -1489,7 +1489,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
+
+ drop:
+ UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ kfree_skb(skb);
+ return -1;
+ }
+@@ -1508,7 +1508,7 @@ static void flush_stack(struct sock **st
+ skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
+
+ if (!skb1) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+ IS_UDPLITE(sk));
+ UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+@@ -2095,7 +2095,7 @@ static void udp4_format_sock(struct sock
+ sk_rmem_alloc_get(sp),
+ 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+ atomic_read(&sp->sk_refcnt), sp,
+- atomic_read(&sp->sk_drops), len);
++ atomic_read_unchecked(&sp->sk_drops), len);
+ }
+
+ int udp4_seq_show(struct seq_file *seq, void *v)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/ipv6/addrconf.c linux-3.4-pax/net/ipv6/addrconf.c
+--- linux-3.4/net/ipv6/addrconf.c 2012-05-21 11:33:41.271930039 +0200
++++ linux-3.4-pax/net/ipv6/addrconf.c 2012-05-21 12:10:12.184049033 +0200
+@@ -2142,7 +2142,7 @@ int addrconf_set_dstaddr(struct net *net
+ p.iph.ihl = 5;
+ p.iph.protocol = IPPROTO_IPV6;
+ p.iph.ttl = 64;
+- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
++ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
+
+ if (ops->ndo_do_ioctl) {
+ mm_segment_t oldfs = get_fs();
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/ipv6/inet6_connection_sock.c linux-3.4-pax/net/ipv6/inet6_connection_sock.c
+--- linux-3.4/net/ipv6/inet6_connection_sock.c 2012-03-19 10:39:13.720049080 +0100
++++ linux-3.4-pax/net/ipv6/inet6_connection_sock.c 2012-05-21 12:10:12.188049033 +0200
+@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
+ #ifdef CONFIG_XFRM
+ {
+ struct rt6_info *rt = (struct rt6_info *)dst;
+- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
++ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
+ }
+ #endif
+ }
+@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
+ #ifdef CONFIG_XFRM
+ if (dst) {
+ struct rt6_info *rt = (struct rt6_info *)dst;
+- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
++ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
+ __sk_dst_reset(sk);
+ dst = NULL;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/ipv6/ipv6_sockglue.c linux-3.4-pax/net/ipv6/ipv6_sockglue.c
+--- linux-3.4/net/ipv6/ipv6_sockglue.c 2012-05-21 11:33:41.287930039 +0200
++++ linux-3.4-pax/net/ipv6/ipv6_sockglue.c 2012-05-21 12:10:12.188049033 +0200
+@@ -990,7 +990,7 @@ static int do_ipv6_getsockopt(struct soc
+ if (sk->sk_type != SOCK_STREAM)
+ return -ENOPROTOOPT;
+
+- msg.msg_control = optval;
++ msg.msg_control = (void __force_kernel *)optval;
+ msg.msg_controllen = len;
+ msg.msg_flags = flags;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/ipv6/raw.c linux-3.4-pax/net/ipv6/raw.c
+--- linux-3.4/net/ipv6/raw.c 2012-05-21 11:33:41.319930041 +0200
++++ linux-3.4-pax/net/ipv6/raw.c 2012-05-21 12:10:12.192049033 +0200
+@@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct s
+ {
+ if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
+ skb_checksum_complete(skb)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+@@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk
+ struct raw6_sock *rp = raw6_sk(sk);
+
+ if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+@@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk
+
+ if (inet->hdrincl) {
+ if (skb_checksum_complete(skb)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+@@ -602,7 +602,7 @@ out:
+ return err;
+ }
+
+-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
++static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
+ struct flowi6 *fl6, struct dst_entry **dstp,
+ unsigned int flags)
+ {
+@@ -914,12 +914,15 @@ do_confirm:
+ static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
+ char __user *optval, int optlen)
+ {
++ struct icmp6_filter filter;
++
+ switch (optname) {
+ case ICMPV6_FILTER:
+ if (optlen > sizeof(struct icmp6_filter))
+ optlen = sizeof(struct icmp6_filter);
+- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
++ if (copy_from_user(&filter, optval, optlen))
+ return -EFAULT;
++ raw6_sk(sk)->filter = filter;
+ return 0;
+ default:
+ return -ENOPROTOOPT;
+@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct so
+ char __user *optval, int __user *optlen)
+ {
+ int len;
++ struct icmp6_filter filter;
+
+ switch (optname) {
+ case ICMPV6_FILTER:
+@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct so
+ len = sizeof(struct icmp6_filter);
+ if (put_user(len, optlen))
+ return -EFAULT;
+- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
++ filter = raw6_sk(sk)->filter;
++ if (len > sizeof filter || copy_to_user(optval, &filter, len))
+ return -EFAULT;
+ return 0;
+ default:
+@@ -1250,7 +1255,7 @@ static void raw6_sock_seq_show(struct se
+ 0, 0L, 0,
+ sock_i_uid(sp), 0,
+ sock_i_ino(sp),
+- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
++ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
+ }
+
+ static int raw6_seq_show(struct seq_file *seq, void *v)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/ipv6/udp.c linux-3.4-pax/net/ipv6/udp.c
+--- linux-3.4/net/ipv6/udp.c 2012-05-21 11:33:41.331930042 +0200
++++ linux-3.4-pax/net/ipv6/udp.c 2012-05-21 12:10:12.196049033 +0200
+@@ -551,7 +551,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
+
+ return 0;
+ drop:
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ drop_no_sk_drops_inc:
+ UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+ kfree_skb(skb);
+@@ -627,7 +627,7 @@ static void flush_stack(struct sock **st
+ continue;
+ }
+ drop:
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ UDP6_INC_STATS_BH(sock_net(sk),
+ UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
+ UDP6_INC_STATS_BH(sock_net(sk),
+@@ -798,7 +798,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
+ if (!sock_owned_by_user(sk))
+ udpv6_queue_rcv_skb(sk, skb);
+ else if (sk_add_backlog(sk, skb)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ bh_unlock_sock(sk);
+ sock_put(sk);
+ goto discard;
+@@ -1412,7 +1412,7 @@ static void udp6_sock_seq_show(struct se
+ sock_i_uid(sp), 0,
+ sock_i_ino(sp),
+ atomic_read(&sp->sk_refcnt), sp,
+- atomic_read(&sp->sk_drops));
++ atomic_read_unchecked(&sp->sk_drops));
+ }
+
+ int udp6_seq_show(struct seq_file *seq, void *v)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/irda/ircomm/ircomm_tty.c linux-3.4-pax/net/irda/ircomm/ircomm_tty.c
+--- linux-3.4/net/irda/ircomm/ircomm_tty.c 2012-05-21 11:33:41.343930042 +0200
++++ linux-3.4-pax/net/irda/ircomm/ircomm_tty.c 2012-05-21 12:10:12.200049034 +0200
+@@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(st
+ add_wait_queue(&self->open_wait, &wait);
+
+ IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
+- __FILE__,__LINE__, tty->driver->name, self->open_count );
++ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
+
+ /* As far as I can see, we protect open_count - Jean II */
+ spin_lock_irqsave(&self->spinlock, flags);
+ if (!tty_hung_up_p(filp)) {
+ extra_count = 1;
+- self->open_count--;
++ local_dec(&self->open_count);
+ }
+ spin_unlock_irqrestore(&self->spinlock, flags);
+- self->blocked_open++;
++ local_inc(&self->blocked_open);
+
+ while (1) {
+ if (tty->termios->c_cflag & CBAUD) {
+@@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(st
+ }
+
+ IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
+- __FILE__,__LINE__, tty->driver->name, self->open_count );
++ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
+
+ schedule();
+ }
+@@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(st
+ if (extra_count) {
+ /* ++ is not atomic, so this should be protected - Jean II */
+ spin_lock_irqsave(&self->spinlock, flags);
+- self->open_count++;
++ local_inc(&self->open_count);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ }
+- self->blocked_open--;
++ local_dec(&self->blocked_open);
+
+ IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
+- __FILE__,__LINE__, tty->driver->name, self->open_count);
++ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
+
+ if (!retval)
+ self->flags |= ASYNC_NORMAL_ACTIVE;
+@@ -412,14 +412,14 @@ static int ircomm_tty_open(struct tty_st
+ }
+ /* ++ is not atomic, so this should be protected - Jean II */
+ spin_lock_irqsave(&self->spinlock, flags);
+- self->open_count++;
++ local_inc(&self->open_count);
+
+ tty->driver_data = self;
+ self->tty = tty;
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
+- self->line, self->open_count);
++ self->line, local_read(&self->open_count));
+
+ /* Not really used by us, but lets do it anyway */
+ self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+@@ -505,7 +505,7 @@ static void ircomm_tty_close(struct tty_
+ return;
+ }
+
+- if ((tty->count == 1) && (self->open_count != 1)) {
++ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
+ /*
+ * Uh, oh. tty->count is 1, which means that the tty
+ * structure will be freed. state->count should always
+@@ -515,16 +515,16 @@ static void ircomm_tty_close(struct tty_
+ */
+ IRDA_DEBUG(0, "%s(), bad serial port count; "
+ "tty->count is 1, state->count is %d\n", __func__ ,
+- self->open_count);
+- self->open_count = 1;
++ local_read(&self->open_count));
++ local_set(&self->open_count, 1);
+ }
+
+- if (--self->open_count < 0) {
++ if (local_dec_return(&self->open_count) < 0) {
+ IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
+- __func__, self->line, self->open_count);
+- self->open_count = 0;
++ __func__, self->line, local_read(&self->open_count));
++ local_set(&self->open_count, 0);
+ }
+- if (self->open_count) {
++ if (local_read(&self->open_count)) {
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
+@@ -556,7 +556,7 @@ static void ircomm_tty_close(struct tty_
+ tty->closing = 0;
+ self->tty = NULL;
+
+- if (self->blocked_open) {
++ if (local_read(&self->blocked_open)) {
+ if (self->close_delay)
+ schedule_timeout_interruptible(self->close_delay);
+ wake_up_interruptible(&self->open_wait);
+@@ -1008,7 +1008,7 @@ static void ircomm_tty_hangup(struct tty
+ spin_lock_irqsave(&self->spinlock, flags);
+ self->flags &= ~ASYNC_NORMAL_ACTIVE;
+ self->tty = NULL;
+- self->open_count = 0;
++ local_set(&self->open_count, 0);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ wake_up_interruptible(&self->open_wait);
+@@ -1355,7 +1355,7 @@ static void ircomm_tty_line_info(struct
+ seq_putc(m, '\n');
+
+ seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
+- seq_printf(m, "Open count: %d\n", self->open_count);
++ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
+ seq_printf(m, "Max data size: %d\n", self->max_data_size);
+ seq_printf(m, "Max header size: %d\n", self->max_header_size);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/iucv/af_iucv.c linux-3.4-pax/net/iucv/af_iucv.c
+--- linux-3.4/net/iucv/af_iucv.c 2012-05-21 11:33:41.379930044 +0200
++++ linux-3.4-pax/net/iucv/af_iucv.c 2012-05-21 12:10:12.204049034 +0200
+@@ -783,10 +783,10 @@ static int iucv_sock_autobind(struct soc
+
+ write_lock_bh(&iucv_sk_list.lock);
+
+- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
++ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
+ while (__iucv_get_sock_by_name(name)) {
+ sprintf(name, "%08x",
+- atomic_inc_return(&iucv_sk_list.autobind_name));
++ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
+ }
+
+ write_unlock_bh(&iucv_sk_list.lock);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/key/af_key.c linux-3.4-pax/net/key/af_key.c
+--- linux-3.4/net/key/af_key.c 2012-05-21 11:33:41.387930045 +0200
++++ linux-3.4-pax/net/key/af_key.c 2012-05-21 12:10:12.208049034 +0200
+@@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(stru
+ static u32 get_acqseq(void)
+ {
+ u32 res;
+- static atomic_t acqseq;
++ static atomic_unchecked_t acqseq;
+
+ do {
+- res = atomic_inc_return(&acqseq);
++ res = atomic_inc_return_unchecked(&acqseq);
+ } while (!res);
+ return res;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/mac80211/ieee80211_i.h linux-3.4-pax/net/mac80211/ieee80211_i.h
+--- linux-3.4/net/mac80211/ieee80211_i.h 2012-05-21 11:33:41.443930048 +0200
++++ linux-3.4-pax/net/mac80211/ieee80211_i.h 2012-05-21 12:10:12.212049034 +0200
+@@ -28,6 +28,7 @@
+ #include <net/ieee80211_radiotap.h>
+ #include <net/cfg80211.h>
+ #include <net/mac80211.h>
++#include <asm/local.h>
+ #include "key.h"
+ #include "sta_info.h"
+
+@@ -842,7 +843,7 @@ struct ieee80211_local {
+ /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
+ spinlock_t queue_stop_reason_lock;
+
+- int open_count;
++ local_t open_count;
+ int monitors, cooked_mntrs;
+ /* number of interfaces with corresponding FIF_ flags */
+ int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/mac80211/iface.c linux-3.4-pax/net/mac80211/iface.c
+--- linux-3.4/net/mac80211/iface.c 2012-05-21 11:33:41.459930049 +0200
++++ linux-3.4-pax/net/mac80211/iface.c 2012-05-21 12:10:12.216049035 +0200
+@@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_
+ break;
+ }
+
+- if (local->open_count == 0) {
++ if (local_read(&local->open_count) == 0) {
+ res = drv_start(local);
+ if (res)
+ goto err_del_bss;
+@@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_
+ memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+- if (!local->open_count)
++ if (!local_read(&local->open_count))
+ drv_stop(local);
+ return -EADDRNOTAVAIL;
+ }
+@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
+ mutex_unlock(&local->mtx);
+
+ if (coming_up)
+- local->open_count++;
++ local_inc(&local->open_count);
+
+ if (hw_reconf_flags)
+ ieee80211_hw_config(local, hw_reconf_flags);
+@@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_
+ err_del_interface:
+ drv_remove_interface(local, sdata);
+ err_stop:
+- if (!local->open_count)
++ if (!local_read(&local->open_count))
+ drv_stop(local);
+ err_del_bss:
+ sdata->bss = NULL;
+@@ -491,7 +491,7 @@ static void ieee80211_do_stop(struct iee
+ }
+
+ if (going_down)
+- local->open_count--;
++ local_dec(&local->open_count);
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+@@ -550,7 +550,7 @@ static void ieee80211_do_stop(struct iee
+
+ ieee80211_recalc_ps(local, -1);
+
+- if (local->open_count == 0) {
++ if (local_read(&local->open_count) == 0) {
+ if (local->ops->napi_poll)
+ napi_disable(&local->napi);
+ ieee80211_clear_tx_pending(local);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/mac80211/main.c linux-3.4-pax/net/mac80211/main.c
+--- linux-3.4/net/mac80211/main.c 2012-05-21 11:33:41.467930049 +0200
++++ linux-3.4-pax/net/mac80211/main.c 2012-05-21 12:10:12.220049035 +0200
+@@ -164,7 +164,7 @@ int ieee80211_hw_config(struct ieee80211
+ local->hw.conf.power_level = power;
+ }
+
+- if (changed && local->open_count) {
++ if (changed && local_read(&local->open_count)) {
+ ret = drv_config(local, changed);
+ /*
+ * Goal:
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/mac80211/pm.c linux-3.4-pax/net/mac80211/pm.c
+--- linux-3.4/net/mac80211/pm.c 2012-05-21 11:33:41.519930052 +0200
++++ linux-3.4-pax/net/mac80211/pm.c 2012-05-21 12:10:12.220049035 +0200
+@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211
+ struct ieee80211_sub_if_data *sdata;
+ struct sta_info *sta;
+
+- if (!local->open_count)
++ if (!local_read(&local->open_count))
+ goto suspend;
+
+ ieee80211_scan_cancel(local);
+@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211
+ cancel_work_sync(&local->dynamic_ps_enable_work);
+ del_timer_sync(&local->dynamic_ps_timer);
+
+- local->wowlan = wowlan && local->open_count;
++ local->wowlan = wowlan && local_read(&local->open_count);
+ if (local->wowlan) {
+ int err = drv_suspend(local, wowlan);
+ if (err < 0) {
+@@ -128,7 +128,7 @@ int __ieee80211_suspend(struct ieee80211
+ }
+
+ /* stop hardware - this must stop RX */
+- if (local->open_count)
++ if (local_read(&local->open_count))
+ ieee80211_stop_device(local);
+
+ suspend:
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/mac80211/rate.c linux-3.4-pax/net/mac80211/rate.c
+--- linux-3.4/net/mac80211/rate.c 2012-05-21 11:33:41.519930052 +0200
++++ linux-3.4-pax/net/mac80211/rate.c 2012-05-21 12:10:12.224049035 +0200
+@@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct
+
+ ASSERT_RTNL();
+
+- if (local->open_count)
++ if (local_read(&local->open_count))
+ return -EBUSY;
+
+ if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/mac80211/rc80211_pid_debugfs.c linux-3.4-pax/net/mac80211/rc80211_pid_debugfs.c
+--- linux-3.4/net/mac80211/rc80211_pid_debugfs.c 2012-01-08 19:48:30.731470784 +0100
++++ linux-3.4-pax/net/mac80211/rc80211_pid_debugfs.c 2012-05-21 12:10:12.224049035 +0200
+@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_r
+
+ spin_unlock_irqrestore(&events->lock, status);
+
+- if (copy_to_user(buf, pb, p))
++ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
+ return -EFAULT;
+
+ return p;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/mac80211/util.c linux-3.4-pax/net/mac80211/util.c
+--- linux-3.4/net/mac80211/util.c 2012-05-21 11:33:41.543930053 +0200
++++ linux-3.4-pax/net/mac80211/util.c 2012-05-21 12:10:12.228049035 +0200
+@@ -1179,7 +1179,7 @@ int ieee80211_reconfig(struct ieee80211_
+ }
+ #endif
+ /* everything else happens only if HW was up & running */
+- if (!local->open_count)
++ if (!local_read(&local->open_count))
+ goto wake_up;
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/netfilter/ipvs/ip_vs_conn.c linux-3.4-pax/net/netfilter/ipvs/ip_vs_conn.c
+--- linux-3.4/net/netfilter/ipvs/ip_vs_conn.c 2012-01-08 19:48:30.839470779 +0100
++++ linux-3.4-pax/net/netfilter/ipvs/ip_vs_conn.c 2012-05-21 12:10:12.232049035 +0200
+@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
+ /* Increase the refcnt counter of the dest */
+ atomic_inc(&dest->refcnt);
+
+- conn_flags = atomic_read(&dest->conn_flags);
++ conn_flags = atomic_read_unchecked(&dest->conn_flags);
+ if (cp->protocol != IPPROTO_UDP)
+ conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
+ /* Bind with the destination and its corresponding transmitter */
+@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
+ atomic_set(&cp->refcnt, 1);
+
+ atomic_set(&cp->n_control, 0);
+- atomic_set(&cp->in_pkts, 0);
++ atomic_set_unchecked(&cp->in_pkts, 0);
+
+ atomic_inc(&ipvs->conn_count);
+ if (flags & IP_VS_CONN_F_NO_CPORT)
+@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
+
+ /* Don't drop the entry if its number of incoming packets is not
+ located in [0, 8] */
+- i = atomic_read(&cp->in_pkts);
++ i = atomic_read_unchecked(&cp->in_pkts);
+ if (i > 8 || i < 0) return 0;
+
+ if (!todrop_rate[i]) return 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/netfilter/ipvs/ip_vs_core.c linux-3.4-pax/net/netfilter/ipvs/ip_vs_core.c
+--- linux-3.4/net/netfilter/ipvs/ip_vs_core.c 2012-05-21 11:33:41.583930055 +0200
++++ linux-3.4-pax/net/netfilter/ipvs/ip_vs_core.c 2012-05-21 12:10:12.236049036 +0200
+@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *sv
+ ret = cp->packet_xmit(skb, cp, pd->pp);
+ /* do not touch skb anymore */
+
+- atomic_inc(&cp->in_pkts);
++ atomic_inc_unchecked(&cp->in_pkts);
+ ip_vs_conn_put(cp);
+ return ret;
+ }
+@@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk
+ if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+ pkts = sysctl_sync_threshold(ipvs);
+ else
+- pkts = atomic_add_return(1, &cp->in_pkts);
++ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
+
+ if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
+ cp->protocol == IPPROTO_SCTP) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/netfilter/ipvs/ip_vs_ctl.c linux-3.4-pax/net/netfilter/ipvs/ip_vs_ctl.c
+--- linux-3.4/net/netfilter/ipvs/ip_vs_ctl.c 2012-05-21 11:33:41.587930056 +0200
++++ linux-3.4-pax/net/netfilter/ipvs/ip_vs_ctl.c 2012-05-21 12:10:12.240049036 +0200
+@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service
+ ip_vs_rs_hash(ipvs, dest);
+ write_unlock_bh(&ipvs->rs_lock);
+ }
+- atomic_set(&dest->conn_flags, conn_flags);
++ atomic_set_unchecked(&dest->conn_flags, conn_flags);
+
+ /* bind the service */
+ if (!dest->svc) {
+@@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct se
+ " %-7s %-6d %-10d %-10d\n",
+ &dest->addr.in6,
+ ntohs(dest->port),
+- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
++ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
+ atomic_read(&dest->weight),
+ atomic_read(&dest->activeconns),
+ atomic_read(&dest->inactconns));
+@@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct se
+ "%-7s %-6d %-10d %-10d\n",
+ ntohl(dest->addr.ip),
+ ntohs(dest->port),
+- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
++ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
+ atomic_read(&dest->weight),
+ atomic_read(&dest->activeconns),
+ atomic_read(&dest->inactconns));
+@@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net
+
+ entry.addr = dest->addr.ip;
+ entry.port = dest->port;
+- entry.conn_flags = atomic_read(&dest->conn_flags);
++ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
+ entry.weight = atomic_read(&dest->weight);
+ entry.u_threshold = dest->u_threshold;
+ entry.l_threshold = dest->l_threshold;
+@@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct s
+ NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
+
+ NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
+- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
++ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
+ NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
+ NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
+ NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/netfilter/ipvs/ip_vs_sync.c linux-3.4-pax/net/netfilter/ipvs/ip_vs_sync.c
+--- linux-3.4/net/netfilter/ipvs/ip_vs_sync.c 2012-03-19 10:39:13.944049070 +0100
++++ linux-3.4-pax/net/netfilter/ipvs/ip_vs_sync.c 2012-05-21 12:10:12.244049036 +0200
+@@ -649,7 +649,7 @@ control:
+ * i.e only increment in_pkts for Templates.
+ */
+ if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
+- int pkts = atomic_add_return(1, &cp->in_pkts);
++ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
+
+ if (pkts % sysctl_sync_period(ipvs) != 1)
+ return;
+@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *
+
+ if (opt)
+ memcpy(&cp->in_seq, opt, sizeof(*opt));
+- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
++ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
+ cp->state = state;
+ cp->old_state = cp->state;
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/netfilter/ipvs/ip_vs_xmit.c linux-3.4-pax/net/netfilter/ipvs/ip_vs_xmit.c
+--- linux-3.4/net/netfilter/ipvs/ip_vs_xmit.c 2012-03-19 10:39:13.944049070 +0100
++++ linux-3.4-pax/net/netfilter/ipvs/ip_vs_xmit.c 2012-05-21 12:10:12.248049036 +0200
+@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
+ else
+ rc = NF_ACCEPT;
+ /* do not touch skb anymore */
+- atomic_inc(&cp->in_pkts);
++ atomic_inc_unchecked(&cp->in_pkts);
+ goto out;
+ }
+
+@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
+ else
+ rc = NF_ACCEPT;
+ /* do not touch skb anymore */
+- atomic_inc(&cp->in_pkts);
++ atomic_inc_unchecked(&cp->in_pkts);
+ goto out;
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/netfilter/nfnetlink_log.c linux-3.4-pax/net/netfilter/nfnetlink_log.c
+--- linux-3.4/net/netfilter/nfnetlink_log.c 2012-01-08 19:48:30.923470774 +0100
++++ linux-3.4-pax/net/netfilter/nfnetlink_log.c 2012-05-21 12:10:12.248049036 +0200
+@@ -70,7 +70,7 @@ struct nfulnl_instance {
+ };
+
+ static DEFINE_SPINLOCK(instances_lock);
+-static atomic_t global_seq;
++static atomic_unchecked_t global_seq;
+
+ #define INSTANCE_BUCKETS 16
+ static struct hlist_head instance_table[INSTANCE_BUCKETS];
+@@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_ins
+ /* global sequence number */
+ if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
+ NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
+- htonl(atomic_inc_return(&global_seq)));
++ htonl(atomic_inc_return_unchecked(&global_seq)));
+
+ if (data_len) {
+ struct nlattr *nla;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/netfilter/xt_statistic.c linux-3.4-pax/net/netfilter/xt_statistic.c
+--- linux-3.4/net/netfilter/xt_statistic.c 2012-01-08 19:48:30.955470772 +0100
++++ linux-3.4-pax/net/netfilter/xt_statistic.c 2012-05-21 12:10:12.252049036 +0200
+@@ -19,7 +19,7 @@
+ #include <linux/module.h>
+
+ struct xt_statistic_priv {
+- atomic_t count;
++ atomic_unchecked_t count;
+ } ____cacheline_aligned_in_smp;
+
+ MODULE_LICENSE("GPL");
+@@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb,
+ break;
+ case XT_STATISTIC_MODE_NTH:
+ do {
+- oval = atomic_read(&info->master->count);
++ oval = atomic_read_unchecked(&info->master->count);
+ nval = (oval == info->u.nth.every) ? 0 : oval + 1;
+- } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
++ } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
+ if (nval == 0)
+ ret = !ret;
+ break;
+@@ -64,7 +64,7 @@ static int statistic_mt_check(const stru
+ info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
+ if (info->master == NULL)
+ return -ENOMEM;
+- atomic_set(&info->master->count, info->u.nth.count);
++ atomic_set_unchecked(&info->master->count, info->u.nth.count);
+
+ return 0;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/netlink/af_netlink.c linux-3.4-pax/net/netlink/af_netlink.c
+--- linux-3.4/net/netlink/af_netlink.c 2012-05-21 11:33:41.663930060 +0200
++++ linux-3.4-pax/net/netlink/af_netlink.c 2012-05-21 12:10:12.256049037 +0200
+@@ -741,7 +741,7 @@ static void netlink_overrun(struct sock
+ sk->sk_error_report(sk);
+ }
+ }
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ }
+
+ static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
+@@ -2013,7 +2013,7 @@ static int netlink_seq_show(struct seq_f
+ sk_wmem_alloc_get(s),
+ nlk->cb,
+ atomic_read(&s->sk_refcnt),
+- atomic_read(&s->sk_drops),
++ atomic_read_unchecked(&s->sk_drops),
+ sock_i_ino(s)
+ );
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/packet/af_packet.c linux-3.4-pax/net/packet/af_packet.c
+--- linux-3.4/net/packet/af_packet.c 2012-05-21 11:33:41.775930066 +0200
++++ linux-3.4-pax/net/packet/af_packet.c 2012-05-21 12:10:12.260049037 +0200
+@@ -1687,7 +1687,7 @@ static int packet_rcv(struct sk_buff *sk
+
+ spin_lock(&sk->sk_receive_queue.lock);
+ po->stats.tp_packets++;
+- skb->dropcount = atomic_read(&sk->sk_drops);
++ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+ spin_unlock(&sk->sk_receive_queue.lock);
+ sk->sk_data_ready(sk, skb->len);
+@@ -1696,7 +1696,7 @@ static int packet_rcv(struct sk_buff *sk
+ drop_n_acct:
+ spin_lock(&sk->sk_receive_queue.lock);
+ po->stats.tp_drops++;
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ spin_unlock(&sk->sk_receive_queue.lock);
+
+ drop_n_restore:
+@@ -3294,7 +3294,7 @@ static int packet_getsockopt(struct sock
+ case PACKET_HDRLEN:
+ if (len > sizeof(int))
+ len = sizeof(int);
+- if (copy_from_user(&val, optval, len))
++ if (len > sizeof(val) || copy_from_user(&val, optval, len))
+ return -EFAULT;
+ switch (val) {
+ case TPACKET_V1:
+@@ -3344,7 +3344,7 @@ static int packet_getsockopt(struct sock
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+- if (copy_to_user(optval, data, len))
++ if (len > sizeof(st) || copy_to_user(optval, data, len))
+ return -EFAULT;
+ return 0;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/phonet/pep.c linux-3.4-pax/net/phonet/pep.c
+--- linux-3.4/net/phonet/pep.c 2012-05-21 11:33:41.775930066 +0200
++++ linux-3.4-pax/net/phonet/pep.c 2012-05-21 12:10:12.264049037 +0200
+@@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk,
+
+ case PNS_PEP_CTRL_REQ:
+ if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ break;
+ }
+ __skb_pull(skb, 4);
+@@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk,
+ }
+
+ if (pn->rx_credits == 0) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ err = -ENOBUFS;
+ break;
+ }
+@@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct so
+ }
+
+ if (pn->rx_credits == 0) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ err = NET_RX_DROP;
+ break;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/phonet/socket.c linux-3.4-pax/net/phonet/socket.c
+--- linux-3.4/net/phonet/socket.c 2012-03-19 10:39:14.136049059 +0100
++++ linux-3.4-pax/net/phonet/socket.c 2012-05-21 12:10:12.268049037 +0200
+@@ -614,7 +614,7 @@ static int pn_sock_seq_show(struct seq_f
+ sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
+ sock_i_uid(sk), sock_i_ino(sk),
+ atomic_read(&sk->sk_refcnt), sk,
+- atomic_read(&sk->sk_drops), &len);
++ atomic_read_unchecked(&sk->sk_drops), &len);
+ }
+ seq_printf(seq, "%*s\n", 127 - len, "");
+ return 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rds/cong.c linux-3.4-pax/net/rds/cong.c
+--- linux-3.4/net/rds/cong.c 2012-01-08 19:48:31.043470768 +0100
++++ linux-3.4-pax/net/rds/cong.c 2012-05-21 12:10:12.268049037 +0200
+@@ -78,7 +78,7 @@
+ * finds that the saved generation number is smaller than the global generation
+ * number, it wakes up the process.
+ */
+-static atomic_t rds_cong_generation = ATOMIC_INIT(0);
++static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
+
+ /*
+ * Congestion monitoring
+@@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_con
+ rdsdebug("waking map %p for %pI4\n",
+ map, &map->m_addr);
+ rds_stats_inc(s_cong_update_received);
+- atomic_inc(&rds_cong_generation);
++ atomic_inc_unchecked(&rds_cong_generation);
+ if (waitqueue_active(&map->m_waitq))
+ wake_up(&map->m_waitq);
+ if (waitqueue_active(&rds_poll_waitq))
+@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
+
+ int rds_cong_updated_since(unsigned long *recent)
+ {
+- unsigned long gen = atomic_read(&rds_cong_generation);
++ unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
+
+ if (likely(*recent == gen))
+ return 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rds/ib_cm.c linux-3.4-pax/net/rds/ib_cm.c
+--- linux-3.4/net/rds/ib_cm.c 2012-05-21 11:33:41.799930067 +0200
++++ linux-3.4-pax/net/rds/ib_cm.c 2012-05-21 12:10:12.272049038 +0200
+@@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_con
+ /* Clear the ACK state */
+ clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
+ #ifdef KERNEL_HAS_ATOMIC64
+- atomic64_set(&ic->i_ack_next, 0);
++ atomic64_set_unchecked(&ic->i_ack_next, 0);
+ #else
+ ic->i_ack_next = 0;
+ #endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rds/ib.h linux-3.4-pax/net/rds/ib.h
+--- linux-3.4/net/rds/ib.h 2011-10-24 12:48:43.343090860 +0200
++++ linux-3.4-pax/net/rds/ib.h 2012-05-21 12:10:12.272049038 +0200
+@@ -128,7 +128,7 @@ struct rds_ib_connection {
+ /* sending acks */
+ unsigned long i_ack_flags;
+ #ifdef KERNEL_HAS_ATOMIC64
+- atomic64_t i_ack_next; /* next ACK to send */
++ atomic64_unchecked_t i_ack_next; /* next ACK to send */
+ #else
+ spinlock_t i_ack_lock; /* protect i_ack_next */
+ u64 i_ack_next; /* next ACK to send */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rds/ib_recv.c linux-3.4-pax/net/rds/ib_recv.c
+--- linux-3.4/net/rds/ib_recv.c 2012-05-21 11:33:41.803930067 +0200
++++ linux-3.4-pax/net/rds/ib_recv.c 2012-05-21 12:10:12.276049038 +0200
+@@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
+ static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
+ int ack_required)
+ {
+- atomic64_set(&ic->i_ack_next, seq);
++ atomic64_set_unchecked(&ic->i_ack_next, seq);
+ if (ack_required) {
+ smp_mb__before_clear_bit();
+ set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+@@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
+ clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+ smp_mb__after_clear_bit();
+
+- return atomic64_read(&ic->i_ack_next);
++ return atomic64_read_unchecked(&ic->i_ack_next);
+ }
+ #endif
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rds/iw_cm.c linux-3.4-pax/net/rds/iw_cm.c
+--- linux-3.4/net/rds/iw_cm.c 2012-05-21 11:33:41.819930068 +0200
++++ linux-3.4-pax/net/rds/iw_cm.c 2012-05-21 12:10:12.276049038 +0200
+@@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_con
+ /* Clear the ACK state */
+ clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
+ #ifdef KERNEL_HAS_ATOMIC64
+- atomic64_set(&ic->i_ack_next, 0);
++ atomic64_set_unchecked(&ic->i_ack_next, 0);
+ #else
+ ic->i_ack_next = 0;
+ #endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rds/iw.h linux-3.4-pax/net/rds/iw.h
+--- linux-3.4/net/rds/iw.h 2011-10-24 12:48:43.351090861 +0200
++++ linux-3.4-pax/net/rds/iw.h 2012-05-21 12:10:12.280049038 +0200
+@@ -134,7 +134,7 @@ struct rds_iw_connection {
+ /* sending acks */
+ unsigned long i_ack_flags;
+ #ifdef KERNEL_HAS_ATOMIC64
+- atomic64_t i_ack_next; /* next ACK to send */
++ atomic64_unchecked_t i_ack_next; /* next ACK to send */
+ #else
+ spinlock_t i_ack_lock; /* protect i_ack_next */
+ u64 i_ack_next; /* next ACK to send */
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rds/iw_recv.c linux-3.4-pax/net/rds/iw_recv.c
+--- linux-3.4/net/rds/iw_recv.c 2012-05-21 11:33:41.819930068 +0200
++++ linux-3.4-pax/net/rds/iw_recv.c 2012-05-21 12:10:12.284049038 +0200
+@@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
+ static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
+ int ack_required)
+ {
+- atomic64_set(&ic->i_ack_next, seq);
++ atomic64_set_unchecked(&ic->i_ack_next, seq);
+ if (ack_required) {
+ smp_mb__before_clear_bit();
+ set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+@@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
+ clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+ smp_mb__after_clear_bit();
+
+- return atomic64_read(&ic->i_ack_next);
++ return atomic64_read_unchecked(&ic->i_ack_next);
+ }
+ #endif
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rds/tcp.c linux-3.4-pax/net/rds/tcp.c
+--- linux-3.4/net/rds/tcp.c 2012-01-08 19:48:31.135470763 +0100
++++ linux-3.4-pax/net/rds/tcp.c 2012-05-21 12:10:12.284049038 +0200
+@@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock
+ int val = 1;
+
+ set_fs(KERNEL_DS);
+- sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
++ sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
+ sizeof(val));
+ set_fs(oldfs);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rds/tcp_send.c linux-3.4-pax/net/rds/tcp_send.c
+--- linux-3.4/net/rds/tcp_send.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/net/rds/tcp_send.c 2012-05-21 12:10:12.288049038 +0200
+@@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+- sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
++ sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
+ sizeof(val));
+ set_fs(oldfs);
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rxrpc/af_rxrpc.c linux-3.4-pax/net/rxrpc/af_rxrpc.c
+--- linux-3.4/net/rxrpc/af_rxrpc.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/net/rxrpc/af_rxrpc.c 2012-05-21 12:10:12.288049038 +0200
+@@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_
+ __be32 rxrpc_epoch;
+
+ /* current debugging ID */
+-atomic_t rxrpc_debug_id;
++atomic_unchecked_t rxrpc_debug_id;
+
+ /* count of skbs currently in use */
+ atomic_t rxrpc_n_skbs;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rxrpc/ar-ack.c linux-3.4-pax/net/rxrpc/ar-ack.c
+--- linux-3.4/net/rxrpc/ar-ack.c 2012-03-19 10:39:14.152049059 +0100
++++ linux-3.4-pax/net/rxrpc/ar-ack.c 2012-05-21 12:10:12.292049039 +0200
+@@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_ca
+
+ _enter("{%d,%d,%d,%d},",
+ call->acks_hard, call->acks_unacked,
+- atomic_read(&call->sequence),
++ atomic_read_unchecked(&call->sequence),
+ CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
+
+ stop = 0;
+@@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_ca
+
+ /* each Tx packet has a new serial number */
+ sp->hdr.serial =
+- htonl(atomic_inc_return(&call->conn->serial));
++ htonl(atomic_inc_return_unchecked(&call->conn->serial));
+
+ hdr = (struct rxrpc_header *) txb->head;
+ hdr->serial = sp->hdr.serial;
+@@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struc
+ */
+ static void rxrpc_clear_tx_window(struct rxrpc_call *call)
+ {
+- rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
++ rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
+ }
+
+ /*
+@@ -629,7 +629,7 @@ process_further:
+
+ latest = ntohl(sp->hdr.serial);
+ hard = ntohl(ack.firstPacket);
+- tx = atomic_read(&call->sequence);
++ tx = atomic_read_unchecked(&call->sequence);
+
+ _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
+ latest,
+@@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_stru
+ goto maybe_reschedule;
+
+ send_ACK_with_skew:
+- ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
++ ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
+ ntohl(ack.serial));
+ send_ACK:
+ mtu = call->conn->trans->peer->if_mtu;
+@@ -1173,7 +1173,7 @@ send_ACK:
+ ackinfo.rxMTU = htonl(5692);
+ ackinfo.jumbo_max = htonl(4);
+
+- hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
++ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
+ _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
+ ntohl(hdr.serial),
+ ntohs(ack.maxSkew),
+@@ -1191,7 +1191,7 @@ send_ACK:
+ send_message:
+ _debug("send message");
+
+- hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
++ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
+ _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
+ send_message_2:
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rxrpc/ar-call.c linux-3.4-pax/net/rxrpc/ar-call.c
+--- linux-3.4/net/rxrpc/ar-call.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/net/rxrpc/ar-call.c 2012-05-21 12:10:12.296049039 +0200
+@@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
+ spin_lock_init(&call->lock);
+ rwlock_init(&call->state_lock);
+ atomic_set(&call->usage, 1);
+- call->debug_id = atomic_inc_return(&rxrpc_debug_id);
++ call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+ call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
+
+ memset(&call->sock_node, 0xed, sizeof(call->sock_node));
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rxrpc/ar-connection.c linux-3.4-pax/net/rxrpc/ar-connection.c
+--- linux-3.4/net/rxrpc/ar-connection.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/net/rxrpc/ar-connection.c 2012-05-21 12:10:12.296049039 +0200
+@@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_al
+ rwlock_init(&conn->lock);
+ spin_lock_init(&conn->state_lock);
+ atomic_set(&conn->usage, 1);
+- conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
++ conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+ conn->avail_calls = RXRPC_MAXCALLS;
+ conn->size_align = 4;
+ conn->header_size = sizeof(struct rxrpc_header);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rxrpc/ar-connevent.c linux-3.4-pax/net/rxrpc/ar-connevent.c
+--- linux-3.4/net/rxrpc/ar-connevent.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/net/rxrpc/ar-connevent.c 2012-05-21 12:10:12.300049039 +0200
+@@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
+
+ len = iov[0].iov_len + iov[1].iov_len;
+
+- hdr.serial = htonl(atomic_inc_return(&conn->serial));
++ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
+ _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
+
+ ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rxrpc/ar-input.c linux-3.4-pax/net/rxrpc/ar-input.c
+--- linux-3.4/net/rxrpc/ar-input.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/net/rxrpc/ar-input.c 2012-05-21 12:10:12.304049039 +0200
+@@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rx
+ /* track the latest serial number on this connection for ACK packet
+ * information */
+ serial = ntohl(sp->hdr.serial);
+- hi_serial = atomic_read(&call->conn->hi_serial);
++ hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
+ while (serial > hi_serial)
+- hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
++ hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
+ serial);
+
+ /* request ACK generation for any ACK or DATA packet that requests
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rxrpc/ar-internal.h linux-3.4-pax/net/rxrpc/ar-internal.h
+--- linux-3.4/net/rxrpc/ar-internal.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/net/rxrpc/ar-internal.h 2012-05-21 12:10:12.304049039 +0200
+@@ -272,8 +272,8 @@ struct rxrpc_connection {
+ int error; /* error code for local abort */
+ int debug_id; /* debug ID for printks */
+ unsigned call_counter; /* call ID counter */
+- atomic_t serial; /* packet serial number counter */
+- atomic_t hi_serial; /* highest serial number received */
++ atomic_unchecked_t serial; /* packet serial number counter */
++ atomic_unchecked_t hi_serial; /* highest serial number received */
+ u8 avail_calls; /* number of calls available */
+ u8 size_align; /* data size alignment (for security) */
+ u8 header_size; /* rxrpc + security header size */
+@@ -346,7 +346,7 @@ struct rxrpc_call {
+ spinlock_t lock;
+ rwlock_t state_lock; /* lock for state transition */
+ atomic_t usage;
+- atomic_t sequence; /* Tx data packet sequence counter */
++ atomic_unchecked_t sequence; /* Tx data packet sequence counter */
+ u32 abort_code; /* local/remote abort code */
+ enum { /* current state of call */
+ RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
+@@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
+ */
+ extern atomic_t rxrpc_n_skbs;
+ extern __be32 rxrpc_epoch;
+-extern atomic_t rxrpc_debug_id;
++extern atomic_unchecked_t rxrpc_debug_id;
+ extern struct workqueue_struct *rxrpc_workqueue;
+
+ /*
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rxrpc/ar-local.c linux-3.4-pax/net/rxrpc/ar-local.c
+--- linux-3.4/net/rxrpc/ar-local.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/net/rxrpc/ar-local.c 2012-05-21 12:10:12.308049040 +0200
+@@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
+ spin_lock_init(&local->lock);
+ rwlock_init(&local->services_lock);
+ atomic_set(&local->usage, 1);
+- local->debug_id = atomic_inc_return(&rxrpc_debug_id);
++ local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+ memcpy(&local->srx, srx, sizeof(*srx));
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rxrpc/ar-output.c linux-3.4-pax/net/rxrpc/ar-output.c
+--- linux-3.4/net/rxrpc/ar-output.c 2012-03-19 10:39:14.152049059 +0100
++++ linux-3.4-pax/net/rxrpc/ar-output.c 2012-05-21 12:10:12.308049040 +0200
+@@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb
+ sp->hdr.cid = call->cid;
+ sp->hdr.callNumber = call->call_id;
+ sp->hdr.seq =
+- htonl(atomic_inc_return(&call->sequence));
++ htonl(atomic_inc_return_unchecked(&call->sequence));
+ sp->hdr.serial =
+- htonl(atomic_inc_return(&conn->serial));
++ htonl(atomic_inc_return_unchecked(&conn->serial));
+ sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
+ sp->hdr.userStatus = 0;
+ sp->hdr.securityIndex = conn->security_ix;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rxrpc/ar-peer.c linux-3.4-pax/net/rxrpc/ar-peer.c
+--- linux-3.4/net/rxrpc/ar-peer.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/net/rxrpc/ar-peer.c 2012-05-21 12:10:12.312049040 +0200
+@@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
+ INIT_LIST_HEAD(&peer->error_targets);
+ spin_lock_init(&peer->lock);
+ atomic_set(&peer->usage, 1);
+- peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
++ peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+ memcpy(&peer->srx, srx, sizeof(*srx));
+
+ rxrpc_assess_MTU_size(peer);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rxrpc/ar-proc.c linux-3.4-pax/net/rxrpc/ar-proc.c
+--- linux-3.4/net/rxrpc/ar-proc.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/net/rxrpc/ar-proc.c 2012-05-21 12:10:12.312049040 +0200
+@@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
+ atomic_read(&conn->usage),
+ rxrpc_conn_states[conn->state],
+ key_serial(conn->key),
+- atomic_read(&conn->serial),
+- atomic_read(&conn->hi_serial));
++ atomic_read_unchecked(&conn->serial),
++ atomic_read_unchecked(&conn->hi_serial));
+
+ return 0;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rxrpc/ar-transport.c linux-3.4-pax/net/rxrpc/ar-transport.c
+--- linux-3.4/net/rxrpc/ar-transport.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/net/rxrpc/ar-transport.c 2012-05-21 12:10:12.316049040 +0200
+@@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_all
+ spin_lock_init(&trans->client_lock);
+ rwlock_init(&trans->conn_lock);
+ atomic_set(&trans->usage, 1);
+- trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
++ trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+
+ if (peer->srx.transport.family == AF_INET) {
+ switch (peer->srx.transport_type) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/rxrpc/rxkad.c linux-3.4-pax/net/rxrpc/rxkad.c
+--- linux-3.4/net/rxrpc/rxkad.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/net/rxrpc/rxkad.c 2012-05-21 12:10:12.316049040 +0200
+@@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct
+
+ len = iov[0].iov_len + iov[1].iov_len;
+
+- hdr.serial = htonl(atomic_inc_return(&conn->serial));
++ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
+ _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
+
+ ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
+@@ -660,7 +660,7 @@ static int rxkad_send_response(struct rx
+
+ len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
+
+- hdr->serial = htonl(atomic_inc_return(&conn->serial));
++ hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
+ _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
+
+ ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/sctp/socket.c linux-3.4-pax/net/sctp/socket.c
+--- linux-3.4/net/sctp/socket.c 2012-05-21 11:33:41.923930073 +0200
++++ linux-3.4-pax/net/sctp/socket.c 2012-05-21 12:10:12.324049040 +0200
+@@ -4569,7 +4569,7 @@ static int sctp_getsockopt_peer_addrs(st
+ addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
+ if (space_left < addrlen)
+ return -ENOMEM;
+- if (copy_to_user(to, &temp, addrlen))
++ if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
+ return -EFAULT;
+ to += addrlen;
+ cnt++;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/socket.c linux-3.4-pax/net/socket.c
+--- linux-3.4/net/socket.c 2012-05-21 11:33:41.931930075 +0200
++++ linux-3.4-pax/net/socket.c 2012-05-21 12:10:12.332049041 +0200
+@@ -1966,7 +1966,7 @@ static int __sys_sendmsg(struct socket *
+ * checking falls down on this.
+ */
+ if (copy_from_user(ctl_buf,
+- (void __user __force *)msg_sys->msg_control,
++ (void __force_user *)msg_sys->msg_control,
+ ctl_len))
+ goto out_freectl;
+ msg_sys->msg_control = ctl_buf;
+@@ -2136,7 +2136,7 @@ static int __sys_recvmsg(struct socket *
+ * kernel msghdr to use the kernel address space)
+ */
+
+- uaddr = (__force void __user *)msg_sys->msg_name;
++ uaddr = (void __force_user *)msg_sys->msg_name;
+ uaddr_len = COMPAT_NAMELEN(msg);
+ if (MSG_CMSG_COMPAT & flags) {
+ err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
+@@ -2758,7 +2758,7 @@ static int ethtool_ioctl(struct net *net
+ }
+
+ ifr = compat_alloc_user_space(buf_size);
+- rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
++ rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
+
+ if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
+ return -EFAULT;
+@@ -2782,12 +2782,12 @@ static int ethtool_ioctl(struct net *net
+ offsetof(struct ethtool_rxnfc, fs.ring_cookie));
+
+ if (copy_in_user(rxnfc, compat_rxnfc,
+- (void *)(&rxnfc->fs.m_ext + 1) -
+- (void *)rxnfc) ||
++ (void __user *)(&rxnfc->fs.m_ext + 1) -
++ (void __user *)rxnfc) ||
+ copy_in_user(&rxnfc->fs.ring_cookie,
+ &compat_rxnfc->fs.ring_cookie,
+- (void *)(&rxnfc->fs.location + 1) -
+- (void *)&rxnfc->fs.ring_cookie) ||
++ (void __user *)(&rxnfc->fs.location + 1) -
++ (void __user *)&rxnfc->fs.ring_cookie) ||
+ copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
+ sizeof(rxnfc->rule_cnt)))
+ return -EFAULT;
+@@ -2799,12 +2799,12 @@ static int ethtool_ioctl(struct net *net
+
+ if (convert_out) {
+ if (copy_in_user(compat_rxnfc, rxnfc,
+- (const void *)(&rxnfc->fs.m_ext + 1) -
+- (const void *)rxnfc) ||
++ (const void __user *)(&rxnfc->fs.m_ext + 1) -
++ (const void __user *)rxnfc) ||
+ copy_in_user(&compat_rxnfc->fs.ring_cookie,
+ &rxnfc->fs.ring_cookie,
+- (const void *)(&rxnfc->fs.location + 1) -
+- (const void *)&rxnfc->fs.ring_cookie) ||
++ (const void __user *)(&rxnfc->fs.location + 1) -
++ (const void __user *)&rxnfc->fs.ring_cookie) ||
+ copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
+ sizeof(rxnfc->rule_cnt)))
+ return -EFAULT;
+@@ -2874,7 +2874,7 @@ static int bond_ioctl(struct net *net, u
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = dev_ioctl(net, cmd,
+- (struct ifreq __user __force *) &kifr);
++ (struct ifreq __force_user *) &kifr);
+ set_fs(old_fs);
+
+ return err;
+@@ -2983,7 +2983,7 @@ static int compat_sioc_ifmap(struct net
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+- err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
++ err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
+ set_fs(old_fs);
+
+ if (cmd == SIOCGIFMAP && !err) {
+@@ -3088,7 +3088,7 @@ static int routing_ioctl(struct net *net
+ ret |= __get_user(rtdev, &(ur4->rt_dev));
+ if (rtdev) {
+ ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
+- r4.rt_dev = (char __user __force *)devname;
++ r4.rt_dev = (char __force_user *)devname;
+ devname[15] = 0;
+ } else
+ r4.rt_dev = NULL;
+@@ -3314,8 +3314,8 @@ int kernel_getsockopt(struct socket *soc
+ int __user *uoptlen;
+ int err;
+
+- uoptval = (char __user __force *) optval;
+- uoptlen = (int __user __force *) optlen;
++ uoptval = (char __force_user *) optval;
++ uoptlen = (int __force_user *) optlen;
+
+ set_fs(KERNEL_DS);
+ if (level == SOL_SOCKET)
+@@ -3335,7 +3335,7 @@ int kernel_setsockopt(struct socket *soc
+ char __user *uoptval;
+ int err;
+
+- uoptval = (char __user __force *) optval;
++ uoptval = (char __force_user *) optval;
+
+ set_fs(KERNEL_DS);
+ if (level == SOL_SOCKET)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/sunrpc/sched.c linux-3.4-pax/net/sunrpc/sched.c
+--- linux-3.4/net/sunrpc/sched.c 2012-05-21 11:33:42.019930078 +0200
++++ linux-3.4-pax/net/sunrpc/sched.c 2012-05-21 12:10:12.332049041 +0200
+@@ -240,9 +240,9 @@ static int rpc_wait_bit_killable(void *w
+ #ifdef RPC_DEBUG
+ static void rpc_task_set_debuginfo(struct rpc_task *task)
+ {
+- static atomic_t rpc_pid;
++ static atomic_unchecked_t rpc_pid;
+
+- task->tk_pid = atomic_inc_return(&rpc_pid);
++ task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
+ }
+ #else
+ static inline void rpc_task_set_debuginfo(struct rpc_task *task)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/sunrpc/xprtrdma/svc_rdma.c linux-3.4-pax/net/sunrpc/xprtrdma/svc_rdma.c
+--- linux-3.4/net/sunrpc/xprtrdma/svc_rdma.c 2012-05-21 11:33:42.079930082 +0200
++++ linux-3.4-pax/net/sunrpc/xprtrdma/svc_rdma.c 2012-05-21 12:10:12.336049041 +0200
+@@ -62,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCR
+ static unsigned int min_max_inline = 4096;
+ static unsigned int max_max_inline = 65536;
+
+-atomic_t rdma_stat_recv;
+-atomic_t rdma_stat_read;
+-atomic_t rdma_stat_write;
+-atomic_t rdma_stat_sq_starve;
+-atomic_t rdma_stat_rq_starve;
+-atomic_t rdma_stat_rq_poll;
+-atomic_t rdma_stat_rq_prod;
+-atomic_t rdma_stat_sq_poll;
+-atomic_t rdma_stat_sq_prod;
++atomic_unchecked_t rdma_stat_recv;
++atomic_unchecked_t rdma_stat_read;
++atomic_unchecked_t rdma_stat_write;
++atomic_unchecked_t rdma_stat_sq_starve;
++atomic_unchecked_t rdma_stat_rq_starve;
++atomic_unchecked_t rdma_stat_rq_poll;
++atomic_unchecked_t rdma_stat_rq_prod;
++atomic_unchecked_t rdma_stat_sq_poll;
++atomic_unchecked_t rdma_stat_sq_prod;
+
+ /* Temporary NFS request map and context caches */
+ struct kmem_cache *svc_rdma_map_cachep;
+@@ -110,7 +110,7 @@ static int read_reset_stat(ctl_table *ta
+ len -= *ppos;
+ if (len > *lenp)
+ len = *lenp;
+- if (len && copy_to_user(buffer, str_buf, len))
++ if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
+ return -EFAULT;
+ *lenp = len;
+ *ppos += len;
+@@ -151,63 +151,63 @@ static ctl_table svcrdma_parm_table[] =
+ {
+ .procname = "rdma_stat_read",
+ .data = &rdma_stat_read,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_recv",
+ .data = &rdma_stat_recv,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_write",
+ .data = &rdma_stat_write,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_sq_starve",
+ .data = &rdma_stat_sq_starve,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_rq_starve",
+ .data = &rdma_stat_rq_starve,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_rq_poll",
+ .data = &rdma_stat_rq_poll,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_rq_prod",
+ .data = &rdma_stat_rq_prod,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_sq_poll",
+ .data = &rdma_stat_sq_poll,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_sq_prod",
+ .data = &rdma_stat_sq_prod,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-3.4-pax/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+--- linux-3.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2012-05-21 11:33:42.087930083 +0200
++++ linux-3.4-pax/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2012-05-21 12:10:12.340049041 +0200
+@@ -501,7 +501,7 @@ next_sge:
+ svc_rdma_put_context(ctxt, 0);
+ goto out;
+ }
+- atomic_inc(&rdma_stat_read);
++ atomic_inc_unchecked(&rdma_stat_read);
+
+ if (read_wr.num_sge < chl_map->ch[ch_no].count) {
+ chl_map->ch[ch_no].count -= read_wr.num_sge;
+@@ -611,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
+ dto_q);
+ list_del_init(&ctxt->dto_q);
+ } else {
+- atomic_inc(&rdma_stat_rq_starve);
++ atomic_inc_unchecked(&rdma_stat_rq_starve);
+ clear_bit(XPT_DATA, &xprt->xpt_flags);
+ ctxt = NULL;
+ }
+@@ -631,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
+ dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
+ ctxt, rdma_xprt, rqstp, ctxt->wc_status);
+ BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
+- atomic_inc(&rdma_stat_recv);
++ atomic_inc_unchecked(&rdma_stat_recv);
+
+ /* Build up the XDR from the receive buffers. */
+ rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-3.4-pax/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+--- linux-3.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2012-05-21 11:33:42.087930083 +0200
++++ linux-3.4-pax/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2012-05-21 12:10:12.340049041 +0200
+@@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdm
+ write_wr.wr.rdma.remote_addr = to;
+
+ /* Post It */
+- atomic_inc(&rdma_stat_write);
++ atomic_inc_unchecked(&rdma_stat_write);
+ if (svc_rdma_send(xprt, &write_wr))
+ goto err;
+ return 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-3.4-pax/net/sunrpc/xprtrdma/svc_rdma_transport.c
+--- linux-3.4/net/sunrpc/xprtrdma/svc_rdma_transport.c 2012-05-21 11:33:42.091930083 +0200
++++ linux-3.4-pax/net/sunrpc/xprtrdma/svc_rdma_transport.c 2012-05-21 12:10:12.344049041 +0200
+@@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rd
+ return;
+
+ ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
+- atomic_inc(&rdma_stat_rq_poll);
++ atomic_inc_unchecked(&rdma_stat_rq_poll);
+
+ while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
+ ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
+@@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rd
+ }
+
+ if (ctxt)
+- atomic_inc(&rdma_stat_rq_prod);
++ atomic_inc_unchecked(&rdma_stat_rq_prod);
+
+ set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
+ /*
+@@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rd
+ return;
+
+ ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
+- atomic_inc(&rdma_stat_sq_poll);
++ atomic_inc_unchecked(&rdma_stat_sq_poll);
+ while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
+ if (wc.status != IB_WC_SUCCESS)
+ /* Close the transport */
+@@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rd
+ }
+
+ if (ctxt)
+- atomic_inc(&rdma_stat_sq_prod);
++ atomic_inc_unchecked(&rdma_stat_sq_prod);
+ }
+
+ static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
+@@ -1266,7 +1266,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
+ spin_lock_bh(&xprt->sc_lock);
+ if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
+ spin_unlock_bh(&xprt->sc_lock);
+- atomic_inc(&rdma_stat_sq_starve);
++ atomic_inc_unchecked(&rdma_stat_sq_starve);
+
+ /* See if we can opportunistically reap SQ WR to make room */
+ sq_cq_reap(xprt);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/tipc/link.c linux-3.4-pax/net/tipc/link.c
+--- linux-3.4/net/tipc/link.c 2012-05-21 11:33:42.139930086 +0200
++++ linux-3.4-pax/net/tipc/link.c 2012-05-21 12:10:12.348049042 +0200
+@@ -1203,7 +1203,7 @@ static int link_send_sections_long(struc
+ struct tipc_msg fragm_hdr;
+ struct sk_buff *buf, *buf_chain, *prev;
+ u32 fragm_crs, fragm_rest, hsz, sect_rest;
+- const unchar *sect_crs;
++ const unchar __user *sect_crs;
+ int curr_sect;
+ u32 fragm_no;
+
+@@ -1247,7 +1247,7 @@ again:
+
+ if (!sect_rest) {
+ sect_rest = msg_sect[++curr_sect].iov_len;
+- sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
++ sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
+ }
+
+ if (sect_rest < fragm_rest)
+@@ -1266,7 +1266,7 @@ error:
+ }
+ } else
+ skb_copy_to_linear_data_offset(buf, fragm_crs,
+- sect_crs, sz);
++ (const void __force_kernel *)sect_crs, sz);
+ sect_crs += sz;
+ sect_rest -= sz;
+ fragm_crs += sz;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/tipc/msg.c linux-3.4-pax/net/tipc/msg.c
+--- linux-3.4/net/tipc/msg.c 2012-05-21 11:33:42.151930086 +0200
++++ linux-3.4-pax/net/tipc/msg.c 2012-05-21 12:10:12.352049042 +0200
+@@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr,
+ msg_sect[cnt].iov_len);
+ else
+ skb_copy_to_linear_data_offset(*buf, pos,
+- msg_sect[cnt].iov_base,
++ (const void __force_kernel *)msg_sect[cnt].iov_base,
+ msg_sect[cnt].iov_len);
+ pos += msg_sect[cnt].iov_len;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/tipc/subscr.c linux-3.4-pax/net/tipc/subscr.c
+--- linux-3.4/net/tipc/subscr.c 2012-05-21 11:33:42.183930088 +0200
++++ linux-3.4-pax/net/tipc/subscr.c 2012-05-21 12:10:12.356049042 +0200
+@@ -101,7 +101,7 @@ static void subscr_send_event(struct tip
+ {
+ struct iovec msg_sect;
+
+- msg_sect.iov_base = (void *)&sub->evt;
++ msg_sect.iov_base = (void __force_user *)&sub->evt;
+ msg_sect.iov_len = sizeof(struct tipc_event);
+
+ sub->evt.event = htohl(event, sub->swap);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/wireless/core.h linux-3.4-pax/net/wireless/core.h
+--- linux-3.4/net/wireless/core.h 2012-05-21 11:33:42.203930088 +0200
++++ linux-3.4-pax/net/wireless/core.h 2012-05-21 12:10:12.356049042 +0200
+@@ -27,7 +27,7 @@ struct cfg80211_registered_device {
+ struct mutex mtx;
+
+ /* rfkill support */
+- struct rfkill_ops rfkill_ops;
++ rfkill_ops_no_const rfkill_ops;
+ struct rfkill *rfkill;
+ struct work_struct rfkill_sync;
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/wireless/wext-core.c linux-3.4-pax/net/wireless/wext-core.c
+--- linux-3.4/net/wireless/wext-core.c 2012-05-21 11:33:42.231930091 +0200
++++ linux-3.4-pax/net/wireless/wext-core.c 2012-05-21 12:10:12.360049042 +0200
+@@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struc
+ */
+
+ /* Support for very large requests */
+- if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
+- (user_length > descr->max_tokens)) {
++ if (user_length > descr->max_tokens) {
+ /* Allow userspace to GET more than max so
+ * we can support any size GET requests.
+ * There is still a limit : -ENOMEM.
+@@ -787,22 +786,6 @@ static int ioctl_standard_iw_point(struc
+ }
+ }
+
+- if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
+- /*
+- * If this is a GET, but not NOMAX, it means that the extra
+- * data is not bounded by userspace, but by max_tokens. Thus
+- * set the length to max_tokens. This matches the extra data
+- * allocation.
+- * The driver should fill it with the number of tokens it
+- * provided, and it may check iwp->length rather than having
+- * knowledge of max_tokens. If the driver doesn't change the
+- * iwp->length, this ioctl just copies back max_token tokens
+- * filled with zeroes. Hopefully the driver isn't claiming
+- * them to be valid data.
+- */
+- iwp->length = descr->max_tokens;
+- }
+-
+ err = handler(dev, info, (union iwreq_data *) iwp, extra);
+
+ iwp->length += essid_compat;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/net/xfrm/xfrm_policy.c linux-3.4-pax/net/xfrm/xfrm_policy.c
+--- linux-3.4/net/xfrm/xfrm_policy.c 2012-03-19 10:39:14.288049052 +0100
++++ linux-3.4-pax/net/xfrm/xfrm_policy.c 2012-05-21 12:10:12.364049043 +0200
+@@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm
+ {
+ policy->walk.dead = 1;
+
+- atomic_inc(&policy->genid);
++ atomic_inc_unchecked(&policy->genid);
+
+ if (del_timer(&policy->timer))
+ xfrm_pol_put(policy);
+@@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct x
+ hlist_add_head(&policy->bydst, chain);
+ xfrm_pol_hold(policy);
+ net->xfrm.policy_count[dir]++;
+- atomic_inc(&flow_cache_genid);
++ atomic_inc_unchecked(&flow_cache_genid);
+ if (delpol)
+ __xfrm_policy_unlink(delpol, dir);
+ policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
+@@ -1530,7 +1530,7 @@ free_dst:
+ goto out;
+ }
+
+-static int inline
++static inline int
+ xfrm_dst_alloc_copy(void **target, const void *src, int size)
+ {
+ if (!*target) {
+@@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const
+ return 0;
+ }
+
+-static int inline
++static inline int
+ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
+ {
+ #ifdef CONFIG_XFRM_SUB_POLICY
+@@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry
+ #endif
+ }
+
+-static int inline
++static inline int
+ xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
+ {
+ #ifdef CONFIG_XFRM_SUB_POLICY
+@@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xf
+
+ xdst->num_pols = num_pols;
+ memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
+- xdst->policy_genid = atomic_read(&pols[0]->genid);
++ xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
+
+ return xdst;
+ }
+@@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_ds
+ if (xdst->xfrm_genid != dst->xfrm->genid)
+ return 0;
+ if (xdst->num_pols > 0 &&
+- xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
++ xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
+ return 0;
+
+ mtu = dst_mtu(dst->child);
+@@ -2882,7 +2882,7 @@ static int xfrm_policy_migrate(struct xf
+ sizeof(pol->xfrm_vec[i].saddr));
+ pol->xfrm_vec[i].encap_family = mp->new_family;
+ /* flush bundles */
+- atomic_inc(&pol->genid);
++ atomic_inc_unchecked(&pol->genid);
+ }
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/scripts/basic/fixdep.c linux-3.4-pax/scripts/basic/fixdep.c
+--- linux-3.4/scripts/basic/fixdep.c 2012-01-08 19:48:31.551470741 +0100
++++ linux-3.4-pax/scripts/basic/fixdep.c 2012-05-21 12:10:12.364049043 +0200
+@@ -161,7 +161,7 @@ static unsigned int strhash(const char *
+ /*
+ * Lookup a value in the configuration string.
+ */
+-static int is_defined_config(const char *name, int len, unsigned int hash)
++static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
+ {
+ struct item *aux;
+
+@@ -211,10 +211,10 @@ static void clear_config(void)
+ /*
+ * Record the use of a CONFIG_* word.
+ */
+-static void use_config(const char *m, int slen)
++static void use_config(const char *m, unsigned int slen)
+ {
+ unsigned int hash = strhash(m, slen);
+- int c, i;
++ unsigned int c, i;
+
+ if (is_defined_config(m, slen, hash))
+ return;
+@@ -235,9 +235,9 @@ static void use_config(const char *m, in
+
+ static void parse_config_file(const char *map, size_t len)
+ {
+- const int *end = (const int *) (map + len);
++ const unsigned int *end = (const unsigned int *) (map + len);
+ /* start at +1, so that p can never be < map */
+- const int *m = (const int *) map + 1;
++ const unsigned int *m = (const unsigned int *) map + 1;
+ const char *p, *q;
+
+ for (; m < end; m++) {
+@@ -406,7 +406,7 @@ static void print_deps(void)
+ static void traps(void)
+ {
+ static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
+- int *p = (int *)test;
++ unsigned int *p = (unsigned int *)test;
+
+ if (*p != INT_CONF) {
+ fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/scripts/gcc-plugin.sh linux-3.4-pax/scripts/gcc-plugin.sh
+--- linux-3.4/scripts/gcc-plugin.sh 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.4-pax/scripts/gcc-plugin.sh 2012-05-21 12:10:12.368049043 +0200
+@@ -0,0 +1,2 @@
++#!/bin/sh
++echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/scripts/Makefile.build linux-3.4-pax/scripts/Makefile.build
+--- linux-3.4/scripts/Makefile.build 2012-05-21 11:33:42.259930092 +0200
++++ linux-3.4-pax/scripts/Makefile.build 2012-05-21 12:10:12.368049043 +0200
+@@ -62,7 +62,7 @@ endif
+ ifdef KBUILD_ENABLE_EXTRA_GCC_CHECKS
+ warning- := $(empty)
+
+-warning-1 := -Wextra -Wunused -Wno-unused-parameter
++warning-1 := -Wextra -Wunused -Wno-unused-parameter -Wno-missing-field-initializers
+ warning-1 += -Wmissing-declarations
+ warning-1 += -Wmissing-format-attribute
+ warning-1 += -Wmissing-prototypes
+@@ -111,7 +111,7 @@ endif
+ endif
+
+ # Do not include host rules unless needed
+-ifneq ($(hostprogs-y)$(hostprogs-m),)
++ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
+ include scripts/Makefile.host
+ endif
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/scripts/Makefile.clean linux-3.4-pax/scripts/Makefile.clean
+--- linux-3.4/scripts/Makefile.clean 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/scripts/Makefile.clean 2012-05-21 12:10:12.372049043 +0200
+@@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
+ __clean-files := $(extra-y) $(always) \
+ $(targets) $(clean-files) \
+ $(host-progs) \
+- $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
++ $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
++ $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
+
+ __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/scripts/Makefile.host linux-3.4-pax/scripts/Makefile.host
+--- linux-3.4/scripts/Makefile.host 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/scripts/Makefile.host 2012-05-21 12:10:12.372049043 +0200
+@@ -31,6 +31,7 @@
+ # Note: Shared libraries consisting of C++ files are not supported
+
+ __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
++__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
+
+ # C code
+ # Executables compiled from a single .c file
+@@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
+ # Shared libaries (only .c supported)
+ # Shared libraries (.so) - all .so files referenced in "xxx-objs"
+ host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
++host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
+ # Remove .so files from "xxx-objs"
+ host-cobjs := $(filter-out %.so,$(host-cobjs))
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/scripts/Makefile.lib linux-3.4-pax/scripts/Makefile.lib
+--- linux-3.4/scripts/Makefile.lib 2012-05-21 11:33:42.271930093 +0200
++++ linux-3.4-pax/scripts/Makefile.lib 2012-05-22 15:28:31.591384605 +0200
+@@ -144,14 +144,14 @@ __a_flags = $(c
+ __cpp_flags = $(call flags,_cpp_flags)
+ endif
+
+-c_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
++c_flags = -Wp,-MD,$(depfile) $(LINUXINCLUDE) $(NOSTDINC_FLAGS) \
+ $(__c_flags) $(modkern_cflags) \
+ -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags)
+
+-a_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
++a_flags = -Wp,-MD,$(depfile) $(LINUXINCLUDE) $(NOSTDINC_FLAGS) \
+ $(__a_flags) $(modkern_aflags)
+
+-cpp_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
++cpp_flags = -Wp,-MD,$(depfile) $(LINUXINCLUDE) $(NOSTDINC_FLAGS) \
+ $(__cpp_flags)
+
+ ld_flags = $(LDFLAGS) $(ldflags-y)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/scripts/mod/file2alias.c linux-3.4-pax/scripts/mod/file2alias.c
+--- linux-3.4/scripts/mod/file2alias.c 2012-05-21 11:33:42.343930097 +0200
++++ linux-3.4-pax/scripts/mod/file2alias.c 2012-05-21 12:10:12.376049043 +0200
+@@ -128,7 +128,7 @@ static void device_id_check(const char *
+ unsigned long size, unsigned long id_size,
+ void *symval)
+ {
+- int i;
++ unsigned int i;
+
+ if (size % id_size || size < id_size) {
+ if (cross_build != 0)
+@@ -158,7 +158,7 @@ static void device_id_check(const char *
+ /* USB is special because the bcdDevice can be matched against a numeric range */
+ /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
+ static void do_usb_entry(struct usb_device_id *id,
+- unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
++ unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
+ unsigned char range_lo, unsigned char range_hi,
+ unsigned char max, struct module *mod)
+ {
+@@ -259,7 +259,7 @@ static void do_usb_entry_multi(struct us
+ {
+ unsigned int devlo, devhi;
+ unsigned char chi, clo, max;
+- int ndigits;
++ unsigned int ndigits;
+
+ id->match_flags = TO_NATIVE(id->match_flags);
+ id->idVendor = TO_NATIVE(id->idVendor);
+@@ -501,7 +501,7 @@ static void do_pnp_device_entry(void *sy
+ for (i = 0; i < count; i++) {
+ const char *id = (char *)devs[i].id;
+ char acpi_id[sizeof(devs[0].id)];
+- int j;
++ unsigned int j;
+
+ buf_printf(&mod->dev_table_buf,
+ "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
+@@ -531,7 +531,7 @@ static void do_pnp_card_entries(void *sy
+
+ for (j = 0; j < PNP_MAX_DEVICES; j++) {
+ const char *id = (char *)card->devs[j].id;
+- int i2, j2;
++ unsigned int i2, j2;
+ int dup = 0;
+
+ if (!id[0])
+@@ -557,7 +557,7 @@ static void do_pnp_card_entries(void *sy
+ /* add an individual alias for every device entry */
+ if (!dup) {
+ char acpi_id[sizeof(card->devs[0].id)];
+- int k;
++ unsigned int k;
+
+ buf_printf(&mod->dev_table_buf,
+ "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
+@@ -882,7 +882,7 @@ static void dmi_ascii_filter(char *d, co
+ static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
+ char *alias)
+ {
+- int i, j;
++ unsigned int i, j;
+
+ sprintf(alias, "dmi*");
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/scripts/mod/modpost.c linux-3.4-pax/scripts/mod/modpost.c
+--- linux-3.4/scripts/mod/modpost.c 2012-05-21 11:33:42.347930097 +0200
++++ linux-3.4-pax/scripts/mod/modpost.c 2012-05-21 12:10:12.380049043 +0200
+@@ -922,6 +922,7 @@ enum mismatch {
+ ANY_INIT_TO_ANY_EXIT,
+ ANY_EXIT_TO_ANY_INIT,
+ EXPORT_TO_INIT_EXIT,
++ DATA_TO_TEXT
+ };
+
+ struct sectioncheck {
+@@ -1030,6 +1031,12 @@ const struct sectioncheck sectioncheck[]
+ .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
+ .mismatch = EXPORT_TO_INIT_EXIT,
+ .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
++},
++/* Do not reference code from writable data */
++{
++ .fromsec = { DATA_SECTIONS, NULL },
++ .tosec = { TEXT_SECTIONS, NULL },
++ .mismatch = DATA_TO_TEXT
+ }
+ };
+
+@@ -1152,10 +1159,10 @@ static Elf_Sym *find_elf_symbol(struct e
+ continue;
+ if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
+ continue;
+- if (sym->st_value == addr)
+- return sym;
+ /* Find a symbol nearby - addr are maybe negative */
+ d = sym->st_value - addr;
++ if (d == 0)
++ return sym;
+ if (d < 0)
+ d = addr - sym->st_value;
+ if (d < distance) {
+@@ -1434,6 +1441,14 @@ static void report_sec_mismatch(const ch
+ tosym, prl_to, prl_to, tosym);
+ free(prl_to);
+ break;
++ case DATA_TO_TEXT:
++/*
++ fprintf(stderr,
++ "The variable %s references\n"
++ "the %s %s%s%s\n",
++ fromsym, to, sec2annotation(tosec), tosym, to_p);
++*/
++ break;
+ }
+ fprintf(stderr, "\n");
+ }
+@@ -1668,7 +1683,7 @@ static void section_rel(const char *modn
+ static void check_sec_ref(struct module *mod, const char *modname,
+ struct elf_info *elf)
+ {
+- int i;
++ unsigned int i;
+ Elf_Shdr *sechdrs = elf->sechdrs;
+
+ /* Walk through all sections */
+@@ -1766,7 +1781,7 @@ void __attribute__((format(printf, 2, 3)
+ va_end(ap);
+ }
+
+-void buf_write(struct buffer *buf, const char *s, int len)
++void buf_write(struct buffer *buf, const char *s, unsigned int len)
+ {
+ if (buf->size - buf->pos < len) {
+ buf->size += len + SZ;
+@@ -1984,7 +1999,7 @@ static void write_if_changed(struct buff
+ if (fstat(fileno(file), &st) < 0)
+ goto close_write;
+
+- if (st.st_size != b->pos)
++ if (st.st_size != (off_t)b->pos)
+ goto close_write;
+
+ tmp = NOFAIL(malloc(b->pos));
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/scripts/mod/modpost.h linux-3.4-pax/scripts/mod/modpost.h
+--- linux-3.4/scripts/mod/modpost.h 2012-05-21 11:33:42.347930097 +0200
++++ linux-3.4-pax/scripts/mod/modpost.h 2012-05-21 12:10:12.384049044 +0200
+@@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
+
+ struct buffer {
+ char *p;
+- int pos;
+- int size;
++ unsigned int pos;
++ unsigned int size;
+ };
+
+ void __attribute__((format(printf, 2, 3)))
+ buf_printf(struct buffer *buf, const char *fmt, ...);
+
+ void
+-buf_write(struct buffer *buf, const char *s, int len);
++buf_write(struct buffer *buf, const char *s, unsigned int len);
+
+ struct module {
+ struct module *next;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/scripts/mod/sumversion.c linux-3.4-pax/scripts/mod/sumversion.c
+--- linux-3.4/scripts/mod/sumversion.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/scripts/mod/sumversion.c 2012-05-21 12:10:12.384049044 +0200
+@@ -470,7 +470,7 @@ static void write_version(const char *fi
+ goto out;
+ }
+
+- if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
++ if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
+ warn("writing sum in %s failed: %s\n",
+ filename, strerror(errno));
+ goto out;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/scripts/pnmtologo.c linux-3.4-pax/scripts/pnmtologo.c
+--- linux-3.4/scripts/pnmtologo.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/scripts/pnmtologo.c 2012-05-21 12:10:12.388049044 +0200
+@@ -237,14 +237,14 @@ static void write_header(void)
+ fprintf(out, " * Linux logo %s\n", logoname);
+ fputs(" */\n\n", out);
+ fputs("#include <linux/linux_logo.h>\n\n", out);
+- fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
++ fprintf(out, "static unsigned char %s_data[] = {\n",
+ logoname);
+ }
+
+ static void write_footer(void)
+ {
+ fputs("\n};\n\n", out);
+- fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
++ fprintf(out, "const struct linux_logo %s = {\n", logoname);
+ fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
+ fprintf(out, "\t.width\t\t= %d,\n", logo_width);
+ fprintf(out, "\t.height\t\t= %d,\n", logo_height);
+@@ -374,7 +374,7 @@ static void write_logo_clut224(void)
+ fputs("\n};\n\n", out);
+
+ /* write logo clut */
+- fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
++ fprintf(out, "static unsigned char %s_clut[] = {\n",
+ logoname);
+ write_hex_cnt = 0;
+ for (i = 0; i < logo_clutsize; i++) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/security/integrity/ima/ima_api.c linux-3.4-pax/security/integrity/ima/ima_api.c
+--- linux-3.4/security/integrity/ima/ima_api.c 2012-03-19 10:39:14.584049035 +0100
++++ linux-3.4-pax/security/integrity/ima/ima_api.c 2012-05-21 12:10:12.388049044 +0200
+@@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
+ int result;
+
+ /* can overflow, only indicator */
+- atomic_long_inc(&ima_htable.violations);
++ atomic_long_inc_unchecked(&ima_htable.violations);
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/security/integrity/ima/ima_fs.c linux-3.4-pax/security/integrity/ima/ima_fs.c
+--- linux-3.4/security/integrity/ima/ima_fs.c 2012-01-08 19:48:31.727470731 +0100
++++ linux-3.4-pax/security/integrity/ima/ima_fs.c 2012-05-21 12:10:12.392049044 +0200
+@@ -28,12 +28,12 @@
+ static int valid_policy = 1;
+ #define TMPBUFLEN 12
+ static ssize_t ima_show_htable_value(char __user *buf, size_t count,
+- loff_t *ppos, atomic_long_t *val)
++ loff_t *ppos, atomic_long_unchecked_t *val)
+ {
+ char tmpbuf[TMPBUFLEN];
+ ssize_t len;
+
+- len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
++ len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
+ return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
+ }
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/security/integrity/ima/ima.h linux-3.4-pax/security/integrity/ima/ima.h
+--- linux-3.4/security/integrity/ima/ima.h 2012-01-08 19:48:31.703470733 +0100
++++ linux-3.4-pax/security/integrity/ima/ima.h 2012-05-21 12:10:12.392049044 +0200
+@@ -86,8 +86,8 @@ void ima_add_violation(struct inode *ino
+ extern spinlock_t ima_queue_lock;
+
+ struct ima_h_table {
+- atomic_long_t len; /* number of stored measurements in the list */
+- atomic_long_t violations;
++ atomic_long_unchecked_t len; /* number of stored measurements in the list */
++ atomic_long_unchecked_t violations;
+ struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
+ };
+ extern struct ima_h_table ima_htable;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/security/integrity/ima/ima_queue.c linux-3.4-pax/security/integrity/ima/ima_queue.c
+--- linux-3.4/security/integrity/ima/ima_queue.c 2012-03-19 10:39:14.584049035 +0100
++++ linux-3.4-pax/security/integrity/ima/ima_queue.c 2012-05-21 12:10:12.396049044 +0200
+@@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct i
+ INIT_LIST_HEAD(&qe->later);
+ list_add_tail_rcu(&qe->later, &ima_measurements);
+
+- atomic_long_inc(&ima_htable.len);
++ atomic_long_inc_unchecked(&ima_htable.len);
+ key = ima_hash_key(entry->digest);
+ hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
+ return 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/security/Kconfig linux-3.4-pax/security/Kconfig
+--- linux-3.4/security/Kconfig 2012-05-21 11:33:42.379930099 +0200
++++ linux-3.4-pax/security/Kconfig 2012-05-21 12:10:12.396049044 +0200
+@@ -4,6 +4,636 @@
+
+ menu "Security options"
+
++menu "PaX"
++
++ config ARCH_TRACK_EXEC_LIMIT
++ bool
++
++ config PAX_KERNEXEC_PLUGIN
++ bool
++
++ config PAX_PER_CPU_PGD
++ bool
++
++ config TASK_SIZE_MAX_SHIFT
++ int
++ depends on X86_64
++ default 47 if !PAX_PER_CPU_PGD
++ default 42 if PAX_PER_CPU_PGD
++
++config PAX
++ bool "Enable various PaX features"
++ depends on ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86
++ help
++ This allows you to enable various PaX features. PaX adds
++ intrusion prevention mechanisms to the kernel that reduce
++ the risks posed by exploitable memory corruption bugs.
++
++menu "PaX Control"
++ depends on PAX
++
++config PAX_SOFTMODE
++ bool 'Support soft mode'
++ help
++ Enabling this option will allow you to run PaX in soft mode, that
++ is, PaX features will not be enforced by default, only on executables
++ marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
++ support as they are the only way to mark executables for soft mode use.
++
++ Soft mode can be activated by using the "pax_softmode=1" kernel command
++ line option on boot. Furthermore you can control various PaX features
++ at runtime via the entries in /proc/sys/kernel/pax.
++
++config PAX_EI_PAX
++ bool 'Use legacy ELF header marking'
++ help
++ Enabling this option will allow you to control PaX features on
++ a per executable basis via the 'chpax' utility available at
++ http://pax.grsecurity.net/. The control flags will be read from
++ an otherwise reserved part of the ELF header. This marking has
++ numerous drawbacks (no support for soft-mode, toolchain does not
++ know about the non-standard use of the ELF header) therefore it
++ has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
++ support.
++
++ If you have applications not marked by the PT_PAX_FLAGS ELF program
++ header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
++ option otherwise they will not get any protection.
++
++ Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
++ support as well, they will override the legacy EI_PAX marks.
++
++config PAX_PT_PAX_FLAGS
++ bool 'Use ELF program header marking'
++ help
++ Enabling this option will allow you to control PaX features on
++ a per executable basis via the 'paxctl' utility available at
++ http://pax.grsecurity.net/. The control flags will be read from
++ a PaX specific ELF program header (PT_PAX_FLAGS). This marking
++ has the benefits of supporting both soft mode and being fully
++ integrated into the toolchain (the binutils patch is available
++ from http://pax.grsecurity.net).
++
++ If you have applications not marked by the PT_PAX_FLAGS ELF program
++ header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
++ support otherwise they will not get any protection.
++
++ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
++ must make sure that the marks are the same if a binary has both marks.
++
++ Note that if you enable the legacy EI_PAX marking support as well,
++ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
++
++config PAX_XATTR_PAX_FLAGS
++ bool 'Use filesystem extended attributes marking'
++ select CIFS_XATTR if CIFS
++ select EXT2_FS_XATTR if EXT2_FS
++ select EXT3_FS_XATTR if EXT3_FS
++ select EXT4_FS_XATTR if EXT4_FS
++ select JFFS2_FS_XATTR if JFFS2_FS
++ select REISERFS_FS_XATTR if REISERFS_FS
++ select SQUASHFS_XATTR if SQUASHFS
++ select TMPFS_XATTR if TMPFS
++ select UBIFS_FS_XATTR if UBIFS_FS
++ help
++ Enabling this option will allow you to control PaX features on
++ a per executable basis via the 'setfattr' utility. The control
++ flags will be read from the user.pax.flags extended attribute of
++ the file. This marking has the benefit of supporting binary-only
++ applications that self-check themselves (e.g., skype) and would
++ not tolerate chpax/paxctl changes. The main drawback is that
++ extended attributes are not supported by some filesystems (e.g.,
++ isofs, udf, vfat) so copying files through such filesystems will
++ lose the extended attributes and these PaX markings.
++
++ If you have applications not marked by the PT_PAX_FLAGS ELF program
++ header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
++ support otherwise they will not get any protection.
++
++ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
++ must make sure that the marks are the same if a binary has both marks.
++
++ Note that if you enable the legacy EI_PAX marking support as well,
++ the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
++
++choice
++ prompt 'MAC system integration'
++ default PAX_NO_ACL_FLAGS
++ help
++ Mandatory Access Control systems have the option of controlling
++ PaX flags on a per executable basis, choose the method supported
++ by your particular system.
++
++ - "none": if your MAC system does not interact with PaX,
++ - "direct": if your MAC system defines pax_set_initial_flags() itself,
++ - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
++
++ NOTE: this option is for developers/integrators only.
++
++ config PAX_NO_ACL_FLAGS
++ bool 'none'
++
++ config PAX_HAVE_ACL_FLAGS
++ bool 'direct'
++
++ config PAX_HOOK_ACL_FLAGS
++ bool 'hook'
++endchoice
++
++endmenu
++
++menu "Non-executable pages"
++ depends on PAX
++
++config PAX_NOEXEC
++ bool "Enforce non-executable pages"
++ depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
++ help
++ By design some architectures do not allow for protecting memory
++ pages against execution or even if they do, Linux does not make
++ use of this feature. In practice this means that if a page is
++ readable (such as the stack or heap) it is also executable.
++
++ There is a well known exploit technique that makes use of this
++ fact and a common programming mistake where an attacker can
++ introduce code of his choice somewhere in the attacked program's
++ memory (typically the stack or the heap) and then execute it.
++
++ If the attacked program was running with different (typically
++ higher) privileges than that of the attacker, then he can elevate
++ his own privilege level (e.g. get a root shell, write to files to
++ which he does not have write access, etc).
++
++ Enabling this option will let you choose from various features
++ that prevent the injection and execution of 'foreign' code in
++ a program.
++
++ This will also break programs that rely on the old behaviour and
++ expect that dynamically allocated memory via the malloc() family
++ of functions is executable (which it is not). Notable examples
++ are the XFree86 4.x server, the java runtime and wine.
++
++config PAX_PAGEEXEC
++ bool "Paging based non-executable pages"
++ depends on !COMPAT_VDSO && PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4 || MPSC || MATOM || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
++ select X86_PAE if X86_32 && !HIGHMEM4G && (MCORE2 || MPSC || MATOM || MK8)
++ select ARCH_TRACK_EXEC_LIMIT if X86_32
++ select S390_SWITCH_AMODE if S390
++ select S390_EXEC_PROTECT if S390
++ help
++ This implementation is based on the paging feature of the CPU.
++ On i386 without hardware non-executable bit support there is a
++ variable but usually low performance impact, however on Intel's
++ P4 core based CPUs it is very high so you should not enable this
++ for kernels meant to be used on such CPUs.
++
++ On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
++ with hardware non-executable bit support there is no performance
++ impact, on ppc the impact is negligible.
++
++ Note that several architectures require various emulations due to
++ badly designed userland ABIs, this will cause a performance impact
++ but will disappear as soon as userland is fixed. For example, ppc
++ userland MUST have been built with secure-plt by a recent toolchain.
++
++config PAX_SEGMEXEC
++ bool "Segmentation based non-executable pages"
++ depends on !COMPAT_VDSO && PAX_NOEXEC && X86_32
++ help
++ This implementation is based on the segmentation feature of the
++ CPU and has a very small performance impact, however applications
++ will be limited to a 1.5 GB address space instead of the normal
++ 3 GB.
++
++config PAX_EMUTRAMP
++ bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
++ default y if PARISC
++ help
++ There are some programs and libraries that for one reason or
++ another attempt to execute special small code snippets from
++ non-executable memory pages. Most notable examples are the
++ signal handler return code generated by the kernel itself and
++ the GCC trampolines.
++
++ If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
++ such programs will no longer work under your kernel.
++
++ As a remedy you can say Y here and use the 'chpax' or 'paxctl'
++ utilities to enable trampoline emulation for the affected programs
++ yet still have the protection provided by the non-executable pages.
++
++ On parisc you MUST enable this option and EMUSIGRT as well, otherwise
++ your system will not even boot.
++
++ Alternatively you can say N here and use the 'chpax' or 'paxctl'
++ utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
++ for the affected files.
++
++ NOTE: enabling this feature *may* open up a loophole in the
++ protection provided by non-executable pages that an attacker
++ could abuse. Therefore the best solution is to not have any
++ files on your system that would require this option. This can
++ be achieved by not using libc5 (which relies on the kernel
++ signal handler return code) and not using or rewriting programs
++ that make use of the nested function implementation of GCC.
++ Skilled users can just fix GCC itself so that it implements
++ nested function calls in a way that does not interfere with PaX.
++
++config PAX_EMUSIGRT
++ bool "Automatically emulate sigreturn trampolines"
++ depends on PAX_EMUTRAMP && PARISC
++ default y
++ help
++ Enabling this option will have the kernel automatically detect
++ and emulate signal return trampolines executing on the stack
++ that would otherwise lead to task termination.
++
++ This solution is intended as a temporary one for users with
++ legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
++ Modula-3 runtime, etc) or executables linked to such, basically
++ everything that does not specify its own SA_RESTORER function in
++ normal executable memory like glibc 2.1+ does.
++
++ On parisc you MUST enable this option, otherwise your system will
++ not even boot.
++
++ NOTE: this feature cannot be disabled on a per executable basis
++ and since it *does* open up a loophole in the protection provided
++ by non-executable pages, the best solution is to not have any
++ files on your system that would require this option.
++
++config PAX_MPROTECT
++ bool "Restrict mprotect()"
++ depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
++ help
++ Enabling this option will prevent programs from
++ - changing the executable status of memory pages that were
++ not originally created as executable,
++ - making read-only executable pages writable again,
++ - creating executable pages from anonymous memory,
++ - making read-only-after-relocations (RELRO) data pages writable again.
++
++ You should say Y here to complete the protection provided by
++ the enforcement of non-executable pages.
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control
++ this feature on a per file basis.
++
++config PAX_ELFRELOCS
++ bool "Allow ELF text relocations"
++ depends on PAX_MPROTECT
++ default n
++ help
++ Non-executable pages and mprotect() restrictions are effective
++ in preventing the introduction of new executable code into an
++ attacked task's address space. There remain only two venues
++ for this kind of attack: if the attacker can execute already
++ existing code in the attacked task then he can either have it
++ create and mmap() a file containing his code or have it mmap()
++ an already existing ELF library that does not have position
++ independent code in it and use mprotect() on it to make it
++ writable and copy his code there. While protecting against
++ the former approach is beyond PaX, the latter can be prevented
++ by having only PIC ELF libraries on one's system (which do not
++ need to relocate their code). If you are sure this is your case,
++ then disable this option otherwise be careful as you may not even
++ be able to boot or log on your system (for example, some PAM
++ modules are erroneously compiled as non-PIC by default).
++
++ NOTE: if you are using dynamic ELF executables (as suggested
++ when using ASLR) then you must have made sure that you linked
++ your files using the PIC version of crt1 (the et_dyn.tar.gz package
++ referenced there has already been updated to support this).
++
++config PAX_ETEXECRELOCS
++ bool "Allow ELF ET_EXEC text relocations"
++ depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
++ select PAX_ELFRELOCS
++ default y
++ help
++ On some architectures there are incorrectly created applications
++ that require text relocations and would not work without enabling
++ this option. If you are an alpha, ia64 or parisc user, you should
++ enable this option and disable it once you have made sure that
++ none of your applications need it.
++
++config PAX_EMUPLT
++ bool "Automatically emulate ELF PLT"
++ depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
++ default y
++ help
++ Enabling this option will have the kernel automatically detect
++ and emulate the Procedure Linkage Table entries in ELF files.
++ On some architectures such entries are in writable memory, and
++ become non-executable leading to task termination. Therefore
++ it is mandatory that you enable this option on alpha, parisc,
++ sparc and sparc64, otherwise your system would not even boot.
++
++ NOTE: this feature *does* open up a loophole in the protection
++ provided by the non-executable pages, therefore the proper
++ solution is to modify the toolchain to produce a PLT that does
++ not need to be writable.
++
++config PAX_DLRESOLVE
++ bool 'Emulate old glibc resolver stub'
++ depends on PAX_EMUPLT && SPARC
++ default n
++ help
++ This option is needed if userland has an old glibc (before 2.4)
++ that puts a 'save' instruction into the runtime generated resolver
++ stub that needs special emulation.
++
++config PAX_KERNEXEC
++ bool "Enforce non-executable kernel pages"
++ depends on (PPC || X86) && !COMPAT_VDSO && !XEN && (!X86_32 || X86_WP_WORKS_OK)
++ select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
++ select PAX_KERNEXEC_PLUGIN if X86_64
++ help
++ This is the kernel land equivalent of PAGEEXEC and MPROTECT,
++ that is, enabling this option will make it harder to inject
++ and execute 'foreign' code in kernel memory itself.
++
++ Note that on x86_64 kernels there is a known regression when
++ this feature and KVM/VMX are both enabled in the host kernel.
++
++choice
++ prompt "Return Address Instrumentation Method"
++ default PAX_KERNEXEC_PLUGIN_METHOD_BTS
++ depends on PAX_KERNEXEC_PLUGIN
++ help
++ Select the method used to instrument function pointer dereferences.
++ Note that binary modules cannot be instrumented by this approach.
++
++ config PAX_KERNEXEC_PLUGIN_METHOD_BTS
++ bool "bts"
++ help
++ This method is compatible with binary only modules but has
++ a higher runtime overhead.
++
++ config PAX_KERNEXEC_PLUGIN_METHOD_OR
++ bool "or"
++ depends on !PARAVIRT
++ help
++ This method is incompatible with binary only modules but has
++ a lower runtime overhead.
++endchoice
++
++config PAX_KERNEXEC_PLUGIN_METHOD
++ string
++ default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
++ default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
++ default ""
++
++config PAX_KERNEXEC_MODULE_TEXT
++ int "Minimum amount of memory reserved for module code"
++ default "4"
++ depends on PAX_KERNEXEC && X86_32 && MODULES
++ help
++ Due to implementation details the kernel must reserve a fixed
++ amount of memory for module code at compile time that cannot be
++ changed at runtime. Here you can specify the minimum amount
++ in MB that will be reserved. Due to the same implementation
++ details this size will always be rounded up to the next 2/4 MB
++ boundary (depends on PAE) so the actually available memory for
++ module code will usually be more than this minimum.
++
++ The default 4 MB should be enough for most users but if you have
++ an excessive number of modules (e.g., most distribution configs
++ compile many drivers as modules) or use huge modules such as
++ nvidia's kernel driver, you will need to adjust this amount.
++ A good rule of thumb is to look at your currently loaded kernel
++ modules and add up their sizes.
++
++endmenu
++
++menu "Address Space Layout Randomization"
++ depends on PAX
++
++config PAX_ASLR
++ bool "Address Space Layout Randomization"
++ help
++ Many if not most exploit techniques rely on the knowledge of
++ certain addresses in the attacked program. The following options
++ will allow the kernel to apply a certain amount of randomization
++ to specific parts of the program thereby forcing an attacker to
++ guess them in most cases. Any failed guess will most likely crash
++ the attacked program which allows the kernel to detect such attempts
++ and react on them. PaX itself provides no reaction mechanisms,
++ instead it is strongly encouraged that you make use of Nergal's
++ segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
++ (http://www.grsecurity.net/) built-in crash detection features or
++ develop one yourself.
++
++ By saying Y here you can choose to randomize the following areas:
++ - top of the task's kernel stack
++ - top of the task's userland stack
++ - base address for mmap() requests that do not specify one
++ (this includes all libraries)
++ - base address of the main executable
++
++ It is strongly recommended to say Y here as address space layout
++ randomization has negligible impact on performance yet it provides
++ a very effective protection.
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control
++ this feature on a per file basis.
++
++config PAX_RANDKSTACK
++ bool "Randomize kernel stack base"
++ depends on X86_TSC && X86
++ help
++ By saying Y here the kernel will randomize every task's kernel
++ stack on every system call. This will not only force an attacker
++ to guess it but also prevent him from making use of possible
++ leaked information about it.
++
++ Since the kernel stack is a rather scarce resource, randomization
++ may cause unexpected stack overflows, therefore you should very
++ carefully test your system. Note that once enabled in the kernel
++ configuration, this feature cannot be disabled on a per file basis.
++
++config PAX_RANDUSTACK
++ bool "Randomize user stack base"
++ depends on PAX_ASLR
++ help
++ By saying Y here the kernel will randomize every task's userland
++ stack. The randomization is done in two steps where the second
++ one may apply a big amount of shift to the top of the stack and
++ cause problems for programs that want to use lots of memory (more
++ than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
++ For this reason the second step can be controlled by 'chpax' or
++ 'paxctl' on a per file basis.
++
++config PAX_RANDMMAP
++ bool "Randomize mmap() base"
++ depends on PAX_ASLR
++ help
++ By saying Y here the kernel will use a randomized base address for
++ mmap() requests that do not specify one themselves. As a result
++ all dynamically loaded libraries will appear at random addresses
++ and therefore be harder to exploit by a technique where an attacker
++ attempts to execute library code for his purposes (e.g. spawn a
++ shell from an exploited program that is running at an elevated
++ privilege level).
++
++ Furthermore, if a program is relinked as a dynamic ELF file, its
++ base address will be randomized as well, completing the full
++ randomization of the address space layout. Attacking such programs
++ becomes a guess game. You can find an example of doing this at
++ http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
++ http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
++ feature on a per file basis.
++
++endmenu
++
++menu "Miscellaneous hardening features"
++
++config PAX_MEMORY_SANITIZE
++ bool "Sanitize all freed memory"
++ depends on !HIBERNATION
++ help
++ By saying Y here the kernel will erase memory pages as soon as they
++ are freed. This in turn reduces the lifetime of data stored in the
++ pages, making it less likely that sensitive information such as
++ passwords, cryptographic secrets, etc stay in memory for too long.
++
++ This is especially useful for programs whose runtime is short. Long
++ lived processes and the kernel itself benefit from this as long as
++ they operate on whole memory pages and ensure timely freeing of pages
++ that may hold sensitive information.
++
++ The tradeoff is performance impact, on a single CPU system kernel
++ compilation sees a 3% slowdown, other systems and workloads may vary
++ and you are advised to test this feature on your expected workload
++ before deploying it.
++
++ Note that this feature does not protect data stored in live pages,
++ e.g., process memory swapped to disk may stay there for a long time.
++
++config PAX_MEMORY_STACKLEAK
++ bool "Sanitize kernel stack"
++ depends on X86
++ help
++ By saying Y here the kernel will erase the kernel stack before it
++ returns from a system call. This in turn reduces the information
++ that a kernel stack leak bug can reveal.
++
++ Note that such a bug can still leak information that was put on
++ the stack by the current system call (the one eventually triggering
++ the bug) but traces of earlier system calls on the kernel stack
++ cannot leak anymore.
++
++ The tradeoff is performance impact, on a single CPU system kernel
++ compilation sees a 1% slowdown, other systems and workloads may vary
++ and you are advised to test this feature on your expected workload
++ before deploying it.
++
++ Note: full support for this feature requires gcc with plugin support
++ so make sure your compiler is at least gcc 4.5.0. Using older gcc
++ versions means that functions with large enough stack frames may
++ leave uninitialized memory behind that may be exposed to a later
++ syscall leaking the stack.
++
++config PAX_MEMORY_UDEREF
++ bool "Prevent invalid userland pointer dereference"
++ depends on X86 && !COMPAT_VDSO && !UML_X86 && !XEN
++ select PAX_PER_CPU_PGD if X86_64
++ help
++ By saying Y here the kernel will be prevented from dereferencing
++ userland pointers in contexts where the kernel expects only kernel
++ pointers. This is both a useful runtime debugging feature and a
++ security measure that prevents exploiting a class of kernel bugs.
++
++ The tradeoff is that some virtualization solutions may experience
++ a huge slowdown and therefore you should not enable this feature
++ for kernels meant to run in such environments. Whether a given VM
++ solution is affected or not is best determined by simply trying it
++ out, the performance impact will be obvious right on boot as this
++ mechanism engages from very early on. A good rule of thumb is that
++ VMs running on CPUs without hardware virtualization support (i.e.,
++ the majority of IA-32 CPUs) will likely experience the slowdown.
++
++config PAX_REFCOUNT
++ bool "Prevent various kernel object reference counter overflows"
++ depends on (ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86
++ help
++ By saying Y here the kernel will detect and prevent overflowing
++ various (but not all) kinds of object reference counters. Such
++ overflows can normally occur due to bugs only and are often, if
++ not always, exploitable.
++
++ The tradeoff is that data structures protected by an overflowed
++ refcount will never be freed and therefore will leak memory. Note
++ that this leak also happens even without this protection but in
++ that case the overflow can eventually trigger the freeing of the
++ data structure while it is still being used elsewhere, resulting
++ in the exploitable situation that this feature prevents.
++
++ Since this has a negligible performance impact, you should enable
++ this feature.
++
++config PAX_USERCOPY
++ bool "Harden heap object copies between kernel and userland"
++ depends on ARM || PPC || SPARC || X86
++ depends on SLAB || SLUB || SLOB
++ help
++ By saying Y here the kernel will enforce the size of heap objects
++ when they are copied in either direction between the kernel and
++ userland, even if only a part of the heap object is copied.
++
++ Specifically, this checking prevents information leaking from the
++ kernel heap during kernel to userland copies (if the kernel heap
++ object is otherwise fully initialized) and prevents kernel heap
++ overflows during userland to kernel copies. Only objects belonging
++ to explicitly marked slub types are allowed to be copied at all.
++
++ Note that the current implementation provides the strictest checks
++ for the SLUB allocator.
++
++ If frame pointers are enabled on x86, this option will also restrict
++ copies into and out of the kernel stack to local variables within a
++ single frame.
++
++ Since this has a negligible performance impact, you should enable
++ this feature.
++
++config PAX_CONSTIFY_PLUGIN
++ bool "Automatically constify eligible structures"
++ depends on !UML
++ help
++ By saying Y here the compiler will automatically constify a class
++ of types that contain only function pointers. This reduces the
++ kernel's attack surface and also produces a better memory layout.
++
++ Note that the implementation requires a gcc with plugin support,
++ i.e., gcc 4.5 or newer. You may need to install the supporting
++ headers explicitly in addition to the normal gcc package.
++
++ Note that if some code really has to modify constified variables
++ then the source code will have to be patched to allow it. Examples
++ can be found in PaX itself (the no_const attribute) and for some
++ out-of-tree modules at http://www.grsecurity.net/~paxguy1/ .
++
++config PAX_SIZE_OVERFLOW
++ bool "Prevent various integer overflows in function size parameters"
++ depends on X86
++ help
++ By saying Y here the kernel recomputes expressions of function
++ arguments marked by a size_overflow attribute with double integer
++ precision (DImode/TImode for 32/64 bit integer types).
++
++ The recomputed argument is checked against INT_MAX and an event
++ is logged on overflow and the triggering process is killed.
++
++ Homepage:
++ http://www.grsecurity.net/~ephox/overflow_plugin/
++
++endmenu
++
++endmenu
++
+ config KEYS
+ bool "Enable access key retention support"
+ help
+@@ -169,7 +799,7 @@ config INTEL_TXT
+ config LSM_MMAP_MIN_ADDR
+ int "Low address space for LSM to protect from user allocation"
+ depends on SECURITY && SECURITY_SELINUX
+- default 32768 if ARM
++ default 32768 if ALPHA || ARM || PARISC || SPARC32
+ default 65536
+ help
+ This is the portion of low virtual memory which should be protected
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/security/keys/compat.c linux-3.4-pax/security/keys/compat.c
+--- linux-3.4/security/keys/compat.c 2012-01-08 19:48:31.735470731 +0100
++++ linux-3.4-pax/security/keys/compat.c 2012-05-21 12:10:12.400049045 +0200
+@@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
+ if (ret == 0)
+ goto no_payload_free;
+
+- ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
++ ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
+
+ if (iov != iovstack)
+ kfree(iov);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/security/keys/keyctl.c linux-3.4-pax/security/keys/keyctl.c
+--- linux-3.4/security/keys/keyctl.c 2012-05-21 11:33:42.531930107 +0200
++++ linux-3.4-pax/security/keys/keyctl.c 2012-05-21 12:10:12.404049045 +0200
+@@ -935,7 +935,7 @@ static int keyctl_change_reqkey_auth(str
+ /*
+ * Copy the iovec data from userspace
+ */
+-static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
++static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
+ unsigned ioc)
+ {
+ for (; ioc > 0; ioc--) {
+@@ -957,7 +957,7 @@ static long copy_from_user_iovec(void *b
+ * If successful, 0 will be returned.
+ */
+ long keyctl_instantiate_key_common(key_serial_t id,
+- const struct iovec *payload_iov,
++ const struct iovec __user *payload_iov,
+ unsigned ioc,
+ size_t plen,
+ key_serial_t ringid)
+@@ -1052,7 +1052,7 @@ long keyctl_instantiate_key(key_serial_t
+ [0].iov_len = plen
+ };
+
+- return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
++ return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
+ }
+
+ return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
+@@ -1085,7 +1085,7 @@ long keyctl_instantiate_key_iov(key_seri
+ if (ret == 0)
+ goto no_payload_free;
+
+- ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
++ ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
+
+ if (iov != iovstack)
+ kfree(iov);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/security/keys/keyring.c linux-3.4-pax/security/keys/keyring.c
+--- linux-3.4/security/keys/keyring.c 2012-03-19 10:39:14.592049034 +0100
++++ linux-3.4-pax/security/keys/keyring.c 2012-05-21 12:10:12.404049045 +0200
+@@ -214,15 +214,15 @@ static long keyring_read(const struct ke
+ ret = -EFAULT;
+
+ for (loop = 0; loop < klist->nkeys; loop++) {
++ key_serial_t serial;
+ key = klist->keys[loop];
++ serial = key->serial;
+
+ tmp = sizeof(key_serial_t);
+ if (tmp > buflen)
+ tmp = buflen;
+
+- if (copy_to_user(buffer,
+- &key->serial,
+- tmp) != 0)
++ if (copy_to_user(buffer, &serial, tmp))
+ goto error;
+
+ buflen -= tmp;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/security/security.c linux-3.4-pax/security/security.c
+--- linux-3.4/security/security.c 2012-05-21 11:33:42.543930108 +0200
++++ linux-3.4-pax/security/security.c 2012-05-30 02:32:53.894987973 +0200
+@@ -20,6 +20,7 @@
+ #include <linux/ima.h>
+ #include <linux/evm.h>
+ #include <linux/fsnotify.h>
++#include <linux/mm.h>
+ #include <net/flow.h>
+
+ #define MAX_LSM_EVM_XATTR 2
+@@ -28,8 +29,8 @@
+ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
+ CONFIG_DEFAULT_SECURITY;
+
+-static struct security_operations *security_ops;
+-static struct security_operations default_security_ops = {
++static struct security_operations *security_ops __read_only;
++static struct security_operations default_security_ops __read_only = {
+ .name = "default",
+ };
+
+@@ -70,7 +71,9 @@ int __init security_init(void)
+
+ void reset_security_ops(void)
+ {
++ pax_open_kernel();
+ security_ops = &default_security_ops;
++ pax_close_kernel();
+ }
+
+ /* Save user chosen LSM */
+@@ -123,7 +126,9 @@ int __init register_security(struct secu
+ if (security_ops != &default_security_ops)
+ return -EAGAIN;
+
++ pax_open_kernel();
+ security_ops = ops;
++ pax_close_kernel();
+
+ return 0;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/security/selinux/hooks.c linux-3.4-pax/security/selinux/hooks.c
+--- linux-3.4/security/selinux/hooks.c 2012-05-21 11:33:42.551930108 +0200
++++ linux-3.4-pax/security/selinux/hooks.c 2012-05-21 12:10:12.412049045 +0200
+@@ -5520,7 +5520,7 @@ static int selinux_key_getsecurity(struc
+
+ #endif
+
+-static struct security_operations selinux_ops = {
++static struct security_operations selinux_ops __read_only = {
+ .name = "selinux",
+
+ .ptrace_access_check = selinux_ptrace_access_check,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/security/selinux/include/xfrm.h linux-3.4-pax/security/selinux/include/xfrm.h
+--- linux-3.4/security/selinux/include/xfrm.h 2012-05-21 11:33:42.583930110 +0200
++++ linux-3.4-pax/security/selinux/include/xfrm.h 2012-05-21 12:10:12.416049045 +0200
+@@ -50,7 +50,7 @@ int selinux_xfrm_decode_session(struct s
+
+ static inline void selinux_xfrm_notify_policyload(void)
+ {
+- atomic_inc(&flow_cache_genid);
++ atomic_inc_unchecked(&flow_cache_genid);
+ }
+ #else
+ static inline int selinux_xfrm_enabled(void)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/security/smack/smack_lsm.c linux-3.4-pax/security/smack/smack_lsm.c
+--- linux-3.4/security/smack/smack_lsm.c 2012-05-21 11:33:42.595930110 +0200
++++ linux-3.4-pax/security/smack/smack_lsm.c 2012-05-21 12:10:12.420049046 +0200
+@@ -3500,7 +3500,7 @@ static int smack_inode_getsecctx(struct
+ return 0;
+ }
+
+-struct security_operations smack_ops = {
++struct security_operations smack_ops __read_only = {
+ .name = "smack",
+
+ .ptrace_access_check = smack_ptrace_access_check,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/security/tomoyo/tomoyo.c linux-3.4-pax/security/tomoyo/tomoyo.c
+--- linux-3.4/security/tomoyo/tomoyo.c 2012-03-19 10:39:14.612049034 +0100
++++ linux-3.4-pax/security/tomoyo/tomoyo.c 2012-05-21 12:10:12.424049046 +0200
+@@ -501,7 +501,7 @@ static int tomoyo_socket_sendmsg(struct
+ * tomoyo_security_ops is a "struct security_operations" which is used for
+ * registering TOMOYO.
+ */
+-static struct security_operations tomoyo_security_ops = {
++static struct security_operations tomoyo_security_ops __read_only = {
+ .name = "tomoyo",
+ .cred_alloc_blank = tomoyo_cred_alloc_blank,
+ .cred_prepare = tomoyo_cred_prepare,
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/aoa/codecs/onyx.c linux-3.4-pax/sound/aoa/codecs/onyx.c
+--- linux-3.4/sound/aoa/codecs/onyx.c 2012-05-21 11:33:42.711930117 +0200
++++ linux-3.4-pax/sound/aoa/codecs/onyx.c 2012-05-21 12:10:12.428049046 +0200
+@@ -54,7 +54,7 @@ struct onyx {
+ spdif_locked:1,
+ analog_locked:1,
+ original_mute:2;
+- int open_count;
++ local_t open_count;
+ struct codec_info *codec_info;
+
+ /* mutex serializes concurrent access to the device
+@@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
+ struct onyx *onyx = cii->codec_data;
+
+ mutex_lock(&onyx->mutex);
+- onyx->open_count++;
++ local_inc(&onyx->open_count);
+ mutex_unlock(&onyx->mutex);
+
+ return 0;
+@@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
+ struct onyx *onyx = cii->codec_data;
+
+ mutex_lock(&onyx->mutex);
+- onyx->open_count--;
+- if (!onyx->open_count)
++ if (local_dec_and_test(&onyx->open_count))
+ onyx->spdif_locked = onyx->analog_locked = 0;
+ mutex_unlock(&onyx->mutex);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/aoa/codecs/onyx.h linux-3.4-pax/sound/aoa/codecs/onyx.h
+--- linux-3.4/sound/aoa/codecs/onyx.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/sound/aoa/codecs/onyx.h 2012-05-21 12:10:12.428049046 +0200
+@@ -11,6 +11,7 @@
+ #include <linux/i2c.h>
+ #include <asm/pmac_low_i2c.h>
+ #include <asm/prom.h>
++#include <asm/local.h>
+
+ /* PCM3052 register definitions */
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/core/oss/pcm_oss.c linux-3.4-pax/sound/core/oss/pcm_oss.c
+--- linux-3.4/sound/core/oss/pcm_oss.c 2012-03-19 10:39:14.668049032 +0100
++++ linux-3.4-pax/sound/core/oss/pcm_oss.c 2012-05-21 12:10:12.432049046 +0200
+@@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(str
+ if (in_kernel) {
+ mm_segment_t fs;
+ fs = snd_enter_user();
+- ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
++ ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
+ snd_leave_user(fs);
+ } else {
+- ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
++ ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
+ }
+ if (ret != -EPIPE && ret != -ESTRPIPE)
+ break;
+@@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(stru
+ if (in_kernel) {
+ mm_segment_t fs;
+ fs = snd_enter_user();
+- ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
++ ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
+ snd_leave_user(fs);
+ } else {
+- ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
++ ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
+ }
+ if (ret == -EPIPE) {
+ if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
+@@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct
+ struct snd_pcm_plugin_channel *channels;
+ size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
+ if (!in_kernel) {
+- if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
++ if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
+ return -EFAULT;
+ buf = runtime->oss.buffer;
+ }
+@@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct
+ }
+ } else {
+ tmp = snd_pcm_oss_write2(substream,
+- (const char __force *)buf,
++ (const char __force_kernel *)buf,
+ runtime->oss.period_bytes, 0);
+ if (tmp <= 0)
+ goto err;
+@@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ snd_pcm_sframes_t frames, frames1;
+ #ifdef CONFIG_SND_PCM_OSS_PLUGINS
+- char __user *final_dst = (char __force __user *)buf;
++ char __user *final_dst = (char __force_user *)buf;
+ if (runtime->oss.plugin_first) {
+ struct snd_pcm_plugin_channel *channels;
+ size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
+@@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct
+ xfer += tmp;
+ runtime->oss.buffer_used -= tmp;
+ } else {
+- tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
++ tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
+ runtime->oss.period_bytes, 0);
+ if (tmp <= 0)
+ goto err;
+@@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_p
+ size1);
+ size1 /= runtime->channels; /* frames */
+ fs = snd_enter_user();
+- snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
++ snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
+ snd_leave_user(fs);
+ }
+ } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/core/pcm_compat.c linux-3.4-pax/sound/core/pcm_compat.c
+--- linux-3.4/sound/core/pcm_compat.c 2011-10-24 12:48:44.127090819 +0200
++++ linux-3.4-pax/sound/core/pcm_compat.c 2012-05-21 12:10:12.436049046 +0200
+@@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(st
+ int err;
+
+ fs = snd_enter_user();
+- err = snd_pcm_delay(substream, &delay);
++ err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
+ snd_leave_user(fs);
+ if (err < 0)
+ return err;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/core/pcm_native.c linux-3.4-pax/sound/core/pcm_native.c
+--- linux-3.4/sound/core/pcm_native.c 2012-05-21 11:33:43.139930140 +0200
++++ linux-3.4-pax/sound/core/pcm_native.c 2012-05-21 12:10:12.440049047 +0200
+@@ -2770,11 +2770,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_
+ switch (substream->stream) {
+ case SNDRV_PCM_STREAM_PLAYBACK:
+ result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
+- (void __user *)arg);
++ (void __force_user *)arg);
+ break;
+ case SNDRV_PCM_STREAM_CAPTURE:
+ result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
+- (void __user *)arg);
++ (void __force_user *)arg);
+ break;
+ default:
+ result = -EINVAL;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/core/seq/seq_device.c linux-3.4-pax/sound/core/seq/seq_device.c
+--- linux-3.4/sound/core/seq/seq_device.c 2012-01-08 19:48:32.035470715 +0100
++++ linux-3.4-pax/sound/core/seq/seq_device.c 2012-05-21 12:10:12.444049047 +0200
+@@ -64,7 +64,7 @@ struct ops_list {
+ int argsize; /* argument size */
+
+ /* operators */
+- struct snd_seq_dev_ops ops;
++ struct snd_seq_dev_ops *ops;
+
+ /* registred devices */
+ struct list_head dev_list; /* list of devices */
+@@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char
+
+ mutex_lock(&ops->reg_mutex);
+ /* copy driver operators */
+- ops->ops = *entry;
++ ops->ops = entry;
+ ops->driver |= DRIVER_LOADED;
+ ops->argsize = argsize;
+
+@@ -463,7 +463,7 @@ static int init_device(struct snd_seq_de
+ dev->name, ops->id, ops->argsize, dev->argsize);
+ return -EINVAL;
+ }
+- if (ops->ops.init_device(dev) >= 0) {
++ if (ops->ops->init_device(dev) >= 0) {
+ dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
+ ops->num_init_devices++;
+ } else {
+@@ -490,7 +490,7 @@ static int free_device(struct snd_seq_de
+ dev->name, ops->id, ops->argsize, dev->argsize);
+ return -EINVAL;
+ }
+- if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
++ if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
+ dev->status = SNDRV_SEQ_DEVICE_FREE;
+ dev->driver_data = NULL;
+ ops->num_init_devices--;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/drivers/mts64.c linux-3.4-pax/sound/drivers/mts64.c
+--- linux-3.4/sound/drivers/mts64.c 2012-03-19 10:39:14.676049031 +0100
++++ linux-3.4-pax/sound/drivers/mts64.c 2012-05-21 12:10:12.444049047 +0200
+@@ -29,6 +29,7 @@
+ #include <sound/initval.h>
+ #include <sound/rawmidi.h>
+ #include <sound/control.h>
++#include <asm/local.h>
+
+ #define CARD_NAME "Miditerminal 4140"
+ #define DRIVER_NAME "MTS64"
+@@ -67,7 +68,7 @@ struct mts64 {
+ struct pardevice *pardev;
+ int pardev_claimed;
+
+- int open_count;
++ local_t open_count;
+ int current_midi_output_port;
+ int current_midi_input_port;
+ u8 mode[MTS64_NUM_INPUT_PORTS];
+@@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct
+ {
+ struct mts64 *mts = substream->rmidi->private_data;
+
+- if (mts->open_count == 0) {
++ if (local_read(&mts->open_count) == 0) {
+ /* We don't need a spinlock here, because this is just called
+ if the device has not been opened before.
+ So there aren't any IRQs from the device */
+@@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct
+
+ msleep(50);
+ }
+- ++(mts->open_count);
++ local_inc(&mts->open_count);
+
+ return 0;
+ }
+@@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struc
+ struct mts64 *mts = substream->rmidi->private_data;
+ unsigned long flags;
+
+- --(mts->open_count);
+- if (mts->open_count == 0) {
++ if (local_dec_return(&mts->open_count) == 0) {
+ /* We need the spinlock_irqsave here because we can still
+ have IRQs at this point */
+ spin_lock_irqsave(&mts->lock, flags);
+@@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struc
+
+ msleep(500);
+
+- } else if (mts->open_count < 0)
+- mts->open_count = 0;
++ } else if (local_read(&mts->open_count) < 0)
++ local_set(&mts->open_count, 0);
+
+ return 0;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/drivers/opl4/opl4_lib.c linux-3.4-pax/sound/drivers/opl4/opl4_lib.c
+--- linux-3.4/sound/drivers/opl4/opl4_lib.c 2012-01-08 19:48:32.111470711 +0100
++++ linux-3.4-pax/sound/drivers/opl4/opl4_lib.c 2012-05-21 12:10:12.448049047 +0200
+@@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
+ MODULE_DESCRIPTION("OPL4 driver");
+ MODULE_LICENSE("GPL");
+
+-static void inline snd_opl4_wait(struct snd_opl4 *opl4)
++static inline void snd_opl4_wait(struct snd_opl4 *opl4)
+ {
+ int timeout = 10;
+ while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/drivers/portman2x4.c linux-3.4-pax/sound/drivers/portman2x4.c
+--- linux-3.4/sound/drivers/portman2x4.c 2012-03-19 10:39:14.680049031 +0100
++++ linux-3.4-pax/sound/drivers/portman2x4.c 2012-05-21 12:10:12.452049047 +0200
+@@ -48,6 +48,7 @@
+ #include <sound/initval.h>
+ #include <sound/rawmidi.h>
+ #include <sound/control.h>
++#include <asm/local.h>
+
+ #define CARD_NAME "Portman 2x4"
+ #define DRIVER_NAME "portman"
+@@ -85,7 +86,7 @@ struct portman {
+ struct pardevice *pardev;
+ int pardev_claimed;
+
+- int open_count;
++ local_t open_count;
+ int mode[PORTMAN_NUM_INPUT_PORTS];
+ struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
+ };
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/firewire/amdtp.c linux-3.4-pax/sound/firewire/amdtp.c
+--- linux-3.4/sound/firewire/amdtp.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/sound/firewire/amdtp.c 2012-05-21 12:10:12.452049047 +0200
+@@ -371,7 +371,7 @@ static void queue_out_packet(struct amdt
+ ptr = s->pcm_buffer_pointer + data_blocks;
+ if (ptr >= pcm->runtime->buffer_size)
+ ptr -= pcm->runtime->buffer_size;
+- ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
++ ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
+
+ s->pcm_period_pointer += data_blocks;
+ if (s->pcm_period_pointer >= pcm->runtime->period_size) {
+@@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
+ */
+ void amdtp_out_stream_update(struct amdtp_out_stream *s)
+ {
+- ACCESS_ONCE(s->source_node_id_field) =
++ ACCESS_ONCE_RW(s->source_node_id_field) =
+ (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
+ }
+ EXPORT_SYMBOL(amdtp_out_stream_update);
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/firewire/amdtp.h linux-3.4-pax/sound/firewire/amdtp.h
+--- linux-3.4/sound/firewire/amdtp.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/sound/firewire/amdtp.h 2012-05-21 12:10:12.456049048 +0200
+@@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_
+ static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
+ struct snd_pcm_substream *pcm)
+ {
+- ACCESS_ONCE(s->pcm) = pcm;
++ ACCESS_ONCE_RW(s->pcm) = pcm;
+ }
+
+ /**
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/firewire/isight.c linux-3.4-pax/sound/firewire/isight.c
+--- linux-3.4/sound/firewire/isight.c 2012-05-21 11:33:43.171930142 +0200
++++ linux-3.4-pax/sound/firewire/isight.c 2012-05-21 12:10:12.456049048 +0200
+@@ -96,7 +96,7 @@ static void isight_update_pointers(struc
+ ptr += count;
+ if (ptr >= runtime->buffer_size)
+ ptr -= runtime->buffer_size;
+- ACCESS_ONCE(isight->buffer_pointer) = ptr;
++ ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
+
+ isight->period_counter += count;
+ if (isight->period_counter >= runtime->period_size) {
+@@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_p
+ if (err < 0)
+ return err;
+
+- ACCESS_ONCE(isight->pcm_active) = true;
++ ACCESS_ONCE_RW(isight->pcm_active) = true;
+
+ return 0;
+ }
+@@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm
+ {
+ struct isight *isight = substream->private_data;
+
+- ACCESS_ONCE(isight->pcm_active) = false;
++ ACCESS_ONCE_RW(isight->pcm_active) = false;
+
+ mutex_lock(&isight->mutex);
+ isight_stop_streaming(isight);
+@@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+- ACCESS_ONCE(isight->pcm_running) = true;
++ ACCESS_ONCE_RW(isight->pcm_running) = true;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+- ACCESS_ONCE(isight->pcm_running) = false;
++ ACCESS_ONCE_RW(isight->pcm_running) = false;
+ break;
+ default:
+ return -EINVAL;
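
Several of the sound hunks in this excerpt (amdtp, isight) change writes from ACCESS_ONCE() to ACCESS_ONCE_RW(). The reason is a PaX change to include/linux/compiler.h, which is not part of this excerpt: ACCESS_ONCE() is const-qualified so it can only be used for reads, and writes must go through the _RW variant. The definitions below follow the usual PaX formulation and should be read as an assumption for illustration, not as a quotation from this patch:

#include <stdio.h>

/* Assumed (PaX-style) definitions: read-only vs read-write forced access. */
#define ACCESS_ONCE(x)    (*(volatile const __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static unsigned long buffer_pointer;

int main(void)
{
        ACCESS_ONCE_RW(buffer_pointer) = 42;            /* write: needs the RW form */
        printf("%lu\n", ACCESS_ONCE(buffer_pointer));   /* read: const form is fine */

        /* ACCESS_ONCE(buffer_pointer) = 0;  -- would not compile: assignment to a
         * const-qualified lvalue, which is exactly the mistake the split macro
         * is meant to catch at build time. */
        return 0;
}
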
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/isa/cmi8330.c linux-3.4-pax/sound/isa/cmi8330.c
+--- linux-3.4/sound/isa/cmi8330.c 2012-03-19 10:39:14.696049030 +0100
++++ linux-3.4-pax/sound/isa/cmi8330.c 2012-05-21 12:10:12.460049048 +0200
+@@ -172,7 +172,7 @@ struct snd_cmi8330 {
+
+ struct snd_pcm *pcm;
+ struct snd_cmi8330_stream {
+- struct snd_pcm_ops ops;
++ snd_pcm_ops_no_const ops;
+ snd_pcm_open_callback_t open;
+ void *private_data; /* sb or wss */
+ } streams[2];
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/oss/sb_audio.c linux-3.4-pax/sound/oss/sb_audio.c
+--- linux-3.4/sound/oss/sb_audio.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/sound/oss/sb_audio.c 2012-05-21 12:10:12.464049048 +0200
+@@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
+ buf16 = (signed short *)(localbuf + localoffs);
+ while (c)
+ {
+- locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
++ locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
+ if (copy_from_user(lbuf8,
+ userbuf+useroffs + p,
+ locallen))
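
The sb_audio.c change above is a signed/unsigned hardening fix: c is a signed count, and without the cast a negative value passes the >= LBUFCOPYSIZE clamp untouched and is later handed to copy_from_user() as a huge unsigned length. A small standalone demonstration of the difference, with a hypothetical negative count:

#include <stdio.h>

#define LBUFCOPYSIZE 1024

int main(void)
{
        int c = -1;     /* hypothetical corrupted/negative byte count */

        /* Unpatched comparison: -1 >= 1024 is false, so the value stays -1
         * and later becomes a gigantic unsigned length. */
        int before = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);

        /* Patched comparison: (unsigned)-1 is huge, so the clamp kicks in. */
        int after = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);

        printf("before: %d  after: %d  as size_t before: %zu\n",
               before, after, (size_t)before);
        return 0;
}
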
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/oss/swarm_cs4297a.c linux-3.4-pax/sound/oss/swarm_cs4297a.c
+--- linux-3.4/sound/oss/swarm_cs4297a.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/sound/oss/swarm_cs4297a.c 2012-05-21 12:10:12.468049048 +0200
+@@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
+ {
+ struct cs4297a_state *s;
+ u32 pwr, id;
+- mm_segment_t fs;
+ int rval;
+ #ifndef CONFIG_BCM_CS4297A_CSWARM
+ u64 cfg;
+@@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
+ if (!rval) {
+ char *sb1250_duart_present;
+
++#if 0
++ mm_segment_t fs;
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+-#if 0
+ val = SOUND_MASK_LINE;
+ mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
+ for (i = 0; i < ARRAY_SIZE(initvol); i++) {
+ val = initvol[i].vol;
+ mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
+ }
++ set_fs(fs);
+ // cs4297a_write_ac97(s, 0x18, 0x0808);
+ #else
+ // cs4297a_write_ac97(s, 0x5e, 0x180);
+ cs4297a_write_ac97(s, 0x02, 0x0808);
+ cs4297a_write_ac97(s, 0x18, 0x0808);
+ #endif
+- set_fs(fs);
+
+ list_add(&s->list, &cs4297a_devs);
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/pci/hda/hda_codec.h linux-3.4-pax/sound/pci/hda/hda_codec.h
+--- linux-3.4/sound/pci/hda/hda_codec.h 2012-05-21 11:33:43.351930152 +0200
++++ linux-3.4-pax/sound/pci/hda/hda_codec.h 2012-05-21 12:10:12.472049048 +0200
+@@ -611,7 +611,7 @@ struct hda_bus_ops {
+ /* notify power-up/down from codec to controller */
+ void (*pm_notify)(struct hda_bus *bus);
+ #endif
+-};
++} __no_const;
+
+ /* template to pass to the bus constructor */
+ struct hda_bus_template {
+@@ -713,6 +713,7 @@ struct hda_codec_ops {
+ #endif
+ void (*reboot_notify)(struct hda_codec *codec);
+ };
++typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
+
+ /* record for amp information cache */
+ struct hda_cache_head {
+@@ -743,7 +744,7 @@ struct hda_pcm_ops {
+ struct snd_pcm_substream *substream);
+ int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
+ struct snd_pcm_substream *substream);
+-};
++} __no_const;
+
+ /* PCM information for each substream */
+ struct hda_pcm_stream {
+@@ -801,7 +802,7 @@ struct hda_codec {
+ const char *modelname; /* model name for preset */
+
+ /* set by patch */
+- struct hda_codec_ops patch_ops;
++ hda_codec_ops_no_const patch_ops;
+
+ /* PCM to create, set by patch_ops.build_pcms callback */
+ unsigned int num_pcms;
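
The hda_codec.h hunk is typical of the constify-related changes in this patch: structures holding only function pointers are turned read-only by the constify gcc plugin (included further down in this excerpt), and the few that really are written at runtime -- patch_ops is filled in by each codec patch -- get a __no_const typedef instead. A rough standalone illustration of that trade-off, with __no_const stubbed out because its real definition lives in compiler.h outside this excerpt:

#include <stdio.h>

/* Stub: in the patched kernel __no_const expands to an attribute the
 * constify plugin recognises; here it is emptied out for illustration. */
#define __no_const

struct bus_ops {                 /* only function pointers: the plugin would
                                    make this type read-only automatically */
        int  (*command)(int verb);
        void (*pm_notify)(void);
};

typedef struct codec_ops {       /* filled in at runtime by each codec "patch" */
        int (*init)(void);
        int (*build_pcms)(void);
} __no_const codec_ops_no_const;

static int null_init(void) { return 0; }

int main(void)
{
        codec_ops_no_const patch_ops = { 0 };   /* writable instance */
        patch_ops.init = null_init;             /* runtime assignment is the whole
                                                   point of the __no_const escape hatch */
        printf("%d\n", patch_ops.init());
        return 0;
}
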
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/pci/ice1712/ice1712.h linux-3.4-pax/sound/pci/ice1712/ice1712.h
+--- linux-3.4/sound/pci/ice1712/ice1712.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/sound/pci/ice1712/ice1712.h 2012-05-21 12:10:12.472049048 +0200
+@@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
+ unsigned int mask_flags; /* total mask bits */
+ struct snd_akm4xxx_ops {
+ void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
+- } ops;
++ } __no_const ops;
+ };
+
+ struct snd_ice1712_spdif {
+@@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
+ int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
+ void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
+ int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
+- } ops;
++ } __no_const ops;
+ };
+
+
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/pci/ymfpci/ymfpci_main.c linux-3.4-pax/sound/pci/ymfpci/ymfpci_main.c
+--- linux-3.4/sound/pci/ymfpci/ymfpci_main.c 2012-05-21 11:33:43.787930175 +0200
++++ linux-3.4-pax/sound/pci/ymfpci/ymfpci_main.c 2012-05-21 12:10:12.476049049 +0200
+@@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct sn
+ if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
+ break;
+ }
+- if (atomic_read(&chip->interrupt_sleep_count)) {
+- atomic_set(&chip->interrupt_sleep_count, 0);
++ if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
+ wake_up(&chip->interrupt_sleep);
+ }
+ __end:
+@@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct s
+ continue;
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&chip->interrupt_sleep, &wait);
+- atomic_inc(&chip->interrupt_sleep_count);
++ atomic_inc_unchecked(&chip->interrupt_sleep_count);
+ schedule_timeout_uninterruptible(msecs_to_jiffies(50));
+ remove_wait_queue(&chip->interrupt_sleep, &wait);
+ }
+@@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(
+ snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
+ spin_unlock(&chip->reg_lock);
+
+- if (atomic_read(&chip->interrupt_sleep_count)) {
+- atomic_set(&chip->interrupt_sleep_count, 0);
++ if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
+ wake_up(&chip->interrupt_sleep);
+ }
+ }
+@@ -2398,7 +2398,7 @@ int __devinit snd_ymfpci_create(struct s
+ spin_lock_init(&chip->reg_lock);
+ spin_lock_init(&chip->voice_lock);
+ init_waitqueue_head(&chip->interrupt_sleep);
+- atomic_set(&chip->interrupt_sleep_count, 0);
++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
+ chip->card = card;
+ chip->pci = pci;
+ chip->irq = -1;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/soc/soc-pcm.c linux-3.4-pax/sound/soc/soc-pcm.c
+--- linux-3.4/sound/soc/soc-pcm.c 2012-05-21 11:33:44.919930237 +0200
++++ linux-3.4-pax/sound/soc/soc-pcm.c 2012-05-21 12:10:12.480049049 +0200
+@@ -641,7 +641,7 @@ int soc_new_pcm(struct snd_soc_pcm_runti
+ struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+- struct snd_pcm_ops *soc_pcm_ops = &rtd->ops;
++ snd_pcm_ops_no_const *soc_pcm_ops = &rtd->ops;
+ struct snd_pcm *pcm;
+ char new_name[64];
+ int ret = 0, playback = 0, capture = 0;
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/sound/usb/card.h linux-3.4-pax/sound/usb/card.h
+--- linux-3.4/sound/usb/card.h 2012-03-19 10:39:15.612048982 +0100
++++ linux-3.4-pax/sound/usb/card.h 2012-05-21 12:10:12.480049049 +0200
+@@ -45,6 +45,7 @@ struct snd_urb_ops {
+ int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
+ int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
+ };
++typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
+
+ struct snd_usb_substream {
+ struct snd_usb_stream *stream;
+@@ -94,7 +95,7 @@ struct snd_usb_substream {
+ struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
+ spinlock_t lock;
+
+- struct snd_urb_ops ops; /* callbacks (must be filled at init) */
++ snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
+ int last_frame_number; /* stored frame number */
+ int last_delay; /* stored delay */
+ };
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/tools/gcc/checker_plugin.c linux-3.4-pax/tools/gcc/checker_plugin.c
+--- linux-3.4/tools/gcc/checker_plugin.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.4-pax/tools/gcc/checker_plugin.c 2012-05-21 12:10:12.484049049 +0200
+@@ -0,0 +1,171 @@
++/*
++ * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to implement various sparse (source code checker) features
++ *
++ * TODO:
++ * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
++ *
++ * BUGS:
++ * - none known
++ */
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++//#include "expr.h" where are you...
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++#include "tree-flow.h"
++#include "target.h"
++
++extern void c_register_addr_space (const char *str, addr_space_t as);
++extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
++extern enum machine_mode default_addr_space_address_mode (addr_space_t);
++extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
++extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
++extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
++
++extern void print_gimple_stmt(FILE *, gimple, int, int);
++extern rtx emit_move_insn(rtx x, rtx y);
++
++int plugin_is_GPL_compatible;
++
++static struct plugin_info checker_plugin_info = {
++ .version = "201111150100",
++};
++
++#define ADDR_SPACE_KERNEL 0
++#define ADDR_SPACE_FORCE_KERNEL 1
++#define ADDR_SPACE_USER 2
++#define ADDR_SPACE_FORCE_USER 3
++#define ADDR_SPACE_IOMEM 0
++#define ADDR_SPACE_FORCE_IOMEM 0
++#define ADDR_SPACE_PERCPU 0
++#define ADDR_SPACE_FORCE_PERCPU 0
++#define ADDR_SPACE_RCU 0
++#define ADDR_SPACE_FORCE_RCU 0
++
++static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
++{
++ return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
++}
++
++static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
++{
++ return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
++}
++
++static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
++{
++ return default_addr_space_valid_pointer_mode(mode, as);
++}
++
++static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
++{
++ return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
++}
++
++static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
++{
++ return default_addr_space_legitimize_address(x, oldx, mode, as);
++}
++
++static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
++{
++ if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
++ return true;
++
++ if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
++ return true;
++
++ if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
++ return true;
++
++ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
++ return true;
++
++ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
++ return true;
++
++ if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
++ return true;
++
++ if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
++ return true;
++
++ return subset == superset;
++}
++
++static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
++{
++// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
++// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
++
++ return op;
++}
++
++static void register_checker_address_spaces(void *event_data, void *data)
++{
++ c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
++ c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
++ c_register_addr_space("__user", ADDR_SPACE_USER);
++ c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
++// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
++// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
++// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
++// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
++// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
++// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
++
++ targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
++ targetm.addr_space.address_mode = checker_addr_space_address_mode;
++ targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
++ targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
++// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
++ targetm.addr_space.subset_p = checker_addr_space_subset_p;
++ targetm.addr_space.convert = checker_addr_space_convert;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ const char * const plugin_name = plugin_info->base_name;
++ const int argc = plugin_info->argc;
++ const struct plugin_argument * const argv = plugin_info->argv;
++ int i;
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
++
++ for (i = 0; i < argc; ++i)
++ error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++
++ if (TARGET_64BIT == 0)
++ return 0;
++
++ register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
++
++ return 0;
++}
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/tools/gcc/colorize_plugin.c linux-3.4-pax/tools/gcc/colorize_plugin.c
+--- linux-3.4/tools/gcc/colorize_plugin.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.4-pax/tools/gcc/colorize_plugin.c 2012-05-21 12:10:12.484049049 +0200
+@@ -0,0 +1,147 @@
++/*
++ * Copyright 2012 by PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to colorize diagnostic output
++ *
++ */
++
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++
++int plugin_is_GPL_compatible;
++
++static struct plugin_info colorize_plugin_info = {
++ .version = "201203092200",
++};
++
++#define GREEN "\033[32m\033[2m"
++#define LIGHTGREEN "\033[32m\033[1m"
++#define YELLOW "\033[33m\033[2m"
++#define LIGHTYELLOW "\033[33m\033[1m"
++#define RED "\033[31m\033[2m"
++#define LIGHTRED "\033[31m\033[1m"
++#define BLUE "\033[34m\033[2m"
++#define LIGHTBLUE "\033[34m\033[1m"
++#define BRIGHT "\033[m\033[1m"
++#define NORMAL "\033[m"
++
++static diagnostic_starter_fn old_starter;
++static diagnostic_finalizer_fn old_finalizer;
++
++static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
++{
++ const char *color;
++ char *newprefix;
++
++ switch (diagnostic->kind) {
++ case DK_NOTE:
++ color = LIGHTBLUE;
++ break;
++
++ case DK_PEDWARN:
++ case DK_WARNING:
++ color = LIGHTYELLOW;
++ break;
++
++ case DK_ERROR:
++ case DK_FATAL:
++ case DK_ICE:
++ case DK_PERMERROR:
++ case DK_SORRY:
++ color = LIGHTRED;
++ break;
++
++ default:
++ color = NORMAL;
++ }
++
++ old_starter(context, diagnostic);
++ if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
++ return;
++ pp_destroy_prefix(context->printer);
++ pp_set_prefix(context->printer, newprefix);
++}
++
++static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
++{
++ old_finalizer(context, diagnostic);
++}
++
++static void colorize_arm(void)
++{
++ old_starter = diagnostic_starter(global_dc);
++ old_finalizer = diagnostic_finalizer(global_dc);
++
++ diagnostic_starter(global_dc) = start_colorize;
++ diagnostic_finalizer(global_dc) = finalize_colorize;
++}
++
++static unsigned int execute_colorize_rearm(void)
++{
++ if (diagnostic_starter(global_dc) == start_colorize)
++ return 0;
++
++ colorize_arm();
++ return 0;
++}
++
++struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
++ .pass = {
++ .type = SIMPLE_IPA_PASS,
++ .name = "colorize_rearm",
++ .gate = NULL,
++ .execute = execute_colorize_rearm,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = 0
++ }
++};
++
++static void colorize_start_unit(void *gcc_data, void *user_data)
++{
++ colorize_arm();
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ const char * const plugin_name = plugin_info->base_name;
++ struct register_pass_info colorize_rearm_pass_info = {
++ .pass = &pass_ipa_colorize_rearm.pass,
++ .reference_pass_name = "*free_lang_data",
++ .ref_pass_instance_number = 0,
++ .pos_op = PASS_POS_INSERT_AFTER
++ };
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
++ register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
++ return 0;
++}
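
The colorize plugin above only wraps gcc's diagnostic prefix in ANSI escape sequences chosen by severity; the effect is easy to reproduce outside gcc. A tiny demo using the same escape codes:

#include <stdio.h>

#define LIGHTYELLOW "\033[33m\033[1m"
#define LIGHTRED    "\033[31m\033[1m"
#define NORMAL      "\033[m"

int main(void)
{
        /* Same colour choices the plugin makes for warnings and errors. */
        printf(LIGHTYELLOW "warning:" NORMAL " something looks suspicious\n");
        printf(LIGHTRED    "error:"   NORMAL " something is definitely wrong\n");
        return 0;
}
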
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/tools/gcc/constify_plugin.c linux-3.4-pax/tools/gcc/constify_plugin.c
+--- linux-3.4/tools/gcc/constify_plugin.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.4-pax/tools/gcc/constify_plugin.c 2012-05-30 02:13:59.687048363 +0200
+@@ -0,0 +1,328 @@
++/*
++ * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011 by PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/const_plugin/
++ *
++ * Usage:
++ * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
++ * $ gcc -fplugin=constify_plugin.so test.c -O2
++ */
++
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++#include "tree-flow.h"
++
++#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
++
++int plugin_is_GPL_compatible;
++
++static struct plugin_info const_plugin_info = {
++ .version = "201205300030",
++ .help = "no-constify\tturn off constification\n",
++};
++
++static void deconstify_tree(tree node);
++
++static void deconstify_type(tree type)
++{
++ tree field;
++
++ for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
++ tree type = TREE_TYPE(field);
++
++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
++ continue;
++ if (!TYPE_READONLY(type))
++ continue;
++
++ deconstify_tree(field);
++ }
++ TYPE_READONLY(type) = 0;
++ C_TYPE_FIELDS_READONLY(type) = 0;
++}
++
++static void deconstify_tree(tree node)
++{
++ tree old_type, new_type, field;
++
++ old_type = TREE_TYPE(node);
++
++ gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
++
++ new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
++ TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
++ for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
++ DECL_FIELD_CONTEXT(field) = new_type;
++
++ deconstify_type(new_type);
++
++ TREE_READONLY(node) = 0;
++ TREE_TYPE(node) = new_type;
++}
++
++static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
++{
++ tree type;
++
++ *no_add_attrs = true;
++ if (TREE_CODE(*node) == FUNCTION_DECL) {
++ error("%qE attribute does not apply to functions", name);
++ return NULL_TREE;
++ }
++
++ if (TREE_CODE(*node) == VAR_DECL) {
++ error("%qE attribute does not apply to variables", name);
++ return NULL_TREE;
++ }
++
++ if (TYPE_P(*node)) {
++ if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
++ *no_add_attrs = false;
++ else
++ error("%qE attribute applies to struct and union types only", name);
++ return NULL_TREE;
++ }
++
++ type = TREE_TYPE(*node);
++
++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
++ error("%qE attribute applies to struct and union types only", name);
++ return NULL_TREE;
++ }
++
++ if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
++ error("%qE attribute is already applied to the type", name);
++ return NULL_TREE;
++ }
++
++ if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
++ error("%qE attribute used on type that is not constified", name);
++ return NULL_TREE;
++ }
++
++ if (TREE_CODE(*node) == TYPE_DECL) {
++ deconstify_tree(*node);
++ return NULL_TREE;
++ }
++
++ return NULL_TREE;
++}
++
++static void constify_type(tree type)
++{
++ TYPE_READONLY(type) = 1;
++ C_TYPE_FIELDS_READONLY(type) = 1;
++}
++
++static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
++{
++ *no_add_attrs = true;
++ if (!TYPE_P(*node)) {
++ error("%qE attribute applies to types only", name);
++ return NULL_TREE;
++ }
++
++ if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
++ error("%qE attribute applies to struct and union types only", name);
++ return NULL_TREE;
++ }
++
++ *no_add_attrs = false;
++ constify_type(*node);
++ return NULL_TREE;
++}
++
++static struct attribute_spec no_const_attr = {
++ .name = "no_const",
++ .min_length = 0,
++ .max_length = 0,
++ .decl_required = false,
++ .type_required = false,
++ .function_type_required = false,
++ .handler = handle_no_const_attribute,
++#if BUILDING_GCC_VERSION >= 4007
++ .affects_type_identity = true
++#endif
++};
++
++static struct attribute_spec do_const_attr = {
++ .name = "do_const",
++ .min_length = 0,
++ .max_length = 0,
++ .decl_required = false,
++ .type_required = false,
++ .function_type_required = false,
++ .handler = handle_do_const_attribute,
++#if BUILDING_GCC_VERSION >= 4007
++ .affects_type_identity = true
++#endif
++};
++
++static void register_attributes(void *event_data, void *data)
++{
++ register_attribute(&no_const_attr);
++ register_attribute(&do_const_attr);
++}
++
++static bool is_fptr(tree field)
++{
++ tree ptr = TREE_TYPE(field);
++
++ if (TREE_CODE(ptr) != POINTER_TYPE)
++ return false;
++
++ return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
++}
++
++static bool walk_struct(tree node)
++{
++ tree field;
++
++ if (TYPE_FIELDS(node) == NULL_TREE)
++ return false;
++
++ if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
++ gcc_assert(!TYPE_READONLY(node));
++ deconstify_type(node);
++ return false;
++ }
++
++ for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
++ tree type = TREE_TYPE(field);
++ enum tree_code code = TREE_CODE(type);
++ if (code == RECORD_TYPE || code == UNION_TYPE) {
++ if (!(walk_struct(type)))
++ return false;
++ } else if (!is_fptr(field) && !TREE_READONLY(field))
++ return false;
++ }
++ return true;
++}
++
++static void finish_type(void *event_data, void *data)
++{
++ tree type = (tree)event_data;
++
++ if (type == NULL_TREE)
++ return;
++
++ if (TYPE_READONLY(type))
++ return;
++
++ if (walk_struct(type))
++ constify_type(type);
++}
++
++static unsigned int check_local_variables(void);
++
++struct gimple_opt_pass pass_local_variable = {
++ {
++ .type = GIMPLE_PASS,
++ .name = "check_local_variables",
++ .gate = NULL,
++ .execute = check_local_variables,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = 0
++ }
++};
++
++static unsigned int check_local_variables(void)
++{
++ tree var;
++ referenced_var_iterator rvi;
++
++#if BUILDING_GCC_VERSION == 4005
++ FOR_EACH_REFERENCED_VAR(var, rvi) {
++#else
++ FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
++#endif
++ tree type = TREE_TYPE(var);
++
++ if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
++ continue;
++
++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
++ continue;
++
++ if (!TYPE_READONLY(type))
++ continue;
++
++// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
++// continue;
++
++// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
++// continue;
++
++ if (walk_struct(type)) {
++ error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
++ return 1;
++ }
++ }
++ return 0;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ const char * const plugin_name = plugin_info->base_name;
++ const int argc = plugin_info->argc;
++ const struct plugin_argument * const argv = plugin_info->argv;
++ int i;
++ bool constify = true;
++
++ struct register_pass_info local_variable_pass_info = {
++ .pass = &pass_local_variable.pass,
++ .reference_pass_name = "*referenced_vars",
++ .ref_pass_instance_number = 0,
++ .pos_op = PASS_POS_INSERT_AFTER
++ };
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ for (i = 0; i < argc; ++i) {
++ if (!(strcmp(argv[i].key, "no-constify"))) {
++ constify = false;
++ continue;
++ }
++ error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
++ if (constify) {
++ register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
++ }
++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
++
++ return 0;
++}
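
Besides constifying the types, the pass registered above (check_local_variables) rejects stack instances of constified structures, since a read-only object on the stack is at best pointless and usually a sign of a missed conversion. A hypothetical fragment of the kind it flags -- this builds with a stock gcc, but with the plugin loaded it should fail with "constified variable ... cannot be local":

#include <stdio.h>

struct file_ops {                       /* only function pointers, so the plugin
                                           would constify this type automatically */
        int (*open)(void);
        int (*release)(void);
};

static int do_register(void)
{
        struct file_ops ops = { 0 };    /* local instance of a constified type:
                                           exactly what check_local_variables flags */
        return ops.open ? ops.open() : 0;
}

int main(void)
{
        printf("%d\n", do_register());
        return 0;
}
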
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/tools/gcc/kallocstat_plugin.c linux-3.4-pax/tools/gcc/kallocstat_plugin.c
+--- linux-3.4/tools/gcc/kallocstat_plugin.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.4-pax/tools/gcc/kallocstat_plugin.c 2012-05-21 12:10:12.488049049 +0200
+@@ -0,0 +1,167 @@
++/*
++ * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to find the distribution of k*alloc sizes
++ *
++ * TODO:
++ *
++ * BUGS:
++ * - none known
++ */
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++//#include "expr.h" where are you...
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++
++extern void print_gimple_stmt(FILE *, gimple, int, int);
++
++int plugin_is_GPL_compatible;
++
++static const char * const kalloc_functions[] = {
++ "__kmalloc",
++ "kmalloc",
++ "kmalloc_large",
++ "kmalloc_node",
++ "kmalloc_order",
++ "kmalloc_order_trace",
++ "kmalloc_slab",
++ "kzalloc",
++ "kzalloc_node",
++};
++
++static struct plugin_info kallocstat_plugin_info = {
++ .version = "201111150100",
++};
++
++static unsigned int execute_kallocstat(void);
++
++static struct gimple_opt_pass kallocstat_pass = {
++ .pass = {
++ .type = GIMPLE_PASS,
++ .name = "kallocstat",
++ .gate = NULL,
++ .execute = execute_kallocstat,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = 0
++ }
++};
++
++static bool is_kalloc(const char *fnname)
++{
++ size_t i;
++
++ for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
++ if (!strcmp(fnname, kalloc_functions[i]))
++ return true;
++ return false;
++}
++
++static unsigned int execute_kallocstat(void)
++{
++ basic_block bb;
++
++ // 1. loop through BBs and GIMPLE statements
++ FOR_EACH_BB(bb) {
++ gimple_stmt_iterator gsi;
++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ // gimple match:
++ tree fndecl, size;
++ gimple call_stmt;
++ const char *fnname;
++
++ // is it a call
++ call_stmt = gsi_stmt(gsi);
++ if (!is_gimple_call(call_stmt))
++ continue;
++ fndecl = gimple_call_fndecl(call_stmt);
++ if (fndecl == NULL_TREE)
++ continue;
++ if (TREE_CODE(fndecl) != FUNCTION_DECL)
++ continue;
++
++ // is it a call to k*alloc
++ fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
++ if (!is_kalloc(fnname))
++ continue;
++
++ // is the size arg the result of a simple const assignment
++ size = gimple_call_arg(call_stmt, 0);
++ while (true) {
++ gimple def_stmt;
++ expanded_location xloc;
++ size_t size_val;
++
++ if (TREE_CODE(size) != SSA_NAME)
++ break;
++ def_stmt = SSA_NAME_DEF_STMT(size);
++ if (!def_stmt || !is_gimple_assign(def_stmt))
++ break;
++ if (gimple_num_ops(def_stmt) != 2)
++ break;
++ size = gimple_assign_rhs1(def_stmt);
++ if (!TREE_CONSTANT(size))
++ continue;
++ xloc = expand_location(gimple_location(def_stmt));
++ if (!xloc.file)
++ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
++ size_val = TREE_INT_CST_LOW(size);
++ fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
++ break;
++ }
++//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
++//debug_tree(gimple_call_fn(call_stmt));
++//print_node(stderr, "pax", fndecl, 4);
++ }
++ }
++
++ return 0;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ const char * const plugin_name = plugin_info->base_name;
++ struct register_pass_info kallocstat_pass_info = {
++ .pass = &kallocstat_pass.pass,
++ .reference_pass_name = "ssa",
++ .ref_pass_instance_number = 0,
++ .pos_op = PASS_POS_INSERT_AFTER
++ };
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
++
++ return 0;
++}
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/tools/gcc/kernexec_plugin.c linux-3.4-pax/tools/gcc/kernexec_plugin.c
+--- linux-3.4/tools/gcc/kernexec_plugin.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.4-pax/tools/gcc/kernexec_plugin.c 2012-05-21 12:10:12.488049049 +0200
+@@ -0,0 +1,427 @@
++/*
++ * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
++ *
++ * TODO:
++ *
++ * BUGS:
++ * - none known
++ */
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++//#include "expr.h" where are you...
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++#include "tree-flow.h"
++
++extern void print_gimple_stmt(FILE *, gimple, int, int);
++extern rtx emit_move_insn(rtx x, rtx y);
++
++int plugin_is_GPL_compatible;
++
++static struct plugin_info kernexec_plugin_info = {
++ .version = "201111291120",
++ .help = "method=[bts|or]\tinstrumentation method\n"
++};
++
++static unsigned int execute_kernexec_reload(void);
++static unsigned int execute_kernexec_fptr(void);
++static unsigned int execute_kernexec_retaddr(void);
++static bool kernexec_cmodel_check(void);
++
++static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
++static void (*kernexec_instrument_retaddr)(rtx);
++
++static struct gimple_opt_pass kernexec_reload_pass = {
++ .pass = {
++ .type = GIMPLE_PASS,
++ .name = "kernexec_reload",
++ .gate = kernexec_cmodel_check,
++ .execute = execute_kernexec_reload,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
++ }
++};
++
++static struct gimple_opt_pass kernexec_fptr_pass = {
++ .pass = {
++ .type = GIMPLE_PASS,
++ .name = "kernexec_fptr",
++ .gate = kernexec_cmodel_check,
++ .execute = execute_kernexec_fptr,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
++ }
++};
++
++static struct rtl_opt_pass kernexec_retaddr_pass = {
++ .pass = {
++ .type = RTL_PASS,
++ .name = "kernexec_retaddr",
++ .gate = kernexec_cmodel_check,
++ .execute = execute_kernexec_retaddr,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
++ }
++};
++
++static bool kernexec_cmodel_check(void)
++{
++ tree section;
++
++ if (ix86_cmodel != CM_KERNEL)
++ return false;
++
++ section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
++ if (!section || !TREE_VALUE(section))
++ return true;
++
++ section = TREE_VALUE(TREE_VALUE(section));
++ if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
++ return true;
++
++ return false;
++}
++
++/*
++ * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
++ */
++static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
++{
++ gimple asm_movabs_stmt;
++
++ // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
++ asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
++ gimple_asm_set_volatile(asm_movabs_stmt, true);
++ gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
++ update_stmt(asm_movabs_stmt);
++}
++
++/*
++ * find all asm() stmts that clobber r10 and add a reload of r10
++ */
++static unsigned int execute_kernexec_reload(void)
++{
++ basic_block bb;
++
++ // 1. loop through BBs and GIMPLE statements
++ FOR_EACH_BB(bb) {
++ gimple_stmt_iterator gsi;
++
++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ // gimple match: __asm__ ("" : : : "r10");
++ gimple asm_stmt;
++ size_t nclobbers;
++
++ // is it an asm ...
++ asm_stmt = gsi_stmt(gsi);
++ if (gimple_code(asm_stmt) != GIMPLE_ASM)
++ continue;
++
++ // ... clobbering r10
++ nclobbers = gimple_asm_nclobbers(asm_stmt);
++ while (nclobbers--) {
++ tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
++ if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
++ continue;
++ kernexec_reload_fptr_mask(&gsi);
++//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
++ break;
++ }
++ }
++ }
++
++ return 0;
++}
++
++/*
++ * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
++ * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
++ */
++static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
++{
++ gimple assign_intptr, assign_new_fptr, call_stmt;
++ tree intptr, old_fptr, new_fptr, kernexec_mask;
++
++ call_stmt = gsi_stmt(*gsi);
++ old_fptr = gimple_call_fn(call_stmt);
++
++ // create temporary unsigned long variable used for bitops and cast fptr to it
++ intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
++ add_referenced_var(intptr);
++ mark_sym_for_renaming(intptr);
++ assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
++ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
++ update_stmt(assign_intptr);
++
++ // apply logical or to temporary unsigned long and bitmask
++ kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
++// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
++ assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
++ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
++ update_stmt(assign_intptr);
++
++ // cast temporary unsigned long back to a temporary fptr variable
++ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
++ add_referenced_var(new_fptr);
++ mark_sym_for_renaming(new_fptr);
++ assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
++ gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
++ update_stmt(assign_new_fptr);
++
++ // replace call stmt fn with the new fptr
++ gimple_call_set_fn(call_stmt, new_fptr);
++ update_stmt(call_stmt);
++}
++
++static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
++{
++ gimple asm_or_stmt, call_stmt;
++ tree old_fptr, new_fptr, input, output;
++ VEC(tree, gc) *inputs = NULL;
++ VEC(tree, gc) *outputs = NULL;
++
++ call_stmt = gsi_stmt(*gsi);
++ old_fptr = gimple_call_fn(call_stmt);
++
++ // create temporary fptr variable
++ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
++ add_referenced_var(new_fptr);
++ mark_sym_for_renaming(new_fptr);
++
++ // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
++ input = build_tree_list(NULL_TREE, build_string(2, "0"));
++ input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
++ output = build_tree_list(NULL_TREE, build_string(3, "=r"));
++ output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
++ VEC_safe_push(tree, gc, inputs, input);
++ VEC_safe_push(tree, gc, outputs, output);
++ asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
++ gimple_asm_set_volatile(asm_or_stmt, true);
++ gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
++ update_stmt(asm_or_stmt);
++
++ // replace call stmt fn with the new fptr
++ gimple_call_set_fn(call_stmt, new_fptr);
++ update_stmt(call_stmt);
++}
++
++/*
++ * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
++ */
++static unsigned int execute_kernexec_fptr(void)
++{
++ basic_block bb;
++
++ // 1. loop through BBs and GIMPLE statements
++ FOR_EACH_BB(bb) {
++ gimple_stmt_iterator gsi;
++
++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
++ tree fn;
++ gimple call_stmt;
++
++ // is it a call ...
++ call_stmt = gsi_stmt(gsi);
++ if (!is_gimple_call(call_stmt))
++ continue;
++ fn = gimple_call_fn(call_stmt);
++ if (TREE_CODE(fn) == ADDR_EXPR)
++ continue;
++ if (TREE_CODE(fn) != SSA_NAME)
++ gcc_unreachable();
++
++ // ... through a function pointer
++ fn = SSA_NAME_VAR(fn);
++ if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
++ continue;
++ fn = TREE_TYPE(fn);
++ if (TREE_CODE(fn) != POINTER_TYPE)
++ continue;
++ fn = TREE_TYPE(fn);
++ if (TREE_CODE(fn) != FUNCTION_TYPE)
++ continue;
++
++ kernexec_instrument_fptr(&gsi);
++
++//debug_tree(gimple_call_fn(call_stmt));
++//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
++ }
++ }
++
++ return 0;
++}
++
++// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
++static void kernexec_instrument_retaddr_bts(rtx insn)
++{
++ rtx btsq;
++ rtvec argvec, constraintvec, labelvec;
++ int line;
++
++ // create asm volatile("btsq $63,(%%rsp)":::)
++ argvec = rtvec_alloc(0);
++ constraintvec = rtvec_alloc(0);
++ labelvec = rtvec_alloc(0);
++ line = expand_location(RTL_LOCATION(insn)).line;
++ btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
++ MEM_VOLATILE_P(btsq) = 1;
++// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
++ emit_insn_before(btsq, insn);
++}
++
++// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
++static void kernexec_instrument_retaddr_or(rtx insn)
++{
++ rtx orq;
++ rtvec argvec, constraintvec, labelvec;
++ int line;
++
++ // create asm volatile("orq %%r10,(%%rsp)":::)
++ argvec = rtvec_alloc(0);
++ constraintvec = rtvec_alloc(0);
++ labelvec = rtvec_alloc(0);
++ line = expand_location(RTL_LOCATION(insn)).line;
++ orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
++ MEM_VOLATILE_P(orq) = 1;
++// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
++ emit_insn_before(orq, insn);
++}
++
++/*
++ * find all asm level function returns and forcibly set the highest bit of the return address
++ */
++static unsigned int execute_kernexec_retaddr(void)
++{
++ rtx insn;
++
++ // 1. find function returns
++ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
++ // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
++ // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
++ rtx body;
++
++ // is it a retn
++ if (!JUMP_P(insn))
++ continue;
++ body = PATTERN(insn);
++ if (GET_CODE(body) == PARALLEL)
++ body = XVECEXP(body, 0, 0);
++ if (GET_CODE(body) != RETURN)
++ continue;
++ kernexec_instrument_retaddr(insn);
++ }
++
++// print_simple_rtl(stderr, get_insns());
++// print_rtl(stderr, get_insns());
++
++ return 0;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ const char * const plugin_name = plugin_info->base_name;
++ const int argc = plugin_info->argc;
++ const struct plugin_argument * const argv = plugin_info->argv;
++ int i;
++ struct register_pass_info kernexec_reload_pass_info = {
++ .pass = &kernexec_reload_pass.pass,
++ .reference_pass_name = "ssa",
++ .ref_pass_instance_number = 0,
++ .pos_op = PASS_POS_INSERT_AFTER
++ };
++ struct register_pass_info kernexec_fptr_pass_info = {
++ .pass = &kernexec_fptr_pass.pass,
++ .reference_pass_name = "ssa",
++ .ref_pass_instance_number = 0,
++ .pos_op = PASS_POS_INSERT_AFTER
++ };
++ struct register_pass_info kernexec_retaddr_pass_info = {
++ .pass = &kernexec_retaddr_pass.pass,
++ .reference_pass_name = "pro_and_epilogue",
++ .ref_pass_instance_number = 0,
++ .pos_op = PASS_POS_INSERT_AFTER
++ };
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
++
++ if (TARGET_64BIT == 0)
++ return 0;
++
++ for (i = 0; i < argc; ++i) {
++ if (!strcmp(argv[i].key, "method")) {
++ if (!argv[i].value) {
++ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ continue;
++ }
++ if (!strcmp(argv[i].value, "bts")) {
++ kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
++ kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
++ } else if (!strcmp(argv[i].value, "or")) {
++ kernexec_instrument_fptr = kernexec_instrument_fptr_or;
++ kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
++ fix_register("r10", 1, 1);
++ } else
++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
++ continue;
++ }
++ error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ }
++ if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
++ error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
++
++ if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
++
++ return 0;
++}
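
The kernexec plugin's bts/or methods above set bit 63 of every function pointer (and of the saved return address) right before it is used, so a pointer smuggled in from userland becomes a non-canonical amd64 address and the indirect call faults instead of executing attacker-controlled code, while genuine kernel pointers already have that bit set and pass through unchanged. The arithmetic is easy to see in isolation (example addresses are made up; nothing is dereferenced):

#include <stdio.h>
#include <stdint.h>

#define KERNEXEC_MASK 0x8000000000000000ULL

int main(void)
{
        uint64_t user_ptr   = 0x00007f0012345678ULL;  /* typical userland address */
        uint64_t kernel_ptr = 0xffffffff81000000ULL;  /* typical kernel text address */

        /* What the instrumented call site effectively does to the call target: */
        printf("user   %016llx -> %016llx (non-canonical, faults on call)\n",
               (unsigned long long)user_ptr,
               (unsigned long long)(user_ptr | KERNEXEC_MASK));
        printf("kernel %016llx -> %016llx (unchanged)\n",
               (unsigned long long)kernel_ptr,
               (unsigned long long)(kernel_ptr | KERNEXEC_MASK));
        return 0;
}
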
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/tools/gcc/Makefile linux-3.4-pax/tools/gcc/Makefile
+--- linux-3.4/tools/gcc/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.4-pax/tools/gcc/Makefile 2012-05-21 12:10:12.492049050 +0200
+@@ -0,0 +1,26 @@
++#CC := gcc
++#PLUGIN_SOURCE_FILES := pax_plugin.c
++#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
++GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
++#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
++
++HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
++CFLAGS_size_overflow_plugin.o := -Wno-missing-initializer
++
++hostlibs-$(CONFIG_PAX_CONSTIFY_PLUGIN) := constify_plugin.so
++hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
++hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
++hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
++hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
++hostlibs-y += colorize_plugin.so
++hostlibs-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
++
++always := $(hostlibs-y)
++
++constify_plugin-objs := constify_plugin.o
++stackleak_plugin-objs := stackleak_plugin.o
++kallocstat_plugin-objs := kallocstat_plugin.o
++kernexec_plugin-objs := kernexec_plugin.o
++checker_plugin-objs := checker_plugin.o
++colorize_plugin-objs := colorize_plugin.o
++size_overflow_plugin-objs := size_overflow_plugin.o
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/tools/gcc/size_overflow_hash.h linux-3.4-pax/tools/gcc/size_overflow_hash.h
+--- linux-3.4/tools/gcc/size_overflow_hash.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.4-pax/tools/gcc/size_overflow_hash.h 2012-05-30 14:10:40.081660405 +0200
+@@ -0,0 +1,13146 @@
++struct size_overflow_hash _000001_hash = {
++ .next = NULL,
++ .name = "alloc_dr",
++ .file = "drivers/base/devres.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000002_hash = {
++ .next = NULL,
++ .name = "__copy_from_user",
++ .file = "arch/x86/include/asm/uaccess_32.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000003_hash = {
++ .next = NULL,
++ .name = "copy_from_user",
++ .file = "arch/x86/include/asm/uaccess_32.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000004_hash = {
++ .next = NULL,
++ .name = "__copy_from_user_inatomic",
++ .file = "arch/x86/include/asm/uaccess_32.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000005_hash = {
++ .next = NULL,
++ .name = "__copy_from_user_nocache",
++ .file = "arch/x86/include/asm/uaccess_32.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000006_hash = {
++ .next = NULL,
++ .name = "__copy_to_user_inatomic",
++ .file = "arch/x86/include/asm/uaccess_32.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000007_hash = {
++ .next = NULL,
++ .name = "do_xip_mapping_read",
++ .file = "mm/filemap_xip.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000008_hash = {
++ .next = NULL,
++ .name = "hugetlbfs_read",
++ .file = "fs/hugetlbfs/inode.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000009_hash = {
++ .next = NULL,
++ .name = "kcalloc",
++ .file = "include/linux/slab.h",
++ .param1 = 1,
++ .param2 = 1,
++};
++struct size_overflow_hash _000011_hash = {
++ .next = NULL,
++ .name = "kmalloc",
++ .file = "include/linux/slub_def.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000012_hash = {
++ .next = NULL,
++ .name = "kmalloc_slab",
++ .file = "include/linux/slub_def.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000013_hash = {
++ .next = NULL,
++ .name = "kmemdup",
++ .file = "include/linux/string.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000014_hash = {
++ .next = NULL,
++ .name = "__krealloc",
++ .file = "include/linux/slab.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000015_hash = {
++ .next = NULL,
++ .name = "memdup_user",
++ .file = "include/linux/string.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000016_hash = {
++ .next = NULL,
++ .name = "module_alloc",
++ .file = "include/linux/moduleloader.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000017_hash = {
++ .next = NULL,
++ .name = "read_default_ldt",
++ .file = "arch/x86/kernel/ldt.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000018_hash = {
++ .next = NULL,
++ .name = "read_kcore",
++ .file = "fs/proc/kcore.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000019_hash = {
++ .next = NULL,
++ .name = "read_ldt",
++ .file = "arch/x86/kernel/ldt.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000020_hash = {
++ .next = NULL,
++ .name = "read_zero",
++ .file = "drivers/char/mem.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000021_hash = {
++ .next = NULL,
++ .name = "__vmalloc_node",
++ .file = "mm/vmalloc.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000022_hash = {
++ .next = NULL,
++ .name = "vm_map_ram",
++ .file = "include/linux/vmalloc.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000023_hash = {
++ .next = NULL,
++ .name = "aa_simple_write_to_buffer",
++ .file = "security/apparmor/apparmorfs.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000024_hash = {
++ .next = NULL,
++ .name = "ablkcipher_copy_iv",
++ .file = "crypto/ablkcipher.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000025_hash = {
++ .next = NULL,
++ .name = "ablkcipher_next_slow",
++ .file = "crypto/ablkcipher.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000026_hash = {
++ .next = NULL,
++ .name = "acpi_os_allocate",
++ .file = "include/acpi/platform/aclinux.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000027_hash = {
++ .next = NULL,
++ .name = "acpi_system_write_wakeup_device",
++ .file = "drivers/acpi/proc.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000028_hash = {
++ .next = NULL,
++ .name = "ahash_setkey_unaligned",
++ .file = "crypto/ahash.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000029_hash = {
++ .next = NULL,
++ .name = "alloc_fdmem",
++ .file = "fs/file.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000030_hash = {
++ .next = NULL,
++ .name = "audit_unpack_string",
++ .file = "kernel/auditfilter.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000031_hash = {
++ .next = NULL,
++ .name = "bio_alloc_map_data",
++ .file = "fs/bio.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000032_hash = {
++ .next = NULL,
++ .name = "bio_kmalloc",
++ .file = "include/linux/bio.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000033_hash = {
++ .next = NULL,
++ .name = "blkcipher_copy_iv",
++ .file = "crypto/blkcipher.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000034_hash = {
++ .next = NULL,
++ .name = "blkcipher_next_slow",
++ .file = "crypto/blkcipher.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000035_hash = {
++ .next = NULL,
++ .name = "cgroup_write_string",
++ .file = "kernel/cgroup.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000036_hash = {
++ .next = NULL,
++ .name = "cgroup_write_X64",
++ .file = "kernel/cgroup.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000037_hash = {
++ .next = NULL,
++ .name = "clear_refs_write",
++ .file = "fs/proc/task_mmu.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000038_hash = {
++ .next = NULL,
++ .name = "comm_write",
++ .file = "fs/proc/base.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000039_hash = {
++ .next = NULL,
++ .name = "copy_and_check",
++ .file = "kernel/module.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000040_hash = {
++ .next = NULL,
++ .name = "__copy_to_user",
++ .file = "arch/x86/include/asm/uaccess_32.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000041_hash = {
++ .next = NULL,
++ .name = "copy_vm86_regs_from_user",
++ .file = "arch/x86/kernel/vm86_32.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000042_hash = {
++ .next = NULL,
++ .name = "csum_partial_copy_fromiovecend",
++ .file = "include/linux/socket.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000043_hash = {
++ .next = NULL,
++ .name = "ddebug_proc_write",
++ .file = "lib/dynamic_debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000044_hash = {
++ .next = NULL,
++ .name = "devm_kzalloc",
++ .file = "include/linux/device.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000045_hash = {
++ .next = NULL,
++ .name = "devres_alloc",
++ .file = "include/linux/device.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000046_hash = {
++ .next = NULL,
++ .name = "do_ip_setsockopt",
++ .file = "net/ipv4/ip_sockglue.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000047_hash = {
++ .next = NULL,
++ .name = "do_kimage_alloc",
++ .file = "kernel/kexec.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000048_hash = {
++ .next = NULL,
++ .name = "do_tty_write",
++ .file = "drivers/tty/tty_io.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000049_hash = {
++ .next = NULL,
++ .name = "fanotify_write",
++ .file = "fs/notify/fanotify/fanotify_user.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000050_hash = {
++ .next = NULL,
++ .name = "file_read_actor",
++ .file = "include/linux/fs.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000051_hash = {
++ .next = NULL,
++ .name = "fill_write_buffer",
++ .file = "fs/sysfs/file.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000052_hash = {
++ .next = NULL,
++ .name = "get_user_cpu_mask",
++ .file = "kernel/sched/core.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000053_hash = {
++ .next = NULL,
++ .name = "hashtab_create",
++ .file = "security/selinux/ss/hashtab.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000054_hash = {
++ .next = NULL,
++ .name = "heap_init",
++ .file = "include/linux/prio_heap.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000055_hash = {
++ .next = NULL,
++ .name = "hest_ghes_dev_register",
++ .file = "drivers/acpi/apei/hest.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000056_hash = {
++ .next = NULL,
++ .name = "ima_write_policy",
++ .file = "security/integrity/ima/ima_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000057_hash = {
++ .next = NULL,
++ .name = "input_ff_create",
++ .file = "include/linux/input.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000058_hash = {
++ .next = NULL,
++ .name = "input_mt_init_slots",
++ .file = "include/linux/input/mt.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000059_hash = {
++ .next = NULL,
++ .name = "iov_iter_copy_from_user",
++ .file = "include/linux/fs.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000060_hash = {
++ .next = NULL,
++ .name = "iov_iter_copy_from_user_atomic",
++ .file = "include/linux/fs.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000061_hash = {
++ .next = NULL,
++ .name = "keyctl_instantiate_key_common",
++ .file = "security/keys/keyctl.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000062_hash = {
++ .next = NULL,
++ .name = "keyctl_update_key",
++ .file = "security/keys/keyctl.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000063_hash = {
++ .next = NULL,
++ .name = "__kfifo_alloc",
++ .file = "include/linux/kfifo.h",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _000065_hash = {
++ .next = NULL,
++ .name = "kfifo_copy_from_user",
++ .file = "kernel/kfifo.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000066_hash = {
++ .next = NULL,
++ .name = "kmalloc_node",
++ .file = "include/linux/slab.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000067_hash = {
++ .next = NULL,
++ .name = "kmalloc_parameter",
++ .file = "kernel/params.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000068_hash = {
++ .next = NULL,
++ .name = "kobj_map",
++ .file = "include/linux/kobj_map.h",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _000070_hash = {
++ .next = NULL,
++ .name = "krealloc",
++ .file = "include/linux/slab.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000071_hash = {
++ .next = NULL,
++ .name = "kvmalloc",
++ .file = "security/apparmor/lib.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000072_hash = {
++ .next = NULL,
++ .name = "kzalloc",
++ .file = "include/linux/slab.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000073_hash = {
++ .next = NULL,
++ .name = "listxattr",
++ .file = "fs/xattr.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000074_hash = {
++ .next = NULL,
++ .name = "mempool_kmalloc",
++ .file = "include/linux/mempool.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000075_hash = {
++ .next = NULL,
++ .name = "mem_rw",
++ .file = "fs/proc/base.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000076_hash = {
++ .next = NULL,
++ .name = "module_alloc_update_bounds",
++ .file = "kernel/module.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000077_hash = {
++ .next = NULL,
++ .name = "mpi_alloc_limb_space",
++ .file = "lib/mpi/mpiutil.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000078_hash = {
++ .next = NULL,
++ .name = "mpi_resize",
++ .file = "include/linux/mpi.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000079_hash = {
++ .next = NULL,
++ .name = "mtrr_write",
++ .file = "arch/x86/kernel/cpu/mtrr/if.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000080_hash = {
++ .next = NULL,
++ .name = "oom_adjust_write",
++ .file = "fs/proc/base.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000081_hash = {
++ .next = NULL,
++ .name = "oom_score_adj_write",
++ .file = "fs/proc/base.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000082_hash = {
++ .next = NULL,
++ .name = "pipe_iov_copy_from_user",
++ .file = "fs/pipe.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000083_hash = {
++ .next = NULL,
++ .name = "pipe_iov_copy_to_user",
++ .file = "fs/pipe.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000084_hash = {
++ .next = NULL,
++ .name = "pipe_set_size",
++ .file = "fs/pipe.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000085_hash = {
++ .next = NULL,
++ .name = "platform_device_add_data",
++ .file = "include/linux/platform_device.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000086_hash = {
++ .next = NULL,
++ .name = "platform_device_add_resources",
++ .file = "include/linux/platform_device.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000087_hash = {
++ .next = NULL,
++ .name = "pm_qos_power_write",
++ .file = "kernel/power/qos.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000088_hash = {
++ .next = NULL,
++ .name = "pnpbios_proc_write",
++ .file = "drivers/pnp/pnpbios/proc.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000089_hash = {
++ .next = NULL,
++ .name = "__probe_kernel_read",
++ .file = "include/linux/uaccess.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000090_hash = {
++ .next = NULL,
++ .name = "__probe_kernel_write",
++ .file = "include/linux/uaccess.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000091_hash = {
++ .next = NULL,
++ .name = "proc_coredump_filter_write",
++ .file = "fs/proc/base.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000092_hash = {
++ .next = NULL,
++ .name = "process_vm_rw_pages",
++ .file = "mm/process_vm_access.c",
++ .param5 = 1,
++ .param6 = 1,
++};
++struct size_overflow_hash _000094_hash = {
++ .next = NULL,
++ .name = "proc_loginuid_write",
++ .file = "fs/proc/base.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000095_hash = {
++ .next = NULL,
++ .name = "proc_pid_attr_write",
++ .file = "fs/proc/base.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000096_hash = {
++ .next = NULL,
++ .name = "pstore_mkfile",
++ .file = "fs/pstore/inode.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000097_hash = {
++ .next = NULL,
++ .name = "qdisc_class_hash_alloc",
++ .file = "net/sched/sch_api.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000098_hash = {
++ .next = NULL,
++ .name = "read",
++ .file = "fs/sysfs/bin.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000099_hash = {
++ .next = NULL,
++ .name = "regmap_access_read_file",
++ .file = "drivers/base/regmap/regmap-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000100_hash = {
++ .next = NULL,
++ .name = "regmap_map_read_file",
++ .file = "drivers/base/regmap/regmap-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000101_hash = {
++ .next = NULL,
++ .name = "_regmap_raw_write",
++ .file = "drivers/base/regmap/regmap.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000102_hash = {
++ .next = NULL,
++ .name = "regset_tls_set",
++ .file = "arch/x86/kernel/tls.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000103_hash = {
++ .next = NULL,
++ .name = "request_key_auth_new",
++ .file = "security/keys/request_key_auth.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000104_hash = {
++ .next = NULL,
++ .name = "restore_i387_fxsave",
++ .file = "arch/x86/kernel/i387.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000105_hash = {
++ .next = NULL,
++ .name = "rngapi_reset",
++ .file = "crypto/rng.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000106_hash = {
++ .next = NULL,
++ .name = "rw_copy_check_uvector",
++ .file = "include/linux/fs.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000107_hash = {
++ .next = NULL,
++ .name = "sched_autogroup_write",
++ .file = "fs/proc/base.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000108_hash = {
++ .next = NULL,
++ .name = "security_context_to_sid_core",
++ .file = "security/selinux/ss/services.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000109_hash = {
++ .next = NULL,
++ .name = "sel_commit_bools_write",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000110_hash = {
++ .next = NULL,
++ .name = "sel_write_avc_cache_threshold",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000111_hash = {
++ .next = NULL,
++ .name = "sel_write_bool",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000112_hash = {
++ .next = NULL,
++ .name = "sel_write_checkreqprot",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000113_hash = {
++ .next = NULL,
++ .name = "sel_write_disable",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000114_hash = {
++ .next = NULL,
++ .name = "sel_write_enforce",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000115_hash = {
++ .next = NULL,
++ .name = "sel_write_load",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000116_hash = {
++ .next = NULL,
++ .name = "setkey_unaligned",
++ .file = "crypto/ablkcipher.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000117_hash = {
++ .next = NULL,
++ .name = "setkey_unaligned",
++ .file = "crypto/blkcipher.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000118_hash = {
++ .next = NULL,
++ .name = "setkey_unaligned",
++ .file = "crypto/aead.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000119_hash = {
++ .next = NULL,
++ .name = "setkey_unaligned",
++ .file = "crypto/cipher.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000120_hash = {
++ .next = NULL,
++ .name = "setxattr",
++ .file = "fs/xattr.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000121_hash = {
++ .next = NULL,
++ .name = "sg_kmalloc",
++ .file = "lib/scatterlist.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000122_hash = {
++ .next = NULL,
++ .name = "shash_setkey_unaligned",
++ .file = "crypto/shash.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000123_hash = {
++ .next = NULL,
++ .name = "shmem_xattr_set",
++ .file = "mm/shmem.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000124_hash = {
++ .next = NULL,
++ .name = "simple_transaction_get",
++ .file = "include/linux/fs.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000125_hash = {
++ .next = NULL,
++ .name = "simple_write_to_buffer",
++ .file = "include/linux/fs.h",
++ .param2 = 1,
++ .param5 = 1,
++};
++struct size_overflow_hash _000127_hash = {
++ .next = NULL,
++ .name = "smk_write_ambient",
++ .file = "security/smack/smackfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000128_hash = {
++ .next = NULL,
++ .name = "smk_write_cipso",
++ .file = "security/smack/smackfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000129_hash = {
++ .next = NULL,
++ .name = "smk_write_direct",
++ .file = "security/smack/smackfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000130_hash = {
++ .next = NULL,
++ .name = "smk_write_doi",
++ .file = "security/smack/smackfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000131_hash = {
++ .next = NULL,
++ .name = "smk_write_load_list",
++ .file = "security/smack/smackfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000132_hash = {
++ .next = &_000102_hash,
++ .name = "smk_write_logging",
++ .file = "security/smack/smackfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000133_hash = {
++ .next = NULL,
++ .name = "smk_write_netlbladdr",
++ .file = "security/smack/smackfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000134_hash = {
++ .next = NULL,
++ .name = "smk_write_onlycap",
++ .file = "security/smack/smackfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000135_hash = {
++ .next = NULL,
++ .name = "sys_add_key",
++ .file = "include/linux/syscalls.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000136_hash = {
++ .next = NULL,
++ .name = "sys_modify_ldt",
++ .file = "arch/x86/include/asm/syscalls.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000137_hash = {
++ .next = NULL,
++ .name = "sys_semtimedop",
++ .file = "include/linux/syscalls.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000138_hash = {
++ .next = NULL,
++ .name = "tomoyo_write_self",
++ .file = "security/tomoyo/securityfs_if.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000139_hash = {
++ .next = NULL,
++ .name = "tpm_write",
++ .file = "drivers/char/tpm/tpm.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000140_hash = {
++ .next = NULL,
++ .name = "tty_buffer_alloc",
++ .file = "drivers/tty/tty_buffer.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000141_hash = {
++ .next = NULL,
++ .name = "user_instantiate",
++ .file = "include/keys/user-type.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000142_hash = {
++ .next = NULL,
++ .name = "user_update",
++ .file = "include/keys/user-type.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000143_hash = {
++ .next = NULL,
++ .name = "vc_do_resize",
++ .file = "drivers/tty/vt/vt.c",
++ .param3 = 1,
++ .param4 = 1,
++};
++struct size_overflow_hash _000145_hash = {
++ .next = NULL,
++ .name = "vcs_write",
++ .file = "drivers/tty/vt/vc_screen.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000146_hash = {
++ .next = NULL,
++ .name = "vga_arb_write",
++ .file = "drivers/gpu/vga/vgaarb.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000147_hash = {
++ .next = NULL,
++ .name = "vga_switcheroo_debugfs_write",
++ .file = "drivers/gpu/vga/vga_switcheroo.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000148_hash = {
++ .next = NULL,
++ .name = "__vmalloc",
++ .file = "include/linux/vmalloc.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000149_hash = {
++ .next = NULL,
++ .name = "vmalloc_32",
++ .file = "include/linux/vmalloc.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000150_hash = {
++ .next = NULL,
++ .name = "vmalloc_32_user",
++ .file = "include/linux/vmalloc.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000151_hash = {
++ .next = NULL,
++ .name = "vmalloc_exec",
++ .file = "include/linux/vmalloc.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000152_hash = {
++ .next = NULL,
++ .name = "vmalloc_node",
++ .file = "include/linux/vmalloc.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000153_hash = {
++ .next = NULL,
++ .name = "__vmalloc_node_flags",
++ .file = "mm/vmalloc.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000154_hash = {
++ .next = NULL,
++ .name = "vmalloc_user",
++ .file = "include/linux/vmalloc.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000155_hash = {
++ .next = NULL,
++ .name = "write",
++ .file = "fs/sysfs/bin.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000156_hash = {
++ .next = NULL,
++ .name = "__xip_file_write",
++ .file = "mm/filemap_xip.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000157_hash = {
++ .next = NULL,
++ .name = "acpi_ex_allocate_name_string",
++ .file = "drivers/acpi/acpica/exnames.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000158_hash = {
++ .next = NULL,
++ .name = "acpi_os_allocate_zeroed",
++ .file = "include/acpi/platform/aclinux.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000159_hash = {
++ .next = NULL,
++ .name = "acpi_ut_initialize_buffer",
++ .file = "drivers/acpi/acpica/utalloc.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000160_hash = {
++ .next = NULL,
++ .name = "add_numbered_child",
++ .file = "drivers/mfd/twl-core.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000161_hash = {
++ .next = NULL,
++ .name = "___alloc_bootmem_nopanic",
++ .file = "mm/nobootmem.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000162_hash = {
++ .next = NULL,
++ .name = "alloc_large_system_hash",
++ .file = "include/linux/bootmem.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000163_hash = {
++ .next = NULL,
++ .name = "audit_init_entry",
++ .file = "kernel/auditfilter.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000164_hash = {
++ .next = NULL,
++ .name = "__bio_map_kern",
++ .file = "fs/bio.c",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _000166_hash = {
++ .next = NULL,
++ .name = "blk_register_region",
++ .file = "include/linux/genhd.h",
++ .param1 = 1,
++ .param2 = 1,
++};
++struct size_overflow_hash _000168_hash = {
++ .next = NULL,
++ .name = "cdev_add",
++ .file = "include/linux/cdev.h",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _000170_hash = {
++ .next = NULL,
++ .name = "copy_to_user",
++ .file = "arch/x86/include/asm/uaccess_32.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000171_hash = {
++ .next = NULL,
++ .name = "crypto_ahash_setkey",
++ .file = "include/crypto/hash.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000172_hash = {
++ .next = NULL,
++ .name = "crypto_alloc_instance2",
++ .file = "include/crypto/algapi.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000173_hash = {
++ .next = NULL,
++ .name = "crypto_shash_setkey",
++ .file = "include/crypto/hash.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000174_hash = {
++ .next = NULL,
++ .name = "dev_set_alias",
++ .file = "include/linux/netdevice.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000175_hash = {
++ .next = NULL,
++ .name = "do_readv_writev",
++ .file = "fs/read_write.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000176_hash = {
++ .next = NULL,
++ .name = "getxattr",
++ .file = "fs/xattr.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000177_hash = {
++ .next = NULL,
++ .name = "hugetlbfs_read_actor",
++ .file = "fs/hugetlbfs/inode.c",
++ .param2 = 1,
++ .param5 = 1,
++ .param4 = 1,
++};
++struct size_overflow_hash _000180_hash = {
++ .next = NULL,
++ .name = "keyctl_instantiate_key",
++ .file = "security/keys/keyctl.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000181_hash = {
++ .next = NULL,
++ .name = "keyctl_instantiate_key_iov",
++ .file = "security/keys/keyctl.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000182_hash = {
++ .next = NULL,
++ .name = "__kfifo_from_user",
++ .file = "include/linux/kfifo.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000183_hash = {
++ .next = NULL,
++ .name = "kimage_crash_alloc",
++ .file = "kernel/kexec.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000184_hash = {
++ .next = NULL,
++ .name = "kimage_normal_alloc",
++ .file = "kernel/kexec.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000185_hash = {
++ .next = NULL,
++ .name = "mpi_alloc",
++ .file = "include/linux/mpi.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000186_hash = {
++ .next = NULL,
++ .name = "mpi_set_bit",
++ .file = "include/linux/mpi.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000187_hash = {
++ .next = NULL,
++ .name = "mpi_set_highbit",
++ .file = "include/linux/mpi.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000188_hash = {
++ .next = NULL,
++ .name = "neigh_hash_alloc",
++ .file = "net/core/neighbour.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000189_hash = {
++ .next = NULL,
++ .name = "nl_pid_hash_zalloc",
++ .file = "net/netlink/af_netlink.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000190_hash = {
++ .next = NULL,
++ .name = "pci_add_cap_save_buffer",
++ .file = "drivers/pci/pci.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000191_hash = {
++ .next = NULL,
++ .name = "pcpu_mem_zalloc",
++ .file = "mm/percpu.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000192_hash = {
++ .next = NULL,
++ .name = "platform_create_bundle",
++ .file = "include/linux/platform_device.h",
++ .param4 = 1,
++ .param6 = 1,
++};
++struct size_overflow_hash _000194_hash = {
++ .next = NULL,
++ .name = "process_vm_rw",
++ .file = "mm/process_vm_access.c",
++ .param3 = 1,
++ .param5 = 1,
++};
++struct size_overflow_hash _000196_hash = {
++ .next = NULL,
++ .name = "process_vm_rw_single_vec",
++ .file = "mm/process_vm_access.c",
++ .param1 = 1,
++ .param2 = 1,
++};
++struct size_overflow_hash _000198_hash = {
++ .next = NULL,
++ .name = "profile_load",
++ .file = "security/apparmor/apparmorfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000199_hash = {
++ .next = NULL,
++ .name = "profile_remove",
++ .file = "security/apparmor/apparmorfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000200_hash = {
++ .next = NULL,
++ .name = "profile_replace",
++ .file = "security/apparmor/apparmorfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000201_hash = {
++ .next = NULL,
++ .name = "regcache_rbtree_insert_to_block",
++ .file = "drivers/base/regmap/regcache-rbtree.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000202_hash = {
++ .next = NULL,
++ .name = "regmap_raw_write",
++ .file = "include/linux/regmap.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000203_hash = {
++ .next = NULL,
++ .name = "relay_alloc_page_array",
++ .file = "kernel/relay.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000204_hash = {
++ .next = NULL,
++ .name = "RESIZE_IF_NEEDED",
++ .file = "lib/mpi/mpi-internal.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000205_hash = {
++ .next = NULL,
++ .name = "security_context_to_sid",
++ .file = "security/selinux/ss/services.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000206_hash = {
++ .next = NULL,
++ .name = "security_context_to_sid_default",
++ .file = "security/selinux/ss/services.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000207_hash = {
++ .next = NULL,
++ .name = "security_context_to_sid_force",
++ .file = "security/selinux/ss/services.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000208_hash = {
++ .next = NULL,
++ .name = "selinux_transaction_write",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000209_hash = {
++ .next = NULL,
++ .name = "sel_write_access",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000210_hash = {
++ .next = NULL,
++ .name = "sel_write_create",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000211_hash = {
++ .next = NULL,
++ .name = "sel_write_member",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000212_hash = {
++ .next = NULL,
++ .name = "sel_write_relabel",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000213_hash = {
++ .next = NULL,
++ .name = "sel_write_user",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000214_hash = {
++ .next = NULL,
++ .name = "setkey",
++ .file = "crypto/cipher.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000215_hash = {
++ .next = NULL,
++ .name = "setkey",
++ .file = "crypto/ablkcipher.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000216_hash = {
++ .next = NULL,
++ .name = "setkey",
++ .file = "crypto/aead.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000217_hash = {
++ .next = NULL,
++ .name = "setkey",
++ .file = "crypto/blkcipher.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000218_hash = {
++ .next = NULL,
++ .name = "smk_write_access",
++ .file = "security/smack/smackfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000219_hash = {
++ .next = NULL,
++ .name = "snapshot_write",
++ .file = "kernel/power/user.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000220_hash = {
++ .next = NULL,
++ .name = "spi_alloc_master",
++ .file = "include/linux/spi/spi.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000221_hash = {
++ .next = NULL,
++ .name = "spi_register_board_info",
++ .file = "include/linux/spi/spi.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000222_hash = {
++ .next = NULL,
++ .name = "sys_flistxattr",
++ .file = "include/linux/syscalls.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000223_hash = {
++ .next = NULL,
++ .name = "sys_fsetxattr",
++ .file = "include/linux/syscalls.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000224_hash = {
++ .next = NULL,
++ .name = "sysfs_write_file",
++ .file = "fs/sysfs/file.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000225_hash = {
++ .next = NULL,
++ .name = "sys_ipc",
++ .file = "include/linux/syscalls.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000226_hash = {
++ .next = NULL,
++ .name = "sys_keyctl",
++ .file = "include/linux/syscalls.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000227_hash = {
++ .next = NULL,
++ .name = "sys_listxattr",
++ .file = "include/linux/syscalls.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000228_hash = {
++ .next = NULL,
++ .name = "sys_llistxattr",
++ .file = "include/linux/syscalls.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000229_hash = {
++ .next = NULL,
++ .name = "sys_lsetxattr",
++ .file = "include/linux/syscalls.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000230_hash = {
++ .next = NULL,
++ .name = "sys_sched_setaffinity",
++ .file = "include/linux/syscalls.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000231_hash = {
++ .next = NULL,
++ .name = "sys_semop",
++ .file = "include/linux/syscalls.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000232_hash = {
++ .next = NULL,
++ .name = "sys_setxattr",
++ .file = "include/linux/syscalls.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000233_hash = {
++ .next = NULL,
++ .name = "tnode_alloc",
++ .file = "net/ipv4/fib_trie.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000234_hash = {
++ .next = NULL,
++ .name = "tomoyo_commit_ok",
++ .file = "security/tomoyo/memory.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000235_hash = {
++ .next = NULL,
++ .name = "tomoyo_scan_bprm",
++ .file = "security/tomoyo/condition.c",
++ .param2 = 1,
++ .param4 = 1,
++};
++struct size_overflow_hash _000237_hash = {
++ .next = NULL,
++ .name = "tty_write",
++ .file = "drivers/tty/tty_io.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000238_hash = {
++ .next = NULL,
++ .name = "vc_resize",
++ .file = "include/linux/vt_kern.h",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _000240_hash = {
++ .next = NULL,
++ .name = "vmalloc",
++ .file = "include/linux/vmalloc.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000241_hash = {
++ .next = NULL,
++ .name = "vzalloc",
++ .file = "include/linux/vmalloc.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000242_hash = {
++ .next = NULL,
++ .name = "vzalloc_node",
++ .file = "include/linux/vmalloc.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000243_hash = {
++ .next = NULL,
++ .name = "xfrm_hash_alloc",
++ .file = "net/xfrm/xfrm_hash.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000244_hash = {
++ .next = NULL,
++ .name = "acpi_ds_build_internal_package_obj",
++ .file = "drivers/acpi/acpica/dsobject.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000245_hash = {
++ .next = NULL,
++ .name = "acpi_system_read_event",
++ .file = "drivers/acpi/event.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000246_hash = {
++ .next = NULL,
++ .name = "acpi_ut_create_buffer_object",
++ .file = "drivers/acpi/acpica/utobject.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000247_hash = {
++ .next = NULL,
++ .name = "acpi_ut_create_package_object",
++ .file = "drivers/acpi/acpica/utobject.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000248_hash = {
++ .next = NULL,
++ .name = "acpi_ut_create_string_object",
++ .file = "drivers/acpi/acpica/utobject.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000249_hash = {
++ .next = NULL,
++ .name = "add_child",
++ .file = "drivers/mfd/twl-core.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000250_hash = {
++ .next = NULL,
++ .name = "___alloc_bootmem",
++ .file = "mm/nobootmem.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000251_hash = {
++ .next = NULL,
++ .name = "__alloc_bootmem_nopanic",
++ .file = "include/linux/bootmem.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000252_hash = {
++ .next = NULL,
++ .name = "async_setkey",
++ .file = "crypto/blkcipher.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000253_hash = {
++ .next = NULL,
++ .name = "bio_map_kern",
++ .file = "include/linux/bio.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000254_hash = {
++ .next = NULL,
++ .name = "copy_oldmem_page",
++ .file = "include/linux/crash_dump.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000255_hash = {
++ .next = NULL,
++ .name = "do_sigpending",
++ .file = "include/linux/signal.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000257_hash = {
++ .next = NULL,
++ .name = "keyctl_describe_key",
++ .file = "security/keys/keyctl.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000258_hash = {
++ .next = NULL,
++ .name = "keyctl_get_security",
++ .file = "security/keys/keyctl.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000259_hash = {
++ .next = NULL,
++ .name = "keyring_read",
++ .file = "security/keys/keyring.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000260_hash = {
++ .next = NULL,
++ .name = "kfifo_copy_to_user",
++ .file = "kernel/kfifo.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000261_hash = {
++ .next = NULL,
++ .name = "mousedev_read",
++ .file = "drivers/input/mousedev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000262_hash = {
++ .next = NULL,
++ .name = "mpi_lshift_limbs",
++ .file = "lib/mpi/mpi-bit.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000263_hash = {
++ .next = NULL,
++ .name = "neigh_hash_grow",
++ .file = "net/core/neighbour.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000264_hash = {
++ .next = NULL,
++ .name = "posix_clock_register",
++ .file = "include/linux/posix-clock.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000265_hash = {
++ .next = NULL,
++ .name = "__proc_file_read",
++ .file = "fs/proc/generic.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000266_hash = {
++ .next = NULL,
++ .name = "read_profile",
++ .file = "kernel/profile.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000267_hash = {
++ .next = NULL,
++ .name = "read_vmcore",
++ .file = "fs/proc/vmcore.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000268_hash = {
++ .next = NULL,
++ .name = "redirected_tty_write",
++ .file = "drivers/tty/tty_io.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000269_hash = {
++ .next = NULL,
++ .name = "__register_chrdev",
++ .file = "include/linux/fs.h",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _000271_hash = {
++ .next = NULL,
++ .name = "request_key_auth_read",
++ .file = "security/keys/request_key_auth.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000272_hash = {
++ .next = NULL,
++ .name = "shash_async_setkey",
++ .file = "crypto/shash.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000273_hash = {
++ .next = NULL,
++ .name = "shash_compat_setkey",
++ .file = "crypto/shash.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000274_hash = {
++ .next = NULL,
++ .name = "simple_read_from_buffer",
++ .file = "include/linux/fs.h",
++ .param2 = 1,
++ .param5 = 1,
++};
++struct size_overflow_hash _000276_hash = {
++ .next = NULL,
++ .name = "store_ifalias",
++ .file = "net/core/net-sysfs.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000277_hash = {
++ .next = NULL,
++ .name = "subbuf_read_actor",
++ .file = "kernel/relay.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000278_hash = {
++ .next = NULL,
++ .name = "sys_fgetxattr",
++ .file = "include/linux/syscalls.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000279_hash = {
++ .next = NULL,
++ .name = "sys_getxattr",
++ .file = "include/linux/syscalls.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000280_hash = {
++ .next = NULL,
++ .name = "sys_kexec_load",
++ .file = "include/linux/syscalls.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000281_hash = {
++ .next = NULL,
++ .name = "sys_lgetxattr",
++ .file = "include/linux/syscalls.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000282_hash = {
++ .next = NULL,
++ .name = "sys_process_vm_readv",
++ .file = "include/linux/syscalls.h",
++ .param3 = 1,
++ .param5 = 1,
++};
++struct size_overflow_hash _000284_hash = {
++ .next = NULL,
++ .name = "sys_process_vm_writev",
++ .file = "include/linux/syscalls.h",
++ .param3 = 1,
++ .param5 = 1,
++};
++struct size_overflow_hash _000286_hash = {
++ .next = NULL,
++ .name = "sys_sched_getaffinity",
++ .file = "include/linux/syscalls.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000287_hash = {
++ .next = NULL,
++ .name = "tomoyo_read_self",
++ .file = "security/tomoyo/securityfs_if.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000288_hash = {
++ .next = NULL,
++ .name = "tpm_read",
++ .file = "drivers/char/tpm/tpm.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000289_hash = {
++ .next = NULL,
++ .name = "user_read",
++ .file = "include/keys/user-type.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000290_hash = {
++ .next = NULL,
++ .name = "vcs_read",
++ .file = "drivers/tty/vt/vc_screen.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000291_hash = {
++ .next = NULL,
++ .name = "vfs_readv",
++ .file = "include/linux/fs.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000292_hash = {
++ .next = NULL,
++ .name = "vfs_writev",
++ .file = "include/linux/fs.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000293_hash = {
++ .next = NULL,
++ .name = "vga_arb_read",
++ .file = "drivers/gpu/vga/vgaarb.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000294_hash = {
++ .next = NULL,
++ .name = "xz_dec_lzma2_create",
++ .file = "lib/xz/xz_dec_lzma2.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000295_hash = {
++ .next = NULL,
++ .name = "aat2870_reg_read_file",
++ .file = "drivers/mfd/aat2870-core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000296_hash = {
++ .next = NULL,
++ .name = "__alloc_bootmem",
++ .file = "include/linux/bootmem.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000297_hash = {
++ .next = NULL,
++ .name = "__alloc_bootmem_low",
++ .file = "include/linux/bootmem.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000298_hash = {
++ .next = NULL,
++ .name = "__alloc_bootmem_node_nopanic",
++ .file = "include/linux/bootmem.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000299_hash = {
++ .next = NULL,
++ .name = "blk_rq_map_kern",
++ .file = "include/linux/blkdev.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000300_hash = {
++ .next = NULL,
++ .name = "cgroup_read_s64",
++ .file = "kernel/cgroup.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000301_hash = {
++ .next = NULL,
++ .name = "cgroup_read_u64",
++ .file = "kernel/cgroup.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000302_hash = {
++ .next = NULL,
++ .name = "cpuset_common_file_read",
++ .file = "kernel/cpuset.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000303_hash = {
++ .next = NULL,
++ .name = "filter_read",
++ .file = "lib/dma-debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000304_hash = {
++ .next = NULL,
++ .name = "ima_show_htable_value",
++ .file = "security/integrity/ima/ima_fs.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000305_hash = {
++ .next = NULL,
++ .name = "kernel_readv",
++ .file = "fs/splice.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000306_hash = {
++ .next = NULL,
++ .name = "__kfifo_to_user",
++ .file = "include/linux/kfifo.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000307_hash = {
++ .next = NULL,
++ .name = "__kfifo_to_user_r",
++ .file = "include/linux/kfifo.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000308_hash = {
++ .next = NULL,
++ .name = "mqueue_read_file",
++ .file = "ipc/mqueue.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000309_hash = {
++ .next = NULL,
++ .name = "oom_adjust_read",
++ .file = "fs/proc/base.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000310_hash = {
++ .next = NULL,
++ .name = "oom_score_adj_read",
++ .file = "fs/proc/base.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000311_hash = {
++ .next = NULL,
++ .name = "pm_qos_power_read",
++ .file = "kernel/power/qos.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000312_hash = {
++ .next = NULL,
++ .name = "proc_coredump_filter_read",
++ .file = "fs/proc/base.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000313_hash = {
++ .next = NULL,
++ .name = "proc_fdinfo_read",
++ .file = "fs/proc/base.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000314_hash = {
++ .next = NULL,
++ .name = "proc_info_read",
++ .file = "fs/proc/base.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000315_hash = {
++ .next = NULL,
++ .name = "proc_loginuid_read",
++ .file = "fs/proc/base.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000316_hash = {
++ .next = NULL,
++ .name = "proc_pid_attr_read",
++ .file = "fs/proc/base.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000317_hash = {
++ .next = NULL,
++ .name = "proc_sessionid_read",
++ .file = "fs/proc/base.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000318_hash = {
++ .next = NULL,
++ .name = "pstore_file_read",
++ .file = "fs/pstore/inode.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000319_hash = {
++ .next = NULL,
++ .name = "read_enabled_file_bool",
++ .file = "kernel/kprobes.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000320_hash = {
++ .next = NULL,
++ .name = "read_file_blob",
++ .file = "fs/debugfs/file.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000321_hash = {
++ .next = NULL,
++ .name = "read_file_bool",
++ .file = "fs/debugfs/file.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000322_hash = {
++ .next = NULL,
++ .name = "read_from_oldmem",
++ .file = "fs/proc/vmcore.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000323_hash = {
++ .next = NULL,
++ .name = "read_oldmem",
++ .file = "drivers/char/mem.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000324_hash = {
++ .next = NULL,
++ .name = "res_counter_read",
++ .file = "include/linux/res_counter.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000325_hash = {
++ .next = NULL,
++ .name = "sel_read_avc_cache_threshold",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000326_hash = {
++ .next = NULL,
++ .name = "sel_read_avc_hash_stats",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000327_hash = {
++ .next = NULL,
++ .name = "sel_read_bool",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000328_hash = {
++ .next = NULL,
++ .name = "sel_read_checkreqprot",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000329_hash = {
++ .next = NULL,
++ .name = "sel_read_class",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000330_hash = {
++ .next = NULL,
++ .name = "sel_read_enforce",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000331_hash = {
++ .next = NULL,
++ .name = "sel_read_handle_status",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000332_hash = {
++ .next = NULL,
++ .name = "sel_read_handle_unknown",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000333_hash = {
++ .next = NULL,
++ .name = "sel_read_initcon",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000334_hash = {
++ .next = NULL,
++ .name = "sel_read_mls",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000335_hash = {
++ .next = NULL,
++ .name = "sel_read_perm",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000336_hash = {
++ .next = NULL,
++ .name = "sel_read_policy",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000337_hash = {
++ .next = NULL,
++ .name = "sel_read_policycap",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000338_hash = {
++ .next = NULL,
++ .name = "sel_read_policyvers",
++ .file = "security/selinux/selinuxfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000339_hash = {
++ .next = NULL,
++ .name = "simple_attr_read",
++ .file = "include/linux/fs.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000340_hash = {
++ .next = NULL,
++ .name = "simple_transaction_read",
++ .file = "include/linux/fs.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000341_hash = {
++ .next = NULL,
++ .name = "smk_read_ambient",
++ .file = "security/smack/smackfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000342_hash = {
++ .next = NULL,
++ .name = "smk_read_direct",
++ .file = "security/smack/smackfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000343_hash = {
++ .next = NULL,
++ .name = "smk_read_doi",
++ .file = "security/smack/smackfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000344_hash = {
++ .next = NULL,
++ .name = "smk_read_logging",
++ .file = "security/smack/smackfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000345_hash = {
++ .next = NULL,
++ .name = "smk_read_onlycap",
++ .file = "security/smack/smackfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000346_hash = {
++ .next = NULL,
++ .name = "snapshot_read",
++ .file = "kernel/power/user.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000347_hash = {
++ .next = NULL,
++ .name = "supply_map_read_file",
++ .file = "drivers/regulator/core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000348_hash = {
++ .next = NULL,
++ .name = "sysfs_read_file",
++ .file = "fs/sysfs/file.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000349_hash = {
++ .next = NULL,
++ .name = "sys_preadv",
++ .file = "include/linux/syscalls.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000350_hash = {
++ .next = NULL,
++ .name = "sys_pwritev",
++ .file = "include/linux/syscalls.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000351_hash = {
++ .next = NULL,
++ .name = "sys_readv",
++ .file = "include/linux/syscalls.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000352_hash = {
++ .next = NULL,
++ .name = "sys_rt_sigpending",
++ .file = "include/linux/syscalls.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000353_hash = {
++ .next = NULL,
++ .name = "sys_writev",
++ .file = "include/linux/syscalls.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000354_hash = {
++ .next = NULL,
++ .name = "ima_show_htable_violations",
++ .file = "security/integrity/ima/ima_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000355_hash = {
++ .next = NULL,
++ .name = "ima_show_measurements_count",
++ .file = "security/integrity/ima/ima_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000356_hash = {
++ .next = NULL,
++ .name = "alloc_cpu_rmap",
++ .file = "include/linux/cpu_rmap.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000357_hash = {
++ .next = NULL,
++ .name = "alloc_page_cgroup",
++ .file = "mm/page_cgroup.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000358_hash = {
++ .next = NULL,
++ .name = "alloc_sched_domains",
++ .file = "include/linux/sched.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000359_hash = {
++ .next = NULL,
++ .name = "compat_rw_copy_check_uvector",
++ .file = "include/linux/compat.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000360_hash = {
++ .next = NULL,
++ .name = "compat_sys_kexec_load",
++ .file = "include/linux/kexec.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000361_hash = {
++ .next = NULL,
++ .name = "compat_sys_semtimedop",
++ .file = "include/linux/compat.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000362_hash = {
++ .next = NULL,
++ .name = "copy_from_user",
++ .file = "arch/x86/include/asm/uaccess_64.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000363_hash = {
++ .next = NULL,
++ .name = "__copy_from_user",
++ .file = "arch/x86/include/asm/uaccess_64.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000364_hash = {
++ .next = NULL,
++ .name = "__copy_from_user_inatomic",
++ .file = "arch/x86/include/asm/uaccess_64.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000365_hash = {
++ .next = NULL,
++ .name = "__copy_from_user_nocache",
++ .file = "arch/x86/include/asm/uaccess_64.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000366_hash = {
++ .next = NULL,
++ .name = "__copy_in_user",
++ .file = "arch/x86/include/asm/uaccess_64.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000367_hash = {
++ .next = NULL,
++ .name = "copy_in_user",
++ .file = "arch/x86/include/asm/uaccess_64.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000368_hash = {
++ .next = NULL,
++ .name = "__copy_to_user",
++ .file = "arch/x86/include/asm/uaccess_64.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000369_hash = {
++ .next = NULL,
++ .name = "copy_to_user",
++ .file = "arch/x86/include/asm/uaccess_64.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000370_hash = {
++ .next = NULL,
++ .name = "__copy_to_user_inatomic",
++ .file = "arch/x86/include/asm/uaccess_64.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000371_hash = {
++ .next = NULL,
++ .name = "kmalloc_node",
++ .file = "include/linux/slub_def.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000372_hash = {
++ .next = NULL,
++ .name = "pcpu_alloc_bootmem",
++ .file = "arch/x86/kernel/setup_percpu.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000373_hash = {
++ .next = NULL,
++ .name = "sys32_rt_sigpending",
++ .file = "arch/x86/include/asm/sys_ia32.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000374_hash = {
++ .next = NULL,
++ .name = "tunables_read",
++ .file = "arch/x86/platform/uv/tlb_uv.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000375_hash = {
++ .next = NULL,
++ .name = "compat_do_readv_writev",
++ .file = "fs/compat.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000376_hash = {
++ .next = NULL,
++ .name = "compat_keyctl_instantiate_key_iov",
++ .file = "security/keys/compat.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000377_hash = {
++ .next = NULL,
++ .name = "compat_process_vm_rw",
++ .file = "mm/process_vm_access.c",
++ .param3 = 1,
++ .param5 = 1,
++};
++struct size_overflow_hash _000379_hash = {
++ .next = NULL,
++ .name = "do_pages_stat",
++ .file = "mm/migrate.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000380_hash = {
++ .next = NULL,
++ .name = "kzalloc_node",
++ .file = "include/linux/slab.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000381_hash = {
++ .next = NULL,
++ .name = "pcpu_fc_alloc",
++ .file = "arch/x86/kernel/setup_percpu.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000382_hash = {
++ .next = NULL,
++ .name = "ptc_proc_write",
++ .file = "arch/x86/platform/uv/tlb_uv.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000383_hash = {
++ .next = NULL,
++ .name = "tunables_write",
++ .file = "arch/x86/platform/uv/tlb_uv.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000384_hash = {
++ .next = NULL,
++ .name = "__alloc_bootmem_low_node",
++ .file = "include/linux/bootmem.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000385_hash = {
++ .next = NULL,
++ .name = "__alloc_bootmem_node",
++ .file = "include/linux/bootmem.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000386_hash = {
++ .next = NULL,
++ .name = "compat_readv",
++ .file = "fs/compat.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000387_hash = {
++ .next = NULL,
++ .name = "compat_sys_keyctl",
++ .file = "include/linux/compat.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000388_hash = {
++ .next = NULL,
++ .name = "compat_sys_process_vm_readv",
++ .file = "include/linux/compat.h",
++ .param3 = 1,
++ .param5 = 1,
++};
++struct size_overflow_hash _000390_hash = {
++ .next = NULL,
++ .name = "compat_sys_process_vm_writev",
++ .file = "include/linux/compat.h",
++ .param3 = 1,
++ .param5 = 1,
++};
++struct size_overflow_hash _000392_hash = {
++ .next = NULL,
++ .name = "compat_writev",
++ .file = "fs/compat.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000393_hash = {
++ .next = NULL,
++ .name = "sys_move_pages",
++ .file = "include/linux/syscalls.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000394_hash = {
++ .next = NULL,
++ .name = "__alloc_bootmem_node_high",
++ .file = "include/linux/bootmem.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000395_hash = {
++ .next = NULL,
++ .name = "compat_sys_move_pages",
++ .file = "include/linux/compat.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000396_hash = {
++ .next = NULL,
++ .name = "compat_sys_preadv",
++ .file = "include/linux/compat.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000397_hash = {
++ .next = NULL,
++ .name = "compat_sys_pwritev",
++ .file = "include/linux/compat.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000398_hash = {
++ .next = NULL,
++ .name = "compat_sys_readv",
++ .file = "include/linux/compat.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000399_hash = {
++ .next = NULL,
++ .name = "compat_sys_writev",
++ .file = "include/linux/compat.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000400_hash = {
++ .next = NULL,
++ .name = "sparse_early_usemaps_alloc_node",
++ .file = "mm/sparse.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000401_hash = {
++ .next = NULL,
++ .name = "__earlyonly_bootmem_alloc",
++ .file = "mm/sparse-vmemmap.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000402_hash = {
++ .next = NULL,
++ .name = "sparse_mem_maps_populate_node",
++ .file = "include/linux/mm.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000403_hash = {
++ .next = NULL,
++ .name = "vmemmap_alloc_block",
++ .file = "include/linux/mm.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000404_hash = {
++ .next = NULL,
++ .name = "sparse_early_mem_maps_alloc_node",
++ .file = "mm/sparse.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000405_hash = {
++ .next = NULL,
++ .name = "vmemmap_alloc_block_buf",
++ .file = "include/linux/mm.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000406_hash = {
++ .next = NULL,
++ .name = "acpi_battery_write_alarm",
++ .file = "drivers/acpi/battery.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000407_hash = {
++ .next = NULL,
++ .name = "acpi_battery_write_alarm",
++ .file = "drivers/acpi/sbs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000408_hash = {
++ .next = NULL,
++ .name = "ad7879_spi_xfer",
++ .file = "drivers/input/touchscreen/ad7879-spi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000409_hash = {
++ .next = NULL,
++ .name = "add_port",
++ .file = "drivers/char/virtio_console.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000410_hash = {
++ .next = NULL,
++ .name = "addtgt",
++ .file = "drivers/block/aoe/aoecmd.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000411_hash = {
++ .next = NULL,
++ .name = "adu_read",
++ .file = "drivers/usb/misc/adutux.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000412_hash = {
++ .next = NULL,
++ .name = "adu_write",
++ .file = "drivers/usb/misc/adutux.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000413_hash = {
++ .next = NULL,
++ .name = "aer_inject_write",
++ .file = "drivers/pci/pcie/aer/aer_inject.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000414_hash = {
++ .next = NULL,
++ .name = "aes_decrypt_fail_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000415_hash = {
++ .next = NULL,
++ .name = "aes_decrypt_interrupt_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000416_hash = {
++ .next = NULL,
++ .name = "aes_decrypt_packets_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000417_hash = {
++ .next = NULL,
++ .name = "aes_encrypt_fail_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000418_hash = {
++ .next = NULL,
++ .name = "aes_encrypt_interrupt_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000419_hash = {
++ .next = NULL,
++ .name = "aes_encrypt_packets_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000420_hash = {
++ .next = NULL,
++ .name = "afs_alloc_flat_call",
++ .file = "fs/afs/rxrpc.c",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _000422_hash = {
++ .next = NULL,
++ .name = "afs_cell_alloc",
++ .file = "fs/afs/cell.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000423_hash = {
++ .next = NULL,
++ .name = "afs_proc_cells_write",
++ .file = "fs/afs/proc.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000424_hash = {
++ .next = NULL,
++ .name = "afs_proc_rootcell_write",
++ .file = "fs/afs/proc.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000425_hash = {
++ .next = NULL,
++ .name = "aggr_recv_addba_req_evt",
++ .file = "drivers/net/wireless/ath/ath6kl/txrx.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000426_hash = {
++ .next = NULL,
++ .name = "agp_3_5_isochronous_node_enable",
++ .file = "drivers/char/agp/isoch.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000427_hash = {
++ .next = NULL,
++ .name = "agp_alloc_page_array",
++ .file = "drivers/char/agp/generic.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000428_hash = {
++ .next = NULL,
++ .name = "alg_setkey",
++ .file = "crypto/af_alg.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000429_hash = {
++ .next = NULL,
++ .name = "alloc_buf",
++ .file = "drivers/char/virtio_console.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000430_hash = {
++ .next = NULL,
++ .name = "alloc_context",
++ .file = "drivers/md/dm-raid1.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000431_hash = {
++ .next = NULL,
++ .name = "alloc_context",
++ .file = "drivers/md/dm-stripe.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000432_hash = {
++ .next = NULL,
++ .name = "__alloc_dev_table",
++ .file = "fs/exofs/super.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000433_hash = {
++ .next = NULL,
++ .name = "alloc_ep_req",
++ .file = "drivers/usb/gadget/f_midi.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000434_hash = {
++ .next = NULL,
++ .name = "alloc_flex_gd",
++ .file = "fs/ext4/resize.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000435_hash = {
++ .next = NULL,
++ .name = "__alloc_objio_seg",
++ .file = "fs/nfs/objlayout/objio_osd.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000436_hash = {
++ .next = NULL,
++ .name = "alloc_one_pg_vec_page",
++ .file = "net/packet/af_packet.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000437_hash = {
++ .next = NULL,
++ .name = "alloc_ring",
++ .file = "drivers/net/ethernet/chelsio/cxgb3/sge.c",
++ .param2 = 1,
++ .param4 = 1,
++};
++struct size_overflow_hash _000438_hash = {
++ .next = NULL,
++ .name = "alloc_ring",
++ .file = "drivers/net/ethernet/chelsio/cxgb4vf/sge.c",
++ .param2 = 1,
++ .param4 = 1,
++};
++struct size_overflow_hash _000441_hash = {
++ .next = NULL,
++ .name = "alloc_ts_config",
++ .file = "include/linux/textsearch.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000442_hash = {
++ .next = NULL,
++ .name = "altera_drscan",
++ .file = "drivers/misc/altera-stapl/altera-jtag.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000443_hash = {
++ .next = NULL,
++ .name = "altera_irscan",
++ .file = "drivers/misc/altera-stapl/altera-jtag.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000444_hash = {
++ .next = &_000066_hash,
++ .name = "altera_set_dr_post",
++ .file = "drivers/misc/altera-stapl/altera-jtag.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000445_hash = {
++ .next = NULL,
++ .name = "altera_set_dr_pre",
++ .file = "drivers/misc/altera-stapl/altera-jtag.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000446_hash = {
++ .next = NULL,
++ .name = "altera_set_ir_post",
++ .file = "drivers/misc/altera-stapl/altera-jtag.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000447_hash = {
++ .next = NULL,
++ .name = "altera_set_ir_pre",
++ .file = "drivers/misc/altera-stapl/altera-jtag.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000448_hash = {
++ .next = NULL,
++ .name = "altera_swap_dr",
++ .file = "drivers/misc/altera-stapl/altera-jtag.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000449_hash = {
++ .next = NULL,
++ .name = "altera_swap_ir",
++ .file = "drivers/misc/altera-stapl/altera-jtag.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000450_hash = {
++ .next = NULL,
++ .name = "aoedev_flush",
++ .file = "drivers/block/aoe/aoedev.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000451_hash = {
++ .next = NULL,
++ .name = "asd_store_update_bios",
++ .file = "drivers/scsi/aic94xx/aic94xx_init.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000452_hash = {
++ .next = NULL,
++ .name = "asix_read_cmd",
++ .file = "drivers/net/usb/asix.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000453_hash = {
++ .next = NULL,
++ .name = "asix_write_cmd",
++ .file = "drivers/net/usb/asix.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000454_hash = {
++ .next = NULL,
++ .name = "asn1_octets_decode",
++ .file = "net/ipv4/netfilter/nf_nat_snmp_basic.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000455_hash = {
++ .next = NULL,
++ .name = "asn1_oid_decode",
++ .file = "net/ipv4/netfilter/nf_nat_snmp_basic.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000456_hash = {
++ .next = NULL,
++ .name = "asn1_oid_decode",
++ .file = "fs/cifs/asn1.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000457_hash = {
++ .next = NULL,
++ .name = "ath6kl_add_bss_if_needed",
++ .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _000458_hash = {
++ .next = NULL,
++ .name = "ath6kl_debug_roam_tbl_event",
++ .file = "drivers/net/wireless/ath/ath6kl/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000459_hash = {
++ .next = NULL,
++ .name = "ath6kl_disconnect_timeout_read",
++ .file = "drivers/net/wireless/ath/ath6kl/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000460_hash = {
++ .next = NULL,
++ .name = "ath6kl_endpoint_stats_read",
++ .file = "drivers/net/wireless/ath/ath6kl/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000461_hash = {
++ .next = NULL,
++ .name = "ath6kl_fwlog_mask_read",
++ .file = "drivers/net/wireless/ath/ath6kl/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000462_hash = {
++ .next = NULL,
++ .name = "ath6kl_fwlog_read",
++ .file = "drivers/net/wireless/ath/ath6kl/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000463_hash = {
++ .next = NULL,
++ .name = "ath6kl_keepalive_read",
++ .file = "drivers/net/wireless/ath/ath6kl/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000464_hash = {
++ .next = NULL,
++ .name = "ath6kl_lrssi_roam_read",
++ .file = "drivers/net/wireless/ath/ath6kl/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000465_hash = {
++ .next = NULL,
++ .name = "ath6kl_regdump_read",
++ .file = "drivers/net/wireless/ath/ath6kl/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000466_hash = {
++ .next = NULL,
++ .name = "ath6kl_regread_read",
++ .file = "drivers/net/wireless/ath/ath6kl/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000467_hash = {
++ .next = NULL,
++ .name = "ath6kl_regwrite_read",
++ .file = "drivers/net/wireless/ath/ath6kl/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000468_hash = {
++ .next = NULL,
++ .name = "ath6kl_roam_table_read",
++ .file = "drivers/net/wireless/ath/ath6kl/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000469_hash = {
++ .next = NULL,
++ .name = "ath6kl_send_go_probe_resp",
++ .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000470_hash = {
++ .next = NULL,
++ .name = "ath6kl_set_ap_probe_resp_ies",
++ .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000471_hash = {
++ .next = NULL,
++ .name = "ath6kl_set_assoc_req_ies",
++ .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000472_hash = {
++ .next = NULL,
++ .name = "ath6kl_tm_rx_report_event",
++ .file = "drivers/net/wireless/ath/ath6kl/testmode.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000473_hash = {
++ .next = NULL,
++ .name = "ath6kl_wmi_send_action_cmd",
++ .file = "drivers/net/wireless/ath/ath6kl/wmi.c",
++ .param7 = 1,
++};
++struct size_overflow_hash _000474_hash = {
++ .next = NULL,
++ .name = "ath6kl_wmi_send_mgmt_cmd",
++ .file = "drivers/net/wireless/ath/ath6kl/wmi.c",
++ .param7 = 1,
++};
++struct size_overflow_hash _000475_hash = {
++ .next = NULL,
++ .name = "ath9k_debugfs_read_buf",
++ .file = "drivers/net/wireless/ath/ath9k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000476_hash = {
++ .next = NULL,
++ .name = "atk_debugfs_ggrp_read",
++ .file = "drivers/hwmon/asus_atk0110.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000477_hash = {
++ .next = NULL,
++ .name = "atm_get_addr",
++ .file = "net/atm/addr.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000478_hash = {
++ .next = NULL,
++ .name = "attach_hdlc_protocol",
++ .file = "include/linux/hdlc.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000479_hash = {
++ .next = NULL,
++ .name = "av7110_vbi_write",
++ .file = "drivers/media/dvb/ttpci/av7110_v4l.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000480_hash = {
++ .next = NULL,
++ .name = "ax25_setsockopt",
++ .file = "net/ax25/af_ax25.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000481_hash = {
++ .next = NULL,
++ .name = "b43_debugfs_read",
++ .file = "drivers/net/wireless/b43/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000482_hash = {
++ .next = NULL,
++ .name = "b43_debugfs_write",
++ .file = "drivers/net/wireless/b43/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000483_hash = {
++ .next = NULL,
++ .name = "b43legacy_debugfs_read",
++ .file = "drivers/net/wireless/b43legacy/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000484_hash = {
++ .next = NULL,
++ .name = "b43legacy_debugfs_write",
++ .file = "drivers/net/wireless/b43legacy/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000485_hash = {
++ .next = NULL,
++ .name = "b43_nphy_load_samples",
++ .file = "drivers/net/wireless/b43/phy_n.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000486_hash = {
++ .next = NULL,
++ .name = "bch_alloc",
++ .file = "lib/bch.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000487_hash = {
++ .next = NULL,
++ .name = "bfad_debugfs_read",
++ .file = "drivers/scsi/bfa/bfad_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000488_hash = {
++ .next = NULL,
++ .name = "bfad_debugfs_read_regrd",
++ .file = "drivers/scsi/bfa/bfad_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000489_hash = {
++ .next = NULL,
++ .name = "bfad_debugfs_write_regrd",
++ .file = "drivers/scsi/bfa/bfad_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000490_hash = {
++ .next = NULL,
++ .name = "bfad_debugfs_write_regwr",
++ .file = "drivers/scsi/bfa/bfad_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000491_hash = {
++ .next = NULL,
++ .name = "bits_to_user",
++ .file = "drivers/input/evdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000492_hash = {
++ .next = NULL,
++ .name = "bl_pipe_downcall",
++ .file = "fs/nfs/blocklayout/blocklayoutdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000493_hash = {
++ .next = NULL,
++ .name = "bm_entry_read",
++ .file = "fs/binfmt_misc.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000494_hash = {
++ .next = NULL,
++ .name = "bm_realloc_pages",
++ .file = "drivers/block/drbd/drbd_bitmap.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000495_hash = {
++ .next = NULL,
++ .name = "bm_status_read",
++ .file = "fs/binfmt_misc.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000496_hash = {
++ .next = NULL,
++ .name = "bnad_debugfs_read",
++ .file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000497_hash = {
++ .next = NULL,
++ .name = "bnad_debugfs_read_regrd",
++ .file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000498_hash = {
++ .next = NULL,
++ .name = "bnad_debugfs_write_regrd",
++ .file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000499_hash = {
++ .next = NULL,
++ .name = "bnad_debugfs_write_regwr",
++ .file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000500_hash = {
++ .next = NULL,
++ .name = "bnx2fc_cmd_mgr_alloc",
++ .file = "drivers/scsi/bnx2fc/bnx2fc_io.c",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _000502_hash = {
++ .next = NULL,
++ .name = "btmrvl_curpsmode_read",
++ .file = "drivers/bluetooth/btmrvl_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000503_hash = {
++ .next = NULL,
++ .name = "btmrvl_gpiogap_read",
++ .file = "drivers/bluetooth/btmrvl_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000504_hash = {
++ .next = NULL,
++ .name = "btmrvl_gpiogap_write",
++ .file = "drivers/bluetooth/btmrvl_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000505_hash = {
++ .next = NULL,
++ .name = "btmrvl_hscfgcmd_read",
++ .file = "drivers/bluetooth/btmrvl_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000506_hash = {
++ .next = NULL,
++ .name = "btmrvl_hscfgcmd_write",
++ .file = "drivers/bluetooth/btmrvl_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000507_hash = {
++ .next = &_000006_hash,
++ .name = "btmrvl_hscmd_read",
++ .file = "drivers/bluetooth/btmrvl_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000508_hash = {
++ .next = NULL,
++ .name = "btmrvl_hscmd_write",
++ .file = "drivers/bluetooth/btmrvl_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000509_hash = {
++ .next = NULL,
++ .name = "btmrvl_hsmode_read",
++ .file = "drivers/bluetooth/btmrvl_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000510_hash = {
++ .next = NULL,
++ .name = "btmrvl_hsmode_write",
++ .file = "drivers/bluetooth/btmrvl_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000511_hash = {
++ .next = NULL,
++ .name = "btmrvl_hsstate_read",
++ .file = "drivers/bluetooth/btmrvl_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000512_hash = {
++ .next = NULL,
++ .name = "btmrvl_pscmd_read",
++ .file = "drivers/bluetooth/btmrvl_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000513_hash = {
++ .next = NULL,
++ .name = "btmrvl_pscmd_write",
++ .file = "drivers/bluetooth/btmrvl_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000514_hash = {
++ .next = NULL,
++ .name = "btmrvl_psmode_read",
++ .file = "drivers/bluetooth/btmrvl_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000515_hash = {
++ .next = NULL,
++ .name = "btmrvl_psmode_write",
++ .file = "drivers/bluetooth/btmrvl_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000516_hash = {
++ .next = NULL,
++ .name = "btmrvl_psstate_read",
++ .file = "drivers/bluetooth/btmrvl_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000517_hash = {
++ .next = NULL,
++ .name = "btmrvl_txdnldready_read",
++ .file = "drivers/bluetooth/btmrvl_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000518_hash = {
++ .next = NULL,
++ .name = "btrfs_alloc_delayed_item",
++ .file = "fs/btrfs/delayed-inode.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000519_hash = {
++ .next = NULL,
++ .name = "btrfs_copy_from_user",
++ .file = "fs/btrfs/file.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000520_hash = {
++ .next = NULL,
++ .name = "__btrfs_map_block",
++ .file = "fs/btrfs/volumes.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000521_hash = {
++ .next = NULL,
++ .name = "__c4iw_init_resource_fifo",
++ .file = "drivers/infiniband/hw/cxgb4/resource.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000522_hash = {
++ .next = NULL,
++ .name = "cache_do_downcall",
++ .file = "net/sunrpc/cache.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000523_hash = {
++ .next = NULL,
++ .name = "cachefiles_daemon_write",
++ .file = "fs/cachefiles/daemon.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000524_hash = {
++ .next = NULL,
++ .name = "cache_read",
++ .file = "net/sunrpc/cache.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000525_hash = {
++ .next = NULL,
++ .name = "ca_extend",
++ .file = "drivers/md/persistent-data/dm-space-map-checker.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000526_hash = {
++ .next = NULL,
++ .name = "calc_hmac",
++ .file = "security/keys/encrypted-keys/encrypted.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000527_hash = {
++ .next = NULL,
++ .name = "capi_write",
++ .file = "drivers/isdn/capi/capi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000528_hash = {
++ .next = NULL,
++ .name = "carl9170_cmd_buf",
++ .file = "drivers/net/wireless/ath/carl9170/cmd.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000529_hash = {
++ .next = NULL,
++ .name = "carl9170_debugfs_read",
++ .file = "drivers/net/wireless/ath/carl9170/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000530_hash = {
++ .next = NULL,
++ .name = "carl9170_debugfs_write",
++ .file = "drivers/net/wireless/ath/carl9170/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000531_hash = {
++ .next = NULL,
++ .name = "cciss_proc_write",
++ .file = "drivers/block/cciss.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000532_hash = {
++ .next = NULL,
++ .name = "ceph_buffer_new",
++ .file = "include/linux/ceph/buffer.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000533_hash = {
++ .next = NULL,
++ .name = "ceph_copy_page_vector_to_user",
++ .file = "include/linux/ceph/libceph.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000534_hash = {
++ .next = NULL,
++ .name = "ceph_copy_user_to_page_vector",
++ .file = "include/linux/ceph/libceph.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000535_hash = {
++ .next = NULL,
++ .name = "ceph_read_dir",
++ .file = "fs/ceph/dir.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000536_hash = {
++ .next = NULL,
++ .name = "ceph_setxattr",
++ .file = "fs/ceph/xattr.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000537_hash = {
++ .next = NULL,
++ .name = "cfg80211_connect_result",
++ .file = "include/net/cfg80211.h",
++ .param4 = 1,
++ .param6 = 1,
++};
++struct size_overflow_hash _000539_hash = {
++ .next = NULL,
++ .name = "cfg80211_disconnected",
++ .file = "include/net/cfg80211.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000540_hash = {
++ .next = NULL,
++ .name = "cfg80211_inform_bss",
++ .file = "include/net/cfg80211.h",
++ .param8 = 1,
++};
++struct size_overflow_hash _000541_hash = {
++ .next = NULL,
++ .name = "cfg80211_inform_bss_frame",
++ .file = "include/net/cfg80211.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000542_hash = {
++ .next = NULL,
++ .name = "cfg80211_roamed_bss",
++ .file = "include/net/cfg80211.h",
++ .param4 = 1,
++ .param6 = 1,
++};
++struct size_overflow_hash _000544_hash = {
++ .next = NULL,
++ .name = "cfi_read_pri",
++ .file = "include/linux/mtd/cfi.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000545_hash = {
++ .next = NULL,
++ .name = "channel_type_read",
++ .file = "net/mac80211/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000546_hash = {
++ .next = NULL,
++ .name = "cifs_idmap_key_instantiate",
++ .file = "fs/cifs/cifsacl.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000547_hash = {
++ .next = NULL,
++ .name = "cifs_readdata_alloc",
++ .file = "fs/cifs/cifssmb.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000548_hash = {
++ .next = NULL,
++ .name = "cifs_security_flags_proc_write",
++ .file = "fs/cifs/cifs_debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000549_hash = {
++ .next = NULL,
++ .name = "cifs_setxattr",
++ .file = "fs/cifs/xattr.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000550_hash = {
++ .next = NULL,
++ .name = "cifs_spnego_key_instantiate",
++ .file = "fs/cifs/cifs_spnego.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000551_hash = {
++ .next = NULL,
++ .name = "cifs_writedata_alloc",
++ .file = "fs/cifs/cifssmb.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000552_hash = {
++ .next = NULL,
++ .name = "ci_ll_write",
++ .file = "drivers/media/dvb/ttpci/av7110_ca.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000553_hash = {
++ .next = NULL,
++ .name = "clusterip_proc_write",
++ .file = "net/ipv4/netfilter/ipt_CLUSTERIP.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000554_hash = {
++ .next = &_000108_hash,
++ .name = "cm4040_write",
++ .file = "drivers/char/pcmcia/cm4040_cs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000555_hash = {
++ .next = NULL,
++ .name = "cm_copy_private_data",
++ .file = "drivers/infiniband/core/cm.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000556_hash = {
++ .next = NULL,
++ .name = "cmm_write",
++ .file = "drivers/char/pcmcia/cm4000_cs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000557_hash = {
++ .next = NULL,
++ .name = "cm_write",
++ .file = "drivers/acpi/custom_method.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000558_hash = {
++ .next = NULL,
++ .name = "coda_psdev_read",
++ .file = "fs/coda/psdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000559_hash = {
++ .next = NULL,
++ .name = "coda_psdev_write",
++ .file = "fs/coda/psdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000560_hash = {
++ .next = NULL,
++ .name = "codec_list_read_file",
++ .file = "sound/soc/soc-core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000561_hash = {
++ .next = NULL,
++ .name = "codec_reg_read_file",
++ .file = "sound/soc/soc-core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000562_hash = {
++ .next = NULL,
++ .name = "command_file_write",
++ .file = "drivers/misc/ibmasm/ibmasmfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000563_hash = {
++ .next = NULL,
++ .name = "command_write",
++ .file = "drivers/uwb/uwb-debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000564_hash = {
++ .next = NULL,
++ .name = "concat_writev",
++ .file = "drivers/mtd/mtdconcat.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000565_hash = {
++ .next = NULL,
++ .name = "configfs_read_file",
++ .file = "fs/configfs/file.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000566_hash = {
++ .next = NULL,
++ .name = "context_alloc",
++ .file = "drivers/md/dm-raid.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000567_hash = {
++ .next = NULL,
++ .name = "copy_counters_to_user",
++ .file = "net/bridge/netfilter/ebtables.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000568_hash = {
++ .next = NULL,
++ .name = "copy_entries_to_user",
++ .file = "net/ipv6/netfilter/ip6_tables.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000569_hash = {
++ .next = NULL,
++ .name = "copy_entries_to_user",
++ .file = "net/ipv4/netfilter/arp_tables.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000570_hash = {
++ .next = NULL,
++ .name = "copy_entries_to_user",
++ .file = "net/ipv4/netfilter/ip_tables.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000571_hash = {
++ .next = NULL,
++ .name = "copy_from_user_toio",
++ .file = "include/sound/core.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000572_hash = {
++ .next = NULL,
++ .name = "copy_macs",
++ .file = "net/atm/mpc.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000573_hash = {
++ .next = NULL,
++ .name = "copy_to_user_fromio",
++ .file = "include/sound/core.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000574_hash = {
++ .next = NULL,
++ .name = "cosa_write",
++ .file = "drivers/net/wan/cosa.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000575_hash = {
++ .next = NULL,
++ .name = "create_attr_set",
++ .file = "drivers/platform/x86/thinkpad_acpi.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000576_hash = {
++ .next = NULL,
++ .name = "create_entry",
++ .file = "fs/binfmt_misc.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000577_hash = {
++ .next = NULL,
++ .name = "create_gpadl_header",
++ .file = "drivers/hv/channel.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000578_hash = {
++ .next = NULL,
++ .name = "create_queues",
++ .file = "drivers/atm/ambassador.c",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _000580_hash = {
++ .next = NULL,
++ .name = "_create_sg_bios",
++ .file = "drivers/scsi/osd/osd_initiator.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000581_hash = {
++ .next = NULL,
++ .name = "cryptd_alloc_instance",
++ .file = "crypto/cryptd.c",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _000583_hash = {
++ .next = NULL,
++ .name = "cryptd_hash_setkey",
++ .file = "crypto/cryptd.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000584_hash = {
++ .next = NULL,
++ .name = "crypto_authenc_esn_setkey",
++ .file = "crypto/authencesn.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000585_hash = {
++ .next = NULL,
++ .name = "crypto_authenc_setkey",
++ .file = "crypto/authenc.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000586_hash = {
++ .next = NULL,
++ .name = "ctrl_out",
++ .file = "drivers/usb/misc/usbtest.c",
++ .param3 = 1,
++ .param5 = 1,
++};
++struct size_overflow_hash _000588_hash = {
++ .next = NULL,
++ .name = "cx18_copy_buf_to_user",
++ .file = "drivers/media/video/cx18/cx18-fileops.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000589_hash = {
++ .next = NULL,
++ .name = "cx24116_writeregN",
++ .file = "drivers/media/dvb/frontends/cx24116.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000590_hash = {
++ .next = NULL,
++ .name = "cxgb_alloc_mem",
++ .file = "drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000591_hash = {
++ .next = NULL,
++ .name = "cxgbi_alloc_big_mem",
++ .file = "drivers/scsi/cxgbi/libcxgbi.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000592_hash = {
++ .next = NULL,
++ .name = "cxgbi_device_register",
++ .file = "drivers/scsi/cxgbi/libcxgbi.c",
++ .param1 = 1,
++ .param2 = 1,
++};
++struct size_overflow_hash _000594_hash = {
++ .next = NULL,
++ .name = "__cxio_init_resource_fifo",
++ .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000595_hash = {
++ .next = NULL,
++ .name = "dac960_user_command_proc_write",
++ .file = "drivers/block/DAC960.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000596_hash = {
++ .next = NULL,
++ .name = "dai_list_read_file",
++ .file = "sound/soc/soc-core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000597_hash = {
++ .next = NULL,
++ .name = "dapm_bias_read_file",
++ .file = "sound/soc/soc-dapm.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000598_hash = {
++ .next = NULL,
++ .name = "dapm_widget_power_read_file",
++ .file = "sound/soc/soc-dapm.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000599_hash = {
++ .next = NULL,
++ .name = "datablob_format",
++ .file = "security/keys/encrypted-keys/encrypted.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000600_hash = {
++ .next = NULL,
++ .name = "dbgfs_frame",
++ .file = "drivers/net/caif/caif_spi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000601_hash = {
++ .next = NULL,
++ .name = "dbgfs_state",
++ .file = "drivers/net/caif/caif_spi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000602_hash = {
++ .next = NULL,
++ .name = "dccp_feat_clone_sp_val",
++ .file = "net/dccp/feat.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000603_hash = {
++ .next = NULL,
++ .name = "dccp_setsockopt_ccid",
++ .file = "net/dccp/proto.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000604_hash = {
++ .next = NULL,
++ .name = "dccp_setsockopt_service",
++ .file = "net/dccp/proto.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000605_hash = {
++ .next = NULL,
++ .name = "ddb_input_read",
++ .file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000606_hash = {
++ .next = NULL,
++ .name = "ddb_output_write",
++ .file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000607_hash = {
++ .next = NULL,
++ .name = "ddp_make_gl",
++ .file = "drivers/scsi/cxgbi/libcxgbi.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000608_hash = {
++ .next = NULL,
++ .name = "debugfs_read",
++ .file = "drivers/infiniband/hw/cxgb4/device.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000609_hash = {
++ .next = NULL,
++ .name = "debugfs_read",
++ .file = "drivers/char/virtio_console.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000610_hash = {
++ .next = NULL,
++ .name = "debug_output",
++ .file = "drivers/usb/host/ohci-dbg.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000611_hash = {
++ .next = NULL,
++ .name = "debug_output",
++ .file = "drivers/usb/host/ehci-dbg.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000612_hash = {
++ .next = NULL,
++ .name = "debug_read",
++ .file = "fs/ocfs2/dlm/dlmdebug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000613_hash = {
++ .next = NULL,
++ .name = "dev_config",
++ .file = "drivers/usb/gadget/inode.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000614_hash = {
++ .next = NULL,
++ .name = "device_write",
++ .file = "fs/dlm/user.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000615_hash = {
++ .next = NULL,
++ .name = "dev_read",
++ .file = "drivers/media/video/gspca/gspca.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000616_hash = {
++ .next = NULL,
++ .name = "dfs_file_read",
++ .file = "drivers/mtd/ubi/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000617_hash = {
++ .next = NULL,
++ .name = "dfs_file_write",
++ .file = "drivers/mtd/ubi/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000618_hash = {
++ .next = NULL,
++ .name = "direct_entry",
++ .file = "drivers/misc/lkdtm.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000619_hash = {
++ .next = NULL,
++ .name = "dispatch_proc_write",
++ .file = "drivers/platform/x86/thinkpad_acpi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000620_hash = {
++ .next = NULL,
++ .name = "diva_os_malloc",
++ .file = "drivers/isdn/hardware/eicon/platform.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000621_hash = {
++ .next = NULL,
++ .name = "dlmfs_file_read",
++ .file = "fs/ocfs2/dlmfs/dlmfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000622_hash = {
++ .next = NULL,
++ .name = "dlmfs_file_write",
++ .file = "fs/ocfs2/dlmfs/dlmfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000623_hash = {
++ .next = NULL,
++ .name = "dma_attach",
++ .file = "drivers/net/wireless/brcm80211/brcmsmac/dma.c",
++ .param6 = 1,
++ .param7 = 1,
++};
++struct size_overflow_hash _000625_hash = {
++ .next = NULL,
++ .name = "dma_rx_errors_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000626_hash = {
++ .next = NULL,
++ .name = "dma_rx_requested_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000627_hash = {
++ .next = NULL,
++ .name = "dma_show_regs",
++ .file = "drivers/tty/serial/mfd.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000628_hash = {
++ .next = NULL,
++ .name = "dma_tx_errors_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000629_hash = {
++ .next = NULL,
++ .name = "dma_tx_requested_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000630_hash = {
++ .next = NULL,
++ .name = "dm_read",
++ .file = "drivers/net/usb/dm9601.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000631_hash = {
++ .next = NULL,
++ .name = "dm_vcalloc",
++ .file = "include/linux/device-mapper.h",
++ .param1 = 1,
++ .param2 = 1,
++};
++struct size_overflow_hash _000633_hash = {
++ .next = NULL,
++ .name = "dm_write",
++ .file = "drivers/net/usb/dm9601.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000634_hash = {
++ .next = NULL,
++ .name = "__dn_setsockopt",
++ .file = "net/decnet/af_decnet.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000635_hash = {
++ .next = NULL,
++ .name = "dns_query",
++ .file = "include/linux/dns_resolver.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000636_hash = {
++ .next = NULL,
++ .name = "dns_resolver_instantiate",
++ .file = "net/dns_resolver/dns_key.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000637_hash = {
++ .next = NULL,
++ .name = "dns_resolver_read",
++ .file = "net/dns_resolver/dns_key.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000638_hash = {
++ .next = NULL,
++ .name = "do_add_counters",
++ .file = "net/ipv6/netfilter/ip6_tables.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000639_hash = {
++ .next = NULL,
++ .name = "do_add_counters",
++ .file = "net/ipv4/netfilter/ip_tables.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000640_hash = {
++ .next = NULL,
++ .name = "do_add_counters",
++ .file = "net/ipv4/netfilter/arp_tables.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000641_hash = {
++ .next = NULL,
++ .name = "__do_config_autodelink",
++ .file = "drivers/usb/storage/realtek_cr.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000642_hash = {
++ .next = NULL,
++ .name = "do_ipv6_setsockopt",
++ .file = "net/ipv6/ipv6_sockglue.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000643_hash = {
++ .next = NULL,
++ .name = "do_ip_vs_set_ctl",
++ .file = "net/netfilter/ipvs/ip_vs_ctl.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000644_hash = {
++ .next = NULL,
++ .name = "do_register_entry",
++ .file = "drivers/misc/lkdtm.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000645_hash = {
++ .next = NULL,
++ .name = "__do_replace",
++ .file = "net/ipv6/netfilter/ip6_tables.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000646_hash = {
++ .next = NULL,
++ .name = "__do_replace",
++ .file = "net/ipv4/netfilter/ip_tables.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000647_hash = {
++ .next = NULL,
++ .name = "__do_replace",
++ .file = "net/ipv4/netfilter/arp_tables.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000648_hash = {
++ .next = NULL,
++ .name = "do_sync",
++ .file = "fs/gfs2/quota.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000649_hash = {
++ .next = NULL,
++ .name = "do_update_counters",
++ .file = "net/bridge/netfilter/ebtables.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000650_hash = {
++ .next = NULL,
++ .name = "driver_state_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000651_hash = {
++ .next = NULL,
++ .name = "dsp_write",
++ .file = "sound/oss/msnd_pinnacle.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000652_hash = {
++ .next = NULL,
++ .name = "dvb_aplay",
++ .file = "drivers/media/dvb/ttpci/av7110_av.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000653_hash = {
++ .next = NULL,
++ .name = "dvb_ca_en50221_io_write",
++ .file = "drivers/media/dvb/dvb-core/dvb_ca_en50221.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000654_hash = {
++ .next = NULL,
++ .name = "dvb_dmxdev_set_buffer_size",
++ .file = "drivers/media/dvb/dvb-core/dmxdev.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000655_hash = {
++ .next = NULL,
++ .name = "dvbdmx_write",
++ .file = "drivers/media/dvb/dvb-core/dvb_demux.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000656_hash = {
++ .next = NULL,
++ .name = "dvb_dvr_set_buffer_size",
++ .file = "drivers/media/dvb/dvb-core/dmxdev.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000657_hash = {
++ .next = NULL,
++ .name = "dvb_play",
++ .file = "drivers/media/dvb/ttpci/av7110_av.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000658_hash = {
++ .next = NULL,
++ .name = "dvb_ringbuffer_pkt_read_user",
++ .file = "drivers/media/dvb/dvb-core/dvb_ringbuffer.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000659_hash = {
++ .next = NULL,
++ .name = "dvb_ringbuffer_read_user",
++ .file = "drivers/media/dvb/dvb-core/dvb_ringbuffer.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000660_hash = {
++ .next = NULL,
++ .name = "dw210x_op_rw",
++ .file = "drivers/media/dvb/dvb-usb/dw2102.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _000661_hash = {
++ .next = NULL,
++ .name = "dwc3_mode_write",
++ .file = "drivers/usb/dwc3/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000662_hash = {
++ .next = NULL,
++ .name = "econet_sendmsg",
++ .file = "net/econet/af_econet.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000663_hash = {
++ .next = NULL,
++ .name = "ecryptfs_copy_filename",
++ .file = "fs/ecryptfs/crypto.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000664_hash = {
++ .next = NULL,
++ .name = "ecryptfs_miscdev_write",
++ .file = "fs/ecryptfs/miscdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000665_hash = {
++ .next = NULL,
++ .name = "ecryptfs_send_miscdev",
++ .file = "fs/ecryptfs/miscdev.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000666_hash = {
++ .next = NULL,
++ .name = "edac_device_alloc_ctl_info",
++ .file = "drivers/edac/edac_device.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000667_hash = {
++ .next = NULL,
++ .name = "edac_mc_alloc",
++ .file = "drivers/edac/edac_mc.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000668_hash = {
++ .next = NULL,
++ .name = "edac_pci_alloc_ctl_info",
++ .file = "drivers/edac/edac_pci.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000669_hash = {
++ .next = NULL,
++ .name = "efivar_create_sysfs_entry",
++ .file = "drivers/firmware/efivars.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000670_hash = {
++ .next = NULL,
++ .name = "efx_tsoh_heap_alloc",
++ .file = "drivers/net/ethernet/sfc/tx.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000671_hash = {
++ .next = NULL,
++ .name = "encrypted_instantiate",
++ .file = "security/keys/encrypted-keys/encrypted.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000672_hash = {
++ .next = NULL,
++ .name = "encrypted_update",
++ .file = "security/keys/encrypted-keys/encrypted.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000673_hash = {
++ .next = NULL,
++ .name = "ep0_write",
++ .file = "drivers/usb/gadget/inode.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000674_hash = {
++ .next = NULL,
++ .name = "ep_read",
++ .file = "drivers/usb/gadget/inode.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000675_hash = {
++ .next = NULL,
++ .name = "ep_write",
++ .file = "drivers/usb/gadget/inode.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000676_hash = {
++ .next = NULL,
++ .name = "erst_dbg_write",
++ .file = "drivers/acpi/apei/erst-dbg.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000677_hash = {
++ .next = NULL,
++ .name = "et61x251_read",
++ .file = "drivers/media/video/et61x251/et61x251_core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000678_hash = {
++ .next = NULL,
++ .name = "event_calibration_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000679_hash = {
++ .next = NULL,
++ .name = "event_heart_beat_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000680_hash = {
++ .next = NULL,
++ .name = "event_oom_late_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000681_hash = {
++ .next = NULL,
++ .name = "event_phy_transmit_error_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000682_hash = {
++ .next = NULL,
++ .name = "event_rx_mem_empty_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000683_hash = {
++ .next = NULL,
++ .name = "event_rx_mismatch_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000684_hash = {
++ .next = NULL,
++ .name = "event_rx_pool_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000685_hash = {
++ .next = NULL,
++ .name = "event_tx_stuck_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000686_hash = {
++ .next = NULL,
++ .name = "excessive_retries_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000687_hash = {
++ .next = NULL,
++ .name = "exofs_read_lookup_dev_table",
++ .file = "fs/exofs/super.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000688_hash = {
++ .next = NULL,
++ .name = "ext4_kvmalloc",
++ .file = "fs/ext4/super.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000689_hash = {
++ .next = NULL,
++ .name = "ext4_kvzalloc",
++ .file = "fs/ext4/super.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000690_hash = {
++ .next = NULL,
++ .name = "extend_netdev_table",
++ .file = "net/core/netprio_cgroup.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000691_hash = {
++ .next = NULL,
++ .name = "fd_copyin",
++ .file = "drivers/block/floppy.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000692_hash = {
++ .next = NULL,
++ .name = "fd_copyout",
++ .file = "drivers/block/floppy.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000693_hash = {
++ .next = NULL,
++ .name = "__ffs_ep0_read_events",
++ .file = "drivers/usb/gadget/f_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000694_hash = {
++ .next = NULL,
++ .name = "ffs_epfile_io",
++ .file = "drivers/usb/gadget/f_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000695_hash = {
++ .next = NULL,
++ .name = "ffs_prepare_buffer",
++ .file = "drivers/usb/gadget/f_fs.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000696_hash = {
++ .next = NULL,
++ .name = "f_hidg_read",
++ .file = "drivers/usb/gadget/f_hid.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000697_hash = {
++ .next = NULL,
++ .name = "f_hidg_write",
++ .file = "drivers/usb/gadget/f_hid.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000698_hash = {
++ .next = NULL,
++ .name = "fill_write_buffer",
++ .file = "fs/configfs/file.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000699_hash = {
++ .next = NULL,
++ .name = "flexcop_device_kmalloc",
++ .file = "drivers/media/dvb/b2c2/flexcop.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000700_hash = {
++ .next = NULL,
++ .name = "fops_read",
++ .file = "drivers/media/video/saa7164/saa7164-encoder.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000701_hash = {
++ .next = NULL,
++ .name = "fops_read",
++ .file = "drivers/media/video/saa7164/saa7164-vbi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000702_hash = {
++ .next = NULL,
++ .name = "format_devstat_counter",
++ .file = "net/mac80211/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000703_hash = {
++ .next = NULL,
++ .name = "fragmentation_threshold_read",
++ .file = "net/wireless/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000704_hash = {
++ .next = NULL,
++ .name = "frame_alloc",
++ .file = "drivers/media/video/gspca/gspca.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000705_hash = {
++ .next = NULL,
++ .name = "ftdi_elan_write",
++ .file = "drivers/usb/misc/ftdi-elan.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000706_hash = {
++ .next = NULL,
++ .name = "fuse_conn_limit_read",
++ .file = "fs/fuse/control.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000707_hash = {
++ .next = NULL,
++ .name = "fuse_conn_limit_write",
++ .file = "fs/fuse/control.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000708_hash = {
++ .next = &_000531_hash,
++ .name = "fuse_conn_waiting_read",
++ .file = "fs/fuse/control.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000709_hash = {
++ .next = NULL,
++ .name = "garp_attr_create",
++ .file = "net/802/garp.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000710_hash = {
++ .next = NULL,
++ .name = "get_alua_req",
++ .file = "drivers/scsi/device_handler/scsi_dh_alua.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000711_hash = {
++ .next = NULL,
++ .name = "get_derived_key",
++ .file = "security/keys/encrypted-keys/encrypted.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000712_hash = {
++ .next = NULL,
++ .name = "getdqbuf",
++ .file = "fs/quota/quota_tree.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000713_hash = {
++ .next = NULL,
++ .name = "get_fdb_entries",
++ .file = "net/bridge/br_ioctl.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000714_hash = {
++ .next = NULL,
++ .name = "get_rdac_req",
++ .file = "drivers/scsi/device_handler/scsi_dh_rdac.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000715_hash = {
++ .next = NULL,
++ .name = "get_registers",
++ .file = "drivers/net/usb/pegasus.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000716_hash = {
++ .next = NULL,
++ .name = "get_server_iovec",
++ .file = "fs/cifs/connect.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000717_hash = {
++ .next = NULL,
++ .name = "get_ucode_user",
++ .file = "arch/x86/kernel/microcode_intel.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000718_hash = {
++ .next = NULL,
++ .name = "gfs2_alloc_sort_buffer",
++ .file = "fs/gfs2/dir.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000719_hash = {
++ .next = NULL,
++ .name = "gfs2_glock_nq_m",
++ .file = "fs/gfs2/glock.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000720_hash = {
++ .next = NULL,
++ .name = "gigaset_initdriver",
++ .file = "drivers/isdn/gigaset/common.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000721_hash = {
++ .next = NULL,
++ .name = "gpio_power_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000722_hash = {
++ .next = NULL,
++ .name = "gs_alloc_req",
++ .file = "drivers/usb/gadget/u_serial.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000723_hash = {
++ .next = NULL,
++ .name = "gs_buf_alloc",
++ .file = "drivers/usb/gadget/u_serial.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000724_hash = {
++ .next = NULL,
++ .name = "gss_pipe_downcall",
++ .file = "net/sunrpc/auth_gss/auth_gss.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000725_hash = {
++ .next = NULL,
++ .name = "handle_request",
++ .file = "drivers/firewire/core-cdev.c",
++ .param9 = 1,
++};
++struct size_overflow_hash _000726_hash = {
++ .next = NULL,
++ .name = "hash_new",
++ .file = "net/batman-adv/hash.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000727_hash = {
++ .next = NULL,
++ .name = "hash_setkey",
++ .file = "crypto/algif_hash.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000728_hash = {
++ .next = NULL,
++ .name = "hcd_buffer_alloc",
++ .file = "include/linux/usb/hcd.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _000729_hash = {
++ .next = NULL,
++ .name = "hci_sock_setsockopt",
++ .file = "net/bluetooth/hci_sock.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000730_hash = {
++ .next = NULL,
++ .name = "hdpvr_read",
++ .file = "drivers/media/video/hdpvr/hdpvr-video.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000731_hash = {
++ .next = NULL,
++ .name = "hidraw_get_report",
++ .file = "drivers/hid/hidraw.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000732_hash = {
++ .next = NULL,
++ .name = "hidraw_read",
++ .file = "drivers/hid/hidraw.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000733_hash = {
++ .next = NULL,
++ .name = "hidraw_send_report",
++ .file = "drivers/hid/hidraw.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000734_hash = {
++ .next = NULL,
++ .name = "hid_register_field",
++ .file = "drivers/hid/hid-core.c",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _000736_hash = {
++ .next = NULL,
++ .name = "hpfs_translate_name",
++ .file = "fs/hpfs/name.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000737_hash = {
++ .next = NULL,
++ .name = "hpi_alloc_control_cache",
++ .file = "sound/pci/asihpi/hpicmn.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000738_hash = {
++ .next = NULL,
++ .name = "ht40allow_map_read",
++ .file = "net/wireless/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000739_hash = {
++ .next = NULL,
++ .name = "__hwahc_dev_set_key",
++ .file = "drivers/usb/host/hwa-hc.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000740_hash = {
++ .next = NULL,
++ .name = "hwflags_read",
++ .file = "net/mac80211/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000741_hash = {
++ .next = NULL,
++ .name = "hysdn_conf_read",
++ .file = "drivers/isdn/hysdn/hysdn_procconf.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000742_hash = {
++ .next = NULL,
++ .name = "hysdn_conf_write",
++ .file = "drivers/isdn/hysdn/hysdn_procconf.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000743_hash = {
++ .next = NULL,
++ .name = "hysdn_log_write",
++ .file = "drivers/isdn/hysdn/hysdn_proclog.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000744_hash = {
++ .next = NULL,
++ .name = "i2400m_rx_stats_read",
++ .file = "drivers/net/wimax/i2400m/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000745_hash = {
++ .next = NULL,
++ .name = "i2400m_tx_stats_read",
++ .file = "drivers/net/wimax/i2400m/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000746_hash = {
++ .next = NULL,
++ .name = "__i2400mu_send_barker",
++ .file = "drivers/net/wimax/i2400m/usb.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000747_hash = {
++ .next = NULL,
++ .name = "i2400m_zrealloc_2x",
++ .file = "drivers/net/wimax/i2400m/fw.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000748_hash = {
++ .next = NULL,
++ .name = "i2cdev_read",
++ .file = "drivers/i2c/i2c-dev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000749_hash = {
++ .next = &_000459_hash,
++ .name = "i2cdev_write",
++ .file = "drivers/i2c/i2c-dev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000750_hash = {
++ .next = NULL,
++ .name = "ib_alloc_device",
++ .file = "include/rdma/ib_verbs.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000751_hash = {
++ .next = NULL,
++ .name = "ib_copy_from_udata",
++ .file = "include/rdma/ib_verbs.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000752_hash = {
++ .next = NULL,
++ .name = "ib_copy_to_udata",
++ .file = "include/rdma/ib_verbs.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000753_hash = {
++ .next = NULL,
++ .name = "ibmasm_new_command",
++ .file = "drivers/misc/ibmasm/command.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000754_hash = {
++ .next = NULL,
++ .name = "ib_ucm_alloc_data",
++ .file = "drivers/infiniband/core/ucm.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000755_hash = {
++ .next = NULL,
++ .name = "ib_umad_write",
++ .file = "drivers/infiniband/core/user_mad.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000756_hash = {
++ .next = NULL,
++ .name = "ib_uverbs_unmarshall_recv",
++ .file = "drivers/infiniband/core/uverbs_cmd.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000757_hash = {
++ .next = NULL,
++ .name = "ide_driver_proc_write",
++ .file = "drivers/ide/ide-proc.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000758_hash = {
++ .next = NULL,
++ .name = "ide_queue_pc_tail",
++ .file = "include/linux/ide.h",
++ .param5 = 1,
++};
++struct size_overflow_hash _000759_hash = {
++ .next = NULL,
++ .name = "ide_raw_taskfile",
++ .file = "include/linux/ide.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000760_hash = {
++ .next = NULL,
++ .name = "ide_settings_proc_write",
++ .file = "drivers/ide/ide-proc.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000761_hash = {
++ .next = NULL,
++ .name = "idetape_chrdev_read",
++ .file = "drivers/ide/ide-tape.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000762_hash = {
++ .next = NULL,
++ .name = "idetape_chrdev_write",
++ .file = "drivers/ide/ide-tape.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000763_hash = {
++ .next = NULL,
++ .name = "idmouse_read",
++ .file = "drivers/usb/misc/idmouse.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000764_hash = {
++ .next = NULL,
++ .name = "ieee80211_build_probe_req",
++ .file = "net/mac80211/util.c",
++ .param7 = 1,
++};
++struct size_overflow_hash _000765_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000766_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_write",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000767_hash = {
++ .next = NULL,
++ .name = "ieee80211_key_alloc",
++ .file = "net/mac80211/key.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000768_hash = {
++ .next = NULL,
++ .name = "ieee80211_mgmt_tx",
++ .file = "net/mac80211/cfg.c",
++ .param9 = 1,
++};
++struct size_overflow_hash _000769_hash = {
++ .next = NULL,
++ .name = "ikconfig_read_current",
++ .file = "kernel/configs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000770_hash = {
++ .next = NULL,
++ .name = "il3945_sta_dbgfs_stats_table_read",
++ .file = "drivers/net/wireless/iwlegacy/3945-rs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000771_hash = {
++ .next = NULL,
++ .name = "il3945_ucode_general_stats_read",
++ .file = "drivers/net/wireless/iwlegacy/3945-debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000772_hash = {
++ .next = NULL,
++ .name = "il3945_ucode_rx_stats_read",
++ .file = "drivers/net/wireless/iwlegacy/3945-debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000773_hash = {
++ .next = NULL,
++ .name = "il3945_ucode_tx_stats_read",
++ .file = "drivers/net/wireless/iwlegacy/3945-debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000774_hash = {
++ .next = NULL,
++ .name = "il4965_rs_sta_dbgfs_rate_scale_data_read",
++ .file = "drivers/net/wireless/iwlegacy/4965-rs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000775_hash = {
++ .next = NULL,
++ .name = "il4965_rs_sta_dbgfs_scale_table_read",
++ .file = "drivers/net/wireless/iwlegacy/4965-rs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000776_hash = {
++ .next = NULL,
++ .name = "il4965_rs_sta_dbgfs_stats_table_read",
++ .file = "drivers/net/wireless/iwlegacy/4965-rs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000777_hash = {
++ .next = NULL,
++ .name = "il4965_ucode_general_stats_read",
++ .file = "drivers/net/wireless/iwlegacy/4965-debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000778_hash = {
++ .next = NULL,
++ .name = "il4965_ucode_rx_stats_read",
++ .file = "drivers/net/wireless/iwlegacy/4965-debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000779_hash = {
++ .next = NULL,
++ .name = "il4965_ucode_tx_stats_read",
++ .file = "drivers/net/wireless/iwlegacy/4965-debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000780_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_chain_noise_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000781_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_channels_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000782_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_disable_ht40_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000783_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_fh_reg_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000784_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_force_reset_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000785_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_interrupt_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000786_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_missed_beacon_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000787_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_nvm_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000788_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_power_save_status_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000789_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_qos_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000790_hash = {
++ .next = &_000221_hash,
++ .name = "il_dbgfs_rxon_filter_flags_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000791_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_rxon_flags_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000792_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_rx_queue_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000793_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_rx_stats_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000794_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_sensitivity_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000795_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_sram_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000796_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_stations_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000797_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_status_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000798_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_traffic_log_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000799_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_tx_queue_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000800_hash = {
++ .next = NULL,
++ .name = "il_dbgfs_tx_stats_read",
++ .file = "drivers/net/wireless/iwlegacy/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000801_hash = {
++ .next = NULL,
++ .name = "ilo_read",
++ .file = "drivers/misc/hpilo.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000802_hash = {
++ .next = NULL,
++ .name = "ilo_write",
++ .file = "drivers/misc/hpilo.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000803_hash = {
++ .next = NULL,
++ .name = "init_data_container",
++ .file = "fs/btrfs/backref.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000804_hash = {
++ .next = NULL,
++ .name = "init_list_set",
++ .file = "net/netfilter/ipset/ip_set_list_set.c",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _000806_hash = {
++ .next = NULL,
++ .name = "interpret_user_input",
++ .file = "fs/ubifs/debug.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000807_hash = {
++ .next = NULL,
++ .name = "int_proc_write",
++ .file = "drivers/net/wireless/ray_cs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000808_hash = {
++ .next = NULL,
++ .name = "iowarrior_read",
++ .file = "drivers/usb/misc/iowarrior.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000809_hash = {
++ .next = NULL,
++ .name = "iowarrior_write",
++ .file = "drivers/usb/misc/iowarrior.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000810_hash = {
++ .next = NULL,
++ .name = "ip_set_alloc",
++ .file = "include/linux/netfilter/ipset/ip_set.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000811_hash = {
++ .next = NULL,
++ .name = "ip_vs_conn_fill_param_sync",
++ .file = "net/netfilter/ipvs/ip_vs_sync.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _000812_hash = {
++ .next = NULL,
++ .name = "irda_setsockopt",
++ .file = "net/irda/af_irda.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000813_hash = {
++ .next = NULL,
++ .name = "ir_lirc_transmit_ir",
++ .file = "drivers/media/rc/ir-lirc-codec.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000814_hash = {
++ .next = NULL,
++ .name = "irnet_ctrl_write",
++ .file = "net/irda/irnet/irnet_ppp.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000815_hash = {
++ .next = NULL,
++ .name = "iscsi_decode_text_input",
++ .file = "drivers/target/iscsi/iscsi_target_parameters.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000816_hash = {
++ .next = NULL,
++ .name = "iscsit_dump_data_payload",
++ .file = "drivers/target/iscsi/iscsi_target_erl1.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000817_hash = {
++ .next = NULL,
++ .name = "isdn_read",
++ .file = "drivers/isdn/i4l/isdn_common.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000818_hash = {
++ .next = NULL,
++ .name = "iso_callback",
++ .file = "drivers/firewire/core-cdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000819_hash = {
++ .next = NULL,
++ .name = "iso_packets_buffer_init",
++ .file = "sound/firewire/packets-buffer.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000820_hash = {
++ .next = NULL,
++ .name = "iso_sched_alloc",
++ .file = "drivers/usb/host/ehci-sched.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000821_hash = {
++ .next = NULL,
++ .name = "isr_cmd_cmplt_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000822_hash = {
++ .next = NULL,
++ .name = "isr_commands_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000823_hash = {
++ .next = NULL,
++ .name = "isr_decrypt_done_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000824_hash = {
++ .next = NULL,
++ .name = "isr_dma0_done_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000825_hash = {
++ .next = NULL,
++ .name = "isr_dma1_done_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000826_hash = {
++ .next = NULL,
++ .name = "isr_fiqs_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000827_hash = {
++ .next = NULL,
++ .name = "isr_host_acknowledges_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000828_hash = {
++ .next = &_000629_hash,
++ .name = "isr_hw_pm_mode_changes_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000829_hash = {
++ .next = &_000329_hash,
++ .name = "isr_irqs_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000830_hash = {
++ .next = NULL,
++ .name = "isr_low_rssi_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000831_hash = {
++ .next = NULL,
++ .name = "isr_pci_pm_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000832_hash = {
++ .next = NULL,
++ .name = "isr_rx_headers_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000833_hash = {
++ .next = NULL,
++ .name = "isr_rx_mem_overflow_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000834_hash = {
++ .next = NULL,
++ .name = "isr_rx_procs_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000835_hash = {
++ .next = NULL,
++ .name = "isr_rx_rdys_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000836_hash = {
++ .next = NULL,
++ .name = "isr_tx_exch_complete_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000837_hash = {
++ .next = NULL,
++ .name = "isr_tx_procs_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000838_hash = {
++ .next = NULL,
++ .name = "isr_wakeups_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000839_hash = {
++ .next = NULL,
++ .name = "ivtv_copy_buf_to_user",
++ .file = "drivers/media/video/ivtv/ivtv-fileops.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000840_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_bt_traffic_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000841_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_chain_noise_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000842_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_channels_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000843_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_current_sleep_command_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000844_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_debug_level_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000845_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_debug_level_write",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000846_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_disable_ht40_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000847_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_fh_reg_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000848_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_force_reset_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000849_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_interrupt_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000850_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_log_event_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000851_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_missed_beacon_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000852_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_nvm_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000853_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_plcp_delta_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000854_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_power_save_status_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000855_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_protection_mode_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000856_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_qos_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000857_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_reply_tx_error_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000858_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_rx_handlers_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000859_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_rxon_filter_flags_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000860_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_rxon_flags_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000861_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_rx_queue_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000862_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_rx_statistics_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000863_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_sensitivity_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000864_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_sleep_level_override_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000865_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_sram_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000866_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_stations_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000867_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_status_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000868_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_temperature_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000869_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_thermal_throttling_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000870_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_traffic_log_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000871_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_tx_queue_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-trans-pcie.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000872_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_tx_statistics_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000873_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_ucode_bt_stats_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000874_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_ucode_general_stats_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000875_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_ucode_rx_stats_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000876_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_ucode_tracing_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000877_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_ucode_tx_stats_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000878_hash = {
++ .next = NULL,
++ .name = "iwl_dbgfs_wowlan_sram_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000879_hash = {
++ .next = NULL,
++ .name = "iwmct_fw_parser_init",
++ .file = "drivers/misc/iwmc3200top/fw-download.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000880_hash = {
++ .next = NULL,
++ .name = "iwm_notif_send",
++ .file = "drivers/net/wireless/iwmc3200wifi/main.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _000881_hash = {
++ .next = NULL,
++ .name = "iwm_ntf_calib_res",
++ .file = "drivers/net/wireless/iwmc3200wifi/rx.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000882_hash = {
++ .next = NULL,
++ .name = "iwm_umac_set_config_var",
++ .file = "drivers/net/wireless/iwmc3200wifi/commands.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000883_hash = {
++ .next = NULL,
++ .name = "jbd2_alloc",
++ .file = "include/linux/jbd2.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000884_hash = {
++ .next = NULL,
++ .name = "key_algorithm_read",
++ .file = "net/mac80211/debugfs_key.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000885_hash = {
++ .next = NULL,
++ .name = "key_icverrors_read",
++ .file = "net/mac80211/debugfs_key.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000886_hash = {
++ .next = NULL,
++ .name = "key_key_read",
++ .file = "net/mac80211/debugfs_key.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000887_hash = {
++ .next = NULL,
++ .name = "key_replays_read",
++ .file = "net/mac80211/debugfs_key.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000888_hash = {
++ .next = NULL,
++ .name = "key_rx_spec_read",
++ .file = "net/mac80211/debugfs_key.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000889_hash = {
++ .next = NULL,
++ .name = "key_tx_spec_read",
++ .file = "net/mac80211/debugfs_key.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000890_hash = {
++ .next = NULL,
++ .name = "kmem_alloc",
++ .file = "fs/xfs/kmem.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000891_hash = {
++ .next = NULL,
++ .name = "kmem_zalloc_large",
++ .file = "fs/xfs/kmem.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000892_hash = {
++ .next = NULL,
++ .name = "kone_receive",
++ .file = "drivers/hid/hid-roccat-kone.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000893_hash = {
++ .next = NULL,
++ .name = "kone_send",
++ .file = "drivers/hid/hid-roccat-kone.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000894_hash = {
++ .next = NULL,
++ .name = "kvm_read_guest_atomic",
++ .file = "include/linux/kvm_host.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000895_hash = {
++ .next = NULL,
++ .name = "kvm_read_guest_cached",
++ .file = "include/linux/kvm_host.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000896_hash = {
++ .next = NULL,
++ .name = "kvm_set_irq_routing",
++ .file = "include/linux/kvm_host.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000897_hash = {
++ .next = NULL,
++ .name = "kvm_write_guest_cached",
++ .file = "include/linux/kvm_host.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _000898_hash = {
++ .next = NULL,
++ .name = "l2cap_sock_setsockopt",
++ .file = "net/bluetooth/l2cap_sock.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000899_hash = {
++ .next = NULL,
++ .name = "l2cap_sock_setsockopt_old",
++ .file = "net/bluetooth/l2cap_sock.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000900_hash = {
++ .next = NULL,
++ .name = "lane2_associate_req",
++ .file = "net/atm/lec.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000901_hash = {
++ .next = NULL,
++ .name = "lbs_debugfs_read",
++ .file = "drivers/net/wireless/libertas/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000902_hash = {
++ .next = NULL,
++ .name = "lbs_debugfs_write",
++ .file = "drivers/net/wireless/libertas/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000903_hash = {
++ .next = NULL,
++ .name = "lbs_dev_info",
++ .file = "drivers/net/wireless/libertas/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000904_hash = {
++ .next = NULL,
++ .name = "lbs_host_sleep_read",
++ .file = "drivers/net/wireless/libertas/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000905_hash = {
++ .next = NULL,
++ .name = "lbs_rdbbp_read",
++ .file = "drivers/net/wireless/libertas/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000906_hash = {
++ .next = NULL,
++ .name = "lbs_rdmac_read",
++ .file = "drivers/net/wireless/libertas/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000907_hash = {
++ .next = NULL,
++ .name = "lbs_rdrf_read",
++ .file = "drivers/net/wireless/libertas/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000908_hash = {
++ .next = NULL,
++ .name = "lbs_sleepparams_read",
++ .file = "drivers/net/wireless/libertas/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000909_hash = {
++ .next = NULL,
++ .name = "lbs_threshold_read",
++ .file = "drivers/net/wireless/libertas/debugfs.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000910_hash = {
++ .next = NULL,
++ .name = "lc_create",
++ .file = "include/linux/lru_cache.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000911_hash = {
++ .next = NULL,
++ .name = "lcd_write",
++ .file = "drivers/usb/misc/usblcd.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000912_hash = {
++ .next = NULL,
++ .name = "leaf_dealloc",
++ .file = "fs/gfs2/dir.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000913_hash = {
++ .next = NULL,
++ .name = "__lgread",
++ .file = "drivers/lguest/core.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000914_hash = {
++ .next = NULL,
++ .name = "__lgwrite",
++ .file = "drivers/lguest/core.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000915_hash = {
++ .next = NULL,
++ .name = "link_send_sections_long",
++ .file = "net/tipc/link.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000916_hash = {
++ .next = NULL,
++ .name = "lirc_buffer_init",
++ .file = "include/media/lirc_dev.h",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _000918_hash = {
++ .next = NULL,
++ .name = "lkdtm_debugfs_read",
++ .file = "drivers/misc/lkdtm.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000919_hash = {
++ .next = NULL,
++ .name = "LoadBitmap",
++ .file = "drivers/media/dvb/ttpci/av7110_hw.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000920_hash = {
++ .next = NULL,
++ .name = "long_retry_limit_read",
++ .file = "net/wireless/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000921_hash = {
++ .next = NULL,
++ .name = "lpfc_debugfs_dif_err_read",
++ .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000922_hash = {
++ .next = NULL,
++ .name = "lpfc_debugfs_dif_err_write",
++ .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000923_hash = {
++ .next = NULL,
++ .name = "lpfc_debugfs_read",
++ .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000924_hash = {
++ .next = NULL,
++ .name = "lpfc_idiag_baracc_read",
++ .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000925_hash = {
++ .next = NULL,
++ .name = "lpfc_idiag_ctlacc_read",
++ .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000926_hash = {
++ .next = NULL,
++ .name = "lpfc_idiag_drbacc_read",
++ .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000927_hash = {
++ .next = NULL,
++ .name = "lpfc_idiag_extacc_read",
++ .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000928_hash = {
++ .next = NULL,
++ .name = "lpfc_idiag_mbxacc_read",
++ .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000929_hash = {
++ .next = NULL,
++ .name = "lpfc_idiag_pcicfg_read",
++ .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000930_hash = {
++ .next = NULL,
++ .name = "lpfc_idiag_queacc_read",
++ .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000931_hash = {
++ .next = NULL,
++ .name = "lpfc_idiag_queinfo_read",
++ .file = "drivers/scsi/lpfc/lpfc_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000932_hash = {
++ .next = NULL,
++ .name = "lpfc_sli4_queue_alloc",
++ .file = "drivers/scsi/lpfc/lpfc_sli.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000933_hash = {
++ .next = NULL,
++ .name = "lp_write",
++ .file = "drivers/char/lp.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000934_hash = {
++ .next = NULL,
++ .name = "mac80211_format_buffer",
++ .file = "net/mac80211/debugfs.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000935_hash = {
++ .next = NULL,
++ .name = "mce_write",
++ .file = "arch/x86/kernel/cpu/mcheck/mce-inject.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000936_hash = {
++ .next = NULL,
++ .name = "mcs7830_get_reg",
++ .file = "drivers/net/usb/mcs7830.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000937_hash = {
++ .next = NULL,
++ .name = "mcs7830_set_reg",
++ .file = "drivers/net/usb/mcs7830.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000938_hash = {
++ .next = NULL,
++ .name = "mdc800_device_read",
++ .file = "drivers/usb/image/mdc800.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000939_hash = {
++ .next = NULL,
++ .name = "mdiobus_alloc_size",
++ .file = "include/linux/phy.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000940_hash = {
++ .next = NULL,
++ .name = "media_entity_init",
++ .file = "include/media/media-entity.h",
++ .param2 = 1,
++ .param4 = 1,
++};
++struct size_overflow_hash _000942_hash = {
++ .next = NULL,
++ .name = "memstick_alloc_host",
++ .file = "include/linux/memstick.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000943_hash = {
++ .next = NULL,
++ .name = "mgmt_control",
++ .file = "include/net/bluetooth/hci_core.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _000944_hash = {
++ .next = NULL,
++ .name = "mgmt_pending_add",
++ .file = "net/bluetooth/mgmt.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000945_hash = {
++ .next = &_000321_hash,
++ .name = "mic_calc_failure_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000946_hash = {
++ .next = NULL,
++ .name = "mic_rx_pkts_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000947_hash = {
++ .next = NULL,
++ .name = "minstrel_stats_read",
++ .file = "net/mac80211/rc80211_minstrel_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000948_hash = {
++ .next = NULL,
++ .name = "mlx4_en_create_rx_ring",
++ .file = "drivers/net/ethernet/mellanox/mlx4/en_rx.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000949_hash = {
++ .next = NULL,
++ .name = "mlx4_en_create_tx_ring",
++ .file = "drivers/net/ethernet/mellanox/mlx4/en_tx.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000950_hash = {
++ .next = NULL,
++ .name = "mmc_ext_csd_read",
++ .file = "drivers/mmc/core/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000951_hash = {
++ .next = NULL,
++ .name = "mmc_send_bus_test",
++ .file = "drivers/mmc/core/mmc_ops.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000952_hash = {
++ .next = NULL,
++ .name = "mmc_send_cxd_data",
++ .file = "drivers/mmc/core/mmc_ops.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000953_hash = {
++ .next = NULL,
++ .name = "mmc_test_alloc_mem",
++ .file = "drivers/mmc/card/mmc_test.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000954_hash = {
++ .next = NULL,
++ .name = "mon_bin_get_event",
++ .file = "drivers/usb/mon/mon_bin.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000955_hash = {
++ .next = NULL,
++ .name = "mon_stat_read",
++ .file = "drivers/usb/mon/mon_stat.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000956_hash = {
++ .next = NULL,
++ .name = "mptctl_getiocinfo",
++ .file = "drivers/message/fusion/mptctl.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000957_hash = {
++ .next = NULL,
++ .name = "msnd_fifo_alloc",
++ .file = "sound/oss/msnd.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000958_hash = {
++ .next = NULL,
++ .name = "mtdchar_readoob",
++ .file = "drivers/mtd/mtdchar.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000959_hash = {
++ .next = NULL,
++ .name = "mtdchar_write",
++ .file = "drivers/mtd/mtdchar.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000960_hash = {
++ .next = NULL,
++ .name = "mtdchar_writeoob",
++ .file = "drivers/mtd/mtdchar.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000961_hash = {
++ .next = NULL,
++ .name = "mtdswap_init",
++ .file = "drivers/mtd/mtdswap.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000962_hash = {
++ .next = NULL,
++ .name = "mtf_test_write",
++ .file = "drivers/mmc/card/mmc_test.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000963_hash = {
++ .next = NULL,
++ .name = "musb_test_mode_write",
++ .file = "drivers/usb/musb/musb_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000964_hash = {
++ .next = NULL,
++ .name = "mvumi_alloc_mem_resource",
++ .file = "drivers/scsi/mvumi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000965_hash = {
++ .next = NULL,
++ .name = "mwifiex_alloc_sdio_mpa_buffers",
++ .file = "drivers/net/wireless/mwifiex/sdio.c",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _000967_hash = {
++ .next = NULL,
++ .name = "mwifiex_debug_read",
++ .file = "drivers/net/wireless/mwifiex/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000968_hash = {
++ .next = NULL,
++ .name = "mwifiex_get_common_rates",
++ .file = "drivers/net/wireless/mwifiex/join.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000969_hash = {
++ .next = NULL,
++ .name = "mwifiex_getlog_read",
++ .file = "drivers/net/wireless/mwifiex/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000970_hash = {
++ .next = NULL,
++ .name = "mwifiex_info_read",
++ .file = "drivers/net/wireless/mwifiex/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000971_hash = {
++ .next = NULL,
++ .name = "mwifiex_rdeeprom_read",
++ .file = "drivers/net/wireless/mwifiex/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000972_hash = {
++ .next = NULL,
++ .name = "mwifiex_regrdwr_read",
++ .file = "drivers/net/wireless/mwifiex/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000973_hash = {
++ .next = NULL,
++ .name = "mwifiex_update_curr_bss_params",
++ .file = "drivers/net/wireless/mwifiex/scan.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000974_hash = {
++ .next = NULL,
++ .name = "nand_bch_init",
++ .file = "include/linux/mtd/nand_bch.h",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _000976_hash = {
++ .next = NULL,
++ .name = "ncp_file_write",
++ .file = "fs/ncpfs/file.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000977_hash = {
++ .next = NULL,
++ .name = "ncp__vol2io",
++ .file = "fs/ncpfs/ncplib_kernel.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _000978_hash = {
++ .next = NULL,
++ .name = "new_bind_ctl",
++ .file = "sound/pci/hda/patch_realtek.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _000979_hash = {
++ .next = NULL,
++ .name = "nfc_llcp_build_tlv",
++ .file = "net/nfc/llcp/commands.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000980_hash = {
++ .next = NULL,
++ .name = "nfs4_alloc_slots",
++ .file = "fs/nfs/nfs4proc.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000981_hash = {
++ .next = NULL,
++ .name = "nfs4_write_cached_acl",
++ .file = "fs/nfs/nfs4proc.c",
++ .param3 = 1,
++ .param4 = 1,
++};
++struct size_overflow_hash _000982_hash = {
++ .next = NULL,
++ .name = "nfsctl_transaction_read",
++ .file = "fs/nfsd/nfsctl.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000983_hash = {
++ .next = NULL,
++ .name = "nfsctl_transaction_write",
++ .file = "fs/nfsd/nfsctl.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000984_hash = {
++ .next = NULL,
++ .name = "nfsd_cache_update",
++ .file = "fs/nfsd/nfscache.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000985_hash = {
++ .next = NULL,
++ .name = "nfs_idmap_get_desc",
++ .file = "fs/nfs/idmap.c",
++ .param2 = 1,
++ .param4 = 1,
++};
++struct size_overflow_hash _000987_hash = {
++ .next = NULL,
++ .name = "nfs_readdata_alloc",
++ .file = "include/linux/nfs_fs.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000988_hash = {
++ .next = NULL,
++ .name = "nfs_readdir_make_qstr",
++ .file = "fs/nfs/dir.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000989_hash = {
++ .next = NULL,
++ .name = "nfs_writedata_alloc",
++ .file = "include/linux/nfs_fs.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000990_hash = {
++ .next = NULL,
++ .name = "nsm_create_handle",
++ .file = "fs/lockd/mon.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _000991_hash = {
++ .next = NULL,
++ .name = "ntfs_copy_from_user",
++ .file = "fs/ntfs/file.c",
++ .param3 = 1,
++ .param5 = 1,
++};
++struct size_overflow_hash _000993_hash = {
++ .next = NULL,
++ .name = "__ntfs_copy_from_user_iovec_inatomic",
++ .file = "fs/ntfs/file.c",
++ .param3 = 1,
++ .param4 = 1,
++};
++struct size_overflow_hash _000995_hash = {
++ .next = NULL,
++ .name = "__ntfs_malloc",
++ .file = "fs/ntfs/malloc.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _000996_hash = {
++ .next = NULL,
++ .name = "nvme_alloc_iod",
++ .file = "drivers/block/nvme.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _000997_hash = {
++ .next = NULL,
++ .name = "nvram_write",
++ .file = "drivers/char/nvram.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000998_hash = {
++ .next = NULL,
++ .name = "o2hb_debug_read",
++ .file = "fs/ocfs2/cluster/heartbeat.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _000999_hash = {
++ .next = NULL,
++ .name = "o2net_debug_read",
++ .file = "fs/ocfs2/cluster/netdebug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001000_hash = {
++ .next = NULL,
++ .name = "o2net_send_message_vec",
++ .file = "fs/ocfs2/cluster/tcp.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001001_hash = {
++ .next = NULL,
++ .name = "ocfs2_control_cfu",
++ .file = "fs/ocfs2/stack_user.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001002_hash = {
++ .next = NULL,
++ .name = "ocfs2_control_read",
++ .file = "fs/ocfs2/stack_user.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001003_hash = {
++ .next = NULL,
++ .name = "ocfs2_debug_read",
++ .file = "fs/ocfs2/super.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001004_hash = {
++ .next = NULL,
++ .name = "opera1_xilinx_rw",
++ .file = "drivers/media/dvb/dvb-usb/opera1.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001005_hash = {
++ .next = NULL,
++ .name = "oprofilefs_str_to_user",
++ .file = "include/linux/oprofile.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001006_hash = {
++ .next = NULL,
++ .name = "oprofilefs_ulong_from_user",
++ .file = "include/linux/oprofile.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001007_hash = {
++ .next = &_000626_hash,
++ .name = "oprofilefs_ulong_to_user",
++ .file = "include/linux/oprofile.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001008_hash = {
++ .next = NULL,
++ .name = "_ore_get_io_state",
++ .file = "fs/exofs/ore.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001009_hash = {
++ .next = NULL,
++ .name = "_osd_realloc_seg",
++ .file = "drivers/scsi/osd/osd_initiator.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001010_hash = {
++ .next = NULL,
++ .name = "_osd_req_list_objects",
++ .file = "drivers/scsi/osd/osd_initiator.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _001011_hash = {
++ .next = NULL,
++ .name = "osd_req_read_kern",
++ .file = "include/scsi/osd_initiator.h",
++ .param5 = 1,
++};
++struct size_overflow_hash _001012_hash = {
++ .next = NULL,
++ .name = "osd_req_write_kern",
++ .file = "include/scsi/osd_initiator.h",
++ .param5 = 1,
++};
++struct size_overflow_hash _001013_hash = {
++ .next = NULL,
++ .name = "osst_execute",
++ .file = "drivers/scsi/osst.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _001014_hash = {
++ .next = NULL,
++ .name = "otp_read",
++ .file = "drivers/mtd/devices/mtd_dataflash.c",
++ .param2 = 1,
++ .param5 = 1,
++};
++struct size_overflow_hash _001016_hash = {
++ .next = NULL,
++ .name = "packet_buffer_init",
++ .file = "drivers/firewire/nosy.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001017_hash = {
++ .next = NULL,
++ .name = "packet_setsockopt",
++ .file = "net/packet/af_packet.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001018_hash = {
++ .next = NULL,
++ .name = "parse_arg",
++ .file = "drivers/platform/x86/asus_acpi.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001019_hash = {
++ .next = NULL,
++ .name = "parse_command",
++ .file = "fs/binfmt_misc.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001020_hash = {
++ .next = NULL,
++ .name = "pcmcia_replace_cis",
++ .file = "drivers/pcmcia/cistpl.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001021_hash = {
++ .next = NULL,
++ .name = "pcnet32_realloc_rx_ring",
++ .file = "drivers/net/ethernet/amd/pcnet32.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001022_hash = {
++ .next = NULL,
++ .name = "pcnet32_realloc_tx_ring",
++ .file = "drivers/net/ethernet/amd/pcnet32.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001023_hash = {
++ .next = NULL,
++ .name = "pgctrl_write",
++ .file = "net/core/pktgen.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001024_hash = {
++ .next = NULL,
++ .name = "pg_read",
++ .file = "drivers/block/paride/pg.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001025_hash = {
++ .next = NULL,
++ .name = "pg_write",
++ .file = "drivers/block/paride/pg.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001026_hash = {
++ .next = NULL,
++ .name = "picolcd_debug_eeprom_read",
++ .file = "drivers/hid/hid-picolcd.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001027_hash = {
++ .next = NULL,
++ .name = "pkt_add",
++ .file = "drivers/usb/serial/garmin_gps.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001028_hash = {
++ .next = NULL,
++ .name = "pktgen_if_write",
++ .file = "net/core/pktgen.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001029_hash = {
++ .next = NULL,
++ .name = "platform_list_read_file",
++ .file = "sound/soc/soc-core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001030_hash = {
++ .next = NULL,
++ .name = "pm8001_store_update_fw",
++ .file = "drivers/scsi/pm8001/pm8001_ctl.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001031_hash = {
++ .next = NULL,
++ .name = "port_show_regs",
++ .file = "drivers/tty/serial/mfd.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001032_hash = {
++ .next = NULL,
++ .name = "ppp_cp_parse_cr",
++ .file = "drivers/net/wan/hdlc_ppp.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001033_hash = {
++ .next = NULL,
++ .name = "ppp_write",
++ .file = "drivers/net/ppp/ppp_generic.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001034_hash = {
++ .next = NULL,
++ .name = "pp_read",
++ .file = "drivers/char/ppdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001035_hash = {
++ .next = NULL,
++ .name = "pp_write",
++ .file = "drivers/char/ppdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001036_hash = {
++ .next = NULL,
++ .name = "printer_read",
++ .file = "drivers/usb/gadget/printer.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001037_hash = {
++ .next = NULL,
++ .name = "printer_req_alloc",
++ .file = "drivers/usb/gadget/printer.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001038_hash = {
++ .next = NULL,
++ .name = "printer_write",
++ .file = "drivers/usb/gadget/printer.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001039_hash = {
++ .next = NULL,
++ .name = "prism2_set_genericelement",
++ .file = "drivers/net/wireless/hostap/hostap_ioctl.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001040_hash = {
++ .next = NULL,
++ .name = "proc_read",
++ .file = "drivers/net/wireless/airo.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001041_hash = {
++ .next = NULL,
++ .name = "proc_scsi_devinfo_write",
++ .file = "drivers/scsi/scsi_devinfo.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001042_hash = {
++ .next = NULL,
++ .name = "proc_scsi_write",
++ .file = "drivers/scsi/scsi_proc.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001043_hash = {
++ .next = NULL,
++ .name = "proc_scsi_write_proc",
++ .file = "drivers/scsi/scsi_proc.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001044_hash = {
++ .next = NULL,
++ .name = "proc_write",
++ .file = "drivers/net/wireless/airo.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001045_hash = {
++ .next = NULL,
++ .name = "provide_user_output",
++ .file = "fs/ubifs/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001046_hash = {
++ .next = NULL,
++ .name = "ps_pspoll_max_apturn_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001047_hash = {
++ .next = NULL,
++ .name = "ps_pspoll_timeouts_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001048_hash = {
++ .next = NULL,
++ .name = "ps_pspoll_utilization_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001049_hash = {
++ .next = NULL,
++ .name = "ps_upsd_max_apturn_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001050_hash = {
++ .next = NULL,
++ .name = "ps_upsd_max_sptime_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001051_hash = {
++ .next = NULL,
++ .name = "ps_upsd_timeouts_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001052_hash = {
++ .next = NULL,
++ .name = "ps_upsd_utilization_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001053_hash = {
++ .next = NULL,
++ .name = "pti_char_write",
++ .file = "drivers/misc/pti.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001054_hash = {
++ .next = NULL,
++ .name = "pt_read",
++ .file = "drivers/block/paride/pt.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001055_hash = {
++ .next = NULL,
++ .name = "pt_write",
++ .file = "drivers/block/paride/pt.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001056_hash = {
++ .next = NULL,
++ .name = "pvr2_ioread_read",
++ .file = "drivers/media/video/pvrusb2/pvrusb2-ioread.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001057_hash = {
++ .next = NULL,
++ .name = "pvr2_ioread_set_sync_key",
++ .file = "drivers/media/video/pvrusb2/pvrusb2-ioread.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001058_hash = {
++ .next = NULL,
++ .name = "pvr2_stream_buffer_count",
++ .file = "drivers/media/video/pvrusb2/pvrusb2-io.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001059_hash = {
++ .next = NULL,
++ .name = "pwr_disable_ps_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001060_hash = {
++ .next = NULL,
++ .name = "pwr_elp_enter_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001061_hash = {
++ .next = NULL,
++ .name = "pwr_enable_ps_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001062_hash = {
++ .next = NULL,
++ .name = "pwr_fix_tsf_ps_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001063_hash = {
++ .next = NULL,
++ .name = "pwr_missing_bcns_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001064_hash = {
++ .next = NULL,
++ .name = "pwr_power_save_off_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001065_hash = {
++ .next = NULL,
++ .name = "pwr_ps_enter_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001066_hash = {
++ .next = NULL,
++ .name = "pwr_rcvd_awake_beacons_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001067_hash = {
++ .next = NULL,
++ .name = "pwr_rcvd_beacons_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001068_hash = {
++ .next = NULL,
++ .name = "pwr_tx_without_ps_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001069_hash = {
++ .next = NULL,
++ .name = "pwr_tx_with_ps_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001070_hash = {
++ .next = NULL,
++ .name = "pwr_wake_on_host_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001071_hash = {
++ .next = NULL,
++ .name = "pwr_wake_on_timer_exp_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001072_hash = {
++ .next = NULL,
++ .name = "qc_capture",
++ .file = "drivers/media/video/c-qcam.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001073_hash = {
++ .next = NULL,
++ .name = "qla2x00_get_ctx_bsg_sp",
++ .file = "drivers/scsi/qla2xxx/qla_bsg.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001074_hash = {
++ .next = NULL,
++ .name = "qla2x00_get_ctx_sp",
++ .file = "drivers/scsi/qla2xxx/qla_init.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001075_hash = {
++ .next = NULL,
++ .name = "qlcnic_alloc_msix_entries",
++ .file = "drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001076_hash = {
++ .next = NULL,
++ .name = "queues_read",
++ .file = "net/mac80211/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001077_hash = {
++ .next = NULL,
++ .name = "r3964_write",
++ .file = "drivers/tty/n_r3964.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001078_hash = {
++ .next = NULL,
++ .name = "raw_setsockopt",
++ .file = "net/can/raw.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001079_hash = {
++ .next = NULL,
++ .name = "ray_cs_essid_proc_write",
++ .file = "drivers/net/wireless/ray_cs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001080_hash = {
++ .next = NULL,
++ .name = "rbd_snap_add",
++ .file = "drivers/block/rbd.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001081_hash = {
++ .next = NULL,
++ .name = "rcname_read",
++ .file = "net/mac80211/rate.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001082_hash = {
++ .next = NULL,
++ .name = "rds_message_alloc",
++ .file = "net/rds/message.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001083_hash = {
++ .next = NULL,
++ .name = "rds_page_copy_user",
++ .file = "net/rds/page.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001084_hash = {
++ .next = NULL,
++ .name = "read",
++ .file = "drivers/pci/hotplug/cpqphp_sysfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001085_hash = {
++ .next = NULL,
++ .name = "read_4k_modal_eeprom",
++ .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001086_hash = {
++ .next = NULL,
++ .name = "read_9287_modal_eeprom",
++ .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001087_hash = {
++ .next = NULL,
++ .name = "read_buf",
++ .file = "fs/nfsd/nfs4xdr.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001088_hash = {
++ .next = NULL,
++ .name = "read_cis_cache",
++ .file = "drivers/pcmcia/cistpl.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001089_hash = {
++ .next = NULL,
++ .name = "read_def_modal_eeprom",
++ .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001090_hash = {
++ .next = NULL,
++ .name = "read_file_ani",
++ .file = "drivers/net/wireless/ath/ath5k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001091_hash = {
++ .next = NULL,
++ .name = "read_file_antenna",
++ .file = "drivers/net/wireless/ath/ath5k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001092_hash = {
++ .next = NULL,
++ .name = "read_file_base_eeprom",
++ .file = "drivers/net/wireless/ath/ath9k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001093_hash = {
++ .next = NULL,
++ .name = "read_file_base_eeprom",
++ .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001094_hash = {
++ .next = NULL,
++ .name = "read_file_beacon",
++ .file = "drivers/net/wireless/ath/ath5k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001095_hash = {
++ .next = NULL,
++ .name = "read_file_credit_dist_stats",
++ .file = "drivers/net/wireless/ath/ath6kl/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001096_hash = {
++ .next = NULL,
++ .name = "read_file_debug",
++ .file = "drivers/net/wireless/ath/ath9k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001097_hash = {
++ .next = NULL,
++ .name = "read_file_debug",
++ .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001098_hash = {
++ .next = NULL,
++ .name = "read_file_debug",
++ .file = "drivers/net/wireless/ath/ath5k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001099_hash = {
++ .next = NULL,
++ .name = "read_file_disable_ani",
++ .file = "drivers/net/wireless/ath/ath9k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001100_hash = {
++ .next = NULL,
++ .name = "read_file_dma",
++ .file = "drivers/net/wireless/ath/ath9k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001101_hash = {
++ .next = NULL,
++ .name = "read_file_dump_nfcal",
++ .file = "drivers/net/wireless/ath/ath9k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001102_hash = {
++ .next = NULL,
++ .name = "read_file_frameerrors",
++ .file = "drivers/net/wireless/ath/ath5k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001103_hash = {
++ .next = NULL,
++ .name = "read_file_interrupt",
++ .file = "drivers/net/wireless/ath/ath9k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001104_hash = {
++ .next = NULL,
++ .name = "read_file_misc",
++ .file = "drivers/net/wireless/ath/ath5k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001105_hash = {
++ .next = NULL,
++ .name = "read_file_misc",
++ .file = "drivers/net/wireless/ath/ath9k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001106_hash = {
++ .next = NULL,
++ .name = "read_file_modal_eeprom",
++ .file = "drivers/net/wireless/ath/ath9k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001107_hash = {
++ .next = NULL,
++ .name = "read_file_queue",
++ .file = "drivers/net/wireless/ath/ath5k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001108_hash = {
++ .next = NULL,
++ .name = "read_file_queue",
++ .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001109_hash = {
++ .next = NULL,
++ .name = "read_file_rcstat",
++ .file = "drivers/net/wireless/ath/ath9k/rc.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001110_hash = {
++ .next = NULL,
++ .name = "read_file_recv",
++ .file = "drivers/net/wireless/ath/ath9k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001111_hash = {
++ .next = NULL,
++ .name = "read_file_recv",
++ .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001112_hash = {
++ .next = NULL,
++ .name = "read_file_regidx",
++ .file = "drivers/net/wireless/ath/ath9k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001113_hash = {
++ .next = &_001103_hash,
++ .name = "read_file_regval",
++ .file = "drivers/net/wireless/ath/ath9k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001114_hash = {
++ .next = NULL,
++ .name = "read_file_rx_chainmask",
++ .file = "drivers/net/wireless/ath/ath9k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001115_hash = {
++ .next = NULL,
++ .name = "read_file_slot",
++ .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001116_hash = {
++ .next = NULL,
++ .name = "read_file_stations",
++ .file = "drivers/net/wireless/ath/ath9k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001117_hash = {
++ .next = NULL,
++ .name = "read_file_tgt_int_stats",
++ .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001118_hash = {
++ .next = NULL,
++ .name = "read_file_tgt_rx_stats",
++ .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001119_hash = {
++ .next = NULL,
++ .name = "read_file_tgt_stats",
++ .file = "drivers/net/wireless/ath/ath6kl/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001120_hash = {
++ .next = NULL,
++ .name = "read_file_tgt_tx_stats",
++ .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001121_hash = {
++ .next = NULL,
++ .name = "read_file_tx_chainmask",
++ .file = "drivers/net/wireless/ath/ath9k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001122_hash = {
++ .next = NULL,
++ .name = "read_file_war_stats",
++ .file = "drivers/net/wireless/ath/ath6kl/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001123_hash = {
++ .next = NULL,
++ .name = "read_file_wiphy",
++ .file = "drivers/net/wireless/ath/ath9k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001124_hash = {
++ .next = NULL,
++ .name = "read_file_xmit",
++ .file = "drivers/net/wireless/ath/ath9k/htc_drv_debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001125_hash = {
++ .next = NULL,
++ .name = "read_file_xmit",
++ .file = "drivers/net/wireless/ath/ath9k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001126_hash = {
++ .next = NULL,
++ .name = "read_flush",
++ .file = "net/sunrpc/cache.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001127_hash = {
++ .next = NULL,
++ .name = "realloc_buffer",
++ .file = "drivers/scsi/device_handler/scsi_dh_alua.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001128_hash = {
++ .next = NULL,
++ .name = "receive_DataRequest",
++ .file = "drivers/block/drbd/drbd_receiver.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001129_hash = {
++ .next = NULL,
++ .name = "recent_mt_proc_write",
++ .file = "net/netfilter/xt_recent.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001130_hash = {
++ .next = NULL,
++ .name = "redrat3_transmit_ir",
++ .file = "drivers/media/rc/redrat3.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001131_hash = {
++ .next = NULL,
++ .name = "reg_w_buf",
++ .file = "drivers/media/video/gspca/t613.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001132_hash = {
++ .next = NULL,
++ .name = "reg_w_ixbuf",
++ .file = "drivers/media/video/gspca/t613.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001133_hash = {
++ .next = NULL,
++ .name = "reiserfs_allocate_list_bitmaps",
++ .file = "include/linux/reiserfs_fs.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001134_hash = {
++ .next = NULL,
++ .name = "reiserfs_resize",
++ .file = "include/linux/reiserfs_fs_sb.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _001135_hash = {
++ .next = NULL,
++ .name = "remote_settings_file_write",
++ .file = "drivers/misc/ibmasm/ibmasmfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001136_hash = {
++ .next = NULL,
++ .name = "_req_append_segment",
++ .file = "drivers/scsi/osd/osd_initiator.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001137_hash = {
++ .next = NULL,
++ .name = "retry_count_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001138_hash = {
++ .next = NULL,
++ .name = "revalidate",
++ .file = "drivers/block/aoe/aoechr.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001139_hash = {
++ .next = NULL,
++ .name = "rfcomm_sock_setsockopt",
++ .file = "net/bluetooth/rfcomm/sock.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001140_hash = {
++ .next = NULL,
++ .name = "rfkill_fop_read",
++ .file = "net/rfkill/core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001141_hash = {
++ .next = NULL,
++ .name = "rndis_add_response",
++ .file = "drivers/usb/gadget/rndis.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001142_hash = {
++ .next = NULL,
++ .name = "rng_dev_read",
++ .file = "drivers/char/hw_random/core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001143_hash = {
++ .next = NULL,
++ .name = "roccat_common_receive",
++ .file = "drivers/hid/hid-roccat-common.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001144_hash = {
++ .next = NULL,
++ .name = "roccat_common_send",
++ .file = "drivers/hid/hid-roccat-common.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001145_hash = {
++ .next = NULL,
++ .name = "roccat_read",
++ .file = "drivers/hid/hid-roccat.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001146_hash = {
++ .next = NULL,
++ .name = "rpc_malloc",
++ .file = "include/linux/sunrpc/sched.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _001147_hash = {
++ .next = NULL,
++ .name = "rs_sta_dbgfs_rate_scale_data_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-agn-rs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001148_hash = {
++ .next = NULL,
++ .name = "rs_sta_dbgfs_scale_table_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-agn-rs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001149_hash = {
++ .next = NULL,
++ .name = "rs_sta_dbgfs_stats_table_read",
++ .file = "drivers/net/wireless/iwlwifi/iwl-agn-rs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001150_hash = {
++ .next = NULL,
++ .name = "rt2x00debug_write_bbp",
++ .file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001151_hash = {
++ .next = NULL,
++ .name = "rt2x00debug_write_csr",
++ .file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001152_hash = {
++ .next = &_000808_hash,
++ .name = "rt2x00debug_write_eeprom",
++ .file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001153_hash = {
++ .next = NULL,
++ .name = "rt2x00debug_write_rf",
++ .file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001154_hash = {
++ .next = NULL,
++ .name = "rts51x_read_mem",
++ .file = "drivers/usb/storage/realtek_cr.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001155_hash = {
++ .next = NULL,
++ .name = "rts51x_write_mem",
++ .file = "drivers/usb/storage/realtek_cr.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001156_hash = {
++ .next = NULL,
++ .name = "rts_threshold_read",
++ .file = "net/wireless/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001157_hash = {
++ .next = NULL,
++ .name = "rx_dropped_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001158_hash = {
++ .next = NULL,
++ .name = "rx_fcs_err_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001159_hash = {
++ .next = NULL,
++ .name = "rx_hdr_overflow_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001160_hash = {
++ .next = NULL,
++ .name = "rx_hw_stuck_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001161_hash = {
++ .next = NULL,
++ .name = "rx_out_of_mem_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001162_hash = {
++ .next = NULL,
++ .name = "rx_path_reset_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001163_hash = {
++ .next = NULL,
++ .name = "rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001164_hash = {
++ .next = NULL,
++ .name = "rxpipe_descr_host_int_trig_rx_data_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001165_hash = {
++ .next = NULL,
++ .name = "rxpipe_missed_beacon_host_int_trig_rx_data_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001166_hash = {
++ .next = NULL,
++ .name = "rxpipe_rx_prep_beacon_drop_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001167_hash = {
++ .next = NULL,
++ .name = "rxpipe_tx_xfr_host_int_trig_rx_data_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001168_hash = {
++ .next = NULL,
++ .name = "rx_reset_counter_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001169_hash = {
++ .next = NULL,
++ .name = "rx_xfr_hint_trig_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001170_hash = {
++ .next = NULL,
++ .name = "saa7164_buffer_alloc_user",
++ .file = "drivers/media/video/saa7164/saa7164-buffer.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001171_hash = {
++ .next = NULL,
++ .name = "scsi_execute",
++ .file = "include/scsi/scsi_device.h",
++ .param5 = 1,
++};
++struct size_overflow_hash _001172_hash = {
++ .next = NULL,
++ .name = "scsi_tgt_copy_sense",
++ .file = "drivers/scsi/scsi_tgt_lib.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001173_hash = {
++ .next = NULL,
++ .name = "sctp_auth_create_key",
++ .file = "net/sctp/auth.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001174_hash = {
++ .next = NULL,
++ .name = "sctp_make_abort_user",
++ .file = "include/net/sctp/sm.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001175_hash = {
++ .next = NULL,
++ .name = "sctpprobe_read",
++ .file = "net/sctp/probe.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001176_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt_active_key",
++ .file = "net/sctp/socket.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001177_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt_adaptation_layer",
++ .file = "net/sctp/socket.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001178_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt_associnfo",
++ .file = "net/sctp/socket.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001179_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt_auth_chunk",
++ .file = "net/sctp/socket.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001180_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt_auth_key",
++ .file = "net/sctp/socket.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001181_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt_autoclose",
++ .file = "net/sctp/socket.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001182_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt_context",
++ .file = "net/sctp/socket.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001183_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt_default_send_param",
++ .file = "net/sctp/socket.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001184_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt_delayed_ack",
++ .file = "net/sctp/socket.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001185_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt_del_key",
++ .file = "net/sctp/socket.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001186_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt_events",
++ .file = "net/sctp/socket.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001187_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt_hmac_ident",
++ .file = "net/sctp/socket.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001188_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt_initmsg",
++ .file = "net/sctp/socket.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001189_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt_maxburst",
++ .file = "net/sctp/socket.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001190_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt_maxseg",
++ .file = "net/sctp/socket.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001191_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt_peer_addr_params",
++ .file = "net/sctp/socket.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001192_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt_peer_primary_addr",
++ .file = "net/sctp/socket.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001193_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt_rtoinfo",
++ .file = "net/sctp/socket.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001194_hash = {
++ .next = NULL,
++ .name = "sctp_tsnmap_init",
++ .file = "include/net/sctp/tsnmap.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _001195_hash = {
++ .next = NULL,
++ .name = "send_control_msg",
++ .file = "drivers/media/video/zr364xx.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _001196_hash = {
++ .next = NULL,
++ .name = "set_aoe_iflist",
++ .file = "drivers/block/aoe/aoenet.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001197_hash = {
++ .next = NULL,
++ .name = "set_registers",
++ .file = "drivers/net/usb/pegasus.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001198_hash = {
++ .next = NULL,
++ .name = "setsockopt",
++ .file = "net/caif/caif_socket.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001199_hash = {
++ .next = NULL,
++ .name = "setup_req",
++ .file = "drivers/usb/gadget/inode.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001200_hash = {
++ .next = NULL,
++ .name = "sfq_alloc",
++ .file = "net/sched/sch_sfq.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001201_hash = {
++ .next = NULL,
++ .name = "sgl_map_user_pages",
++ .file = "drivers/scsi/st.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001202_hash = {
++ .next = NULL,
++ .name = "short_retry_limit_read",
++ .file = "net/wireless/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001203_hash = {
++ .next = NULL,
++ .name = "sm501_create_subdev",
++ .file = "drivers/mfd/sm501.c",
++ .param3 = 1,
++ .param4 = 1,
++};
++struct size_overflow_hash _001205_hash = {
++ .next = NULL,
++ .name = "sn9c102_read",
++ .file = "drivers/media/video/sn9c102/sn9c102_core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001206_hash = {
++ .next = NULL,
++ .name = "snd_ac97_pcm_assign",
++ .file = "include/sound/ac97_codec.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _001207_hash = {
++ .next = NULL,
++ .name = "snd_ctl_elem_user_tlv",
++ .file = "sound/core/control.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001208_hash = {
++ .next = NULL,
++ .name = "snd_emu10k1_fx8010_read",
++ .file = "sound/pci/emu10k1/emuproc.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001209_hash = {
++ .next = NULL,
++ .name = "snd_es1938_capture_copy",
++ .file = "sound/pci/es1938.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001210_hash = {
++ .next = NULL,
++ .name = "snd_gus_dram_peek",
++ .file = "sound/isa/gus/gus_dram.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001211_hash = {
++ .next = NULL,
++ .name = "snd_gus_dram_poke",
++ .file = "sound/isa/gus/gus_dram.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001212_hash = {
++ .next = NULL,
++ .name = "snd_hdsp_capture_copy",
++ .file = "sound/pci/rme9652/hdsp.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001213_hash = {
++ .next = NULL,
++ .name = "snd_hdsp_playback_copy",
++ .file = "sound/pci/rme9652/hdsp.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001214_hash = {
++ .next = NULL,
++ .name = "snd_info_entry_write",
++ .file = "sound/core/info.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001215_hash = {
++ .next = NULL,
++ .name = "snd_opl4_mem_proc_read",
++ .file = "sound/drivers/opl4/opl4_proc.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001216_hash = {
++ .next = NULL,
++ .name = "snd_opl4_mem_proc_write",
++ .file = "sound/drivers/opl4/opl4_proc.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001217_hash = {
++ .next = NULL,
++ .name = "snd_pcm_aio_read",
++ .file = "sound/core/pcm_native.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001218_hash = {
++ .next = NULL,
++ .name = "snd_pcm_aio_write",
++ .file = "sound/core/pcm_native.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001219_hash = {
++ .next = NULL,
++ .name = "snd_pcm_alloc_vmalloc_buffer",
++ .file = "drivers/media/video/cx231xx/cx231xx-audio.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001220_hash = {
++ .next = NULL,
++ .name = "snd_pcm_alloc_vmalloc_buffer",
++ .file = "drivers/media/video/cx18/cx18-alsa-pcm.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001221_hash = {
++ .next = NULL,
++ .name = "snd_pcm_alloc_vmalloc_buffer",
++ .file = "drivers/media/video/em28xx/em28xx-audio.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001222_hash = {
++ .next = NULL,
++ .name = "_snd_pcm_lib_alloc_vmalloc_buffer",
++ .file = "include/sound/pcm.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _001223_hash = {
++ .next = NULL,
++ .name = "snd_pcm_oss_read1",
++ .file = "sound/core/oss/pcm_oss.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001224_hash = {
++ .next = NULL,
++ .name = "snd_pcm_oss_write1",
++ .file = "sound/core/oss/pcm_oss.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001225_hash = {
++ .next = NULL,
++ .name = "snd_pcm_oss_write2",
++ .file = "sound/core/oss/pcm_oss.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001226_hash = {
++ .next = NULL,
++ .name = "snd_pcm_plugin_build",
++ .file = "sound/core/oss/pcm_plugin.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001227_hash = {
++ .next = NULL,
++ .name = "snd_rme9652_capture_copy",
++ .file = "sound/pci/rme9652/rme9652.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001228_hash = {
++ .next = NULL,
++ .name = "snd_rme9652_playback_copy",
++ .file = "sound/pci/rme9652/rme9652.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001229_hash = {
++ .next = NULL,
++ .name = "snd_soc_hw_bulk_write_raw",
++ .file = "sound/soc/soc-io.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001230_hash = {
++ .next = NULL,
++ .name = "snd_usb_ctl_msg",
++ .file = "sound/usb/helper.c",
++ .param8 = 1,
++};
++struct size_overflow_hash _001231_hash = {
++ .next = NULL,
++ .name = "_sp2d_alloc",
++ .file = "fs/exofs/ore_raid.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001232_hash = {
++ .next = NULL,
++ .name = "spidev_message",
++ .file = "drivers/spi/spidev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001233_hash = {
++ .next = NULL,
++ .name = "spidev_write",
++ .file = "drivers/spi/spidev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001234_hash = {
++ .next = NULL,
++ .name = "spi_show_regs",
++ .file = "drivers/spi/spi-dw.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001235_hash = {
++ .next = NULL,
++ .name = "srp_alloc_iu",
++ .file = "drivers/infiniband/ulp/srp/ib_srp.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001236_hash = {
++ .next = NULL,
++ .name = "srp_iu_pool_alloc",
++ .file = "drivers/scsi/libsrp.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001237_hash = {
++ .next = NULL,
++ .name = "srp_ring_alloc",
++ .file = "drivers/scsi/libsrp.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001238_hash = {
++ .next = NULL,
++ .name = "sta_agg_status_read",
++ .file = "net/mac80211/debugfs_sta.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001239_hash = {
++ .next = NULL,
++ .name = "sta_agg_status_write",
++ .file = "net/mac80211/debugfs_sta.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001240_hash = {
++ .next = NULL,
++ .name = "sta_connected_time_read",
++ .file = "net/mac80211/debugfs_sta.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001241_hash = {
++ .next = NULL,
++ .name = "sta_flags_read",
++ .file = "net/mac80211/debugfs_sta.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001242_hash = {
++ .next = NULL,
++ .name = "sta_ht_capa_read",
++ .file = "net/mac80211/debugfs_sta.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001243_hash = {
++ .next = NULL,
++ .name = "sta_last_seq_ctrl_read",
++ .file = "net/mac80211/debugfs_sta.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001244_hash = {
++ .next = NULL,
++ .name = "sta_num_ps_buf_frames_read",
++ .file = "net/mac80211/debugfs_sta.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001245_hash = {
++ .next = NULL,
++ .name = "stk_prepare_sio_buffers",
++ .file = "drivers/media/video/stk-webcam.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001246_hash = {
++ .next = NULL,
++ .name = "store_iwmct_log_level",
++ .file = "drivers/misc/iwmc3200top/log.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001247_hash = {
++ .next = NULL,
++ .name = "store_iwmct_log_level_fw",
++ .file = "drivers/misc/iwmc3200top/log.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001248_hash = {
++ .next = NULL,
++ .name = "str_to_user",
++ .file = "drivers/input/evdev.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001249_hash = {
++ .next = NULL,
++ .name = "svc_pool_map_alloc_arrays",
++ .file = "net/sunrpc/svc.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001250_hash = {
++ .next = NULL,
++ .name = "svc_setsockopt",
++ .file = "net/atm/svc.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001251_hash = {
++ .next = NULL,
++ .name = "t4_alloc_mem",
++ .file = "drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001252_hash = {
++ .next = NULL,
++ .name = "tda10048_writeregbulk",
++ .file = "drivers/media/dvb/frontends/tda10048.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001253_hash = {
++ .next = NULL,
++ .name = "__team_options_register",
++ .file = "drivers/net/team/team.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001254_hash = {
++ .next = NULL,
++ .name = "tifm_alloc_adapter",
++ .file = "include/linux/tifm.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _001255_hash = {
++ .next = NULL,
++ .name = "tipc_subseq_alloc",
++ .file = "net/tipc/name_table.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001256_hash = {
++ .next = NULL,
++ .name = "tm6000_read_write_usb",
++ .file = "drivers/media/video/tm6000/tm6000-core.c",
++ .param7 = 1,
++};
++struct size_overflow_hash _001257_hash = {
++ .next = NULL,
++ .name = "tower_write",
++ .file = "drivers/usb/misc/legousbtower.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001258_hash = {
++ .next = NULL,
++ .name = "trusted_instantiate",
++ .file = "security/keys/trusted.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001259_hash = {
++ .next = NULL,
++ .name = "trusted_update",
++ .file = "security/keys/trusted.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001260_hash = {
++ .next = NULL,
++ .name = "TSS_rawhmac",
++ .file = "security/keys/trusted.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001261_hash = {
++ .next = NULL,
++ .name = "tx_internal_desc_overflow_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001262_hash = {
++ .next = NULL,
++ .name = "tx_queue_len_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001263_hash = {
++ .next = NULL,
++ .name = "tx_queue_len_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001264_hash = {
++ .next = NULL,
++ .name = "tx_queue_status_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001265_hash = {
++ .next = NULL,
++ .name = "udf_alloc_i_data",
++ .file = "fs/udf/inode.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001266_hash = {
++ .next = NULL,
++ .name = "udf_sb_alloc_partition_maps",
++ .file = "fs/udf/super.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001267_hash = {
++ .next = NULL,
++ .name = "uea_idma_write",
++ .file = "drivers/usb/atm/ueagle-atm.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001268_hash = {
++ .next = NULL,
++ .name = "uea_request",
++ .file = "drivers/usb/atm/ueagle-atm.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001269_hash = {
++ .next = NULL,
++ .name = "uea_send_modem_cmd",
++ .file = "drivers/usb/atm/ueagle-atm.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001270_hash = {
++ .next = NULL,
++ .name = "uhci_debug_read",
++ .file = "drivers/usb/host/uhci-debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001271_hash = {
++ .next = NULL,
++ .name = "uio_read",
++ .file = "drivers/uio/uio.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001272_hash = {
++ .next = NULL,
++ .name = "uio_write",
++ .file = "drivers/uio/uio.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001273_hash = {
++ .next = NULL,
++ .name = "um_idi_write",
++ .file = "drivers/isdn/hardware/eicon/divasi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001274_hash = {
++ .next = NULL,
++ .name = "unlink_queued",
++ .file = "drivers/usb/misc/usbtest.c",
++ .param3 = 1,
++ .param4 = 1,
++};
++struct size_overflow_hash _001275_hash = {
++ .next = NULL,
++ .name = "us122l_ctl_msg",
++ .file = "sound/usb/usx2y/us122l.c",
++ .param8 = 1,
++};
++struct size_overflow_hash _001276_hash = {
++ .next = NULL,
++ .name = "usbdev_read",
++ .file = "drivers/usb/core/devio.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001277_hash = {
++ .next = NULL,
++ .name = "usblp_read",
++ .file = "drivers/usb/class/usblp.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001278_hash = {
++ .next = NULL,
++ .name = "usblp_write",
++ .file = "drivers/usb/class/usblp.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001279_hash = {
++ .next = NULL,
++ .name = "usbtest_alloc_urb",
++ .file = "drivers/usb/misc/usbtest.c",
++ .param3 = 1,
++ .param5 = 1,
++};
++struct size_overflow_hash _001281_hash = {
++ .next = NULL,
++ .name = "usbtmc_read",
++ .file = "drivers/usb/class/usbtmc.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001282_hash = {
++ .next = NULL,
++ .name = "usbtmc_write",
++ .file = "drivers/usb/class/usbtmc.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001283_hash = {
++ .next = NULL,
++ .name = "usbvision_v4l2_read",
++ .file = "drivers/media/video/usbvision/usbvision-video.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001284_hash = {
++ .next = NULL,
++ .name = "uvc_alloc_buffers",
++ .file = "drivers/usb/gadget/uvc_queue.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001285_hash = {
++ .next = NULL,
++ .name = "uvc_alloc_entity",
++ .file = "drivers/media/video/uvc/uvc_driver.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001286_hash = {
++ .next = NULL,
++ .name = "uvc_debugfs_stats_read",
++ .file = "drivers/media/video/uvc/uvc_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001287_hash = {
++ .next = NULL,
++ .name = "uvc_simplify_fraction",
++ .file = "drivers/media/video/uvc/uvc_driver.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001288_hash = {
++ .next = NULL,
++ .name = "uwb_rc_neh_grok_event",
++ .file = "drivers/uwb/neh.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001289_hash = {
++ .next = NULL,
++ .name = "v4l2_event_subscribe",
++ .file = "include/media/v4l2-event.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001290_hash = {
++ .next = NULL,
++ .name = "v4l_stk_read",
++ .file = "drivers/media/video/stk-webcam.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001291_hash = {
++ .next = NULL,
++ .name = "__vb2_perform_fileio",
++ .file = "drivers/media/video/videobuf2-core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001292_hash = {
++ .next = NULL,
++ .name = "vdma_mem_alloc",
++ .file = "arch/x86/include/asm/floppy.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _001293_hash = {
++ .next = NULL,
++ .name = "vfd_write",
++ .file = "drivers/media/rc/imon.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001294_hash = {
++ .next = NULL,
++ .name = "vhci_get_user",
++ .file = "drivers/bluetooth/hci_vhci.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001295_hash = {
++ .next = NULL,
++ .name = "__vhost_add_used_n",
++ .file = "drivers/vhost/vhost.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001296_hash = {
++ .next = NULL,
++ .name = "__videobuf_alloc_vb",
++ .file = "drivers/media/video/videobuf-dma-sg.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001297_hash = {
++ .next = NULL,
++ .name = "__videobuf_alloc_vb",
++ .file = "drivers/media/video/videobuf-dma-contig.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001298_hash = {
++ .next = NULL,
++ .name = "__videobuf_alloc_vb",
++ .file = "drivers/media/video/videobuf-vmalloc.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001299_hash = {
++ .next = NULL,
++ .name = "__videobuf_copy_to_user",
++ .file = "drivers/media/video/videobuf-core.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001300_hash = {
++ .next = NULL,
++ .name = "video_proc_write",
++ .file = "drivers/platform/x86/toshiba_acpi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001301_hash = {
++ .next = NULL,
++ .name = "vifs_state_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001302_hash = {
++ .next = NULL,
++ .name = "vlsi_alloc_ring",
++ .file = "drivers/net/irda/vlsi_ir.c",
++ .param3 = 1,
++ .param4 = 1,
++};
++struct size_overflow_hash _001304_hash = {
++ .next = NULL,
++ .name = "vol_cdev_direct_write",
++ .file = "drivers/mtd/ubi/cdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001305_hash = {
++ .next = NULL,
++ .name = "vol_cdev_read",
++ .file = "drivers/mtd/ubi/cdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001306_hash = {
++ .next = NULL,
++ .name = "vring_add_indirect",
++ .file = "drivers/virtio/virtio_ring.c",
++ .param3 = 1,
++ .param4 = 1,
++};
++struct size_overflow_hash _001308_hash = {
++ .next = NULL,
++ .name = "vring_new_virtqueue",
++ .file = "include/linux/virtio_ring.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _001309_hash = {
++ .next = NULL,
++ .name = "__vxge_hw_channel_allocate",
++ .file = "drivers/net/ethernet/neterion/vxge/vxge-config.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001310_hash = {
++ .next = NULL,
++ .name = "vxge_os_dma_malloc",
++ .file = "drivers/net/ethernet/neterion/vxge/vxge-config.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _001311_hash = {
++ .next = NULL,
++ .name = "vxge_os_dma_malloc_async",
++ .file = "drivers/net/ethernet/neterion/vxge/vxge-config.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001312_hash = {
++ .next = NULL,
++ .name = "w9966_v4l_read",
++ .file = "drivers/media/video/w9966.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001313_hash = {
++ .next = NULL,
++ .name = "waiters_read",
++ .file = "fs/dlm/debug_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001314_hash = {
++ .next = NULL,
++ .name = "wa_nep_queue",
++ .file = "drivers/usb/wusbcore/wa-nep.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001315_hash = {
++ .next = NULL,
++ .name = "__wa_xfer_setup_segs",
++ .file = "drivers/usb/wusbcore/wa-xfer.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001316_hash = {
++ .next = NULL,
++ .name = "wdm_read",
++ .file = "drivers/usb/class/cdc-wdm.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001317_hash = {
++ .next = NULL,
++ .name = "wdm_write",
++ .file = "drivers/usb/class/cdc-wdm.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001318_hash = {
++ .next = NULL,
++ .name = "wep_addr_key_count_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001319_hash = {
++ .next = &_000480_hash,
++ .name = "wep_decrypt_fail_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001320_hash = {
++ .next = NULL,
++ .name = "wep_default_key_count_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001321_hash = {
++ .next = NULL,
++ .name = "wep_interrupt_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001322_hash = {
++ .next = NULL,
++ .name = "wep_key_not_found_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001323_hash = {
++ .next = NULL,
++ .name = "wep_packets_read",
++ .file = "drivers/net/wireless/wl1251/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001324_hash = {
++ .next = NULL,
++ .name = "wiimote_hid_send",
++ .file = "drivers/hid/hid-wiimote-core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001325_hash = {
++ .next = NULL,
++ .name = "wl1271_format_buffer",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001326_hash = {
++ .next = NULL,
++ .name = "wl1273_fm_fops_write",
++ .file = "drivers/media/radio/radio-wl1273.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001327_hash = {
++ .next = NULL,
++ .name = "wlc_phy_loadsampletable_nphy",
++ .file = "drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001328_hash = {
++ .next = NULL,
++ .name = "wpan_phy_alloc",
++ .file = "include/net/wpan-phy.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _001329_hash = {
++ .next = NULL,
++ .name = "write_flush",
++ .file = "net/sunrpc/cache.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001330_hash = {
++ .next = NULL,
++ .name = "write_rio",
++ .file = "drivers/usb/misc/rio500.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001331_hash = {
++ .next = NULL,
++ .name = "wusb_ccm_mac",
++ .file = "drivers/usb/wusbcore/crypto.c",
++ .param7 = 1,
++};
++struct size_overflow_hash _001332_hash = {
++ .next = NULL,
++ .name = "xfs_attrmulti_attr_set",
++ .file = "fs/xfs/xfs_ioctl.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001333_hash = {
++ .next = NULL,
++ .name = "xfs_handle_to_dentry",
++ .file = "fs/xfs/xfs_ioctl.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001334_hash = {
++ .next = NULL,
++ .name = "xhci_alloc_stream_info",
++ .file = "drivers/usb/host/xhci-mem.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001335_hash = {
++ .next = NULL,
++ .name = "xprt_alloc",
++ .file = "include/linux/sunrpc/xprt.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _001336_hash = {
++ .next = NULL,
++ .name = "xprt_rdma_allocate",
++ .file = "net/sunrpc/xprtrdma/transport.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001337_hash = {
++ .next = NULL,
++ .name = "xt_alloc_table_info",
++ .file = "include/linux/netfilter/x_tables.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _001338_hash = {
++ .next = NULL,
++ .name = "zd_usb_iowrite16v_async",
++ .file = "drivers/net/wireless/zd1211rw/zd_usb.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001339_hash = {
++ .next = NULL,
++ .name = "zd_usb_read_fw",
++ .file = "drivers/net/wireless/zd1211rw/zd_usb.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001340_hash = {
++ .next = NULL,
++ .name = "zoran_write",
++ .file = "drivers/media/video/zoran/zoran_procfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001341_hash = {
++ .next = NULL,
++ .name = "ad7879_spi_multi_read",
++ .file = "drivers/input/touchscreen/ad7879-spi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001342_hash = {
++ .next = NULL,
++ .name = "aes_decrypt_fail_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001343_hash = {
++ .next = NULL,
++ .name = "aes_decrypt_interrupt_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001344_hash = {
++ .next = NULL,
++ .name = "aes_decrypt_packets_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001345_hash = {
++ .next = NULL,
++ .name = "aes_encrypt_fail_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001346_hash = {
++ .next = NULL,
++ .name = "aes_encrypt_interrupt_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001347_hash = {
++ .next = NULL,
++ .name = "aes_encrypt_packets_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001348_hash = {
++ .next = NULL,
++ .name = "afs_cell_create",
++ .file = "fs/afs/cell.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001349_hash = {
++ .next = NULL,
++ .name = "agp_create_user_memory",
++ .file = "drivers/char/agp/generic.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001350_hash = {
++ .next = NULL,
++ .name = "alg_setsockopt",
++ .file = "crypto/af_alg.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001351_hash = {
++ .next = NULL,
++ .name = "alloc_targets",
++ .file = "drivers/md/dm-table.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001352_hash = {
++ .next = NULL,
++ .name = "aoechr_write",
++ .file = "drivers/block/aoe/aoechr.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001353_hash = {
++ .next = NULL,
++ .name = "ath6kl_cfg80211_connect_event",
++ .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
++ .param7 = 1,
++ .param9 = 1,
++ .param8 = 1,
++};
++struct size_overflow_hash _001356_hash = {
++ .next = NULL,
++ .name = "ath6kl_mgmt_tx",
++ .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
++ .param9 = 1,
++};
++struct size_overflow_hash _001357_hash = {
++ .next = NULL,
++ .name = "atomic_read_file",
++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofilefs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001358_hash = {
++ .next = NULL,
++ .name = "beacon_interval_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001359_hash = {
++ .next = NULL,
++ .name = "bm_entry_write",
++ .file = "fs/binfmt_misc.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001360_hash = {
++ .next = NULL,
++ .name = "bm_init",
++ .file = "lib/ts_bm.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001361_hash = {
++ .next = NULL,
++ .name = "bm_register_write",
++ .file = "fs/binfmt_misc.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001362_hash = {
++ .next = NULL,
++ .name = "bm_status_write",
++ .file = "fs/binfmt_misc.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001363_hash = {
++ .next = NULL,
++ .name = "brn_proc_write",
++ .file = "drivers/platform/x86/asus_acpi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001364_hash = {
++ .next = NULL,
++ .name = "btrfs_map_block",
++ .file = "fs/btrfs/volumes.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001365_hash = {
++ .next = NULL,
++ .name = "cache_downcall",
++ .file = "net/sunrpc/cache.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001366_hash = {
++ .next = NULL,
++ .name = "cache_slow_downcall",
++ .file = "net/sunrpc/cache.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001367_hash = {
++ .next = NULL,
++ .name = "ceph_dns_resolve_name",
++ .file = "net/ceph/messenger.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001368_hash = {
++ .next = NULL,
++ .name = "cfg80211_roamed",
++ .file = "include/net/cfg80211.h",
++ .param5 = 1,
++ .param7 = 1,
++};
++struct size_overflow_hash _001370_hash = {
++ .next = NULL,
++ .name = "cifs_readv_from_socket",
++ .file = "fs/cifs/connect.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001371_hash = {
++ .next = NULL,
++ .name = "configfs_write_file",
++ .file = "fs/configfs/file.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001372_hash = {
++ .next = &_001370_hash,
++ .name = "cpu_type_read",
++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001373_hash = {
++ .next = NULL,
++ .name = "cx18_copy_mdl_to_user",
++ .file = "drivers/media/video/cx18/cx18-fileops.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001374_hash = {
++ .next = NULL,
++ .name = "cxgbi_ddp_reserve",
++ .file = "drivers/scsi/cxgbi/libcxgbi.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001375_hash = {
++ .next = NULL,
++ .name = "cxgbi_device_portmap_create",
++ .file = "drivers/scsi/cxgbi/libcxgbi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001376_hash = {
++ .next = NULL,
++ .name = "datablob_hmac_append",
++ .file = "security/keys/encrypted-keys/encrypted.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001377_hash = {
++ .next = NULL,
++ .name = "datablob_hmac_verify",
++ .file = "security/keys/encrypted-keys/encrypted.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001378_hash = {
++ .next = NULL,
++ .name = "dataflash_read_fact_otp",
++ .file = "drivers/mtd/devices/mtd_dataflash.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001379_hash = {
++ .next = NULL,
++ .name = "dataflash_read_user_otp",
++ .file = "drivers/mtd/devices/mtd_dataflash.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001380_hash = {
++ .next = NULL,
++ .name = "depth_read",
++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001381_hash = {
++ .next = NULL,
++ .name = "depth_write",
++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001382_hash = {
++ .next = NULL,
++ .name = "dev_irnet_write",
++ .file = "net/irda/irnet/irnet_ppp.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001383_hash = {
++ .next = NULL,
++ .name = "dev_write",
++ .file = "sound/oss/msnd_pinnacle.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001384_hash = {
++ .next = NULL,
++ .name = "dfs_file_read",
++ .file = "fs/ubifs/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001385_hash = {
++ .next = NULL,
++ .name = "dfs_file_write",
++ .file = "fs/ubifs/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001386_hash = {
++ .next = NULL,
++ .name = "dfs_global_file_read",
++ .file = "fs/ubifs/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001387_hash = {
++ .next = NULL,
++ .name = "dfs_global_file_write",
++ .file = "fs/ubifs/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001388_hash = {
++ .next = NULL,
++ .name = "disconnect",
++ .file = "net/bluetooth/mgmt.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001389_hash = {
++ .next = NULL,
++ .name = "disp_proc_write",
++ .file = "drivers/platform/x86/asus_acpi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001390_hash = {
++ .next = NULL,
++ .name = "dma_rx_errors_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001391_hash = {
++ .next = NULL,
++ .name = "dma_rx_requested_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001392_hash = {
++ .next = NULL,
++ .name = "dma_tx_errors_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001393_hash = {
++ .next = NULL,
++ .name = "dma_tx_requested_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001394_hash = {
++ .next = NULL,
++ .name = "dm_exception_table_init",
++ .file = "drivers/md/dm-snap.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001395_hash = {
++ .next = NULL,
++ .name = "do_dccp_setsockopt",
++ .file = "net/dccp/proto.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001396_hash = {
++ .next = NULL,
++ .name = "dtim_interval_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001397_hash = {
++ .next = NULL,
++ .name = "dvb_audio_write",
++ .file = "drivers/media/dvb/ttpci/av7110_av.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001398_hash = {
++ .next = NULL,
++ .name = "dvb_demux_do_ioctl",
++ .file = "drivers/media/dvb/dvb-core/dmxdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001399_hash = {
++ .next = NULL,
++ .name = "dvb_dvr_do_ioctl",
++ .file = "drivers/media/dvb/dvb-core/dmxdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001400_hash = {
++ .next = NULL,
++ .name = "dvb_video_write",
++ .file = "drivers/media/dvb/ttpci/av7110_av.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001401_hash = {
++ .next = NULL,
++ .name = "ecryptfs_decode_and_decrypt_filename",
++ .file = "fs/ecryptfs/crypto.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001402_hash = {
++ .next = NULL,
++ .name = "ecryptfs_encrypt_and_encode_filename",
++ .file = "fs/ecryptfs/crypto.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _001403_hash = {
++ .next = NULL,
++ .name = "enable_read",
++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001404_hash = {
++ .next = NULL,
++ .name = "enable_write",
++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001405_hash = {
++ .next = NULL,
++ .name = "event_calibration_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001406_hash = {
++ .next = NULL,
++ .name = "event_heart_beat_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001407_hash = {
++ .next = NULL,
++ .name = "event_oom_late_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001408_hash = {
++ .next = NULL,
++ .name = "event_phy_transmit_error_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001409_hash = {
++ .next = NULL,
++ .name = "event_rx_mem_empty_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001410_hash = {
++ .next = NULL,
++ .name = "event_rx_mismatch_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001411_hash = {
++ .next = NULL,
++ .name = "event_rx_pool_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001412_hash = {
++ .next = NULL,
++ .name = "event_tx_stuck_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001413_hash = {
++ .next = NULL,
++ .name = "excessive_retries_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001414_hash = {
++ .next = NULL,
++ .name = "exofs_read_kern",
++ .file = "fs/exofs/super.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _001415_hash = {
++ .next = NULL,
++ .name = "fallback_on_nodma_alloc",
++ .file = "drivers/block/floppy.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001416_hash = {
++ .next = NULL,
++ .name = "__feat_register_sp",
++ .file = "net/dccp/feat.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _001417_hash = {
++ .next = NULL,
++ .name = "ffs_ep0_write",
++ .file = "drivers/usb/gadget/f_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001418_hash = {
++ .next = NULL,
++ .name = "ffs_epfile_read",
++ .file = "drivers/usb/gadget/f_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001419_hash = {
++ .next = NULL,
++ .name = "ffs_epfile_write",
++ .file = "drivers/usb/gadget/f_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001420_hash = {
++ .next = NULL,
++ .name = "frequency_read",
++ .file = "net/mac80211/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001421_hash = {
++ .next = NULL,
++ .name = "fsm_init",
++ .file = "lib/ts_fsm.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001422_hash = {
++ .next = NULL,
++ .name = "garmin_read_process",
++ .file = "drivers/usb/serial/garmin_gps.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001423_hash = {
++ .next = NULL,
++ .name = "garp_request_join",
++ .file = "include/net/garp.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _001424_hash = {
++ .next = NULL,
++ .name = "hcd_alloc_coherent",
++ .file = "drivers/usb/core/hcd.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001425_hash = {
++ .next = NULL,
++ .name = "hci_sock_sendmsg",
++ .file = "net/bluetooth/hci_sock.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001426_hash = {
++ .next = NULL,
++ .name = "__hwahc_op_set_gtk",
++ .file = "drivers/usb/host/hwa-hc.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001427_hash = {
++ .next = NULL,
++ .name = "__hwahc_op_set_ptk",
++ .file = "drivers/usb/host/hwa-hc.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001428_hash = {
++ .next = NULL,
++ .name = "ib_send_cm_drep",
++ .file = "include/rdma/ib_cm.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001429_hash = {
++ .next = NULL,
++ .name = "ib_send_cm_mra",
++ .file = "include/rdma/ib_cm.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _001430_hash = {
++ .next = NULL,
++ .name = "ib_send_cm_rtu",
++ .file = "include/rdma/ib_cm.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001431_hash = {
++ .next = NULL,
++ .name = "ieee80211_bss_info_update",
++ .file = "net/mac80211/scan.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001432_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_aid",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001433_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_auto_open_plinks",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001434_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_ave_beacon",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001435_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_bssid",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001436_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_channel_type",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001437_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_dot11MeshConfirmTimeout",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001438_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_dot11MeshGateAnnouncementProtocol",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001439_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_dot11MeshHoldingTimeout",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001440_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_dot11MeshHWMPactivePathTimeout",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001441_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_dot11MeshHWMPmaxPREQretries",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001442_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001443_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_dot11MeshHWMPperrMinInterval",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001444_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_dot11MeshHWMPpreqMinInterval",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001445_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_dot11MeshHWMPRannInterval",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001446_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_dot11MeshHWMPRootMode",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001447_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_dot11MeshMaxPeerLinks",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001448_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_dot11MeshMaxRetries",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001449_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_dot11MeshRetryTimeout",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001450_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_dot11MeshTTL",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001451_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_dropped_frames_congestion",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001452_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_dropped_frames_no_route",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001453_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_dropped_frames_ttl",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001454_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_drop_unencrypted",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001455_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_dtim_count",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001456_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_element_ttl",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001457_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_estab_plinks",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001458_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_flags",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001459_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_fwded_frames",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001460_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_fwded_mcast",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001461_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_fwded_unicast",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001462_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_last_beacon",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001463_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_min_discovery_timeout",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001464_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_num_buffered_multicast",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001465_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_num_sta_authorized",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001466_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_num_sta_ps",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001467_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_path_refresh_time",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001468_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_peer",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001469_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_rc_rateidx_mask_2ghz",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001470_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_rc_rateidx_mask_5ghz",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001471_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_smps",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001472_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_state",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001473_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_tkip_mic_test",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001474_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_tsf",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001475_hash = {
++ .next = NULL,
++ .name = "ieee80211_send_probe_req",
++ .file = "net/mac80211/util.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _001476_hash = {
++ .next = NULL,
++ .name = "init_map_ipmac",
++ .file = "net/netfilter/ipset/ip_set_bitmap_ipmac.c",
++ .param3 = 1,
++ .param4 = 1,
++};
++struct size_overflow_hash _001478_hash = {
++ .next = NULL,
++ .name = "init_tid_tabs",
++ .file = "drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c",
++ .param2 = 1,
++ .param4 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _001481_hash = {
++ .next = NULL,
++ .name = "isr_cmd_cmplt_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001482_hash = {
++ .next = NULL,
++ .name = "isr_commands_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001483_hash = {
++ .next = NULL,
++ .name = "isr_decrypt_done_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001484_hash = {
++ .next = NULL,
++ .name = "isr_dma0_done_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001485_hash = {
++ .next = NULL,
++ .name = "isr_dma1_done_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001486_hash = {
++ .next = NULL,
++ .name = "isr_fiqs_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001487_hash = {
++ .next = NULL,
++ .name = "isr_host_acknowledges_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001488_hash = {
++ .next = &_001393_hash,
++ .name = "isr_hw_pm_mode_changes_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001489_hash = {
++ .next = &_001205_hash,
++ .name = "isr_irqs_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001490_hash = {
++ .next = NULL,
++ .name = "isr_low_rssi_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001491_hash = {
++ .next = NULL,
++ .name = "isr_pci_pm_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001492_hash = {
++ .next = NULL,
++ .name = "isr_rx_headers_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001493_hash = {
++ .next = NULL,
++ .name = "isr_rx_mem_overflow_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001494_hash = {
++ .next = NULL,
++ .name = "isr_rx_procs_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001495_hash = {
++ .next = NULL,
++ .name = "isr_rx_rdys_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001496_hash = {
++ .next = NULL,
++ .name = "isr_tx_exch_complete_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001497_hash = {
++ .next = NULL,
++ .name = "isr_tx_procs_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001498_hash = {
++ .next = NULL,
++ .name = "isr_wakeups_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001499_hash = {
++ .next = NULL,
++ .name = "ivtv_read",
++ .file = "drivers/media/video/ivtv/ivtv-fileops.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001500_hash = {
++ .next = NULL,
++ .name = "kmem_realloc",
++ .file = "fs/xfs/kmem.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001501_hash = {
++ .next = NULL,
++ .name = "kmem_zalloc",
++ .file = "fs/xfs/kmem.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001502_hash = {
++ .next = NULL,
++ .name = "kmem_zalloc_greedy",
++ .file = "fs/xfs/kmem.c",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _001504_hash = {
++ .next = NULL,
++ .name = "kmp_init",
++ .file = "lib/ts_kmp.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001505_hash = {
++ .next = NULL,
++ .name = "lcd_proc_write",
++ .file = "drivers/platform/x86/asus_acpi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001506_hash = {
++ .next = NULL,
++ .name = "ledd_proc_write",
++ .file = "drivers/platform/x86/asus_acpi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001507_hash = {
++ .next = NULL,
++ .name = "mic_calc_failure_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001508_hash = {
++ .next = NULL,
++ .name = "mic_rx_pkts_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001509_hash = {
++ .next = NULL,
++ .name = "nfs4_realloc_slot_table",
++ .file = "fs/nfs/nfs4proc.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001510_hash = {
++ .next = NULL,
++ .name = "nfs_idmap_request_key",
++ .file = "fs/nfs/idmap.c",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _001511_hash = {
++ .next = NULL,
++ .name = "nsm_get_handle",
++ .file = "include/linux/lockd/lockd.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _001512_hash = {
++ .next = NULL,
++ .name = "ntfs_copy_from_user_iovec",
++ .file = "fs/ntfs/file.c",
++ .param3 = 1,
++ .param6 = 1,
++};
++struct size_overflow_hash _001514_hash = {
++ .next = NULL,
++ .name = "ntfs_file_buffered_write",
++ .file = "fs/ntfs/file.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _001515_hash = {
++ .next = NULL,
++ .name = "ntfs_malloc_nofs",
++ .file = "fs/ntfs/malloc.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _001516_hash = {
++ .next = NULL,
++ .name = "ntfs_malloc_nofs_nofail",
++ .file = "fs/ntfs/malloc.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _001517_hash = {
++ .next = NULL,
++ .name = "ocfs2_control_message",
++ .file = "fs/ocfs2/stack_user.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001518_hash = {
++ .next = NULL,
++ .name = "opera1_usb_i2c_msgxfer",
++ .file = "drivers/media/dvb/dvb-usb/opera1.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001519_hash = {
++ .next = NULL,
++ .name = "orinoco_add_extscan_result",
++ .file = "drivers/net/wireless/orinoco/scan.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001520_hash = {
++ .next = NULL,
++ .name = "osd_req_list_collection_objects",
++ .file = "include/scsi/osd_initiator.h",
++ .param5 = 1,
++};
++struct size_overflow_hash _001521_hash = {
++ .next = NULL,
++ .name = "osd_req_list_partition_objects",
++ .file = "include/scsi/osd_initiator.h",
++ .param5 = 1,
++};
++struct size_overflow_hash _001522_hash = {
++ .next = NULL,
++ .name = "pair_device",
++ .file = "net/bluetooth/mgmt.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001523_hash = {
++ .next = NULL,
++ .name = "pccard_store_cis",
++ .file = "drivers/pcmcia/cistpl.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _001524_hash = {
++ .next = NULL,
++ .name = "pin_code_reply",
++ .file = "net/bluetooth/mgmt.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001525_hash = {
++ .next = NULL,
++ .name = "play_iframe",
++ .file = "drivers/media/dvb/ttpci/av7110_av.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001526_hash = {
++ .next = NULL,
++ .name = "pointer_size_read",
++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001527_hash = {
++ .next = NULL,
++ .name = "power_read",
++ .file = "net/mac80211/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001528_hash = {
++ .next = NULL,
++ .name = "ps_pspoll_max_apturn_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001529_hash = {
++ .next = NULL,
++ .name = "ps_pspoll_timeouts_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001530_hash = {
++ .next = NULL,
++ .name = "ps_pspoll_utilization_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001531_hash = {
++ .next = NULL,
++ .name = "ps_upsd_max_apturn_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001532_hash = {
++ .next = NULL,
++ .name = "ps_upsd_max_sptime_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001533_hash = {
++ .next = NULL,
++ .name = "ps_upsd_timeouts_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001534_hash = {
++ .next = NULL,
++ .name = "ps_upsd_utilization_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001535_hash = {
++ .next = NULL,
++ .name = "pwr_disable_ps_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001536_hash = {
++ .next = NULL,
++ .name = "pwr_elp_enter_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001537_hash = {
++ .next = NULL,
++ .name = "pwr_enable_ps_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001538_hash = {
++ .next = NULL,
++ .name = "pwr_fix_tsf_ps_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001539_hash = {
++ .next = NULL,
++ .name = "pwr_missing_bcns_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001540_hash = {
++ .next = NULL,
++ .name = "pwr_power_save_off_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001541_hash = {
++ .next = NULL,
++ .name = "pwr_ps_enter_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001542_hash = {
++ .next = NULL,
++ .name = "pwr_rcvd_awake_beacons_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001543_hash = {
++ .next = NULL,
++ .name = "pwr_rcvd_beacons_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001544_hash = {
++ .next = NULL,
++ .name = "pwr_tx_without_ps_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001545_hash = {
++ .next = NULL,
++ .name = "pwr_tx_with_ps_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001546_hash = {
++ .next = NULL,
++ .name = "pwr_wake_on_host_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001547_hash = {
++ .next = NULL,
++ .name = "pwr_wake_on_timer_exp_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001548_hash = {
++ .next = NULL,
++ .name = "qcam_read",
++ .file = "drivers/media/video/c-qcam.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001549_hash = {
++ .next = NULL,
++ .name = "retry_count_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001550_hash = {
++ .next = NULL,
++ .name = "rx_dropped_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001551_hash = {
++ .next = NULL,
++ .name = "rx_fcs_err_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001552_hash = {
++ .next = NULL,
++ .name = "rx_hdr_overflow_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001553_hash = {
++ .next = NULL,
++ .name = "rx_hw_stuck_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001554_hash = {
++ .next = NULL,
++ .name = "rx_out_of_mem_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001555_hash = {
++ .next = NULL,
++ .name = "rx_path_reset_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001556_hash = {
++ .next = NULL,
++ .name = "rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001557_hash = {
++ .next = NULL,
++ .name = "rxpipe_descr_host_int_trig_rx_data_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001558_hash = {
++ .next = NULL,
++ .name = "rxpipe_missed_beacon_host_int_trig_rx_data_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001559_hash = {
++ .next = NULL,
++ .name = "rxpipe_rx_prep_beacon_drop_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001560_hash = {
++ .next = NULL,
++ .name = "rxpipe_tx_xfr_host_int_trig_rx_data_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001561_hash = {
++ .next = NULL,
++ .name = "rx_reset_counter_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001562_hash = {
++ .next = NULL,
++ .name = "rx_streaming_always_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001563_hash = {
++ .next = NULL,
++ .name = "rx_streaming_interval_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001564_hash = {
++ .next = NULL,
++ .name = "rx_xfr_hint_trig_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001565_hash = {
++ .next = NULL,
++ .name = "scsi_execute_req",
++ .file = "include/scsi/scsi_device.h",
++ .param5 = 1,
++};
++struct size_overflow_hash _001566_hash = {
++ .next = NULL,
++ .name = "scsi_tgt_kspace_exec",
++ .file = "drivers/scsi/scsi_tgt_lib.c",
++ .param8 = 1,
++};
++struct size_overflow_hash _001567_hash = {
++ .next = NULL,
++ .name = "sctp_sendmsg",
++ .file = "net/sctp/socket.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001568_hash = {
++ .next = NULL,
++ .name = "sctp_setsockopt",
++ .file = "net/sctp/socket.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001569_hash = {
++ .next = NULL,
++ .name = "set_connectable",
++ .file = "net/bluetooth/mgmt.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001570_hash = {
++ .next = NULL,
++ .name = "set_discoverable",
++ .file = "net/bluetooth/mgmt.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001571_hash = {
++ .next = NULL,
++ .name = "set_local_name",
++ .file = "net/bluetooth/mgmt.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001572_hash = {
++ .next = NULL,
++ .name = "set_powered",
++ .file = "net/bluetooth/mgmt.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001573_hash = {
++ .next = NULL,
++ .name = "simple_alloc_urb",
++ .file = "drivers/usb/misc/usbtest.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001574_hash = {
++ .next = NULL,
++ .name = "sm_checker_extend",
++ .file = "drivers/md/persistent-data/dm-space-map-checker.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001575_hash = {
++ .next = NULL,
++ .name = "snd_cs4281_BA0_read",
++ .file = "sound/pci/cs4281.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001576_hash = {
++ .next = NULL,
++ .name = "snd_cs4281_BA1_read",
++ .file = "sound/pci/cs4281.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001577_hash = {
++ .next = NULL,
++ .name = "snd_cs46xx_io_read",
++ .file = "sound/pci/cs46xx/cs46xx_lib.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001578_hash = {
++ .next = NULL,
++ .name = "snd_gus_dram_read",
++ .file = "include/sound/gus.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _001579_hash = {
++ .next = NULL,
++ .name = "snd_gus_dram_write",
++ .file = "include/sound/gus.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _001580_hash = {
++ .next = NULL,
++ .name = "snd_mem_proc_write",
++ .file = "sound/core/memalloc.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001581_hash = {
++ .next = NULL,
++ .name = "snd_pcm_oss_read",
++ .file = "sound/core/oss/pcm_oss.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001582_hash = {
++ .next = NULL,
++ .name = "snd_pcm_oss_sync1",
++ .file = "sound/core/oss/pcm_oss.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001583_hash = {
++ .next = NULL,
++ .name = "snd_pcm_oss_write",
++ .file = "sound/core/oss/pcm_oss.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001584_hash = {
++ .next = NULL,
++ .name = "snd_rme32_capture_copy",
++ .file = "sound/pci/rme32.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001585_hash = {
++ .next = NULL,
++ .name = "snd_rme32_playback_copy",
++ .file = "sound/pci/rme32.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001586_hash = {
++ .next = NULL,
++ .name = "snd_rme96_capture_copy",
++ .file = "sound/pci/rme96.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001587_hash = {
++ .next = NULL,
++ .name = "snd_rme96_playback_copy",
++ .file = "sound/pci/rme96.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001588_hash = {
++ .next = NULL,
++ .name = "spi_execute",
++ .file = "drivers/scsi/scsi_transport_spi.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001589_hash = {
++ .next = NULL,
++ .name = "srp_target_alloc",
++ .file = "include/scsi/libsrp.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001590_hash = {
++ .next = NULL,
++ .name = "stats_dot11ACKFailureCount_read",
++ .file = "net/mac80211/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001591_hash = {
++ .next = NULL,
++ .name = "stats_dot11FCSErrorCount_read",
++ .file = "net/mac80211/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001592_hash = {
++ .next = NULL,
++ .name = "stats_dot11RTSFailureCount_read",
++ .file = "net/mac80211/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001593_hash = {
++ .next = NULL,
++ .name = "stats_dot11RTSSuccessCount_read",
++ .file = "net/mac80211/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001594_hash = {
++ .next = NULL,
++ .name = "stk_allocate_buffers",
++ .file = "drivers/media/video/stk-webcam.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001595_hash = {
++ .next = NULL,
++ .name = "submit_inquiry",
++ .file = "drivers/scsi/device_handler/scsi_dh_rdac.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001596_hash = {
++ .next = NULL,
++ .name = "team_options_register",
++ .file = "include/linux/if_team.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001597_hash = {
++ .next = NULL,
++ .name = "test_unaligned_bulk",
++ .file = "drivers/usb/misc/usbtest.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001598_hash = {
++ .next = NULL,
++ .name = "timeout_read",
++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001599_hash = {
++ .next = NULL,
++ .name = "timeout_write",
++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofile_files.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001600_hash = {
++ .next = NULL,
++ .name = "tipc_link_send_sections_fast",
++ .file = "net/tipc/link.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001601_hash = {
++ .next = NULL,
++ .name = "total_ps_buffered_read",
++ .file = "net/mac80211/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001602_hash = {
++ .next = NULL,
++ .name = "ts_read",
++ .file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001603_hash = {
++ .next = NULL,
++ .name = "TSS_authhmac",
++ .file = "security/keys/trusted.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001604_hash = {
++ .next = NULL,
++ .name = "TSS_checkhmac1",
++ .file = "security/keys/trusted.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001605_hash = {
++ .next = NULL,
++ .name = "TSS_checkhmac2",
++ .file = "security/keys/trusted.c",
++ .param5 = 1,
++ .param7 = 1,
++};
++struct size_overflow_hash _001607_hash = {
++ .next = NULL,
++ .name = "ts_write",
++ .file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001608_hash = {
++ .next = NULL,
++ .name = "tx_internal_desc_overflow_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001609_hash = {
++ .next = NULL,
++ .name = "uapsd_max_sp_len_read",
++ .file = "net/mac80211/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001610_hash = {
++ .next = NULL,
++ .name = "uapsd_queues_read",
++ .file = "net/mac80211/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001611_hash = {
++ .next = NULL,
++ .name = "ulong_read_file",
++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofilefs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001612_hash = {
++ .next = NULL,
++ .name = "ulong_write_file",
++ .file = "arch/x86/oprofile/../../../drivers/oprofile/oprofilefs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001613_hash = {
++ .next = NULL,
++ .name = "usb_alloc_coherent",
++ .file = "include/linux/usb.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _001614_hash = {
++ .next = NULL,
++ .name = "user_power_read",
++ .file = "net/mac80211/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001615_hash = {
++ .next = NULL,
++ .name = "vb2_read",
++ .file = "include/media/videobuf2-core.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001616_hash = {
++ .next = NULL,
++ .name = "vb2_write",
++ .file = "include/media/videobuf2-core.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001617_hash = {
++ .next = NULL,
++ .name = "vhost_add_used_n",
++ .file = "drivers/vhost/vhost.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001618_hash = {
++ .next = NULL,
++ .name = "virtqueue_add_buf",
++ .file = "include/linux/virtio.h",
++ .param3 = 1,
++ .param4 = 1,
++};
++struct size_overflow_hash _001620_hash = {
++ .next = NULL,
++ .name = "vmbus_establish_gpadl",
++ .file = "include/linux/hyperv.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001621_hash = {
++ .next = NULL,
++ .name = "wep_addr_key_count_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001622_hash = {
++ .next = NULL,
++ .name = "wep_decrypt_fail_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001623_hash = {
++ .next = NULL,
++ .name = "wep_default_key_count_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001624_hash = {
++ .next = NULL,
++ .name = "wep_interrupt_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001625_hash = {
++ .next = NULL,
++ .name = "wep_iv_read",
++ .file = "net/mac80211/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001626_hash = {
++ .next = NULL,
++ .name = "wep_key_not_found_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001627_hash = {
++ .next = NULL,
++ .name = "wep_packets_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001628_hash = {
++ .next = NULL,
++ .name = "write_led",
++ .file = "drivers/platform/x86/asus_acpi.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001629_hash = {
++ .next = NULL,
++ .name = "wusb_prf",
++ .file = "include/linux/usb/wusb.h",
++ .param7 = 1,
++};
++struct size_overflow_hash _001630_hash = {
++ .next = NULL,
++ .name = "zd_usb_iowrite16v",
++ .file = "drivers/net/wireless/zd1211rw/zd_usb.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001631_hash = {
++ .next = NULL,
++ .name = "afs_cell_lookup",
++ .file = "fs/afs/cell.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001632_hash = {
++ .next = NULL,
++ .name = "agp_generic_alloc_user",
++ .file = "drivers/char/agp/generic.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001634_hash = {
++ .next = NULL,
++ .name = "bluetooth_proc_write",
++ .file = "drivers/platform/x86/asus_acpi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001635_hash = {
++ .next = NULL,
++ .name = "cache_write",
++ .file = "net/sunrpc/cache.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001636_hash = {
++ .next = NULL,
++ .name = "ch_do_scsi",
++ .file = "drivers/scsi/ch.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001637_hash = {
++ .next = NULL,
++ .name = "cx18_read",
++ .file = "drivers/media/video/cx18/cx18-fileops.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001638_hash = {
++ .next = NULL,
++ .name = "dccp_feat_register_sp",
++ .file = "net/dccp/feat.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001640_hash = {
++ .next = NULL,
++ .name = "iso_alloc_urb",
++ .file = "drivers/usb/misc/usbtest.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001641_hash = {
++ .next = NULL,
++ .name = "ivtv_read_pos",
++ .file = "drivers/media/video/ivtv/ivtv-fileops.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001642_hash = {
++ .next = NULL,
++ .name = "mcam_v4l_read",
++ .file = "drivers/media/video/marvell-ccic/mcam-core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001643_hash = {
++ .next = NULL,
++ .name = "mled_proc_write",
++ .file = "drivers/platform/x86/asus_acpi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001644_hash = {
++ .next = NULL,
++ .name = "nfs_idmap_lookup_id",
++ .file = "fs/nfs/idmap.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001645_hash = {
++ .next = NULL,
++ .name = "ocfs2_control_write",
++ .file = "fs/ocfs2/stack_user.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001646_hash = {
++ .next = NULL,
++ .name = "osd_req_list_dev_partitions",
++ .file = "include/scsi/osd_initiator.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _001647_hash = {
++ .next = NULL,
++ .name = "osd_req_list_partition_collections",
++ .file = "include/scsi/osd_initiator.h",
++ .param5 = 1,
++};
++struct size_overflow_hash _001648_hash = {
++ .next = NULL,
++ .name = "pwc_video_read",
++ .file = "drivers/media/video/pwc/pwc-if.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001649_hash = {
++ .next = NULL,
++ .name = "scsi_vpd_inquiry",
++ .file = "drivers/scsi/scsi.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001650_hash = {
++ .next = NULL,
++ .name = "snd_gf1_mem_proc_dump",
++ .file = "sound/isa/gus/gus_mem_proc.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001651_hash = {
++ .next = NULL,
++ .name = "spi_dv_device_echo_buffer",
++ .file = "drivers/scsi/scsi_transport_spi.c",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _001653_hash = {
++ .next = NULL,
++ .name = "tled_proc_write",
++ .file = "drivers/platform/x86/asus_acpi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001655_hash = {
++ .next = NULL,
++ .name = "usb_allocate_stream_buffers",
++ .file = "drivers/media/dvb/dvb-usb/usb-urb.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001656_hash = {
++ .next = NULL,
++ .name = "_usb_writeN_sync",
++ .file = "drivers/net/wireless/rtlwifi/usb.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001657_hash = {
++ .next = NULL,
++ .name = "vhost_add_used_and_signal_n",
++ .file = "drivers/vhost/vhost.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001658_hash = {
++ .next = NULL,
++ .name = "vmbus_open",
++ .file = "include/linux/hyperv.h",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _001660_hash = {
++ .next = NULL,
++ .name = "wled_proc_write",
++ .file = "drivers/platform/x86/asus_acpi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001661_hash = {
++ .next = NULL,
++ .name = "wusb_prf_256",
++ .file = "include/linux/usb/wusb.h",
++ .param7 = 1,
++};
++struct size_overflow_hash _001662_hash = {
++ .next = NULL,
++ .name = "wusb_prf_64",
++ .file = "include/linux/usb/wusb.h",
++ .param7 = 1,
++};
++struct size_overflow_hash _001663_hash = {
++ .next = NULL,
++ .name = "agp_allocate_memory",
++ .file = "include/linux/agp_backend.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _001664_hash = {
++ .next = NULL,
++ .name = "cx18_read_pos",
++ .file = "drivers/media/video/cx18/cx18-fileops.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001665_hash = {
++ .next = NULL,
++ .name = "nfs_map_group_to_gid",
++ .file = "include/linux/nfs_idmap.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001666_hash = {
++ .next = NULL,
++ .name = "nfs_map_name_to_uid",
++ .file = "include/linux/nfs_idmap.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001667_hash = {
++ .next = NULL,
++ .name = "test_iso_queue",
++ .file = "drivers/usb/misc/usbtest.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001668_hash = {
++ .next = NULL,
++ .name = "agp_allocate_memory_wrap",
++ .file = "drivers/char/agp/frontend.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001669_hash = {
++ .next = NULL,
++ .name = "alloc_irq_cpu_rmap",
++ .file = "include/linux/cpu_rmap.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _001670_hash = {
++ .next = NULL,
++ .name = "alloc_ring",
++ .file = "drivers/net/ethernet/chelsio/cxgb4/sge.c",
++ .param2 = 1,
++ .param4 = 1,
++};
++struct size_overflow_hash _001672_hash = {
++ .next = &_001124_hash,
++ .name = "atomic_counters_read",
++ .file = "drivers/infiniband/hw/ipath/ipath_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001673_hash = {
++ .next = NULL,
++ .name = "atomic_stats_read",
++ .file = "drivers/infiniband/hw/ipath/ipath_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001674_hash = {
++ .next = NULL,
++ .name = "c4iw_init_resource_fifo",
++ .file = "drivers/infiniband/hw/cxgb4/resource.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001675_hash = {
++ .next = NULL,
++ .name = "c4iw_init_resource_fifo_random",
++ .file = "drivers/infiniband/hw/cxgb4/resource.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001676_hash = {
++ .next = NULL,
++ .name = "compat_do_arpt_set_ctl",
++ .file = "net/ipv4/netfilter/arp_tables.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001677_hash = {
++ .next = NULL,
++ .name = "compat_do_ip6t_set_ctl",
++ .file = "net/ipv6/netfilter/ip6_tables.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001678_hash = {
++ .next = NULL,
++ .name = "compat_do_ipt_set_ctl",
++ .file = "net/ipv4/netfilter/ip_tables.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001679_hash = {
++ .next = NULL,
++ .name = "cxio_init_resource_fifo",
++ .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001680_hash = {
++ .next = NULL,
++ .name = "cxio_init_resource_fifo_random",
++ .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001681_hash = {
++ .next = NULL,
++ .name = "dev_counters_read",
++ .file = "drivers/infiniband/hw/qib/qib_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001682_hash = {
++ .next = NULL,
++ .name = "dev_names_read",
++ .file = "drivers/infiniband/hw/qib/qib_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001683_hash = {
++ .next = &_001468_hash,
++ .name = "do_arpt_set_ctl",
++ .file = "net/ipv4/netfilter/arp_tables.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001684_hash = {
++ .next = NULL,
++ .name = "do_ip6t_set_ctl",
++ .file = "net/ipv6/netfilter/ip6_tables.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001685_hash = {
++ .next = NULL,
++ .name = "do_ipt_set_ctl",
++ .file = "net/ipv4/netfilter/ip_tables.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001686_hash = {
++ .next = NULL,
++ .name = "drbd_bm_resize",
++ .file = "drivers/block/drbd/drbd_bitmap.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001687_hash = {
++ .next = NULL,
++ .name = "driver_names_read",
++ .file = "drivers/infiniband/hw/qib/qib_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001688_hash = {
++ .next = NULL,
++ .name = "driver_stats_read",
++ .file = "drivers/infiniband/hw/qib/qib_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001689_hash = {
++ .next = NULL,
++ .name = "flash_read",
++ .file = "drivers/infiniband/hw/qib/qib_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001690_hash = {
++ .next = NULL,
++ .name = "flash_read",
++ .file = "drivers/infiniband/hw/ipath/ipath_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001691_hash = {
++ .next = NULL,
++ .name = "flash_write",
++ .file = "drivers/infiniband/hw/qib/qib_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001692_hash = {
++ .next = NULL,
++ .name = "flash_write",
++ .file = "drivers/infiniband/hw/ipath/ipath_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001693_hash = {
++ .next = NULL,
++ .name = "ghash_async_setkey",
++ .file = "arch/x86/crypto/ghash-clmulni-intel_glue.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001694_hash = {
++ .next = NULL,
++ .name = "handle_eviocgbit",
++ .file = "drivers/input/evdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001695_hash = {
++ .next = NULL,
++ .name = "hid_parse_report",
++ .file = "include/linux/hid.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001696_hash = {
++ .next = NULL,
++ .name = "ipath_get_base_info",
++ .file = "drivers/infiniband/hw/ipath/ipath_file_ops.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001697_hash = {
++ .next = NULL,
++ .name = "options_write",
++ .file = "drivers/misc/sgi-gru/gruprocfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001698_hash = {
++ .next = NULL,
++ .name = "portcntrs_1_read",
++ .file = "drivers/infiniband/hw/qib/qib_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001699_hash = {
++ .next = NULL,
++ .name = "portcntrs_2_read",
++ .file = "drivers/infiniband/hw/qib/qib_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001700_hash = {
++ .next = NULL,
++ .name = "portnames_read",
++ .file = "drivers/infiniband/hw/qib/qib_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001701_hash = {
++ .next = NULL,
++ .name = "qib_alloc_devdata",
++ .file = "drivers/infiniband/hw/qib/qib_init.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001702_hash = {
++ .next = NULL,
++ .name = "qib_diag_write",
++ .file = "drivers/infiniband/hw/qib/qib_diag.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001703_hash = {
++ .next = NULL,
++ .name = "qib_get_base_info",
++ .file = "drivers/infiniband/hw/qib/qib_file_ops.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001704_hash = {
++ .next = NULL,
++ .name = "qsfp_1_read",
++ .file = "drivers/infiniband/hw/qib/qib_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001705_hash = {
++ .next = NULL,
++ .name = "qsfp_2_read",
++ .file = "drivers/infiniband/hw/qib/qib_fs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001706_hash = {
++ .next = NULL,
++ .name = "rfc4106_set_key",
++ .file = "arch/x86/crypto/aesni-intel_glue.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001707_hash = {
++ .next = &_000258_hash,
++ .name = "stats_read_ul",
++ .file = "drivers/idle/i7300_idle.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001708_hash = {
++ .next = NULL,
++ .name = "xpc_kmalloc_cacheline_aligned",
++ .file = "drivers/misc/sgi-xp/xpc_partition.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001709_hash = {
++ .next = NULL,
++ .name = "xpc_kzalloc_cacheline_aligned",
++ .file = "drivers/misc/sgi-xp/xpc_main.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001710_hash = {
++ .next = NULL,
++ .name = "c4iw_init_resource",
++ .file = "drivers/infiniband/hw/cxgb4/resource.c",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _001712_hash = {
++ .next = NULL,
++ .name = "cxio_hal_init_resource",
++ .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
++ .param2 = 1,
++ .param7 = 1,
++ .param6 = 1,
++};
++struct size_overflow_hash _001715_hash = {
++ .next = &_000734_hash,
++ .name = "cxio_hal_init_rhdl_resource",
++ .file = "drivers/infiniband/hw/cxgb3/cxio_resource.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001716_hash = {
++ .next = NULL,
++ .name = "amthi_read",
++ .file = "drivers/staging/mei/iorw.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001717_hash = {
++ .next = NULL,
++ .name = "bcm_char_read",
++ .file = "drivers/staging/bcm/Bcmchar.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001718_hash = {
++ .next = NULL,
++ .name = "BcmCopySection",
++ .file = "drivers/staging/bcm/nvm.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001719_hash = {
++ .next = NULL,
++ .name = "buffer_from_user",
++ .file = "drivers/staging/vme/devices/vme_user.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001720_hash = {
++ .next = NULL,
++ .name = "buffer_to_user",
++ .file = "drivers/staging/vme/devices/vme_user.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001721_hash = {
++ .next = NULL,
++ .name = "capabilities_read",
++ .file = "drivers/xen/xenfs/super.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001722_hash = {
++ .next = NULL,
++ .name = "chd_dec_fetch_cdata",
++ .file = "drivers/staging/crystalhd/crystalhd_lnx.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001723_hash = {
++ .next = NULL,
++ .name = "create_bounce_buffer",
++ .file = "drivers/staging/hv/storvsc_drv.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001724_hash = {
++ .next = NULL,
++ .name = "crystalhd_create_dio_pool",
++ .file = "drivers/staging/crystalhd/crystalhd_misc.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001725_hash = {
++ .next = NULL,
++ .name = "do_read_log_to_user",
++ .file = "drivers/staging/android/logger.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001726_hash = {
++ .next = NULL,
++ .name = "do_write_log_from_user",
++ .file = "drivers/staging/android/logger.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001727_hash = {
++ .next = NULL,
++ .name = "dt3155_read",
++ .file = "drivers/staging/media/dt3155v4l/dt3155v4l.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001728_hash = {
++ .next = NULL,
++ .name = "easycap_alsa_vmalloc",
++ .file = "drivers/staging/media/easycap/easycap_sound.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001729_hash = {
++ .next = NULL,
++ .name = "evm_read_key",
++ .file = "security/integrity/evm/evm_secfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001730_hash = {
++ .next = NULL,
++ .name = "evm_write_key",
++ .file = "security/integrity/evm/evm_secfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001731_hash = {
++ .next = NULL,
++ .name = "evtchn_read",
++ .file = "drivers/xen/evtchn.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001732_hash = {
++ .next = NULL,
++ .name = "gather_array",
++ .file = "drivers/xen/privcmd.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001733_hash = {
++ .next = NULL,
++ .name = "gnttab_map",
++ .file = "drivers/xen/grant-table.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001734_hash = {
++ .next = NULL,
++ .name = "iio_read_first_n_kfifo",
++ .file = "drivers/staging/iio/kfifo_buf.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001735_hash = {
++ .next = NULL,
++ .name = "iio_read_first_n_sw_rb",
++ .file = "drivers/staging/iio/ring_sw.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001736_hash = {
++ .next = NULL,
++ .name = "keymap_store",
++ .file = "drivers/staging/speakup/kobjects.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001737_hash = {
++ .next = NULL,
++ .name = "line6_dumpreq_initbuf",
++ .file = "drivers/staging/line6/dumprequest.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001738_hash = {
++ .next = NULL,
++ .name = "lirc_write",
++ .file = "drivers/staging/media/lirc/lirc_parallel.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001739_hash = {
++ .next = NULL,
++ .name = "lirc_write",
++ .file = "drivers/staging/media/lirc/lirc_sir.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001740_hash = {
++ .next = &_000815_hash,
++ .name = "lirc_write",
++ .file = "drivers/staging/media/lirc/lirc_serial.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001741_hash = {
++ .next = &_001021_hash,
++ .name = "_malloc",
++ .file = "drivers/staging/rtl8712/osdep_service.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _001742_hash = {
++ .next = NULL,
++ .name = "mei_read",
++ .file = "drivers/staging/mei/main.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001743_hash = {
++ .next = NULL,
++ .name = "mei_write",
++ .file = "drivers/staging/mei/main.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001744_hash = {
++ .next = NULL,
++ .name = "msg_set",
++ .file = "drivers/staging/speakup/i18n.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001745_hash = {
++ .next = NULL,
++ .name = "OS_kmalloc",
++ .file = "drivers/staging/cxt1e1/sbecom_inline_linux.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _001746_hash = {
++ .next = NULL,
++ .name = "queue_reply",
++ .file = "drivers/xen/xenbus/xenbus_dev_frontend.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001747_hash = {
++ .next = &_000841_hash,
++ .name = "resource_from_user",
++ .file = "drivers/staging/vme/devices/vme_user.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001748_hash = {
++ .next = NULL,
++ .name = "sca3000_read_first_n_hw_rb",
++ .file = "drivers/staging/iio/accel/sca3000_ring.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001749_hash = {
++ .next = NULL,
++ .name = "sep_lock_user_pages",
++ .file = "drivers/staging/sep/sep_driver.c",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _001751_hash = {
++ .next = NULL,
++ .name = "sep_prepare_input_output_dma_table_in_dcb",
++ .file = "drivers/staging/sep/sep_driver.c",
++ .param4 = 1,
++ .param5 = 1,
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _001753_hash = {
++ .next = NULL,
++ .name = "split",
++ .file = "drivers/xen/xenbus/xenbus_xs.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001754_hash = {
++ .next = NULL,
++ .name = "storvsc_connect_to_vsp",
++ .file = "drivers/staging/hv/storvsc_drv.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001755_hash = {
++ .next = NULL,
++ .name = "u32_array_read",
++ .file = "arch/x86/xen/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001756_hash = {
++ .next = NULL,
++ .name = "ValidateDSDParamsChecksum",
++ .file = "drivers/staging/bcm/led_control.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001757_hash = {
++ .next = NULL,
++ .name = "vfd_write",
++ .file = "drivers/staging/media/lirc/lirc_sasem.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001758_hash = {
++ .next = NULL,
++ .name = "vfd_write",
++ .file = "drivers/staging/media/lirc/lirc_imon.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001759_hash = {
++ .next = NULL,
++ .name = "Wb35Reg_BurstWrite",
++ .file = "drivers/staging/winbond/wb35reg.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001760_hash = {
++ .next = NULL,
++ .name = "xenbus_file_write",
++ .file = "drivers/xen/xenbus/xenbus_dev_frontend.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001761_hash = {
++ .next = NULL,
++ .name = "xsd_read",
++ .file = "drivers/xen/xenfs/xenstored.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001762_hash = {
++ .next = NULL,
++ .name = "line6_dumpreq_init",
++ .file = "drivers/staging/line6/dumprequest.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001763_hash = {
++ .next = NULL,
++ .name = "r8712_usbctrl_vendorreq",
++ .file = "drivers/staging/rtl8712/usb_ops_linux.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _001764_hash = {
++ .next = NULL,
++ .name = "r871x_set_wpa_ie",
++ .file = "drivers/staging/rtl8712/rtl871x_ioctl_linux.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001765_hash = {
++ .next = NULL,
++ .name = "sep_prepare_input_dma_table",
++ .file = "drivers/staging/sep/sep_driver.c",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _001767_hash = {
++ .next = NULL,
++ .name = "sep_prepare_input_output_dma_table",
++ .file = "drivers/staging/sep/sep_driver.c",
++ .param2 = 1,
++ .param4 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _001770_hash = {
++ .next = NULL,
++ .name = "vme_user_write",
++ .file = "drivers/staging/vme/devices/vme_user.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001771_hash = {
++ .next = NULL,
++ .name = "alloc_ebda_hpc",
++ .file = "drivers/pci/hotplug/ibmphp_ebda.c",
++ .param1 = 1,
++ .param2 = 1,
++};
++struct size_overflow_hash _001772_hash = {
++ .next = NULL,
++ .name = "add_uuid",
++ .file = "net/bluetooth/mgmt.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001773_hash = {
++ .next = NULL,
++ .name = "__alloc_extent_buffer",
++ .file = "fs/btrfs/extent_io.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001774_hash = {
++ .next = NULL,
++ .name = "array_zalloc",
++ .file = "drivers/target/target_core_tpg.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001775_hash = {
++ .next = NULL,
++ .name = "ath6kl_fwlog_block_read",
++ .file = "drivers/net/wireless/ath/ath6kl/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001776_hash = {
++ .next = NULL,
++ .name = "ath6kl_listen_int_read",
++ .file = "drivers/net/wireless/ath/ath6kl/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001777_hash = {
++ .next = NULL,
++ .name = "ath6kl_mgmt_powersave_ap",
++ .file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _001778_hash = {
++ .next = NULL,
++ .name = "__ath6kl_wmi_send_mgmt_cmd",
++ .file = "drivers/net/wireless/ath/ath6kl/wmi.c",
++ .param7 = 1,
++};
++struct size_overflow_hash _001779_hash = {
++ .next = NULL,
++ .name = "cld_pipe_downcall",
++ .file = "fs/nfsd/nfs4recover.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001780_hash = {
++ .next = NULL,
++ .name = "create_bounce_buffer",
++ .file = "drivers/scsi/storvsc_drv.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001781_hash = {
++ .next = NULL,
++ .name = "dwc3_link_state_write",
++ .file = "drivers/usb/dwc3/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001782_hash = {
++ .next = NULL,
++ .name = "dwc3_testmode_write",
++ .file = "drivers/usb/dwc3/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001783_hash = {
++ .next = NULL,
++ .name = "dynamic_ps_timeout_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001784_hash = {
++ .next = NULL,
++ .name = "forced_ps_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001785_hash = {
++ .next = NULL,
++ .name = "idmap_pipe_downcall",
++ .file = "fs/nfs/idmap.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001786_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_rc_rateidx_mcs_mask_2ghz",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001787_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_rc_rateidx_mcs_mask_5ghz",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001788_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_rssi_threshold",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001789_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_uapsd_max_sp_len",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001790_hash = {
++ .next = NULL,
++ .name = "ieee80211_if_read_uapsd_queues",
++ .file = "net/mac80211/debugfs_netdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001791_hash = {
++ .next = NULL,
++ .name = "irq_domain_add_linear",
++ .file = "include/linux/irqdomain.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _001792_hash = {
++ .next = NULL,
++ .name = "kmalloc_array",
++ .file = "include/linux/slab.h",
++ .param1 = 1,
++ .param2 = 1,
++};
++struct size_overflow_hash _001794_hash = {
++ .next = NULL,
++ .name = "nfc_llcp_send_i_frame",
++ .file = "net/nfc/llcp/commands.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001797_hash = {
++ .next = NULL,
++ .name = "pn533_dep_link_up",
++ .file = "drivers/nfc/pn533.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001798_hash = {
++ .next = NULL,
++ .name = "port_show_regs",
++ .file = "drivers/tty/serial/pch_uart.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001799_hash = {
++ .next = NULL,
++ .name = "qla4xxx_alloc_work",
++ .file = "drivers/scsi/qla4xxx/ql4_os.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001800_hash = {
++ .next = NULL,
++ .name = "rbd_add",
++ .file = "drivers/block/rbd.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001801_hash = {
++ .next = NULL,
++ .name = "read_file_reset",
++ .file = "drivers/net/wireless/ath/ath9k/debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001802_hash = {
++ .next = NULL,
++ .name = "regmap_bulk_write",
++ .file = "include/linux/regmap.h",
++ .param4 = 1,
++};
++struct size_overflow_hash _001803_hash = {
++ .next = NULL,
++ .name = "regmap_name_read_file",
++ .file = "drivers/base/regmap/regmap-debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001804_hash = {
++ .next = NULL,
++ .name = "reiserfs_allocate_list_bitmaps",
++ .file = "fs/reiserfs/journal.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001805_hash = {
++ .next = NULL,
++ .name = "reiserfs_resize",
++ .file = "fs/reiserfs/resize.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001806_hash = {
++ .next = NULL,
++ .name = "remove_uuid",
++ .file = "net/bluetooth/mgmt.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001807_hash = {
++ .next = NULL,
++ .name = "set_dev_class",
++ .file = "net/bluetooth/mgmt.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001808_hash = {
++ .next = NULL,
++ .name = "set_le",
++ .file = "net/bluetooth/mgmt.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001809_hash = {
++ .next = NULL,
++ .name = "set_link_security",
++ .file = "net/bluetooth/mgmt.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001810_hash = {
++ .next = NULL,
++ .name = "set_ssp",
++ .file = "net/bluetooth/mgmt.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001811_hash = {
++ .next = NULL,
++ .name = "shmem_setxattr",
++ .file = "mm/shmem.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001812_hash = {
++ .next = NULL,
++ .name = "shmem_xattr_alloc",
++ .file = "mm/shmem.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001813_hash = {
++ .next = NULL,
++ .name = "split_scan_timeout_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001814_hash = {
++ .next = NULL,
++ .name = "storvsc_connect_to_vsp",
++ .file = "drivers/scsi/storvsc_drv.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001815_hash = {
++ .next = NULL,
++ .name = "suspend_dtim_interval_read",
++ .file = "drivers/net/wireless/wl12xx/debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001816_hash = {
++ .next = NULL,
++ .name = "alloc_extent_buffer",
++ .file = "fs/btrfs/extent_io.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001817_hash = {
++ .next = NULL,
++ .name = "nfs_idmap_get_key",
++ .file = "fs/nfs/idmap.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001818_hash = {
++ .next = NULL,
++ .name = "iio_debugfs_read_reg",
++ .file = "drivers/staging/iio/industrialio-core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001819_hash = {
++ .next = NULL,
++ .name = "iio_debugfs_write_reg",
++ .file = "drivers/staging/iio/industrialio-core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001820_hash = {
++ .next = NULL,
++ .name = "iio_event_chrdev_read",
++ .file = "drivers/staging/iio/industrialio-event.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001821_hash = {
++ .next = NULL,
++ .name = "sep_create_dcb_dmatables_context",
++ .file = "drivers/staging/sep/sep_main.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _001822_hash = {
++ .next = NULL,
++ .name = "sep_create_dcb_dmatables_context_kernel",
++ .file = "drivers/staging/sep/sep_main.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _001823_hash = {
++ .next = NULL,
++ .name = "sep_create_msgarea_context",
++ .file = "drivers/staging/sep/sep_main.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001824_hash = {
++ .next = NULL,
++ .name = "sep_lli_table_secure_dma",
++ .file = "drivers/staging/sep/sep_main.c",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _001826_hash = {
++ .next = NULL,
++ .name = "sep_lock_user_pages",
++ .file = "drivers/staging/sep/sep_main.c",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _001828_hash = {
++ .next = NULL,
++ .name = "sep_prepare_input_output_dma_table_in_dcb",
++ .file = "drivers/staging/sep/sep_main.c",
++ .param4 = 1,
++ .param5 = 1,
++};
++struct size_overflow_hash _001830_hash = {
++ .next = NULL,
++ .name = "sep_read",
++ .file = "drivers/staging/sep/sep_main.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001831_hash = {
++ .next = NULL,
++ .name = "alloc_rx_desc_ring",
++ .file = "drivers/staging/rtl8187se/r8180_core.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001832_hash = {
++ .next = NULL,
++ .name = "alloc_subdevices",
++ .file = "drivers/staging/comedi/drivers/../comedidev.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _001833_hash = {
++ .next = NULL,
++ .name = "alloc_subdevices",
++ .file = "drivers/staging/comedi/drivers/addi-data/../../comedidev.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _001834_hash = {
++ .next = NULL,
++ .name = "comedi_read",
++ .file = "drivers/staging/comedi/comedi_fops.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001835_hash = {
++ .next = NULL,
++ .name = "comedi_write",
++ .file = "drivers/staging/comedi/comedi_fops.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001836_hash = {
++ .next = NULL,
++ .name = "compat_sys_preadv64",
++ .file = "fs/compat.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001837_hash = {
++ .next = NULL,
++ .name = "compat_sys_pwritev64",
++ .file = "fs/compat.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001838_hash = {
++ .next = NULL,
++ .name = "ext_sd_execute_read_data",
++ .file = "drivers/staging/rts5139/sd_cprm.c",
++ .param9 = 1,
++};
++struct size_overflow_hash _001839_hash = {
++ .next = NULL,
++ .name = "ext_sd_execute_write_data",
++ .file = "drivers/staging/rts5139/sd_cprm.c",
++ .param9 = 1,
++};
++struct size_overflow_hash _001840_hash = {
++ .next = NULL,
++ .name = "ieee80211_wx_set_gen_ie",
++ .file = "drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001841_hash = {
++ .next = NULL,
++ .name = "ieee80211_wx_set_gen_ie_rsl",
++ .file = "drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001842_hash = {
++ .next = NULL,
++ .name = "ni_gpct_device_construct",
++ .file = "drivers/staging/comedi/drivers/ni_tio.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001843_hash = {
++ .next = NULL,
++ .name = "Realloc",
++ .file = "drivers/staging/comedi/drivers/comedi_bond.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001844_hash = {
++ .next = NULL,
++ .name = "rtllib_wx_set_gen_ie",
++ .file = "drivers/staging/rtl8192e/rtllib_wx.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001845_hash = {
++ .next = NULL,
++ .name = "rts51x_transfer_data_partial",
++ .file = "drivers/staging/rts5139/rts51x_transport.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _001846_hash = {
++ .next = NULL,
++ .name = "store_debug_level",
++ .file = "drivers/staging/rtl8192u/ieee80211/ieee80211_module.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001847_hash = {
++ .next = NULL,
++ .name = "usb_buffer_alloc",
++ .file = "drivers/staging/rts5139/rts51x.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _001848_hash = {
++ .next = NULL,
++ .name = "alloc_apertures",
++ .file = "include/linux/fb.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _001849_hash = {
++ .next = NULL,
++ .name = "bin_uuid",
++ .file = "kernel/sysctl_binary.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001850_hash = {
++ .next = &_000640_hash,
++ .name = "__copy_from_user_inatomic_nocache",
++ .file = "arch/x86/include/asm/uaccess_64.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001851_hash = {
++ .next = NULL,
++ .name = "do_dmabuf_dirty_sou",
++ .file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
++ .param7 = 1,
++};
++struct size_overflow_hash _001852_hash = {
++ .next = NULL,
++ .name = "do_surface_dirty_sou",
++ .file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
++ .param7 = 1,
++};
++struct size_overflow_hash _001853_hash = {
++ .next = NULL,
++ .name = "drm_agp_bind_pages",
++ .file = "drivers/gpu/drm/drm_agpsupport.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001854_hash = {
++ .next = NULL,
++ .name = "drm_calloc_large",
++ .file = "include/drm/drm_mem_util.h",
++ .param1 = 1,
++ .param2 = 1,
++};
++struct size_overflow_hash _001856_hash = {
++ .next = NULL,
++ .name = "drm_ht_create",
++ .file = "drivers/gpu/drm/drm_hashtab.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001857_hash = {
++ .next = NULL,
++ .name = "drm_malloc_ab",
++ .file = "include/drm/drm_mem_util.h",
++ .param1 = 1,
++ .param2 = 1,
++};
++struct size_overflow_hash _001859_hash = {
++ .next = NULL,
++ .name = "drm_plane_init",
++ .file = "drivers/gpu/drm/drm_crtc.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _001860_hash = {
++ .next = NULL,
++ .name = "drm_vmalloc_dma",
++ .file = "drivers/gpu/drm/drm_scatter.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001861_hash = {
++ .next = NULL,
++ .name = "fb_read",
++ .file = "drivers/video/fbmem.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001862_hash = {
++ .next = NULL,
++ .name = "fb_write",
++ .file = "drivers/video/fbmem.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001863_hash = {
++ .next = NULL,
++ .name = "framebuffer_alloc",
++ .file = "include/linux/fb.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _001864_hash = {
++ .next = NULL,
++ .name = "i915_cache_sharing_read",
++ .file = "drivers/gpu/drm/i915/i915_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001865_hash = {
++ .next = NULL,
++ .name = "i915_cache_sharing_write",
++ .file = "drivers/gpu/drm/i915/i915_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001866_hash = {
++ .next = NULL,
++ .name = "i915_max_freq_read",
++ .file = "drivers/gpu/drm/i915/i915_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001867_hash = {
++ .next = NULL,
++ .name = "i915_max_freq_write",
++ .file = "drivers/gpu/drm/i915/i915_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001868_hash = {
++ .next = NULL,
++ .name = "i915_wedged_read",
++ .file = "drivers/gpu/drm/i915/i915_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001869_hash = {
++ .next = NULL,
++ .name = "i915_wedged_write",
++ .file = "drivers/gpu/drm/i915/i915_debugfs.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001870_hash = {
++ .next = NULL,
++ .name = "__module_alloc",
++ .file = "arch/x86/kernel/module.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001871_hash = {
++ .next = NULL,
++ .name = "module_alloc_update_bounds_rw",
++ .file = "kernel/module.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001872_hash = {
++ .next = NULL,
++ .name = "module_alloc_update_bounds_rx",
++ .file = "kernel/module.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001873_hash = {
++ .next = NULL,
++ .name = "p9_client_read",
++ .file = "include/net/9p/client.h",
++ .param5 = 1,
++};
++struct size_overflow_hash _001874_hash = {
++ .next = NULL,
++ .name = "probe_kernel_write",
++ .file = "include/linux/uaccess.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001875_hash = {
++ .next = NULL,
++ .name = "sched_feat_write",
++ .file = "kernel/sched/core.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001876_hash = {
++ .next = NULL,
++ .name = "tstats_write",
++ .file = "kernel/time/timer_stats.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001877_hash = {
++ .next = NULL,
++ .name = "ttm_bo_fbdev_io",
++ .file = "drivers/gpu/drm/ttm/ttm_bo_vm.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001878_hash = {
++ .next = NULL,
++ .name = "ttm_bo_io",
++ .file = "drivers/gpu/drm/ttm/ttm_bo_vm.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001879_hash = {
++ .next = NULL,
++ .name = "ttm_dma_page_pool_free",
++ .file = "drivers/gpu/drm/ttm/ttm_page_alloc_dma.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001880_hash = {
++ .next = NULL,
++ .name = "ttm_page_pool_free",
++ .file = "drivers/gpu/drm/ttm/ttm_page_alloc.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001881_hash = {
++ .next = NULL,
++ .name = "vmw_execbuf_process",
++ .file = "drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001882_hash = {
++ .next = NULL,
++ .name = "vmw_fifo_reserve",
++ .file = "drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001883_hash = {
++ .next = NULL,
++ .name = "vmw_kms_present",
++ .file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
++ .param9 = 1,
++};
++struct size_overflow_hash _001884_hash = {
++ .next = NULL,
++ .name = "vmw_kms_readback",
++ .file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _001885_hash = {
++ .next = NULL,
++ .name = "__copy_from_user_inatomic_nocache",
++ .file = "arch/x86/include/asm/uaccess_32.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001886_hash = {
++ .next = NULL,
++ .name = "arcfb_write",
++ .file = "drivers/video/arcfb.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001887_hash = {
++ .next = NULL,
++ .name = "ath6kl_usb_submit_ctrl_in",
++ .file = "drivers/net/wireless/ath/ath6kl/usb.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _001888_hash = {
++ .next = NULL,
++ .name = "ath6kl_usb_submit_ctrl_out",
++ .file = "drivers/net/wireless/ath/ath6kl/usb.c",
++ .param6 = 1,
++};
++struct size_overflow_hash _001889_hash = {
++ .next = NULL,
++ .name = "blk_dropped_read",
++ .file = "kernel/trace/blktrace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001890_hash = {
++ .next = NULL,
++ .name = "blk_msg_write",
++ .file = "kernel/trace/blktrace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001891_hash = {
++ .next = NULL,
++ .name = "broadsheetfb_write",
++ .file = "drivers/video/broadsheetfb.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001892_hash = {
++ .next = NULL,
++ .name = "cyttsp_probe",
++ .file = "drivers/input/touchscreen/cyttsp_core.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001893_hash = {
++ .next = NULL,
++ .name = "da9052_group_write",
++ .file = "include/linux/mfd/da9052/da9052.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001894_hash = {
++ .next = NULL,
++ .name = "dccpprobe_read",
++ .file = "net/dccp/probe.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001895_hash = {
++ .next = NULL,
++ .name = "__devres_alloc",
++ .file = "include/linux/device.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _001896_hash = {
++ .next = NULL,
++ .name = "event_enable_read",
++ .file = "kernel/trace/trace_events.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001897_hash = {
++ .next = NULL,
++ .name = "event_filter_read",
++ .file = "kernel/trace/trace_events.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001898_hash = {
++ .next = NULL,
++ .name = "event_filter_write",
++ .file = "kernel/trace/trace_events.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001899_hash = {
++ .next = NULL,
++ .name = "event_id_read",
++ .file = "kernel/trace/trace_events.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001900_hash = {
++ .next = NULL,
++ .name = "fb_sys_read",
++ .file = "include/linux/fb.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001901_hash = {
++ .next = NULL,
++ .name = "fb_sys_write",
++ .file = "include/linux/fb.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001902_hash = {
++ .next = NULL,
++ .name = "ftrace_pid_write",
++ .file = "kernel/trace/ftrace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001903_hash = {
++ .next = NULL,
++ .name = "ftrace_profile_read",
++ .file = "kernel/trace/ftrace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001904_hash = {
++ .next = NULL,
++ .name = "hecubafb_write",
++ .file = "drivers/video/hecubafb.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001905_hash = {
++ .next = NULL,
++ .name = "hsc_msg_alloc",
++ .file = "drivers/hsi/clients/hsi_char.c",
++ .param1 = 1,
++};
++struct size_overflow_hash _001906_hash = {
++ .next = NULL,
++ .name = "hsc_write",
++ .file = "drivers/hsi/clients/hsi_char.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001907_hash = {
++ .next = NULL,
++ .name = "hsi_alloc_controller",
++ .file = "include/linux/hsi/hsi.h",
++ .param1 = 1,
++};
++struct size_overflow_hash _001908_hash = {
++ .next = NULL,
++ .name = "hsi_register_board_info",
++ .file = "include/linux/hsi/hsi.h",
++ .param2 = 1,
++};
++struct size_overflow_hash _001909_hash = {
++ .next = NULL,
++ .name = "ivtvfb_write",
++ .file = "drivers/media/video/ivtv/ivtvfb.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001910_hash = {
++ .next = NULL,
++ .name = "metronomefb_write",
++ .file = "drivers/video/metronomefb.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001911_hash = {
++ .next = NULL,
++ .name = "odev_update",
++ .file = "drivers/video/via/viafbdev.c",
++ .param2 = 1,
++};
++struct size_overflow_hash _001912_hash = {
++ .next = NULL,
++ .name = "oz_add_farewell",
++ .file = "drivers/staging/ozwpan/ozproto.c",
++ .param5 = 1,
++};
++struct size_overflow_hash _001913_hash = {
++ .next = NULL,
++ .name = "oz_cdev_read",
++ .file = "drivers/staging/ozwpan/ozcdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001914_hash = {
++ .next = NULL,
++ .name = "oz_cdev_write",
++ .file = "drivers/staging/ozwpan/ozcdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001915_hash = {
++ .next = NULL,
++ .name = "pmcraid_copy_sglist",
++ .file = "drivers/scsi/pmcraid.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001916_hash = {
++ .next = NULL,
++ .name = "probes_write",
++ .file = "kernel/trace/trace_kprobe.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001917_hash = {
++ .next = NULL,
++ .name = "proc_fault_inject_read",
++ .file = "fs/proc/base.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001918_hash = {
++ .next = NULL,
++ .name = "proc_fault_inject_write",
++ .file = "fs/proc/base.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001919_hash = {
++ .next = NULL,
++ .name = "rb_simple_read",
++ .file = "kernel/trace/trace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001920_hash = {
++ .next = NULL,
++ .name = "read_file_dfs",
++ .file = "drivers/net/wireless/ath/ath9k/dfs_debug.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001921_hash = {
++ .next = NULL,
++ .name = "show_header",
++ .file = "kernel/trace/trace_events.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001922_hash = {
++ .next = NULL,
++ .name = "stack_max_size_read",
++ .file = "kernel/trace/trace_stack.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001923_hash = {
++ .next = NULL,
++ .name = "subsystem_filter_read",
++ .file = "kernel/trace/trace_events.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001924_hash = {
++ .next = NULL,
++ .name = "subsystem_filter_write",
++ .file = "kernel/trace/trace_events.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001925_hash = {
++ .next = NULL,
++ .name = "system_enable_read",
++ .file = "kernel/trace/trace_events.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001926_hash = {
++ .next = NULL,
++ .name = "trace_options_core_read",
++ .file = "kernel/trace/trace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001927_hash = {
++ .next = NULL,
++ .name = "trace_options_read",
++ .file = "kernel/trace/trace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001928_hash = {
++ .next = NULL,
++ .name = "trace_seq_to_user",
++ .file = "include/linux/trace_seq.h",
++ .param3 = 1,
++};
++struct size_overflow_hash _001929_hash = {
++ .next = NULL,
++ .name = "tracing_buffers_read",
++ .file = "kernel/trace/trace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001930_hash = {
++ .next = NULL,
++ .name = "tracing_clock_write",
++ .file = "kernel/trace/trace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001931_hash = {
++ .next = NULL,
++ .name = "tracing_cpumask_read",
++ .file = "kernel/trace/trace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001932_hash = {
++ .next = NULL,
++ .name = "tracing_ctrl_read",
++ .file = "kernel/trace/trace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001933_hash = {
++ .next = NULL,
++ .name = "tracing_entries_read",
++ .file = "kernel/trace/trace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001934_hash = {
++ .next = NULL,
++ .name = "tracing_max_lat_read",
++ .file = "kernel/trace/trace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001935_hash = {
++ .next = NULL,
++ .name = "tracing_read_dyn_info",
++ .file = "kernel/trace/trace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001936_hash = {
++ .next = NULL,
++ .name = "tracing_readme_read",
++ .file = "kernel/trace/trace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001937_hash = {
++ .next = NULL,
++ .name = "tracing_saved_cmdlines_read",
++ .file = "kernel/trace/trace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001938_hash = {
++ .next = NULL,
++ .name = "tracing_set_trace_read",
++ .file = "kernel/trace/trace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001939_hash = {
++ .next = NULL,
++ .name = "tracing_set_trace_write",
++ .file = "kernel/trace/trace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001940_hash = {
++ .next = NULL,
++ .name = "tracing_stats_read",
++ .file = "kernel/trace/trace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001941_hash = {
++ .next = NULL,
++ .name = "tracing_total_entries_read",
++ .file = "kernel/trace/trace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001942_hash = {
++ .next = NULL,
++ .name = "tracing_trace_options_write",
++ .file = "kernel/trace/trace.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001943_hash = {
++ .next = NULL,
++ .name = "ufx_alloc_urb_list",
++ .file = "drivers/video/smscufx.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001944_hash = {
++ .next = NULL,
++ .name = "u_memcpya",
++ .file = "drivers/gpu/drm/nouveau/nouveau_gem.c",
++ .param2 = 1,
++ .param3 = 1,
++};
++struct size_overflow_hash _001946_hash = {
++ .next = NULL,
++ .name = "v9fs_fid_readn",
++ .file = "fs/9p/vfs_file.c",
++ .param4 = 1,
++};
++struct size_overflow_hash _001947_hash = {
++ .next = NULL,
++ .name = "v9fs_file_read",
++ .file = "fs/9p/vfs_file.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001948_hash = {
++ .next = NULL,
++ .name = "viafb_dfph_proc_write",
++ .file = "drivers/video/via/viafbdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001949_hash = {
++ .next = NULL,
++ .name = "viafb_dfpl_proc_write",
++ .file = "drivers/video/via/viafbdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001950_hash = {
++ .next = NULL,
++ .name = "viafb_dvp0_proc_write",
++ .file = "drivers/video/via/viafbdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001951_hash = {
++ .next = NULL,
++ .name = "viafb_dvp1_proc_write",
++ .file = "drivers/video/via/viafbdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001952_hash = {
++ .next = NULL,
++ .name = "viafb_vt1636_proc_write",
++ .file = "drivers/video/via/viafbdev.c",
++ .param3 = 1,
++};
++struct size_overflow_hash _001953_hash = {
++ .next = NULL,
++ .name = "vivi_read",
++ .file = "drivers/media/video/vivi.c",
++ .param3 = 1,
++};
++struct size_overflow_hash *size_overflow_hash[65536] = {
++ [56878] = &_000001_hash,
++ [11151] = &_000002_hash,
++ [17854] = &_000003_hash,
++ [4132] = &_000004_hash,
++ [39070] = &_000005_hash,
++ [35447] = &_000007_hash,
++ [47830] = &_000008_hash,
++ [65254] = &_000009_hash,
++ [17521] = &_000011_hash,
++ [41425] = &_000012_hash,
++ [5785] = &_000013_hash,
++ [19960] = &_000014_hash,
++ [26729] = &_000015_hash,
++ [7954] = &_000016_hash,
++ [22403] = &_000017_hash,
++ [23258] = &_000018_hash,
++ [55695] = &_000019_hash,
++ [38964] = &_000020_hash,
++ [64250] = &_000021_hash,
++ [31825] = &_000022_hash,
++ [47446] = &_000023_hash,
++ [61521] = &_000024_hash,
++ [64227] = &_000025_hash,
++ [53378] = &_000026_hash,
++ [8885] = &_000027_hash,
++ [62101] = &_000028_hash,
++ [18152] = &_000029_hash,
++ [37525] = &_000030_hash,
++ [25827] = &_000031_hash,
++ [1169] = &_000032_hash,
++ [11925] = &_000033_hash,
++ [20558] = &_000034_hash,
++ [44019] = &_000035_hash,
++ [21909] = &_000036_hash,
++ [63679] = &_000037_hash,
++ [39450] = &_000038_hash,
++ [25085] = &_000039_hash,
++ [17830] = &_000040_hash,
++ [14329] = &_000041_hash,
++ [31235] = &_000042_hash,
++ [48207] = &_000043_hash,
++ [34918] = &_000044_hash,
++ [46839] = &_000045_hash,
++ [57930] = &_000046_hash,
++ [41364] = &_000047_hash,
++ [17581] = &_000048_hash,
++ [45922] = &_000049_hash,
++ [49567] = &_000050_hash,
++ [18248] = &_000051_hash,
++ [25528] = &_000052_hash,
++ [61874] = &_000053_hash,
++ [22591] = &_000054_hash,
++ [48456] = &_000055_hash,
++ [8743] = &_000056_hash,
++ [39131] = &_000057_hash,
++ [48328] = &_000058_hash,
++ [47136] = &_000059_hash,
++ [6358] = &_000060_hash,
++ [12252] = &_000061_hash,
++ [49340] = &_000062_hash,
++ [45875] = &_000063_hash,
++ [52182] = &_000065_hash,
++ [31149] = &_000067_hash,
++ [20455] = &_000068_hash,
++ [19917] = &_000070_hash,
++ [64771] = &_000071_hash,
++ [25140] = &_000072_hash,
++ [34097] = &_000073_hash,
++ [58131] = &_000074_hash,
++ [65311] = &_000075_hash,
++ [60609] = &_000076_hash,
++ [1917] = &_000077_hash,
++ [15337] = &_000078_hash,
++ [4732] = &_000079_hash,
++ [38783] = &_000080_hash,
++ [37249] = &_000081_hash,
++ [9234] = &_000082_hash,
++ [33309] = &_000083_hash,
++ [22389] = &_000084_hash,
++ [56319] = &_000085_hash,
++ [21496] = &_000086_hash,
++ [8163] = &_000087_hash,
++ [58766] = &_000088_hash,
++ [21048] = &_000089_hash,
++ [51221] = &_000090_hash,
++ [21498] = &_000091_hash,
++ [42627] = &_000092_hash,
++ [53059] = &_000094_hash,
++ [52870] = &_000095_hash,
++ [1567] = &_000096_hash,
++ [38330] = &_000097_hash,
++ [30892] = &_000098_hash,
++ [16927] = &_000099_hash,
++ [16461] = &_000100_hash,
++ [5634] = &_000101_hash,
++ [16496] = &_000103_hash,
++ [40012] = &_000104_hash,
++ [46014] = &_000105_hash,
++ [39600] = &_000106_hash,
++ [7435] = &_000107_hash,
++ [13332] = &_000109_hash,
++ [36665] = &_000110_hash,
++ [12413] = &_000111_hash,
++ [27279] = &_000112_hash,
++ [44774] = &_000113_hash,
++ [14479] = &_000114_hash,
++ [32447] = &_000115_hash,
++ [15439] = &_000116_hash,
++ [17932] = &_000117_hash,
++ [26096] = &_000118_hash,
++ [50814] = &_000119_hash,
++ [22598] = &_000120_hash,
++ [48287] = &_000121_hash,
++ [15611] = &_000122_hash,
++ [13414] = &_000123_hash,
++ [40371] = &_000124_hash,
++ [284] = &_000125_hash,
++ [6293] = &_000127_hash,
++ [60587] = &_000128_hash,
++ [8181] = &_000129_hash,
++ [27451] = &_000130_hash,
++ [29259] = &_000131_hash,
++ [41172] = &_000132_hash,
++ [3315] = &_000133_hash,
++ [37550] = &_000134_hash,
++ [40395] = &_000135_hash,
++ [24124] = &_000136_hash,
++ [63535] = &_000137_hash,
++ [14981] = &_000138_hash,
++ [52008] = &_000139_hash,
++ [22091] = &_000140_hash,
++ [64800] = &_000141_hash,
++ [14919] = &_000142_hash,
++ [60340] = &_000143_hash,
++ [34205] = &_000145_hash,
++ [65246] = &_000146_hash,
++ [1299] = &_000147_hash,
++ [33165] = &_000148_hash,
++ [22394] = &_000149_hash,
++ [49562] = &_000150_hash,
++ [56881] = &_000151_hash,
++ [13870] = &_000152_hash,
++ [65074] = &_000153_hash,
++ [11553] = &_000154_hash,
++ [43222] = &_000155_hash,
++ [17984] = &_000156_hash,
++ [26811] = &_000157_hash,
++ [30848] = &_000158_hash,
++ [15627] = &_000159_hash,
++ [43101] = &_000160_hash,
++ [4082] = &_000161_hash,
++ [43692] = &_000162_hash,
++ [21622] = &_000163_hash,
++ [50734] = &_000164_hash,
++ [803] = &_000166_hash,
++ [64674] = &_000168_hash,
++ [57538] = &_000170_hash,
++ [42442] = &_000171_hash,
++ [23031] = &_000172_hash,
++ [40663] = &_000173_hash,
++ [51180] = &_000174_hash,
++ [24173] = &_000175_hash,
++ [9286] = &_000176_hash,
++ [49517] = &_000177_hash,
++ [34878] = &_000180_hash,
++ [22819] = &_000181_hash,
++ [64314] = &_000182_hash,
++ [20494] = &_000183_hash,
++ [9483] = &_000184_hash,
++ [26518] = &_000185_hash,
++ [44651] = &_000186_hash,
++ [1188] = &_000187_hash,
++ [36031] = &_000188_hash,
++ [33469] = &_000189_hash,
++ [19672] = &_000190_hash,
++ [3216] = &_000191_hash,
++ [25071] = &_000192_hash,
++ [11744] = &_000194_hash,
++ [2358] = &_000196_hash,
++ [10146] = &_000198_hash,
++ [58709] = &_000199_hash,
++ [64773] = &_000200_hash,
++ [6159] = &_000201_hash,
++ [28617] = &_000202_hash,
++ [61067] = &_000203_hash,
++ [12884] = &_000204_hash,
++ [37308] = &_000205_hash,
++ [59973] = &_000206_hash,
++ [35895] = &_000207_hash,
++ [24951] = &_000208_hash,
++ [3070] = &_000209_hash,
++ [61023] = &_000210_hash,
++ [45702] = &_000211_hash,
++ [5533] = &_000212_hash,
++ [29186] = &_000213_hash,
++ [26311] = &_000214_hash,
++ [40182] = &_000215_hash,
++ [50505] = &_000216_hash,
++ [59061] = &_000217_hash,
++ [27511] = &_000218_hash,
++ [63286] = &_000219_hash,
++ [6678] = &_000220_hash,
++ [23065] = &_000222_hash,
++ [18156] = &_000223_hash,
++ [53757] = &_000224_hash,
++ [53720] = &_000225_hash,
++ [50241] = &_000226_hash,
++ [22498] = &_000227_hash,
++ [10991] = &_000228_hash,
++ [40026] = &_000229_hash,
++ [19995] = &_000230_hash,
++ [30445] = &_000231_hash,
++ [57691] = &_000232_hash,
++ [23150] = &_000233_hash,
++ [9960] = &_000234_hash,
++ [8736] = &_000235_hash,
++ [23750] = &_000237_hash,
++ [18393] = &_000238_hash,
++ [28541] = &_000240_hash,
++ [59944] = &_000241_hash,
++ [35042] = &_000242_hash,
++ [63488] = &_000243_hash,
++ [27286] = &_000244_hash,
++ [46922] = &_000245_hash,
++ [11860] = &_000246_hash,
++ [52928] = &_000247_hash,
++ [46714] = &_000248_hash,
++ [57313] = &_000249_hash,
++ [61978] = &_000250_hash,
++ [61063] = &_000251_hash,
++ [22271] = &_000252_hash,
++ [4214] = &_000253_hash,
++ [46247] = &_000254_hash,
++ [33246] = &_000255_hash,
++ [58325] = &_000257_hash,
++ [47399] = &_000259_hash,
++ [34963] = &_000260_hash,
++ [21221] = &_000261_hash,
++ [32211] = &_000262_hash,
++ [20854] = &_000263_hash,
++ [49351] = &_000264_hash,
++ [52341] = &_000265_hash,
++ [53533] = &_000266_hash,
++ [52267] = &_000267_hash,
++ [46753] = &_000268_hash,
++ [2115] = &_000269_hash,
++ [44017] = &_000271_hash,
++ [13495] = &_000272_hash,
++ [12988] = &_000273_hash,
++ [55227] = &_000274_hash,
++ [47762] = &_000276_hash,
++ [17613] = &_000277_hash,
++ [52037] = &_000278_hash,
++ [5994] = &_000279_hash,
++ [46818] = &_000280_hash,
++ [13467] = &_000281_hash,
++ [61848] = &_000282_hash,
++ [43082] = &_000284_hash,
++ [55732] = &_000286_hash,
++ [2543] = &_000287_hash,
++ [51694] = &_000288_hash,
++ [18402] = &_000289_hash,
++ [38282] = &_000290_hash,
++ [5456] = &_000291_hash,
++ [58261] = &_000292_hash,
++ [24792] = &_000293_hash,
++ [6422] = &_000294_hash,
++ [63953] = &_000295_hash,
++ [27384] = &_000296_hash,
++ [47213] = &_000297_hash,
++ [23548] = &_000298_hash,
++ [47858] = &_000299_hash,
++ [52501] = &_000300_hash,
++ [12475] = &_000301_hash,
++ [52921] = &_000302_hash,
++ [19120] = &_000303_hash,
++ [14355] = &_000304_hash,
++ [30563] = &_000305_hash,
++ [14942] = &_000306_hash,
++ [30969] = &_000307_hash,
++ [57776] = &_000308_hash,
++ [21956] = &_000309_hash,
++ [44050] = &_000310_hash,
++ [2193] = &_000311_hash,
++ [44818] = &_000312_hash,
++ [50616] = &_000313_hash,
++ [49299] = &_000314_hash,
++ [2796] = &_000315_hash,
++ [4190] = &_000316_hash,
++ [11548] = &_000317_hash,
++ [53798] = &_000318_hash,
++ [60370] = &_000319_hash,
++ [35863] = &_000320_hash,
++ [54595] = &_000322_hash,
++ [2808] = &_000323_hash,
++ [24656] = &_000324_hash,
++ [895] = &_000325_hash,
++ [32809] = &_000326_hash,
++ [55621] = &_000327_hash,
++ [1733] = &_000328_hash,
++ [36069] = &_000330_hash,
++ [23714] = &_000331_hash,
++ [26020] = &_000332_hash,
++ [63875] = &_000333_hash,
++ [8919] = &_000335_hash,
++ [23906] = &_000336_hash,
++ [59497] = &_000337_hash,
++ [34782] = &_000338_hash,
++ [40998] = &_000339_hash,
++ [33328] = &_000340_hash,
++ [17866] = &_000341_hash,
++ [38741] = &_000342_hash,
++ [53939] = &_000343_hash,
++ [14658] = &_000344_hash,
++ [42465] = &_000345_hash,
++ [49600] = &_000346_hash,
++ [7391] = &_000347_hash,
++ [43616] = &_000348_hash,
++ [16775] = &_000349_hash,
++ [41393] = &_000350_hash,
++ [10532] = &_000351_hash,
++ [50366] = &_000352_hash,
++ [33324] = &_000353_hash,
++ [38200] = &_000354_hash,
++ [59315] = &_000355_hash,
++ [33916] = &_000356_hash,
++ [36593] = &_000357_hash,
++ [63079] = &_000358_hash,
++ [379] = &_000359_hash,
++ [34248] = &_000360_hash,
++ [27251] = &_000361_hash,
++ [29460] = &_000362_hash,
++ [7461] = &_000363_hash,
++ [9870] = &_000364_hash,
++ [44596] = &_000365_hash,
++ [45157] = &_000366_hash,
++ [55069] = &_000367_hash,
++ [29452] = &_000368_hash,
++ [54888] = &_000369_hash,
++ [31885] = &_000370_hash,
++ [20206] = &_000371_hash,
++ [20325] = &_000373_hash,
++ [18488] = &_000374_hash,
++ [22017] = &_000375_hash,
++ [57485] = &_000376_hash,
++ [49827] = &_000377_hash,
++ [37770] = &_000379_hash,
++ [52668] = &_000380_hash,
++ [13724] = &_000381_hash,
++ [59701] = &_000382_hash,
++ [11954] = &_000383_hash,
++ [9890] = &_000384_hash,
++ [17684] = &_000385_hash,
++ [18158] = &_000386_hash,
++ [61318] = &_000387_hash,
++ [2760] = &_000388_hash,
++ [38444] = &_000390_hash,
++ [55856] = &_000392_hash,
++ [34762] = &_000393_hash,
++ [48360] = &_000394_hash,
++ [40885] = &_000395_hash,
++ [36032] = &_000396_hash,
++ [52057] = &_000397_hash,
++ [12463] = &_000398_hash,
++ [30616] = &_000399_hash,
++ [38680] = &_000400_hash,
++ [41742] = &_000401_hash,
++ [50662] = &_000402_hash,
++ [48440] = &_000403_hash,
++ [34418] = &_000404_hash,
++ [64275] = &_000405_hash,
++ [12231] = &_000406_hash,
++ [53530] = &_000407_hash,
++ [54723] = &_000408_hash,
++ [19490] = &_000409_hash,
++ [11595] = &_000410_hash,
++ [15277] = &_000411_hash,
++ [4811] = &_000412_hash,
++ [42017] = &_000413_hash,
++ [17238] = &_000414_hash,
++ [55439] = &_000415_hash,
++ [45794] = &_000416_hash,
++ [60027] = &_000417_hash,
++ [3750] = &_000418_hash,
++ [11091] = &_000419_hash,
++ [32935] = &_000420_hash,
++ [22809] = &_000422_hash,
++ [60193] = &_000423_hash,
++ [14396] = &_000424_hash,
++ [18101] = &_000425_hash,
++ [46395] = &_000426_hash,
++ [24339] = &_000427_hash,
++ [26065] = &_000428_hash,
++ [43016] = &_000429_hash,
++ [41996] = &_000430_hash,
++ [7371] = &_000431_hash,
++ [32968] = &_000432_hash,
++ [53082] = &_000433_hash,
++ [38798] = &_000434_hash,
++ [12726] = &_000435_hash,
++ [55018] = &_000436_hash,
++ [26114] = &_000437_hash,
++ [31697] = &_000438_hash,
++ [21401] = &_000441_hash,
++ [33193] = &_000442_hash,
++ [52271] = &_000443_hash,
++ [20847] = &_000444_hash,
++ [30754] = &_000445_hash,
++ [54440] = &_000446_hash,
++ [22059] = &_000447_hash,
++ [47566] = &_000448_hash,
++ [22926] = &_000449_hash,
++ [20788] = &_000450_hash,
++ [18162] = &_000451_hash,
++ [65006] = &_000452_hash,
++ [11523] = &_000453_hash,
++ [29207] = &_000454_hash,
++ [18071] = &_000455_hash,
++ [7601] = &_000456_hash,
++ [12773] = &_000457_hash,
++ [61543] = &_000458_hash,
++ [5578] = &_000460_hash,
++ [49050] = &_000461_hash,
++ [51965] = &_000462_hash,
++ [6807] = &_000463_hash,
++ [22982] = &_000464_hash,
++ [36769] = &_000465_hash,
++ [53892] = &_000466_hash,
++ [2547] = &_000467_hash,
++ [53678] = &_000468_hash,
++ [61439] = &_000469_hash,
++ [31287] = &_000470_hash,
++ [6125] = &_000471_hash,
++ [57511] = &_000472_hash,
++ [13001] = &_000473_hash,
++ [62932] = &_000474_hash,
++ [62284] = &_000475_hash,
++ [9472] = &_000476_hash,
++ [26260] = &_000477_hash,
++ [63065] = &_000478_hash,
++ [18949] = &_000479_hash,
++ [29891] = &_000481_hash,
++ [41916] = &_000482_hash,
++ [40474] = &_000483_hash,
++ [63551] = &_000484_hash,
++ [36557] = &_000485_hash,
++ [2994] = &_000486_hash,
++ [5521] = &_000487_hash,
++ [51016] = &_000488_hash,
++ [7644] = &_000489_hash,
++ [55103] = &_000490_hash,
++ [11488] = &_000491_hash,
++ [7184] = &_000492_hash,
++ [36934] = &_000493_hash,
++ [54855] = &_000494_hash,
++ [63193] = &_000495_hash,
++ [12369] = &_000496_hash,
++ [15828] = &_000497_hash,
++ [61322] = &_000498_hash,
++ [5412] = &_000499_hash,
++ [28089] = &_000500_hash,
++ [64306] = &_000502_hash,
++ [24071] = &_000503_hash,
++ [50308] = &_000504_hash,
++ [38790] = &_000505_hash,
++ [9838] = &_000506_hash,
++ [18983] = &_000507_hash,
++ [9656] = &_000508_hash,
++ [18950] = &_000509_hash,
++ [59749] = &_000510_hash,
++ [20465] = &_000511_hash,
++ [4765] = &_000512_hash,
++ [16169] = &_000513_hash,
++ [6930] = &_000514_hash,
++ [16926] = &_000515_hash,
++ [35218] = &_000516_hash,
++ [19956] = &_000517_hash,
++ [55255] = &_000518_hash,
++ [861] = &_000519_hash,
++ [26574] = &_000520_hash,
++ [26794] = &_000521_hash,
++ [2133] = &_000522_hash,
++ [44616] = &_000523_hash,
++ [12840] = &_000524_hash,
++ [60426] = &_000525_hash,
++ [18133] = &_000526_hash,
++ [30479] = &_000527_hash,
++ [3219] = &_000528_hash,
++ [36488] = &_000529_hash,
++ [62043] = &_000530_hash,
++ [21714] = &_000532_hash,
++ [48007] = &_000533_hash,
++ [49969] = &_000534_hash,
++ [7701] = &_000535_hash,
++ [11521] = &_000536_hash,
++ [4269] = &_000537_hash,
++ [37627] = &_000539_hash,
++ [33555] = &_000540_hash,
++ [25900] = &_000541_hash,
++ [31709] = &_000542_hash,
++ [44626] = &_000544_hash,
++ [1679] = &_000545_hash,
++ [18349] = &_000546_hash,
++ [15338] = &_000547_hash,
++ [57935] = &_000548_hash,
++ [55850] = &_000549_hash,
++ [36063] = &_000550_hash,
++ [56674] = &_000551_hash,
++ [21379] = &_000552_hash,
++ [18507] = &_000553_hash,
++ [55719] = &_000554_hash,
++ [31210] = &_000555_hash,
++ [36207] = &_000556_hash,
++ [64180] = &_000557_hash,
++ [41770] = &_000558_hash,
++ [11600] = &_000559_hash,
++ [36638] = &_000560_hash,
++ [25576] = &_000561_hash,
++ [7000] = &_000562_hash,
++ [34187] = &_000563_hash,
++ [58533] = &_000564_hash,
++ [5083] = &_000565_hash,
++ [62614] = &_000566_hash,
++ [20085] = &_000567_hash,
++ [1135] = &_000568_hash,
++ [25613] = &_000569_hash,
++ [9541] = &_000570_hash,
++ [30577] = &_000571_hash,
++ [35722] = &_000572_hash,
++ [60407] = &_000573_hash,
++ [29465] = &_000574_hash,
++ [46891] = &_000575_hash,
++ [43633] = &_000576_hash,
++ [53743] = &_000577_hash,
++ [16196] = &_000578_hash,
++ [34425] = &_000580_hash,
++ [9646] = &_000581_hash,
++ [59756] = &_000583_hash,
++ [45524] = &_000584_hash,
++ [36702] = &_000585_hash,
++ [36747] = &_000586_hash,
++ [33643] = &_000588_hash,
++ [29158] = &_000589_hash,
++ [49662] = &_000590_hash,
++ [51062] = &_000591_hash,
++ [64755] = &_000592_hash,
++ [4829] = &_000594_hash,
++ [16413] = &_000595_hash,
++ [36125] = &_000596_hash,
++ [36293] = &_000597_hash,
++ [39712] = &_000598_hash,
++ [32160] = &_000599_hash,
++ [22962] = &_000600_hash,
++ [32001] = &_000601_hash,
++ [35828] = &_000602_hash,
++ [3106] = &_000603_hash,
++ [34039] = &_000604_hash,
++ [22393] = &_000605_hash,
++ [3560] = &_000606_hash,
++ [28195] = &_000607_hash,
++ [2062] = &_000608_hash,
++ [64001] = &_000609_hash,
++ [42407] = &_000610_hash,
++ [6253] = &_000611_hash,
++ [58640] = &_000612_hash,
++ [32195] = &_000613_hash,
++ [26197] = &_000614_hash,
++ [58003] = &_000615_hash,
++ [21662] = &_000616_hash,
++ [45750] = &_000617_hash,
++ [25798] = &_000618_hash,
++ [41052] = &_000619_hash,
++ [14096] = &_000620_hash,
++ [1439] = &_000621_hash,
++ [29074] = &_000622_hash,
++ [2376] = &_000623_hash,
++ [24068] = &_000625_hash,
++ [59519] = &_000627_hash,
++ [9893] = &_000628_hash,
++ [39979] = &_000630_hash,
++ [41540] = &_000631_hash,
++ [43200] = &_000633_hash,
++ [33494] = &_000634_hash,
++ [2028] = &_000635_hash,
++ [27206] = &_000636_hash,
++ [24302] = &_000637_hash,
++ [38112] = &_000638_hash,
++ [46538] = &_000639_hash,
++ [35228] = &_000641_hash,
++ [8339] = &_000642_hash,
++ [45349] = &_000643_hash,
++ [48404] = &_000644_hash,
++ [37865] = &_000645_hash,
++ [45763] = &_000646_hash,
++ [62347] = &_000647_hash,
++ [21644] = &_000648_hash,
++ [53135] = &_000649_hash,
++ [25095] = &_000650_hash,
++ [11697] = &_000651_hash,
++ [27003] = &_000652_hash,
++ [32464] = &_000653_hash,
++ [65339] = &_000654_hash,
++ [44248] = &_000655_hash,
++ [16] = &_000656_hash,
++ [29933] = &_000657_hash,
++ [34359] = &_000658_hash,
++ [3154] = &_000659_hash,
++ [59308] = &_000660_hash,
++ [61661] = &_000661_hash,
++ [23959] = &_000662_hash,
++ [6724] = &_000663_hash,
++ [54587] = &_000664_hash,
++ [28479] = &_000665_hash,
++ [56583] = &_000666_hash,
++ [64644] = &_000667_hash,
++ [23284] = &_000668_hash,
++ [61655] = &_000669_hash,
++ [20980] = &_000670_hash,
++ [19794] = &_000671_hash,
++ [30036] = &_000672_hash,
++ [25649] = &_000673_hash,
++ [47428] = &_000674_hash,
++ [47737] = &_000675_hash,
++ [8367] = &_000676_hash,
++ [2987] = &_000677_hash,
++ [50962] = &_000678_hash,
++ [10760] = &_000679_hash,
++ [31678] = &_000680_hash,
++ [48558] = &_000681_hash,
++ [2274] = &_000682_hash,
++ [831] = &_000683_hash,
++ [61833] = &_000684_hash,
++ [56864] = &_000685_hash,
++ [31040] = &_000686_hash,
++ [22196] = &_000687_hash,
++ [20076] = &_000688_hash,
++ [52821] = &_000689_hash,
++ [21896] = &_000690_hash,
++ [49367] = &_000691_hash,
++ [64731] = &_000692_hash,
++ [37110] = &_000693_hash,
++ [53694] = &_000694_hash,
++ [6175] = &_000695_hash,
++ [33048] = &_000696_hash,
++ [34746] = &_000697_hash,
++ [23777] = &_000698_hash,
++ [53828] = &_000699_hash,
++ [26539] = &_000700_hash,
++ [42628] = &_000701_hash,
++ [59115] = &_000702_hash,
++ [4456] = &_000703_hash,
++ [63619] = &_000704_hash,
++ [47329] = &_000705_hash,
++ [13534] = &_000706_hash,
++ [36955] = &_000707_hash,
++ [9841] = &_000708_hash,
++ [19308] = &_000709_hash,
++ [52439] = &_000710_hash,
++ [24680] = &_000711_hash,
++ [55652] = &_000712_hash,
++ [7842] = &_000713_hash,
++ [6500] = &_000714_hash,
++ [33485] = &_000715_hash,
++ [49920] = &_000716_hash,
++ [50750] = &_000717_hash,
++ [22318] = &_000718_hash,
++ [44599] = &_000719_hash,
++ [46403] = &_000720_hash,
++ [44534] = &_000721_hash,
++ [303] = &_000722_hash,
++ [22960] = &_000723_hash,
++ [10544] = &_000724_hash,
++ [8236] = &_000725_hash,
++ [21239] = &_000726_hash,
++ [24712] = &_000727_hash,
++ [37974] = &_000728_hash,
++ [62082] = &_000729_hash,
++ [57054] = &_000730_hash,
++ [53265] = &_000731_hash,
++ [52239] = &_000732_hash,
++ [14753] = &_000733_hash,
++ [60221] = &_000736_hash,
++ [27142] = &_000737_hash,
++ [14295] = &_000738_hash,
++ [25923] = &_000739_hash,
++ [29213] = &_000740_hash,
++ [31865] = &_000741_hash,
++ [4764] = &_000742_hash,
++ [10574] = &_000743_hash,
++ [55766] = &_000744_hash,
++ [22483] = &_000745_hash,
++ [61047] = &_000746_hash,
++ [41044] = &_000747_hash,
++ [58978] = &_000748_hash,
++ [47578] = &_000749_hash,
++ [7730] = &_000750_hash,
++ [15904] = &_000751_hash,
++ [25081] = &_000752_hash,
++ [45743] = &_000753_hash,
++ [58830] = &_000754_hash,
++ [59081] = &_000755_hash,
++ [47533] = &_000756_hash,
++ [11305] = &_000757_hash,
++ [29096] = &_000758_hash,
++ [19749] = &_000759_hash,
++ [56290] = &_000760_hash,
++ [44963] = &_000761_hash,
++ [30026] = &_000762_hash,
++ [27694] = &_000763_hash,
++ [8089] = &_000764_hash,
++ [38583] = &_000765_hash,
++ [1144] = &_000766_hash,
++ [20939] = &_000767_hash,
++ [22231] = &_000768_hash,
++ [17486] = &_000769_hash,
++ [51811] = &_000770_hash,
++ [62746] = &_000771_hash,
++ [19181] = &_000772_hash,
++ [52661] = &_000773_hash,
++ [51148] = &_000774_hash,
++ [49864] = &_000775_hash,
++ [37978] = &_000776_hash,
++ [6280] = &_000777_hash,
++ [12961] = &_000778_hash,
++ [60541] = &_000779_hash,
++ [37021] = &_000780_hash,
++ [26028] = &_000781_hash,
++ [41363] = &_000782_hash,
++ [42016] = &_000783_hash,
++ [58540] = &_000784_hash,
++ [2326] = &_000785_hash,
++ [60981] = &_000786_hash,
++ [13313] = &_000787_hash,
++ [44188] = &_000788_hash,
++ [34638] = &_000789_hash,
++ [20304] = &_000790_hash,
++ [60975] = &_000791_hash,
++ [12244] = &_000792_hash,
++ [16266] = &_000793_hash,
++ [3395] = &_000794_hash,
++ [63321] = &_000795_hash,
++ [20509] = &_000796_hash,
++ [57365] = &_000797_hash,
++ [47449] = &_000798_hash,
++ [56693] = &_000799_hash,
++ [33936] = &_000800_hash,
++ [52548] = &_000801_hash,
++ [18733] = &_000802_hash,
++ [15560] = &_000803_hash,
++ [13231] = &_000804_hash,
++ [64518] = &_000806_hash,
++ [54551] = &_000807_hash,
++ [54359] = &_000809_hash,
++ [46503] = &_000810_hash,
++ [22258] = &_000811_hash,
++ [39434] = &_000812_hash,
++ [52887] = &_000813_hash,
++ [3079] = &_000814_hash,
++ [18813] = &_000816_hash,
++ [47614] = &_000817_hash,
++ [38186] = &_000818_hash,
++ [57652] = &_000819_hash,
++ [10078] = &_000820_hash,
++ [17910] = &_000821_hash,
++ [13567] = &_000822_hash,
++ [21531] = &_000823_hash,
++ [46135] = &_000824_hash,
++ [10582] = &_000825_hash,
++ [4662] = &_000826_hash,
++ [17969] = &_000827_hash,
++ [43943] = &_000828_hash,
++ [46740] = &_000829_hash,
++ [26716] = &_000830_hash,
++ [58230] = &_000831_hash,
++ [252] = &_000832_hash,
++ [15704] = &_000833_hash,
++ [59765] = &_000834_hash,
++ [7322] = &_000835_hash,
++ [43950] = &_000836_hash,
++ [53093] = &_000837_hash,
++ [21646] = &_000838_hash,
++ [57063] = &_000839_hash,
++ [17132] = &_000840_hash,
++ [53922] = &_000842_hash,
++ [49155] = &_000843_hash,
++ [16356] = &_000844_hash,
++ [60037] = &_000845_hash,
++ [17299] = &_000846_hash,
++ [25678] = &_000847_hash,
++ [15494] = &_000848_hash,
++ [15159] = &_000849_hash,
++ [28442] = &_000850_hash,
++ [3514] = &_000851_hash,
++ [38151] = &_000852_hash,
++ [4173] = &_000853_hash,
++ [7258] = &_000854_hash,
++ [65109] = &_000855_hash,
++ [58827] = &_000856_hash,
++ [33575] = &_000857_hash,
++ [33078] = &_000858_hash,
++ [47234] = &_000859_hash,
++ [39193] = &_000860_hash,
++ [10950] = &_000861_hash,
++ [15613] = &_000862_hash,
++ [16046] = &_000863_hash,
++ [50172] = &_000864_hash,
++ [26107] = &_000865_hash,
++ [60543] = &_000866_hash,
++ [56337] = &_000867_hash,
++ [47626] = &_000868_hash,
++ [24409] = &_000869_hash,
++ [11732] = &_000870_hash,
++ [30010] = &_000871_hash,
++ [51480] = &_000872_hash,
++ [28518] = &_000873_hash,
++ [2061] = &_000874_hash,
++ [10885] = &_000875_hash,
++ [29517] = &_000876_hash,
++ [45913] = &_000877_hash,
++ [51774] = &_000878_hash,
++ [62298] = &_000879_hash,
++ [8892] = &_000880_hash,
++ [64891] = &_000881_hash,
++ [64537] = &_000882_hash,
++ [38103] = &_000883_hash,
++ [55518] = &_000884_hash,
++ [27419] = &_000885_hash,
++ [13869] = &_000886_hash,
++ [53150] = &_000887_hash,
++ [2884] = &_000888_hash,
++ [10362] = &_000889_hash,
++ [6961] = &_000890_hash,
++ [56975] = &_000891_hash,
++ [12508] = &_000892_hash,
++ [54597] = &_000893_hash,
++ [60499] = &_000894_hash,
++ [50109] = &_000895_hash,
++ [944] = &_000896_hash,
++ [29229] = &_000897_hash,
++ [37648] = &_000898_hash,
++ [1568] = &_000899_hash,
++ [61793] = &_000900_hash,
++ [53395] = &_000901_hash,
++ [5519] = &_000902_hash,
++ [28637] = &_000903_hash,
++ [53687] = &_000904_hash,
++ [6783] = &_000905_hash,
++ [43312] = &_000906_hash,
++ [2373] = &_000907_hash,
++ [33482] = &_000908_hash,
++ [24886] = &_000909_hash,
++ [48154] = &_000910_hash,
++ [12838] = &_000911_hash,
++ [47012] = &_000912_hash,
++ [23691] = &_000913_hash,
++ [37924] = &_000914_hash,
++ [47346] = &_000915_hash,
++ [5624] = &_000916_hash,
++ [16842] = &_000918_hash,
++ [60399] = &_000919_hash,
++ [2312] = &_000920_hash,
++ [59212] = &_000921_hash,
++ [11923] = &_000922_hash,
++ [10805] = &_000923_hash,
++ [36577] = &_000924_hash,
++ [60948] = &_000925_hash,
++ [21711] = &_000926_hash,
++ [54830] = &_000927_hash,
++ [1822] = &_000928_hash,
++ [44573] = &_000929_hash,
++ [23805] = &_000930_hash,
++ [46061] = &_000931_hash,
++ [33996] = &_000932_hash,
++ [40856] = &_000933_hash,
++ [16299] = &_000934_hash,
++ [63446] = &_000935_hash,
++ [31205] = &_000936_hash,
++ [33100] = &_000937_hash,
++ [40843] = &_000938_hash,
++ [23712] = &_000939_hash,
++ [36962] = &_000940_hash,
++ [9845] = &_000942_hash,
++ [13738] = &_000943_hash,
++ [58099] = &_000944_hash,
++ [31869] = &_000945_hash,
++ [63501] = &_000946_hash,
++ [58188] = &_000947_hash,
++ [51338] = &_000948_hash,
++ [54999] = &_000949_hash,
++ [2434] = &_000950_hash,
++ [34958] = &_000951_hash,
++ [41487] = &_000952_hash,
++ [11941] = &_000953_hash,
++ [56728] = &_000954_hash,
++ [48150] = &_000955_hash,
++ [13905] = &_000956_hash,
++ [9054] = &_000957_hash,
++ [10758] = &_000958_hash,
++ [48056] = &_000959_hash,
++ [24231] = &_000960_hash,
++ [43748] = &_000961_hash,
++ [24237] = &_000962_hash,
++ [14899] = &_000963_hash,
++ [38652] = &_000964_hash,
++ [65013] = &_000965_hash,
++ [16645] = &_000967_hash,
++ [55031] = &_000968_hash,
++ [23978] = &_000969_hash,
++ [24208] = &_000970_hash,
++ [18098] = &_000971_hash,
++ [2303] = &_000972_hash,
++ [3338] = &_000973_hash,
++ [39219] = &_000974_hash,
++ [18609] = &_000976_hash,
++ [64412] = &_000977_hash,
++ [16962] = &_000978_hash,
++ [26346] = &_000979_hash,
++ [39380] = &_000980_hash,
++ [33020] = &_000981_hash,
++ [22639] = &_000982_hash,
++ [6453] = &_000983_hash,
++ [58602] = &_000984_hash,
++ [50920] = &_000985_hash,
++ [56471] = &_000987_hash,
++ [15378] = &_000988_hash,
++ [3589] = &_000989_hash,
++ [12558] = &_000990_hash,
++ [3201] = &_000991_hash,
++ [28175] = &_000993_hash,
++ [43888] = &_000995_hash,
++ [56010] = &_000996_hash,
++ [32456] = &_000997_hash,
++ [29036] = &_000998_hash,
++ [32330] = &_000999_hash,
++ [25603] = &_001000_hash,
++ [17675] = &_001001_hash,
++ [36271] = &_001002_hash,
++ [49814] = &_001003_hash,
++ [5693] = &_001004_hash,
++ [51009] = &_001005_hash,
++ [62835] = &_001006_hash,
++ [27139] = &_001007_hash,
++ [45155] = &_001008_hash,
++ [17186] = &_001009_hash,
++ [46734] = &_001010_hash,
++ [61957] = &_001011_hash,
++ [51389] = &_001012_hash,
++ [23687] = &_001013_hash,
++ [46825] = &_001014_hash,
++ [52287] = &_001016_hash,
++ [31408] = &_001017_hash,
++ [5396] = &_001018_hash,
++ [62247] = &_001019_hash,
++ [7946] = &_001020_hash,
++ [58210] = &_001022_hash,
++ [15618] = &_001023_hash,
++ [61225] = &_001024_hash,
++ [13163] = &_001025_hash,
++ [36882] = &_001026_hash,
++ [8719] = &_001027_hash,
++ [8539] = &_001028_hash,
++ [27134] = &_001029_hash,
++ [53335] = &_001030_hash,
++ [30381] = &_001031_hash,
++ [32336] = &_001032_hash,
++ [32867] = &_001033_hash,
++ [1238] = &_001034_hash,
++ [8174] = &_001035_hash,
++ [6368] = &_001036_hash,
++ [29170] = &_001037_hash,
++ [9687] = &_001038_hash,
++ [61116] = &_001039_hash,
++ [31681] = &_001040_hash,
++ [22119] = &_001041_hash,
++ [59885] = &_001042_hash,
++ [47789] = &_001043_hash,
++ [5796] = &_001044_hash,
++ [43376] = &_001045_hash,
++ [36706] = &_001046_hash,
++ [47945] = &_001047_hash,
++ [33208] = &_001048_hash,
++ [55431] = &_001049_hash,
++ [25291] = &_001050_hash,
++ [58805] = &_001051_hash,
++ [23708] = &_001052_hash,
++ [29278] = &_001053_hash,
++ [1272] = &_001054_hash,
++ [10199] = &_001055_hash,
++ [34666] = &_001056_hash,
++ [49317] = &_001057_hash,
++ [18604] = &_001058_hash,
++ [42545] = &_001059_hash,
++ [33157] = &_001060_hash,
++ [53343] = &_001061_hash,
++ [64842] = &_001062_hash,
++ [61865] = &_001063_hash,
++ [54010] = &_001064_hash,
++ [64638] = &_001065_hash,
++ [20480] = &_001066_hash,
++ [23341] = &_001067_hash,
++ [10350] = &_001068_hash,
++ [30970] = &_001069_hash,
++ [62360] = &_001070_hash,
++ [52537] = &_001071_hash,
++ [51386] = &_001072_hash,
++ [48731] = &_001073_hash,
++ [58061] = &_001074_hash,
++ [40405] = &_001075_hash,
++ [57198] = &_001076_hash,
++ [19290] = &_001077_hash,
++ [60403] = &_001078_hash,
++ [2738] = &_001079_hash,
++ [59721] = &_001080_hash,
++ [24980] = &_001081_hash,
++ [55896] = &_001082_hash,
++ [57055] = &_001083_hash,
++ [46010] = &_001084_hash,
++ [712] = &_001085_hash,
++ [37747] = &_001086_hash,
++ [59996] = &_001087_hash,
++ [45219] = &_001088_hash,
++ [16917] = &_001089_hash,
++ [7415] = &_001090_hash,
++ [29576] = &_001091_hash,
++ [13584] = &_001092_hash,
++ [53364] = &_001093_hash,
++ [14813] = &_001094_hash,
++ [25543] = &_001095_hash,
++ [29240] = &_001096_hash,
++ [38748] = &_001097_hash,
++ [34848] = &_001099_hash,
++ [46226] = &_001100_hash,
++ [55526] = &_001101_hash,
++ [48271] = &_001102_hash,
++ [24658] = &_001104_hash,
++ [46964] = &_001105_hash,
++ [2637] = &_001106_hash,
++ [55601] = &_001107_hash,
++ [60275] = &_001108_hash,
++ [52645] = &_001109_hash,
++ [11712] = &_001110_hash,
++ [51364] = &_001111_hash,
++ [5106] = &_001112_hash,
++ [24710] = &_001113_hash,
++ [13101] = &_001114_hash,
++ [46963] = &_001115_hash,
++ [6779] = &_001116_hash,
++ [9237] = &_001117_hash,
++ [61524] = &_001118_hash,
++ [38247] = &_001119_hash,
++ [48715] = &_001120_hash,
++ [40797] = &_001121_hash,
++ [46780] = &_001122_hash,
++ [22071] = &_001123_hash,
++ [49735] = &_001125_hash,
++ [63925] = &_001126_hash,
++ [30902] = &_001127_hash,
++ [39828] = &_001128_hash,
++ [53089] = &_001129_hash,
++ [6394] = &_001130_hash,
++ [5116] = &_001131_hash,
++ [50702] = &_001132_hash,
++ [59565] = &_001133_hash,
++ [61042] = &_001134_hash,
++ [14533] = &_001135_hash,
++ [23807] = &_001136_hash,
++ [24296] = &_001137_hash,
++ [8808] = &_001138_hash,
++ [52383] = &_001139_hash,
++ [30487] = &_001140_hash,
++ [30125] = &_001141_hash,
++ [40665] = &_001142_hash,
++ [60809] = &_001143_hash,
++ [4842] = &_001144_hash,
++ [13955] = &_001145_hash,
++ [33237] = &_001146_hash,
++ [40673] = &_001147_hash,
++ [48026] = &_001148_hash,
++ [64033] = &_001149_hash,
++ [13879] = &_001150_hash,
++ [60114] = &_001151_hash,
++ [19472] = &_001152_hash,
++ [33552] = &_001153_hash,
++ [28575] = &_001154_hash,
++ [19696] = &_001155_hash,
++ [19742] = &_001156_hash,
++ [15286] = &_001157_hash,
++ [24629] = &_001158_hash,
++ [28382] = &_001159_hash,
++ [18962] = &_001160_hash,
++ [45796] = &_001161_hash,
++ [51632] = &_001162_hash,
++ [16907] = &_001163_hash,
++ [49336] = &_001164_hash,
++ [25316] = &_001165_hash,
++ [39978] = &_001166_hash,
++ [8091] = &_001167_hash,
++ [30680] = &_001168_hash,
++ [2066] = &_001169_hash,
++ [24271] = &_001170_hash,
++ [34934] = &_001171_hash,
++ [29208] = &_001172_hash,
++ [18591] = &_001173_hash,
++ [24373] = &_001174_hash,
++ [41485] = &_001175_hash,
++ [45487] = &_001176_hash,
++ [29299] = &_001177_hash,
++ [53920] = &_001178_hash,
++ [25407] = &_001179_hash,
++ [5525] = &_001180_hash,
++ [3531] = &_001181_hash,
++ [25143] = &_001182_hash,
++ [56046] = &_001183_hash,
++ [34693] = &_001184_hash,
++ [48644] = &_001185_hash,
++ [21226] = &_001186_hash,
++ [14051] = &_001187_hash,
++ [7715] = &_001188_hash,
++ [30413] = &_001189_hash,
++ [13681] = &_001190_hash,
++ [6554] = &_001191_hash,
++ [12228] = &_001192_hash,
++ [25497] = &_001193_hash,
++ [52228] = &_001194_hash,
++ [49069] = &_001195_hash,
++ [26961] = &_001196_hash,
++ [13768] = &_001197_hash,
++ [56185] = &_001198_hash,
++ [41838] = &_001199_hash,
++ [60119] = &_001200_hash,
++ [3112] = &_001201_hash,
++ [62001] = &_001202_hash,
++ [35888] = &_001203_hash,
++ [64177] = &_001207_hash,
++ [57222] = &_001208_hash,
++ [5260] = &_001209_hash,
++ [55517] = &_001210_hash,
++ [18186] = &_001211_hash,
++ [14257] = &_001212_hash,
++ [26846] = &_001213_hash,
++ [56097] = &_001214_hash,
++ [55151] = &_001215_hash,
++ [2999] = &_001216_hash,
++ [3602] = &_001217_hash,
++ [18460] = &_001218_hash,
++ [3507] = &_001219_hash,
++ [57847] = &_001220_hash,
++ [58077] = &_001221_hash,
++ [2659] = &_001222_hash,
++ [39846] = &_001223_hash,
++ [18629] = &_001224_hash,
++ [2723] = &_001225_hash,
++ [45230] = &_001226_hash,
++ [26941] = &_001227_hash,
++ [4344] = &_001228_hash,
++ [8487] = &_001229_hash,
++ [9901] = &_001230_hash,
++ [43061] = &_001231_hash,
++ [42551] = &_001232_hash,
++ [63272] = &_001233_hash,
++ [37771] = &_001234_hash,
++ [28261] = &_001235_hash,
++ [44694] = &_001236_hash,
++ [8573] = &_001237_hash,
++ [60174] = &_001238_hash,
++ [28040] = &_001239_hash,
++ [39423] = &_001240_hash,
++ [98] = &_001241_hash,
++ [62874] = &_001242_hash,
++ [38726] = &_001243_hash,
++ [55348] = &_001244_hash,
++ [10997] = &_001245_hash,
++ [88] = &_001246_hash,
++ [60639] = &_001247_hash,
++ [48159] = &_001248_hash,
++ [47899] = &_001249_hash,
++ [25367] = &_001250_hash,
++ [55681] = &_001251_hash,
++ [44716] = &_001252_hash,
++ [26161] = &_001253_hash,
++ [55347] = &_001254_hash,
++ [14518] = &_001255_hash,
++ [8887] = &_001256_hash,
++ [23009] = &_001257_hash,
++ [27962] = &_001258_hash,
++ [20004] = &_001259_hash,
++ [61750] = &_001260_hash,
++ [11661] = &_001261_hash,
++ [37118] = &_001262_hash,
++ [9370] = &_001263_hash,
++ [15099] = &_001264_hash,
++ [2404] = &_001265_hash,
++ [64074] = &_001266_hash,
++ [7538] = &_001267_hash,
++ [19736] = &_001268_hash,
++ [8199] = &_001269_hash,
++ [40711] = &_001270_hash,
++ [47859] = &_001271_hash,
++ [53925] = &_001272_hash,
++ [46888] = &_001273_hash,
++ [21783] = &_001274_hash,
++ [37305] = &_001275_hash,
++ [18414] = &_001276_hash,
++ [62423] = &_001277_hash,
++ [30371] = &_001278_hash,
++ [32617] = &_001279_hash,
++ [14530] = &_001281_hash,
++ [48623] = &_001282_hash,
++ [12845] = &_001283_hash,
++ [8895] = &_001284_hash,
++ [33661] = &_001285_hash,
++ [23178] = &_001286_hash,
++ [54706] = &_001287_hash,
++ [27133] = &_001288_hash,
++ [52745] = &_001289_hash,
++ [64420] = &_001290_hash,
++ [25617] = &_001291_hash,
++ [25414] = &_001292_hash,
++ [20445] = &_001293_hash,
++ [64006] = &_001294_hash,
++ [52646] = &_001295_hash,
++ [30281] = &_001296_hash,
++ [3761] = &_001297_hash,
++ [44345] = &_001298_hash,
++ [14713] = &_001299_hash,
++ [26043] = &_001300_hash,
++ [41679] = &_001301_hash,
++ [6267] = &_001302_hash,
++ [22247] = &_001304_hash,
++ [9440] = &_001305_hash,
++ [54676] = &_001306_hash,
++ [53982] = &_001308_hash,
++ [9467] = &_001309_hash,
++ [53419] = &_001310_hash,
++ [1424] = &_001311_hash,
++ [17561] = &_001312_hash,
++ [28161] = &_001313_hash,
++ [57262] = &_001314_hash,
++ [61071] = &_001315_hash,
++ [20067] = &_001316_hash,
++ [34321] = &_001317_hash,
++ [56199] = &_001318_hash,
++ [29070] = &_001319_hash,
++ [15698] = &_001320_hash,
++ [14173] = &_001321_hash,
++ [41224] = &_001322_hash,
++ [56438] = &_001323_hash,
++ [41894] = &_001324_hash,
++ [20885] = &_001325_hash,
++ [23275] = &_001326_hash,
++ [45043] = &_001327_hash,
++ [22143] = &_001328_hash,
++ [38029] = &_001329_hash,
++ [55343] = &_001330_hash,
++ [40624] = &_001331_hash,
++ [26476] = &_001332_hash,
++ [43128] = &_001333_hash,
++ [45115] = &_001334_hash,
++ [32958] = &_001335_hash,
++ [43091] = &_001336_hash,
++ [33299] = &_001337_hash,
++ [55021] = &_001338_hash,
++ [5509] = &_001339_hash,
++ [53012] = &_001340_hash,
++ [57849] = &_001341_hash,
++ [63282] = &_001342_hash,
++ [27883] = &_001343_hash,
++ [1670] = &_001344_hash,
++ [24095] = &_001345_hash,
++ [47810] = &_001346_hash,
++ [40759] = &_001347_hash,
++ [42139] = &_001348_hash,
++ [50484] = &_001349_hash,
++ [2305] = &_001350_hash,
++ [59832] = &_001351_hash,
++ [17662] = &_001352_hash,
++ [58943] = &_001353_hash,
++ [37417] = &_001356_hash,
++ [25127] = &_001357_hash,
++ [15006] = &_001358_hash,
++ [54292] = &_001359_hash,
++ [30642] = &_001360_hash,
++ [39939] = &_001361_hash,
++ [34818] = &_001362_hash,
++ [23378] = &_001363_hash,
++ [24090] = &_001364_hash,
++ [11111] = &_001365_hash,
++ [64141] = &_001366_hash,
++ [46457] = &_001367_hash,
++ [57927] = &_001368_hash,
++ [58877] = &_001371_hash,
++ [13880] = &_001372_hash,
++ [62888] = &_001373_hash,
++ [57962] = &_001374_hash,
++ [9117] = &_001375_hash,
++ [52012] = &_001376_hash,
++ [49246] = &_001377_hash,
++ [52701] = &_001378_hash,
++ [29857] = &_001379_hash,
++ [49420] = &_001380_hash,
++ [45897] = &_001381_hash,
++ [15141] = &_001382_hash,
++ [24177] = &_001383_hash,
++ [10325] = &_001384_hash,
++ [52861] = &_001385_hash,
++ [28922] = &_001386_hash,
++ [31089] = &_001387_hash,
++ [63084] = &_001388_hash,
++ [26245] = &_001389_hash,
++ [60000] = &_001390_hash,
++ [56935] = &_001391_hash,
++ [37569] = &_001392_hash,
++ [6446] = &_001394_hash,
++ [35883] = &_001395_hash,
++ [9123] = &_001396_hash,
++ [51457] = &_001397_hash,
++ [1787] = &_001398_hash,
++ [10135] = &_001399_hash,
++ [952] = &_001400_hash,
++ [53578] = &_001401_hash,
++ [9923] = &_001402_hash,
++ [45249] = &_001403_hash,
++ [52860] = &_001404_hash,
++ [29558] = &_001405_hash,
++ [40556] = &_001406_hash,
++ [53210] = &_001407_hash,
++ [2506] = &_001408_hash,
++ [48262] = &_001409_hash,
++ [46939] = &_001410_hash,
++ [17901] = &_001411_hash,
++ [27204] = &_001412_hash,
++ [52516] = &_001413_hash,
++ [55885] = &_001414_hash,
++ [6681] = &_001415_hash,
++ [42360] = &_001416_hash,
++ [20259] = &_001417_hash,
++ [8874] = &_001418_hash,
++ [53363] = &_001419_hash,
++ [17500] = &_001420_hash,
++ [63988] = &_001421_hash,
++ [26378] = &_001422_hash,
++ [7768] = &_001423_hash,
++ [12938] = &_001424_hash,
++ [6755] = &_001425_hash,
++ [43806] = &_001426_hash,
++ [15976] = &_001427_hash,
++ [2732] = &_001428_hash,
++ [2519] = &_001429_hash,
++ [14340] = &_001430_hash,
++ [34772] = &_001431_hash,
++ [36433] = &_001432_hash,
++ [16068] = &_001433_hash,
++ [22052] = &_001434_hash,
++ [8929] = &_001435_hash,
++ [63220] = &_001436_hash,
++ [18246] = &_001437_hash,
++ [37678] = &_001438_hash,
++ [4932] = &_001439_hash,
++ [46960] = &_001440_hash,
++ [16909] = &_001441_hash,
++ [44429] = &_001442_hash,
++ [59514] = &_001443_hash,
++ [62760] = &_001444_hash,
++ [41841] = &_001445_hash,
++ [25417] = &_001446_hash,
++ [63230] = &_001447_hash,
++ [39532] = &_001448_hash,
++ [24688] = &_001449_hash,
++ [18555] = &_001450_hash,
++ [54499] = &_001451_hash,
++ [10719] = &_001452_hash,
++ [1644] = &_001453_hash,
++ [15109] = &_001454_hash,
++ [15787] = &_001455_hash,
++ [57869] = &_001456_hash,
++ [54445] = &_001457_hash,
++ [19398] = &_001458_hash,
++ [9488] = &_001459_hash,
++ [12587] = &_001460_hash,
++ [17124] = &_001461_hash,
++ [53665] = &_001462_hash,
++ [40386] = &_001463_hash,
++ [39444] = &_001464_hash,
++ [28873] = &_001465_hash,
++ [11290] = &_001466_hash,
++ [51313] = &_001467_hash,
++ [23354] = &_001469_hash,
++ [49559] = &_001470_hash,
++ [49312] = &_001471_hash,
++ [36333] = &_001472_hash,
++ [59349] = &_001473_hash,
++ [60316] = &_001474_hash,
++ [2546] = &_001475_hash,
++ [57483] = &_001476_hash,
++ [14569] = &_001478_hash,
++ [61842] = &_001481_hash,
++ [32923] = &_001482_hash,
++ [57471] = &_001483_hash,
++ [83] = &_001484_hash,
++ [40242] = &_001485_hash,
++ [42578] = &_001486_hash,
++ [62037] = &_001487_hash,
++ [8131] = &_001488_hash,
++ [752] = &_001489_hash,
++ [56376] = &_001490_hash,
++ [22290] = &_001491_hash,
++ [46232] = &_001492_hash,
++ [35132] = &_001493_hash,
++ [23825] = &_001494_hash,
++ [43262] = &_001495_hash,
++ [8138] = &_001496_hash,
++ [31489] = &_001497_hash,
++ [57578] = &_001498_hash,
++ [28007] = &_001499_hash,
++ [28688] = &_001500_hash,
++ [19319] = &_001501_hash,
++ [12575] = &_001502_hash,
++ [62762] = &_001504_hash,
++ [47450] = &_001505_hash,
++ [1869] = &_001506_hash,
++ [51225] = &_001507_hash,
++ [19561] = &_001508_hash,
++ [64894] = &_001509_hash,
++ [6829] = &_001510_hash,
++ [30644] = &_001511_hash,
++ [63391] = &_001512_hash,
++ [11655] = &_001514_hash,
++ [28229] = &_001515_hash,
++ [22382] = &_001516_hash,
++ [22649] = &_001517_hash,
++ [42619] = &_001518_hash,
++ [19761] = &_001519_hash,
++ [56990] = &_001520_hash,
++ [19531] = &_001521_hash,
++ [26514] = &_001522_hash,
++ [56773] = &_001523_hash,
++ [15563] = &_001524_hash,
++ [26212] = &_001525_hash,
++ [29203] = &_001526_hash,
++ [32768] = &_001527_hash,
++ [15110] = &_001528_hash,
++ [3885] = &_001529_hash,
++ [13788] = &_001530_hash,
++ [27875] = &_001531_hash,
++ [54959] = &_001532_hash,
++ [20945] = &_001533_hash,
++ [59640] = &_001534_hash,
++ [4693] = &_001535_hash,
++ [13793] = &_001536_hash,
++ [25659] = &_001537_hash,
++ [18734] = &_001538_hash,
++ [17869] = &_001539_hash,
++ [26270] = &_001540_hash,
++ [18458] = &_001541_hash,
++ [58468] = &_001542_hash,
++ [61257] = &_001543_hash,
++ [39946] = &_001544_hash,
++ [52382] = &_001545_hash,
++ [18428] = &_001546_hash,
++ [31069] = &_001547_hash,
++ [61614] = &_001548_hash,
++ [60044] = &_001549_hash,
++ [36818] = &_001550_hash,
++ [54353] = &_001551_hash,
++ [55994] = &_001552_hash,
++ [65142] = &_001553_hash,
++ [1664] = &_001554_hash,
++ [32212] = &_001555_hash,
++ [63087] = &_001556_hash,
++ [29916] = &_001557_hash,
++ [54912] = &_001558_hash,
++ [10318] = &_001559_hash,
++ [44031] = &_001560_hash,
++ [50108] = &_001561_hash,
++ [57812] = &_001562_hash,
++ [63190] = &_001563_hash,
++ [48246] = &_001564_hash,
++ [3744] = &_001565_hash,
++ [56321] = &_001566_hash,
++ [42691] = &_001567_hash,
++ [62052] = &_001568_hash,
++ [21999] = &_001569_hash,
++ [13672] = &_001570_hash,
++ [20648] = &_001571_hash,
++ [42500] = &_001572_hash,
++ [22795] = &_001573_hash,
++ [19496] = &_001574_hash,
++ [35556] = &_001575_hash,
++ [57144] = &_001576_hash,
++ [1019] = &_001577_hash,
++ [28818] = &_001578_hash,
++ [52880] = &_001579_hash,
++ [6543] = &_001580_hash,
++ [18895] = &_001581_hash,
++ [857] = &_001582_hash,
++ [45966] = &_001583_hash,
++ [11785] = &_001584_hash,
++ [7736] = &_001585_hash,
++ [4308] = &_001586_hash,
++ [51095] = &_001587_hash,
++ [12101] = &_001588_hash,
++ [427] = &_001589_hash,
++ [4021] = &_001590_hash,
++ [54201] = &_001591_hash,
++ [5615] = &_001592_hash,
++ [16234] = &_001593_hash,
++ [51718] = &_001594_hash,
++ [42390] = &_001595_hash,
++ [55391] = &_001596_hash,
++ [28539] = &_001597_hash,
++ [943] = &_001598_hash,
++ [32683] = &_001599_hash,
++ [39182] = &_001600_hash,
++ [33198] = &_001601_hash,
++ [39446] = &_001602_hash,
++ [16394] = &_001603_hash,
++ [30791] = &_001604_hash,
++ [35530] = &_001605_hash,
++ [53193] = &_001607_hash,
++ [39401] = &_001608_hash,
++ [28624] = &_001609_hash,
++ [12066] = &_001610_hash,
++ [63492] = &_001611_hash,
++ [14897] = &_001612_hash,
++ [29641] = &_001613_hash,
++ [10165] = &_001614_hash,
++ [60046] = &_001615_hash,
++ [12429] = &_001616_hash,
++ [32788] = &_001617_hash,
++ [52698] = &_001618_hash,
++ [13130] = &_001620_hash,
++ [28643] = &_001621_hash,
++ [50666] = &_001622_hash,
++ [35126] = &_001623_hash,
++ [33593] = &_001624_hash,
++ [27547] = &_001625_hash,
++ [5484] = &_001626_hash,
++ [26642] = &_001627_hash,
++ [25586] = &_001628_hash,
++ [58757] = &_001629_hash,
++ [18701] = &_001630_hash,
++ [26271] = &_001631_hash,
++ [23829] = &_001632_hash,
++ [63659] = &_001634_hash,
++ [26603] = &_001635_hash,
++ [25704] = &_001636_hash,
++ [21149] = &_001637_hash,
++ [36900] = &_001638_hash,
++ [61577] = &_001640_hash,
++ [54095] = &_001641_hash,
++ [31650] = &_001642_hash,
++ [48970] = &_001643_hash,
++ [49357] = &_001644_hash,
++ [33835] = &_001645_hash,
++ [46861] = &_001646_hash,
++ [1428] = &_001647_hash,
++ [36247] = &_001648_hash,
++ [21600] = &_001649_hash,
++ [24747] = &_001650_hash,
++ [51012] = &_001651_hash,
++ [38974] = &_001653_hash,
++ [30549] = &_001655_hash,
++ [40146] = &_001656_hash,
++ [41756] = &_001657_hash,
++ [37010] = &_001658_hash,
++ [35072] = &_001660_hash,
++ [2114] = &_001661_hash,
++ [48424] = &_001662_hash,
++ [61522] = &_001663_hash,
++ [50633] = &_001664_hash,
++ [2283] = &_001665_hash,
++ [61763] = &_001666_hash,
++ [48195] = &_001667_hash,
++ [31000] = &_001668_hash,
++ [23856] = &_001669_hash,
++ [37421] = &_001670_hash,
++ [10019] = &_001672_hash,
++ [5148] = &_001673_hash,
++ [14363] = &_001674_hash,
++ [57354] = &_001675_hash,
++ [62460] = &_001676_hash,
++ [45174] = &_001677_hash,
++ [31054] = &_001678_hash,
++ [62195] = &_001679_hash,
++ [14976] = &_001680_hash,
++ [55676] = &_001681_hash,
++ [1025] = &_001682_hash,
++ [6921] = &_001683_hash,
++ [22158] = &_001684_hash,
++ [18050] = &_001685_hash,
++ [18612] = &_001686_hash,
++ [31107] = &_001687_hash,
++ [45212] = &_001688_hash,
++ [29599] = &_001689_hash,
++ [30827] = &_001690_hash,
++ [25086] = &_001691_hash,
++ [27146] = &_001692_hash,
++ [2467] = &_001693_hash,
++ [45786] = &_001694_hash,
++ [51909] = &_001695_hash,
++ [64604] = &_001696_hash,
++ [57819] = &_001697_hash,
++ [11001] = &_001698_hash,
++ [20326] = &_001699_hash,
++ [12682] = &_001700_hash,
++ [28932] = &_001701_hash,
++ [53491] = &_001702_hash,
++ [63894] = &_001703_hash,
++ [51191] = &_001704_hash,
++ [59759] = &_001705_hash,
++ [15691] = &_001706_hash,
++ [38786] = &_001707_hash,
++ [51546] = &_001708_hash,
++ [10121] = &_001709_hash,
++ [60786] = &_001710_hash,
++ [19952] = &_001712_hash,
++ [7271] = &_001715_hash,
++ [10729] = &_001716_hash,
++ [28883] = &_001717_hash,
++ [52042] = &_001718_hash,
++ [49606] = &_001719_hash,
++ [33243] = &_001720_hash,
++ [57341] = &_001721_hash,
++ [7978] = &_001722_hash,
++ [36330] = &_001723_hash,
++ [39035] = &_001724_hash,
++ [34498] = &_001725_hash,
++ [19789] = &_001726_hash,
++ [55685] = &_001727_hash,
++ [55419] = &_001728_hash,
++ [27798] = &_001729_hash,
++ [54599] = &_001730_hash,
++ [65522] = &_001731_hash,
++ [38111] = &_001732_hash,
++ [57077] = &_001733_hash,
++ [53053] = &_001734_hash,
++ [14190] = &_001735_hash,
++ [47037] = &_001736_hash,
++ [33296] = &_001737_hash,
++ [23803] = &_001738_hash,
++ [48773] = &_001739_hash,
++ [63014] = &_001740_hash,
++ [64392] = &_001741_hash,
++ [44203] = &_001742_hash,
++ [47717] = &_001743_hash,
++ [38399] = &_001744_hash,
++ [30385] = &_001745_hash,
++ [61693] = &_001746_hash,
++ [32049] = &_001747_hash,
++ [26133] = &_001748_hash,
++ [45038] = &_001749_hash,
++ [8582] = &_001751_hash,
++ [38182] = &_001753_hash,
++ [62457] = &_001754_hash,
++ [27937] = &_001755_hash,
++ [3795] = &_001756_hash,
++ [23228] = &_001757_hash,
++ [56511] = &_001758_hash,
++ [47807] = &_001759_hash,
++ [60528] = &_001760_hash,
++ [51858] = &_001761_hash,
++ [49183] = &_001762_hash,
++ [33807] = &_001763_hash,
++ [34791] = &_001764_hash,
++ [8150] = &_001765_hash,
++ [19691] = &_001767_hash,
++ [20519] = &_001770_hash,
++ [17144] = &_001771_hash,
++ [19394] = &_001772_hash,
++ [53730] = &_001773_hash,
++ [8447] = &_001774_hash,
++ [30004] = &_001775_hash,
++ [40939] = &_001776_hash,
++ [53674] = &_001777_hash,
++ [11820] = &_001778_hash,
++ [23401] = &_001779_hash,
++ [9641] = &_001780_hash,
++ [2721] = &_001781_hash,
++ [19700] = &_001782_hash,
++ [1619] = &_001783_hash,
++ [23272] = &_001784_hash,
++ [56424] = &_001785_hash,
++ [14483] = &_001786_hash,
++ [1599] = &_001787_hash,
++ [27604] = &_001788_hash,
++ [37219] = &_001789_hash,
++ [31958] = &_001790_hash,
++ [5273] = &_001791_hash,
++ [46712] = &_001792_hash,
++ [27259] = &_001794_hash,
++ [23674] = &_001797_hash,
++ [40509] = &_001798_hash,
++ [17549] = &_001799_hash,
++ [53992] = &_001800_hash,
++ [24062] = &_001801_hash,
++ [23371] = &_001802_hash,
++ [19115] = &_001803_hash,
++ [51532] = &_001804_hash,
++ [45193] = &_001805_hash,
++ [29340] = &_001806_hash,
++ [5048] = &_001807_hash,
++ [65040] = &_001808_hash,
++ [39155] = &_001809_hash,
++ [31406] = &_001810_hash,
++ [49182] = &_001811_hash,
++ [37695] = &_001812_hash,
++ [28432] = &_001813_hash,
++ [23482] = &_001814_hash,
++ [56550] = &_001815_hash,
++ [7374] = &_001816_hash,
++ [57050] = &_001817_hash,
++ [57011] = &_001818_hash,
++ [27529] = &_001819_hash,
++ [33662] = &_001820_hash,
++ [4314] = &_001821_hash,
++ [22812] = &_001822_hash,
++ [47555] = &_001823_hash,
++ [38737] = &_001824_hash,
++ [36101] = &_001826_hash,
++ [877] = &_001828_hash,
++ [2639] = &_001830_hash,
++ [64343] = &_001831_hash,
++ [11150] = &_001832_hash,
++ [46486] = &_001833_hash,
++ [18719] = &_001834_hash,
++ [49574] = &_001835_hash,
++ [37617] = &_001836_hash,
++ [3045] = &_001837_hash,
++ [39395] = &_001838_hash,
++ [15297] = &_001839_hash,
++ [50862] = &_001840_hash,
++ [28877] = &_001841_hash,
++ [57117] = &_001842_hash,
++ [62064] = &_001843_hash,
++ [64610] = &_001844_hash,
++ [24065] = &_001845_hash,
++ [24846] = &_001846_hash,
++ [8624] = &_001847_hash,
++ [14000] = &_001848_hash,
++ [31148] = &_001849_hash,
++ [62594] = &_001850_hash,
++ [39210] = &_001851_hash,
++ [2077] = &_001852_hash,
++ [23497] = &_001853_hash,
++ [34512] = &_001854_hash,
++ [16268] = &_001856_hash,
++ [14562] = &_001857_hash,
++ [17606] = &_001859_hash,
++ [25654] = &_001860_hash,
++ [56078] = &_001861_hash,
++ [61088] = &_001862_hash,
++ [53442] = &_001863_hash,
++ [54456] = &_001864_hash,
++ [22038] = &_001865_hash,
++ [58394] = &_001866_hash,
++ [38953] = &_001867_hash,
++ [16109] = &_001868_hash,
++ [3812] = &_001869_hash,
++ [5084] = &_001870_hash,
++ [41893] = &_001871_hash,
++ [45486] = &_001872_hash,
++ [50226] = &_001873_hash,
++ [63694] = &_001874_hash,
++ [56763] = &_001875_hash,
++ [20905] = &_001876_hash,
++ [13080] = &_001877_hash,
++ [54700] = &_001878_hash,
++ [40947] = &_001879_hash,
++ [32645] = &_001880_hash,
++ [57462] = &_001881_hash,
++ [33853] = &_001882_hash,
++ [57940] = &_001883_hash,
++ [45583] = &_001884_hash,
++ [49704] = &_001885_hash,
++ [39232] = &_001886_hash,
++ [5140] = &_001887_hash,
++ [45726] = &_001888_hash,
++ [35392] = &_001889_hash,
++ [44895] = &_001890_hash,
++ [17219] = &_001891_hash,
++ [50185] = &_001892_hash,
++ [3062] = &_001893_hash,
++ [9784] = &_001894_hash,
++ [52513] = &_001895_hash,
++ [52678] = &_001896_hash,
++ [36258] = &_001897_hash,
++ [2885] = &_001898_hash,
++ [11588] = &_001899_hash,
++ [65337] = &_001900_hash,
++ [19329] = &_001901_hash,
++ [23791] = &_001902_hash,
++ [38078] = &_001903_hash,
++ [42270] = &_001904_hash,
++ [30475] = &_001905_hash,
++ [25564] = &_001906_hash,
++ [33581] = &_001907_hash,
++ [59644] = &_001908_hash,
++ [5800] = &_001909_hash,
++ [42227] = &_001910_hash,
++ [54718] = &_001911_hash,
++ [41255] = &_001912_hash,
++ [31502] = &_001913_hash,
++ [44929] = &_001914_hash,
++ [47332] = &_001915_hash,
++ [10107] = &_001916_hash,
++ [47137] = &_001917_hash,
++ [26017] = &_001918_hash,
++ [41477] = &_001919_hash,
++ [6656] = &_001920_hash,
++ [50198] = &_001921_hash,
++ [48909] = &_001922_hash,
++ [9474] = &_001923_hash,
++ [58554] = &_001924_hash,
++ [45747] = &_001925_hash,
++ [43151] = &_001926_hash,
++ [15626] = &_001927_hash,
++ [17364] = &_001928_hash,
++ [15077] = &_001929_hash,
++ [31912] = &_001930_hash,
++ [2803] = &_001931_hash,
++ [42715] = &_001932_hash,
++ [12552] = &_001933_hash,
++ [13099] = &_001934_hash,
++ [40973] = &_001935_hash,
++ [20988] = &_001936_hash,
++ [16939] = &_001937_hash,
++ [48587] = &_001938_hash,
++ [52889] = &_001939_hash,
++ [38776] = &_001940_hash,
++ [58608] = &_001941_hash,
++ [4360] = &_001942_hash,
++ [53447] = &_001943_hash,
++ [25355] = &_001944_hash,
++ [14955] = &_001946_hash,
++ [5428] = &_001947_hash,
++ [11063] = &_001948_hash,
++ [59852] = &_001949_hash,
++ [45648] = &_001950_hash,
++ [21855] = &_001951_hash,
++ [54573] = &_001952_hash,
++ [56316] = &_001953_hash,
++};
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/tools/gcc/size_overflow_plugin.c linux-3.4-pax/tools/gcc/size_overflow_plugin.c
+--- linux-3.4/tools/gcc/size_overflow_plugin.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.4-pax/tools/gcc/size_overflow_plugin.c 2012-05-20 06:00:00.000000000 +0200
+@@ -0,0 +1,1188 @@
++/*
++ * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against INT_MAX; on overflow an event is logged and the triggering process is killed.
++ *
++ * Usage:
++ * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o size_overflow_plugin.so size_overflow_plugin.c
++ * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
++ */
++
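++/*
++ * Illustrative sketch (assumed example, not taken from the original plugin
++ * sources): a declaration whose first argument carries an allocation size
++ * could be annotated as below; attribute positions are 1-based.  Call sites
++ * of functions without the attribute are matched against the generated
++ * size_overflow_hash.h table instead.
++ *
++ *   void *my_alloc(unsigned long size, int flags)
++ *       __attribute__((size_overflow(1)));
++ */
++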
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "intl.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "toplev.h"
++#include "function.h"
++#include "tree-flow.h"
++#include "plugin.h"
++#include "gimple.h"
++#include "c-common.h"
++#include "diagnostic.h"
++#include "cfgloop.h"
++
++struct size_overflow_hash {
++ struct size_overflow_hash *next;
++ const char *name;
++ const char *file;
++ unsigned short param1:1;
++ unsigned short param2:1;
++ unsigned short param3:1;
++ unsigned short param4:1;
++ unsigned short param5:1;
++ unsigned short param6:1;
++ unsigned short param7:1;
++ unsigned short param8:1;
++ unsigned short param9:1;
++};
++
++#include "size_overflow_hash.h"
++
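++/*
++ * The generated size_overflow_hash.h is expected to provide entries of the
++ * type above, chained via ->next, plus the size_overflow_hash[] index array
++ * shown earlier in this patch.  A hypothetical entry (function and file
++ * names invented for illustration) would look like:
++ *
++ *   static struct size_overflow_hash _001655_hash = {
++ *       .next = NULL, .name = "my_alloc", .file = "mm/my_alloc.c",
++ *       .param1 = 1,
++ *   };
++ */
++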
++#define __unused __attribute__((__unused__))
++#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
++#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
++#define BEFORE_STMT true
++#define AFTER_STMT false
++#define CREATE_NEW_VAR NULL_TREE
++
++int plugin_is_GPL_compatible;
++void debug_gimple_stmt(gimple gs);
++
++static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var);
++static tree signed_size_overflow_type;
++static tree unsigned_size_overflow_type;
++static tree report_size_overflow_decl;
++static tree const_char_ptr_type_node;
++static unsigned int handle_function(void);
++static bool file_match = true;
++
++static struct plugin_info size_overflow_plugin_info = {
++ .version = "20120521beta",
++ .help = "no-size_overflow\tturn off size overflow checking\n",
++};
++
++static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
++{
++ unsigned int arg_count = type_num_arguments(*node);
++
++ for (; args; args = TREE_CHAIN(args)) {
++ tree position = TREE_VALUE(args);
++ if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) {
++ error("handle_size_overflow_attribute: overflow parameter outside range.");
++ *no_add_attrs = true;
++ }
++ }
++ return NULL_TREE;
++}
++
++static struct attribute_spec no_size_overflow_attr = {
++ .name = "size_overflow",
++ .min_length = 1,
++ .max_length = -1,
++ .decl_required = false,
++ .type_required = true,
++ .function_type_required = true,
++ .handler = handle_size_overflow_attribute
++};
++
++static void register_attributes(void __unused *event_data, void __unused *data)
++{
++ register_attribute(&no_size_overflow_attr);
++}
++
++// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
++static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
++{
++#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
++#define cwmixa( in ) { cwfold( in, m, k, h ); }
++#define cwmixb( in ) { cwfold( in, n, h, k ); }
++
++ const unsigned int m = 0x57559429;
++ const unsigned int n = 0x5052acdb;
++ const unsigned int *key4 = (const unsigned int *)key;
++ unsigned int h = len;
++ unsigned int k = len + seed + n;
++ unsigned long long p;
++
++ while (len >= 8) {
++ cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
++ len -= 8;
++ }
++ if (len >= 4) {
++ cwmixb(key4[0]) key4 += 1;
++ len -= 4;
++ }
++ if (len)
++ cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
++ cwmixb(h ^ (k + n));
++ return k ^ h;
++
++#undef cwfold
++#undef cwmixa
++#undef cwmixb
++}
++
++static inline unsigned int get_hash_num(const char *fndecl, const char *loc_file, unsigned int seed)
++{
++ unsigned int fn = CrapWow(fndecl, strlen(fndecl), seed) & 0xffff;
++ unsigned int file = CrapWow(loc_file, strlen(loc_file), seed) & 0xffff;
++
++ if (file_match)
++ return fn ^ file;
++ else
++ return fn;
++}
++
++static inline tree get_original_function_decl(tree fndecl)
++{
++ if (DECL_ABSTRACT_ORIGIN(fndecl))
++ return DECL_ABSTRACT_ORIGIN(fndecl);
++ return fndecl;
++}
++
++static inline gimple get_def_stmt(tree node)
++{
++ gcc_assert(TREE_CODE(node) == SSA_NAME);
++ return SSA_NAME_DEF_STMT(node);
++}
++
++static struct size_overflow_hash *get_function_hash(tree fndecl, const char *loc_file)
++{
++ unsigned int hash;
++ struct size_overflow_hash *entry;
++ const char *func_name = NAME(fndecl);
++
++ hash = get_hash_num(NAME(fndecl), loc_file, 0);
++
++ entry = size_overflow_hash[hash];
++ while (entry) {
++ if (!strcmp(entry->name, func_name) && (!file_match || !strcmp(entry->file, loc_file)))
++ return entry;
++ entry = entry->next;
++ }
++
++ return NULL;
++}
++
++static void check_arg_type(tree var)
++{
++ tree type = TREE_TYPE(var);
++ enum tree_code code = TREE_CODE(type);
++
++ gcc_assert(code == INTEGER_TYPE || code == ENUMERAL_TYPE ||
++ (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == VOID_TYPE) ||
++ (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE));
++}
++
++static int find_arg_number(tree arg, tree func)
++{
++ tree var;
++ bool match = false;
++ unsigned int argnum = 1;
++
++ if (TREE_CODE(arg) == SSA_NAME)
++ arg = SSA_NAME_VAR(arg);
++
++ for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) {
++ if (strcmp(NAME(arg), NAME(var))) {
++ argnum++;
++ continue;
++ }
++ check_arg_type(var);
++
++ match = true;
++ if (!TYPE_UNSIGNED(TREE_TYPE(var)))
++ return 0;
++ break;
++ }
++ if (!match) {
++ warning(0, "find_arg_number: cannot find the %s argument in %s", NAME(arg), NAME(func));
++ return 0;
++ }
++ return argnum;
++}
++
++static void print_missing_msg(tree func, const char *filename, unsigned int argnum)
++{
++ unsigned int new_hash;
++ location_t loc = DECL_SOURCE_LOCATION(func);
++ const char *curfunc = NAME(func);
++
++ new_hash = get_hash_num(curfunc, filename, 0);
++ inform(loc, "Function %s is missing from the size_overflow hash table +%s+%d+%u+%s+", curfunc, curfunc, argnum, new_hash, filename);
++}
++
++static void check_missing_attribute(tree arg)
++{
++ tree type, func = get_original_function_decl(current_function_decl);
++ unsigned int argnum;
++ struct size_overflow_hash *hash;
++ const char *filename;
++
++ gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
++
++ type = TREE_TYPE(arg);
++ // skip function pointers
++ if (TREE_CODE(type) == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == FUNCTION_TYPE)
++ return;
++
++ if (lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(func))))
++ return;
++
++ argnum = find_arg_number(arg, func);
++ if (argnum == 0)
++ return;
++
++ filename = DECL_SOURCE_FILE(func);
++
++ hash = get_function_hash(func, filename);
++ if (!hash) {
++ print_missing_msg(func, filename, argnum);
++ return;
++ }
++
++#define check_param(num) \
++ if (num == argnum && hash->param##num) \
++ return;
++ check_param(1);
++ check_param(2);
++ check_param(3);
++ check_param(4);
++ check_param(5);
++ check_param(6);
++ check_param(7);
++ check_param(8);
++ check_param(9);
++#undef check_param
++
++ print_missing_msg(func, filename, argnum);
++}
++
++static tree create_new_var(tree type)
++{
++ tree new_var = create_tmp_var(type, "cicus");
++
++ add_referenced_var(new_var);
++ mark_sym_for_renaming(new_var);
++ return new_var;
++}
++
++static bool is_bool(tree node)
++{
++ tree type;
++
++ if (node == NULL_TREE)
++ return false;
++
++ type = TREE_TYPE(node);
++ if (!INTEGRAL_TYPE_P(type))
++ return false;
++ if (TREE_CODE(type) == BOOLEAN_TYPE)
++ return true;
++ if (TYPE_PRECISION(type) == 1)
++ return true;
++ return false;
++}
++
++static tree cast_a_tree(tree type, tree var)
++{
++ gcc_assert(fold_convertible_p(type, var));
++
++ return fold_convert(type, var);
++}
++
++static gimple build_cast_stmt(tree type, tree var, tree new_var, location_t loc)
++{
++ gimple assign;
++
++ if (new_var == CREATE_NEW_VAR)
++ new_var = create_new_var(type);
++
++ assign = gimple_build_assign(new_var, cast_a_tree(type, var));
++ gimple_set_location(assign, loc);
++ gimple_set_lhs(assign, make_ssa_name(new_var, assign));
++
++ return assign;
++}
++
++static tree create_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, bool before)
++{
++ tree oldstmt_rhs1;
++ enum tree_code code;
++ gimple stmt;
++ gimple_stmt_iterator gsi;
++
++ if (!*potentionally_overflowed)
++ return NULL_TREE;
++
++ if (rhs1 == NULL_TREE) {
++ debug_gimple_stmt(oldstmt);
++ error("create_assign: rhs1 is NULL_TREE");
++ gcc_unreachable();
++ }
++
++ oldstmt_rhs1 = gimple_assign_rhs1(oldstmt);
++ code = TREE_CODE(oldstmt_rhs1);
++ if (code == PARM_DECL || (code == SSA_NAME && gimple_code(get_def_stmt(oldstmt_rhs1)) == GIMPLE_NOP))
++ check_missing_attribute(oldstmt_rhs1);
++
++ stmt = build_cast_stmt(signed_size_overflow_type, rhs1, CREATE_NEW_VAR, gimple_location(oldstmt));
++ gsi = gsi_for_stmt(oldstmt);
++ if (lookup_stmt_eh_lp(oldstmt) != 0) {
++ basic_block next_bb, cur_bb;
++ edge e;
++
++ gcc_assert(before == false);
++ gcc_assert(stmt_can_throw_internal(oldstmt));
++ gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
++ gcc_assert(!gsi_end_p(gsi));
++
++ cur_bb = gimple_bb(oldstmt);
++ next_bb = cur_bb->next_bb;
++ e = find_edge(cur_bb, next_bb);
++ gcc_assert(e != NULL);
++ gcc_assert(e->flags & EDGE_FALLTHRU);
++
++ gsi = gsi_after_labels(next_bb);
++ gcc_assert(!gsi_end_p(gsi));
++ before = true;
++ }
++ if (before)
++ gsi_insert_before(&gsi, stmt, GSI_NEW_STMT);
++ else
++ gsi_insert_after(&gsi, stmt, GSI_NEW_STMT);
++ update_stmt(stmt);
++ pointer_set_insert(visited, oldstmt);
++ return gimple_get_lhs(stmt);
++}
++
++static tree dup_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, tree rhs2, tree __unused rhs3)
++{
++ tree new_var, lhs = gimple_get_lhs(oldstmt);
++ gimple stmt;
++ gimple_stmt_iterator gsi;
++
++ if (!*potentionally_overflowed)
++ return NULL_TREE;
++
++ if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
++ rhs1 = gimple_assign_rhs1(oldstmt);
++ rhs1 = create_assign(visited, potentionally_overflowed, oldstmt, rhs1, BEFORE_STMT);
++ }
++ if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
++ rhs2 = gimple_assign_rhs2(oldstmt);
++ rhs2 = create_assign(visited, potentionally_overflowed, oldstmt, rhs2, BEFORE_STMT);
++ }
++
++ stmt = gimple_copy(oldstmt);
++ gimple_set_location(stmt, gimple_location(oldstmt));
++
++ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
++ gimple_assign_set_rhs_code(stmt, MULT_EXPR);
++
++ if (is_bool(lhs))
++ new_var = SSA_NAME_VAR(lhs);
++ else
++ new_var = create_new_var(signed_size_overflow_type);
++ new_var = make_ssa_name(new_var, stmt);
++ gimple_set_lhs(stmt, new_var);
++
++ if (rhs1 != NULL_TREE) {
++ if (!gimple_assign_cast_p(oldstmt))
++ rhs1 = cast_a_tree(signed_size_overflow_type, rhs1);
++ gimple_assign_set_rhs1(stmt, rhs1);
++ }
++
++ if (rhs2 != NULL_TREE)
++ gimple_assign_set_rhs2(stmt, rhs2);
++#if BUILDING_GCC_VERSION >= 4007
++ if (rhs3 != NULL_TREE)
++ gimple_assign_set_rhs3(stmt, rhs3);
++#endif
++ gimple_set_vuse(stmt, gimple_vuse(oldstmt));
++ gimple_set_vdef(stmt, gimple_vdef(oldstmt));
++
++ gsi = gsi_for_stmt(oldstmt);
++ gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
++ update_stmt(stmt);
++ pointer_set_insert(visited, oldstmt);
++ return gimple_get_lhs(stmt);
++}
++
++static gimple overflow_create_phi_node(gimple oldstmt, tree var)
++{
++ basic_block bb;
++ gimple phi;
++ gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
++
++ bb = gsi_bb(gsi);
++
++ phi = create_phi_node(var, bb);
++ gsi = gsi_last(phi_nodes(bb));
++ gsi_remove(&gsi, false);
++
++ gsi = gsi_for_stmt(oldstmt);
++ gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
++ gimple_set_bb(phi, bb);
++ return phi;
++}
++
++static tree signed_cast_constant(tree node)
++{
++ gcc_assert(is_gimple_constant(node));
++
++ return cast_a_tree(signed_size_overflow_type, node);
++}
++
++static basic_block create_a_first_bb(void)
++{
++ basic_block first_bb;
++
++ first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
++ if (dom_info_available_p(CDI_DOMINATORS))
++ set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
++ return first_bb;
++}
++
++static gimple cast_old_phi_arg(gimple oldstmt, tree arg, tree new_var, unsigned int i)
++{
++ basic_block bb;
++ gimple newstmt, def_stmt;
++ gimple_stmt_iterator gsi;
++
++ newstmt = build_cast_stmt(signed_size_overflow_type, arg, new_var, gimple_location(oldstmt));
++ if (TREE_CODE(arg) == SSA_NAME) {
++ def_stmt = get_def_stmt(arg);
++ if (gimple_code(def_stmt) != GIMPLE_NOP) {
++ gsi = gsi_for_stmt(def_stmt);
++ gsi_insert_after(&gsi, newstmt, GSI_NEW_STMT);
++ return newstmt;
++ }
++ }
++
++ bb = gimple_phi_arg_edge(oldstmt, i)->src;
++ if (bb->index == 0)
++ bb = create_a_first_bb();
++ gsi = gsi_after_labels(bb);
++ gsi_insert_before(&gsi, newstmt, GSI_NEW_STMT);
++ return newstmt;
++}
++
++static gimple handle_new_phi_arg(tree arg, tree new_var, tree new_rhs)
++{
++ gimple newstmt;
++ gimple_stmt_iterator gsi;
++ void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update);
++ gimple def_newstmt = get_def_stmt(new_rhs);
++
++ gsi_insert = gsi_insert_after;
++ gsi = gsi_for_stmt(def_newstmt);
++
++ switch (gimple_code(get_def_stmt(arg))) {
++ case GIMPLE_PHI:
++ newstmt = gimple_build_assign(new_var, new_rhs);
++ gsi = gsi_after_labels(gimple_bb(def_newstmt));
++ gsi_insert = gsi_insert_before;
++ break;
++ case GIMPLE_ASM:
++ case GIMPLE_CALL:
++ newstmt = gimple_build_assign(new_var, new_rhs);
++ break;
++ case GIMPLE_ASSIGN:
++ newstmt = gimple_build_assign(new_var, gimple_get_lhs(def_newstmt));
++ break;
++ default:
++		/* unknown gimple_code (handle_new_phi_arg) */
++ gcc_unreachable();
++ }
++
++ gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt));
++ gsi_insert(&gsi, newstmt, GSI_NEW_STMT);
++ update_stmt(newstmt);
++ return newstmt;
++}
++
++static tree build_new_phi_arg(struct pointer_set_t *visited, bool *potentionally_overflowed, tree arg, tree new_var)
++{
++ gimple newstmt;
++ tree new_rhs;
++
++ new_rhs = expand(visited, potentionally_overflowed, arg);
++
++ if (new_rhs == NULL_TREE)
++ return NULL_TREE;
++
++ newstmt = handle_new_phi_arg(arg, new_var, new_rhs);
++ return gimple_get_lhs(newstmt);
++}
++
++static tree build_new_phi(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt)
++{
++ gimple phi;
++ tree new_var = create_new_var(signed_size_overflow_type);
++ unsigned int i, n = gimple_phi_num_args(oldstmt);
++
++ pointer_set_insert(visited, oldstmt);
++ phi = overflow_create_phi_node(oldstmt, new_var);
++ for (i = 0; i < n; i++) {
++ tree arg, lhs;
++
++ arg = gimple_phi_arg_def(oldstmt, i);
++ if (is_gimple_constant(arg))
++ arg = signed_cast_constant(arg);
++ lhs = build_new_phi_arg(visited, potentionally_overflowed, arg, new_var);
++ if (lhs == NULL_TREE)
++ lhs = gimple_get_lhs(cast_old_phi_arg(oldstmt, arg, new_var, i));
++ add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
++ }
++
++ update_stmt(phi);
++ return gimple_phi_result(phi);
++}
++
++static tree handle_unary_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
++{
++ gimple def_stmt = get_def_stmt(var);
++ tree new_rhs1, rhs1 = gimple_assign_rhs1(def_stmt);
++
++ *potentionally_overflowed = true;
++ new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
++ if (new_rhs1 == NULL_TREE) {
++ if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
++ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
++ else
++ return create_assign(visited, potentionally_overflowed, def_stmt, rhs1, AFTER_STMT);
++ }
++ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, NULL_TREE, NULL_TREE);
++}
++
++static tree handle_unary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
++{
++ gimple def_stmt = get_def_stmt(var);
++ tree rhs1 = gimple_assign_rhs1(def_stmt);
++
++ if (is_gimple_constant(rhs1))
++ return dup_assign(visited, potentionally_overflowed, def_stmt, signed_cast_constant(rhs1), NULL_TREE, NULL_TREE);
++
++ gcc_assert(TREE_CODE(rhs1) != COND_EXPR);
++ switch (TREE_CODE(rhs1)) {
++ case SSA_NAME:
++ return handle_unary_rhs(visited, potentionally_overflowed, var);
++
++ case ARRAY_REF:
++ case BIT_FIELD_REF:
++ case ADDR_EXPR:
++ case COMPONENT_REF:
++ case INDIRECT_REF:
++#if BUILDING_GCC_VERSION >= 4006
++ case MEM_REF:
++#endif
++ case PARM_DECL:
++ case TARGET_MEM_REF:
++ case VAR_DECL:
++ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
++
++ default:
++ debug_gimple_stmt(def_stmt);
++ debug_tree(rhs1);
++ gcc_unreachable();
++ }
++}
++
++static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
++{
++ gimple cond_stmt;
++ gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
++
++ cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
++ gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
++ update_stmt(cond_stmt);
++}
++
++static tree create_string_param(tree string)
++{
++ tree i_type, a_type;
++ int length = TREE_STRING_LENGTH(string);
++
++ gcc_assert(length > 0);
++
++ i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
++ a_type = build_array_type(char_type_node, i_type);
++
++ TREE_TYPE(string) = a_type;
++ TREE_CONSTANT(string) = 1;
++ TREE_READONLY(string) = 1;
++
++ return build1(ADDR_EXPR, ptr_type_node, string);
++}
++
++static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg)
++{
++ gimple func_stmt, def_stmt;
++ tree current_func, loc_file, loc_line;
++ expanded_location xloc;
++ gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
++
++ def_stmt = get_def_stmt(arg);
++ xloc = expand_location(gimple_location(def_stmt));
++
++ if (!gimple_has_location(def_stmt)) {
++ xloc = expand_location(gimple_location(stmt));
++ if (!gimple_has_location(stmt))
++ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
++ }
++
++ loc_line = build_int_cstu(unsigned_type_node, xloc.line);
++
++ loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
++ loc_file = create_string_param(loc_file);
++
++ current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
++ current_func = create_string_param(current_func);
++
++ // void report_size_overflow(const char *file, unsigned int line, const char *func)
++ func_stmt = gimple_build_call(report_size_overflow_decl, 3, loc_file, loc_line, current_func);
++
++ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
++}
++
++static void __unused print_the_code_insertions(gimple stmt)
++{
++ location_t loc = gimple_location(stmt);
++
++ inform(loc, "Integer size_overflow check applied here.");
++}
++
++static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value)
++{
++ basic_block cond_bb, join_bb, bb_true;
++ edge e;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++
++ cond_bb = gimple_bb(stmt);
++ gsi_prev(&gsi);
++ if (gsi_end_p(gsi))
++ e = split_block_after_labels(cond_bb);
++ else
++ e = split_block(cond_bb, gsi_stmt(gsi));
++ cond_bb = e->src;
++ join_bb = e->dest;
++ e->flags = EDGE_FALSE_VALUE;
++ e->probability = REG_BR_PROB_BASE;
++
++ bb_true = create_empty_bb(cond_bb);
++ make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
++ make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
++ make_edge(bb_true, join_bb, EDGE_FALLTHRU);
++
++ if (dom_info_available_p(CDI_DOMINATORS)) {
++ set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
++ set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
++ }
++
++ if (current_loops != NULL) {
++ gcc_assert(cond_bb->loop_father == join_bb->loop_father);
++ add_bb_to_loop(bb_true, cond_bb->loop_father);
++ }
++
++ insert_cond(cond_bb, arg, cond_code, type_value);
++ insert_cond_result(bb_true, stmt, arg);
++
++// print_the_code_insertions(stmt);
++}
++
++static tree get_type_for_check(tree rhs)
++{
++ tree def_rhs;
++ gimple def_stmt = get_def_stmt(rhs);
++
++ if (!gimple_assign_cast_p(def_stmt))
++ return TREE_TYPE(rhs);
++ def_rhs = gimple_assign_rhs1(def_stmt);
++ if (TREE_CODE(TREE_TYPE(def_rhs)) == INTEGER_TYPE)
++ return TREE_TYPE(def_rhs);
++ return TREE_TYPE(rhs);
++}
++
++static gimple cast_to_unsigned_size_overflow_type(gimple stmt, tree cast_rhs)
++{
++ gimple ucast_stmt;
++ gimple_stmt_iterator gsi;
++ location_t loc = gimple_location(stmt);
++
++ ucast_stmt = build_cast_stmt(unsigned_size_overflow_type, cast_rhs, CREATE_NEW_VAR, loc);
++ gsi = gsi_for_stmt(stmt);
++ gsi_insert_before(&gsi, ucast_stmt, GSI_SAME_STMT);
++ return ucast_stmt;
++}
++
++static void check_size_overflow(gimple stmt, tree cast_rhs, tree rhs, bool *potentionally_overflowed)
++{
++ tree type_max, type_min, rhs_type;
++ gimple ucast_stmt;
++
++ if (!*potentionally_overflowed)
++ return;
++
++ rhs_type = get_type_for_check(rhs);
++
++ if (TYPE_UNSIGNED(rhs_type)) {
++ ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, cast_rhs);
++ type_max = cast_a_tree(unsigned_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
++ insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max);
++ } else {
++ type_max = cast_a_tree(signed_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
++ insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max);
++
++ type_min = cast_a_tree(signed_size_overflow_type, TYPE_MIN_VALUE(rhs_type));
++ insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min);
++ }
++}
++
++static tree change_assign_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple stmt, tree orig_rhs)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++ tree new_rhs, origtype = TREE_TYPE(orig_rhs);
++
++ gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
++
++ new_rhs = expand(visited, potentionally_overflowed, orig_rhs);
++ if (new_rhs == NULL_TREE)
++ return NULL_TREE;
++
++ assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, gimple_location(stmt));
++ gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
++ update_stmt(assign);
++ return gimple_get_lhs(assign);
++}
++
++static tree handle_const_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple def_stmt, tree var, tree rhs, tree new_rhs1, tree new_rhs2, void (*gimple_assign_set_rhs)(gimple, tree))
++{
++ tree new_rhs, cast_rhs;
++
++ if (gimple_assign_rhs_code(def_stmt) == MIN_EXPR)
++ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
++
++ new_rhs = change_assign_rhs(visited, potentionally_overflowed, def_stmt, rhs);
++ if (new_rhs != NULL_TREE) {
++ gimple_assign_set_rhs(def_stmt, new_rhs);
++ update_stmt(def_stmt);
++
++ cast_rhs = gimple_assign_rhs1(get_def_stmt(new_rhs));
++
++ check_size_overflow(def_stmt, cast_rhs, rhs, potentionally_overflowed);
++ }
++ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
++}
++
++static tree handle_binary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
++{
++ tree rhs1, rhs2;
++ gimple def_stmt = get_def_stmt(var);
++ tree new_rhs1 = NULL_TREE;
++ tree new_rhs2 = NULL_TREE;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++
++ /* no DImode/TImode division in the 32/64 bit kernel */
++ switch (gimple_assign_rhs_code(def_stmt)) {
++ case RDIV_EXPR:
++ case TRUNC_DIV_EXPR:
++ case CEIL_DIV_EXPR:
++ case FLOOR_DIV_EXPR:
++ case ROUND_DIV_EXPR:
++ case TRUNC_MOD_EXPR:
++ case CEIL_MOD_EXPR:
++ case FLOOR_MOD_EXPR:
++ case ROUND_MOD_EXPR:
++ case EXACT_DIV_EXPR:
++ case POINTER_PLUS_EXPR:
++ case BIT_AND_EXPR:
++ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
++ default:
++ break;
++ }
++
++ *potentionally_overflowed = true;
++
++ if (TREE_CODE(rhs1) == SSA_NAME)
++ new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
++ if (TREE_CODE(rhs2) == SSA_NAME)
++ new_rhs2 = expand(visited, potentionally_overflowed, rhs2);
++
++ if (is_gimple_constant(rhs2))
++ return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs1, new_rhs1, signed_cast_constant(rhs2), &gimple_assign_set_rhs1);
++
++ if (is_gimple_constant(rhs1))
++ return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs2, signed_cast_constant(rhs1), new_rhs2, &gimple_assign_set_rhs2);
++
++ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
++}
++
++#if BUILDING_GCC_VERSION >= 4007
++static tree get_new_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree rhs)
++{
++ if (is_gimple_constant(rhs))
++ return signed_cast_constant(rhs);
++ if (TREE_CODE(rhs) != SSA_NAME)
++ return NULL_TREE;
++ return expand(visited, potentionally_overflowed, rhs);
++}
++
++static tree handle_ternary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
++{
++ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3;
++ gimple def_stmt = get_def_stmt(var);
++
++ *potentionally_overflowed = true;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++ rhs3 = gimple_assign_rhs3(def_stmt);
++ new_rhs1 = get_new_rhs(visited, potentionally_overflowed, rhs1);
++ new_rhs2 = get_new_rhs(visited, potentionally_overflowed, rhs2);
++ new_rhs3 = get_new_rhs(visited, potentionally_overflowed, rhs3);
++
++ if (new_rhs1 == NULL_TREE && new_rhs2 != NULL_TREE && new_rhs3 != NULL_TREE)
++ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, new_rhs3);
++ error("handle_ternary_ops: unknown rhs");
++ gcc_unreachable();
++}
++#endif
++
++static void set_size_overflow_type(tree node)
++{
++ switch (TYPE_MODE(TREE_TYPE(node))) {
++ case SImode:
++ signed_size_overflow_type = intDI_type_node;
++ unsigned_size_overflow_type = unsigned_intDI_type_node;
++ break;
++ case DImode:
++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) {
++ signed_size_overflow_type = intDI_type_node;
++ unsigned_size_overflow_type = unsigned_intDI_type_node;
++ } else {
++ signed_size_overflow_type = intTI_type_node;
++ unsigned_size_overflow_type = unsigned_intTI_type_node;
++ }
++ break;
++ default:
++ error("set_size_overflow_type: unsupported gcc configuration.");
++ gcc_unreachable();
++ }
++}
++
++static tree expand_visited(gimple def_stmt)
++{
++ gimple tmp;
++ gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt);
++
++ gsi_next(&gsi);
++ tmp = gsi_stmt(gsi);
++ switch (gimple_code(tmp)) {
++ case GIMPLE_ASSIGN:
++ return gimple_get_lhs(tmp);
++ case GIMPLE_PHI:
++ return gimple_phi_result(tmp);
++ case GIMPLE_CALL:
++ return gimple_call_lhs(tmp);
++ default:
++ return NULL_TREE;
++ }
++}
++
++static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
++{
++ gimple def_stmt;
++ enum tree_code code = TREE_CODE(TREE_TYPE(var));
++
++ if (is_gimple_constant(var))
++ return NULL_TREE;
++
++ if (TREE_CODE(var) == ADDR_EXPR)
++ return NULL_TREE;
++
++ gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE || code == ENUMERAL_TYPE);
++ if (code != INTEGER_TYPE)
++ return NULL_TREE;
++
++ if (SSA_NAME_IS_DEFAULT_DEF(var)) {
++ check_missing_attribute(var);
++ return NULL_TREE;
++ }
++
++ def_stmt = get_def_stmt(var);
++
++ if (!def_stmt)
++ return NULL_TREE;
++
++ if (pointer_set_contains(visited, def_stmt))
++ return expand_visited(def_stmt);
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_NOP:
++ check_missing_attribute(var);
++ return NULL_TREE;
++ case GIMPLE_PHI:
++ return build_new_phi(visited, potentionally_overflowed, def_stmt);
++ case GIMPLE_CALL:
++ case GIMPLE_ASM:
++ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
++ case GIMPLE_ASSIGN:
++ switch (gimple_num_ops(def_stmt)) {
++ case 2:
++ return handle_unary_ops(visited, potentionally_overflowed, var);
++ case 3:
++ return handle_binary_ops(visited, potentionally_overflowed, var);
++#if BUILDING_GCC_VERSION >= 4007
++ case 4:
++ return handle_ternary_ops(visited, potentionally_overflowed, var);
++#endif
++ }
++ default:
++ debug_gimple_stmt(def_stmt);
++ error("expand: unknown gimple code");
++ gcc_unreachable();
++ }
++}
++
++static void change_function_arg(gimple stmt, tree origarg, unsigned int argnum, tree newarg)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++ tree origtype = TREE_TYPE(origarg);
++
++ gcc_assert(gimple_code(stmt) == GIMPLE_CALL);
++
++ assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, gimple_location(stmt));
++ gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
++ update_stmt(assign);
++
++ gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign));
++ update_stmt(stmt);
++}
++
++static tree get_function_arg(unsigned int argnum, gimple stmt, tree fndecl)
++{
++ const char *origid;
++ tree arg, origarg;
++
++ if (!DECL_ABSTRACT_ORIGIN(fndecl)) {
++ gcc_assert(gimple_call_num_args(stmt) > argnum);
++ return gimple_call_arg(stmt, argnum);
++ }
++
++ origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl));
++ while (origarg && argnum) {
++ argnum--;
++ origarg = TREE_CHAIN(origarg);
++ }
++
++ gcc_assert(argnum == 0);
++
++ gcc_assert(origarg != NULL_TREE);
++ origid = NAME(origarg);
++ for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) {
++ if (!strcmp(origid, NAME(arg)))
++ return arg;
++ }
++ return NULL_TREE;
++}
++
++static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum)
++{
++ struct pointer_set_t *visited;
++ tree arg, newarg, type_max;
++ gimple ucast_stmt;
++ bool potentionally_overflowed;
++
++ arg = get_function_arg(argnum, stmt, fndecl);
++ if (arg == NULL_TREE)
++ return;
++
++ if (is_gimple_constant(arg))
++ return;
++ if (TREE_CODE(arg) != SSA_NAME)
++ return;
++
++ check_arg_type(arg);
++
++ set_size_overflow_type(arg);
++
++ visited = pointer_set_create();
++ potentionally_overflowed = false;
++ newarg = expand(visited, &potentionally_overflowed, arg);
++ pointer_set_destroy(visited);
++
++ if (newarg == NULL_TREE || !potentionally_overflowed)
++ return;
++
++ change_function_arg(stmt, arg, argnum, newarg);
++
++ ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, newarg);
++
++ type_max = build_int_cstu(unsigned_size_overflow_type, 0x7fffffff);
++ insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max);
++}
++
++static void handle_function_by_attribute(gimple stmt, tree attr, tree fndecl)
++{
++ tree p = TREE_VALUE(attr);
++ do {
++ handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1);
++ p = TREE_CHAIN(p);
++ } while (p);
++}
++
++static void handle_function_by_hash(gimple stmt, tree fndecl)
++{
++ tree orig_fndecl;
++ struct size_overflow_hash *hash;
++ const char *filename = DECL_SOURCE_FILE(fndecl);
++
++ orig_fndecl = get_original_function_decl(fndecl);
++ hash = get_function_hash(orig_fndecl, filename);
++ if (!hash)
++ return;
++
++#define search_param(argnum) \
++ if (hash->param##argnum) \
++ handle_function_arg(stmt, fndecl, argnum - 1);
++
++ search_param(1);
++ search_param(2);
++ search_param(3);
++ search_param(4);
++ search_param(5);
++ search_param(6);
++ search_param(7);
++ search_param(8);
++ search_param(9);
++#undef search_param
++}
++
++static unsigned int handle_function(void)
++{
++ basic_block bb = ENTRY_BLOCK_PTR->next_bb;
++ int saved_last_basic_block = last_basic_block;
++
++ do {
++ gimple_stmt_iterator gsi;
++ basic_block next = bb->next_bb;
++
++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ tree fndecl, attr;
++ gimple stmt = gsi_stmt(gsi);
++
++ if (!(is_gimple_call(stmt)))
++ continue;
++ fndecl = gimple_call_fndecl(stmt);
++ if (fndecl == NULL_TREE)
++ continue;
++ if (gimple_call_num_args(stmt) == 0)
++ continue;
++ attr = lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(fndecl)));
++ if (!attr || !TREE_VALUE(attr))
++ handle_function_by_hash(stmt, fndecl);
++ else
++ handle_function_by_attribute(stmt, attr, fndecl);
++ gsi = gsi_for_stmt(stmt);
++ }
++ bb = next;
++ } while (bb && bb->index <= saved_last_basic_block);
++ return 0;
++}
++
++static struct gimple_opt_pass size_overflow_pass = {
++ .pass = {
++ .type = GIMPLE_PASS,
++ .name = "size_overflow",
++ .gate = NULL,
++ .execute = handle_function,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = PROP_cfg | PROP_referenced_vars,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
++ }
++};
++
++static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
++{
++ tree fntype;
++
++ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
++
++ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func)
++ fntype = build_function_type_list(void_type_node,
++ const_char_ptr_type_node,
++ unsigned_type_node,
++ const_char_ptr_type_node,
++ NULL_TREE);
++ report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
++
++ DECL_ASSEMBLER_NAME(report_size_overflow_decl);
++ TREE_PUBLIC(report_size_overflow_decl) = 1;
++ DECL_EXTERNAL(report_size_overflow_decl) = 1;
++ DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
++}
++
++extern struct gimple_opt_pass pass_dce;
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ int i;
++ const char * const plugin_name = plugin_info->base_name;
++ const int argc = plugin_info->argc;
++ const struct plugin_argument * const argv = plugin_info->argv;
++ bool enable = true;
++
++ struct register_pass_info size_overflow_pass_info = {
++ .pass = &size_overflow_pass.pass,
++ .reference_pass_name = "ssa",
++ .ref_pass_instance_number = 1,
++ .pos_op = PASS_POS_INSERT_AFTER
++ };
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ for (i = 0; i < argc; ++i) {
++ if (!strcmp(argv[i].key, "no-size-overflow")) {
++ enable = false;
++ continue;
++ } else if (!(strcmp(argv[i].key, "no-file-match"))) {
++ file_match = false;
++ continue;
++ }
++		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
++ if (enable) {
++ register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
++ }
++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
++
++ return 0;
++}
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/tools/gcc/stackleak_plugin.c linux-3.4-pax/tools/gcc/stackleak_plugin.c
+--- linux-3.4/tools/gcc/stackleak_plugin.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.4-pax/tools/gcc/stackleak_plugin.c 2012-05-21 12:10:12.500049050 +0200
+@@ -0,0 +1,313 @@
++/*
++ * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to help implement various PaX features
++ *
++ * - track lowest stack pointer
++ *
++ * TODO:
++ * - initialize all local variables
++ *
++ * BUGS:
++ * - none known
++ */
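++/*
++ * Rough sketch of the instrumentation (assumed example, not part of the
++ * original plugin sources): the tree pass rewrites
++ *
++ *   buf = __builtin_alloca(n);
++ *
++ * into roughly
++ *
++ *   pax_check_alloca(n);
++ *   buf = __builtin_alloca(n);
++ *   pax_track_stack();
++ *
++ * and also inserts a pax_track_stack() call at function entry when the
++ * prologue was not already instrumented.  The final RTL pass then deletes
++ * the tracking calls again from functions that do not use alloca and whose
++ * frame is smaller than the track-lowest-sp threshold.
++ */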
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++//#include "expr.h" where are you...
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++
++extern void print_gimple_stmt(FILE *, gimple, int, int);
++
++int plugin_is_GPL_compatible;
++
++static int track_frame_size = -1;
++static const char track_function[] = "pax_track_stack";
++static const char check_function[] = "pax_check_alloca";
++static bool init_locals;
++
++static struct plugin_info stackleak_plugin_info = {
++ .version = "201203140940",
++ .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
++// "initialize-locals\t\tforcibly initialize all stack frames\n"
++};
++
++static bool gate_stackleak_track_stack(void);
++static unsigned int execute_stackleak_tree_instrument(void);
++static unsigned int execute_stackleak_final(void);
++
++static struct gimple_opt_pass stackleak_tree_instrument_pass = {
++ .pass = {
++ .type = GIMPLE_PASS,
++ .name = "stackleak_tree_instrument",
++ .gate = gate_stackleak_track_stack,
++ .execute = execute_stackleak_tree_instrument,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = PROP_gimple_leh | PROP_cfg,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
++ }
++};
++
++static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
++ .pass = {
++ .type = RTL_PASS,
++ .name = "stackleak_final",
++ .gate = gate_stackleak_track_stack,
++ .execute = execute_stackleak_final,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = TODO_dump_func
++ }
++};
++
++static bool gate_stackleak_track_stack(void)
++{
++ return track_frame_size >= 0;
++}
++
++static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
++{
++ gimple check_alloca;
++ tree fntype, fndecl, alloca_size;
++
++ fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
++ fndecl = build_fn_decl(check_function, fntype);
++ DECL_ASSEMBLER_NAME(fndecl); // for LTO
++
++ // insert call to void pax_check_alloca(unsigned long size)
++ alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
++ check_alloca = gimple_build_call(fndecl, 1, alloca_size);
++ gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
++}
++
++static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
++{
++ gimple track_stack;
++ tree fntype, fndecl;
++
++ fntype = build_function_type_list(void_type_node, NULL_TREE);
++ fndecl = build_fn_decl(track_function, fntype);
++ DECL_ASSEMBLER_NAME(fndecl); // for LTO
++
++ // insert call to void pax_track_stack(void)
++ track_stack = gimple_build_call(fndecl, 0);
++ gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
++}
++
++#if BUILDING_GCC_VERSION == 4005
++static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
++{
++ tree fndecl;
++
++ if (!is_gimple_call(stmt))
++ return false;
++ fndecl = gimple_call_fndecl(stmt);
++ if (!fndecl)
++ return false;
++ if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
++ return false;
++// print_node(stderr, "pax", fndecl, 4);
++ return DECL_FUNCTION_CODE(fndecl) == code;
++}
++#endif
++
++static bool is_alloca(gimple stmt)
++{
++ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
++ return true;
++
++#if BUILDING_GCC_VERSION >= 4007
++ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
++ return true;
++#endif
++
++ return false;
++}
++
++static unsigned int execute_stackleak_tree_instrument(void)
++{
++ basic_block bb, entry_bb;
++ bool prologue_instrumented = false, is_leaf = true;
++
++ entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
++
++ // 1. loop through BBs and GIMPLE statements
++ FOR_EACH_BB(bb) {
++ gimple_stmt_iterator gsi;
++
++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ gimple stmt;
++
++ stmt = gsi_stmt(gsi);
++
++ if (is_gimple_call(stmt))
++ is_leaf = false;
++
++ // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
++ if (!is_alloca(stmt))
++ continue;
++
++ // 2. insert stack overflow check before each __builtin_alloca call
++ stackleak_check_alloca(&gsi);
++
++ // 3. insert track call after each __builtin_alloca call
++ stackleak_add_instrumentation(&gsi);
++ if (bb == entry_bb)
++ prologue_instrumented = true;
++ }
++ }
++
++ // special cases for some bad linux code: taking the address of static inline functions will materialize them
++ // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
++ // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
++ // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
++ if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
++ return 0;
++ if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
++ return 0;
++
++ // 4. insert track call at the beginning
++ if (!prologue_instrumented) {
++ gimple_stmt_iterator gsi;
++
++ bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
++ if (dom_info_available_p(CDI_DOMINATORS))
++ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
++ gsi = gsi_start_bb(bb);
++ stackleak_add_instrumentation(&gsi);
++ }
++
++ return 0;
++}
++
++static unsigned int execute_stackleak_final(void)
++{
++ rtx insn;
++
++ if (cfun->calls_alloca)
++ return 0;
++
++ // keep calls only if function frame is big enough
++ if (get_frame_size() >= track_frame_size)
++ return 0;
++
++ // 1. find pax_track_stack calls
++ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
++ // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
++ rtx body;
++
++ if (!CALL_P(insn))
++ continue;
++ body = PATTERN(insn);
++ if (GET_CODE(body) != CALL)
++ continue;
++ body = XEXP(body, 0);
++ if (GET_CODE(body) != MEM)
++ continue;
++ body = XEXP(body, 0);
++ if (GET_CODE(body) != SYMBOL_REF)
++ continue;
++ if (strcmp(XSTR(body, 0), track_function))
++ continue;
++// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
++ // 2. delete call
++ insn = delete_insn_and_edges(insn);
++#if BUILDING_GCC_VERSION >= 4007
++ if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
++ insn = delete_insn_and_edges(insn);
++#endif
++ }
++
++// print_simple_rtl(stderr, get_insns());
++// print_rtl(stderr, get_insns());
++// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
++
++ return 0;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ const char * const plugin_name = plugin_info->base_name;
++ const int argc = plugin_info->argc;
++ const struct plugin_argument * const argv = plugin_info->argv;
++ int i;
++ struct register_pass_info stackleak_tree_instrument_pass_info = {
++ .pass = &stackleak_tree_instrument_pass.pass,
++// .reference_pass_name = "tree_profile",
++ .reference_pass_name = "optimized",
++ .ref_pass_instance_number = 0,
++ .pos_op = PASS_POS_INSERT_BEFORE
++ };
++ struct register_pass_info stackleak_final_pass_info = {
++ .pass = &stackleak_final_rtl_opt_pass.pass,
++ .reference_pass_name = "final",
++ .ref_pass_instance_number = 0,
++ .pos_op = PASS_POS_INSERT_BEFORE
++ };
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
++
++ for (i = 0; i < argc; ++i) {
++ if (!strcmp(argv[i].key, "track-lowest-sp")) {
++ if (!argv[i].value) {
++ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ continue;
++ }
++ track_frame_size = atoi(argv[i].value);
++ if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
++ continue;
++ }
++ if (!strcmp(argv[i].key, "initialize-locals")) {
++ if (argv[i].value) {
++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
++ continue;
++ }
++ init_locals = true;
++ continue;
++ }
++		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ }
++
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
++
++ return 0;
++}
+Binary files linux-3.4/tools/perf/perf and linux-3.4-pax/tools/perf/perf differ
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/tools/perf/util/include/asm/alternative-asm.h linux-3.4-pax/tools/perf/util/include/asm/alternative-asm.h
+--- linux-3.4/tools/perf/util/include/asm/alternative-asm.h 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/tools/perf/util/include/asm/alternative-asm.h 2012-05-21 12:10:12.504049050 +0200
+@@ -5,4 +5,7 @@
+
+ #define altinstruction_entry #
+
++ .macro pax_force_retaddr rip=0, reload=0
++ .endm
++
+ #endif
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/usr/gen_init_cpio.c linux-3.4-pax/usr/gen_init_cpio.c
+--- linux-3.4/usr/gen_init_cpio.c 2011-07-22 04:17:23.000000000 +0200
++++ linux-3.4-pax/usr/gen_init_cpio.c 2012-05-21 12:10:12.504049050 +0200
+@@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name,
+ int retval;
+ int rc = -1;
+ int namesize;
+- int i;
++ unsigned int i;
+
+ mode |= S_IFREG;
+
+@@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_
+ *env_var = *expanded = '\0';
+ strncat(env_var, start + 2, end - start - 2);
+ strncat(expanded, new_location, start - new_location);
+- strncat(expanded, getenv(env_var), PATH_MAX);
+- strncat(expanded, end + 1, PATH_MAX);
++ strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
++ strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
+ strncpy(new_location, expanded, PATH_MAX);
++ new_location[PATH_MAX] = 0;
+ } else
+ break;
+ }
+diff -NurpX linux-3.4-pax/Documentation/dontdiff linux-3.4/virt/kvm/kvm_main.c linux-3.4-pax/virt/kvm/kvm_main.c
+--- linux-3.4/virt/kvm/kvm_main.c 2012-05-21 11:33:45.943930292 +0200
++++ linux-3.4-pax/virt/kvm/kvm_main.c 2012-05-21 12:10:12.508049050 +0200
+@@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
+
+ static cpumask_var_t cpus_hardware_enabled;
+ static int kvm_usage_count = 0;
+-static atomic_t hardware_enable_failed;
++static atomic_unchecked_t hardware_enable_failed;
+
+ struct kmem_cache *kvm_vcpu_cache;
+ EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
+@@ -2247,7 +2247,7 @@ static void hardware_enable_nolock(void
+
+ if (r) {
+ cpumask_clear_cpu(cpu, cpus_hardware_enabled);
+- atomic_inc(&hardware_enable_failed);
++ atomic_inc_unchecked(&hardware_enable_failed);
+ printk(KERN_INFO "kvm: enabling virtualization on "
+ "CPU%d failed\n", cpu);
+ }
+@@ -2301,10 +2301,10 @@ static int hardware_enable_all(void)
+
+ kvm_usage_count++;
+ if (kvm_usage_count == 1) {
+- atomic_set(&hardware_enable_failed, 0);
++ atomic_set_unchecked(&hardware_enable_failed, 0);
+ on_each_cpu(hardware_enable_nolock, NULL, 1);
+
+- if (atomic_read(&hardware_enable_failed)) {
++ if (atomic_read_unchecked(&hardware_enable_failed)) {
+ hardware_disable_all_nolock();
+ r = -EBUSY;
+ }
+@@ -2667,7 +2667,7 @@ static void kvm_sched_out(struct preempt
+ kvm_arch_vcpu_put(vcpu);
+ }
+
+-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
++int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+ struct module *module)
+ {
+ int r;
+@@ -2730,7 +2730,7 @@ int kvm_init(void *opaque, unsigned vcpu
+ if (!vcpu_align)
+ vcpu_align = __alignof__(struct kvm_vcpu);
+ kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
+- 0, NULL);
++ SLAB_USERCOPY, NULL);
+ if (!kvm_vcpu_cache) {
+ r = -ENOMEM;
+ goto out_free_3;
+@@ -2740,9 +2740,11 @@ int kvm_init(void *opaque, unsigned vcpu
+ if (r)
+ goto out_free;
+
+- kvm_chardev_ops.owner = module;
+- kvm_vm_fops.owner = module;
+- kvm_vcpu_fops.owner = module;
++ pax_open_kernel();
++ *(void **)&kvm_chardev_ops.owner = module;
++ *(void **)&kvm_vm_fops.owner = module;
++ *(void **)&kvm_vcpu_fops.owner = module;
++ pax_close_kernel();
+
+ r = misc_register(&kvm_dev);
+ if (r) {