commit 48d63be0286cdf45b286900ff09770b2de48ddc3
Author: BlackLight
Date:   Sat Aug 14 14:30:41 2010 +0200

    First commit for spp_ai

diff --git a/Doxyfile b/Doxyfile
new file mode 100644
index 0000000..769a526
--- /dev/null
+++ b/Doxyfile
@@ -0,0 +1,1630 @@
+# Doxyfile 1.7.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = "Snort AI preprocessor module"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER = 0.1
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = doc
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak,
+# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+ +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. +# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = YES + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful is your file systems +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like regular Qt-style comments +# (thus requiring an explicit @brief command for a brief description.) + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then Doxygen will +# interpret the first line (until the first dot) of a Qt-style +# comment as the brief description. If set to NO, the comments +# will behave just like regular Qt-style comments (thus requiring +# an explicit \brief command for a brief description.) 
+ +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = YES + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for +# Java. For instance, namespaces will be presented as packages, qualified +# scopes will look different, etc. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources only. Doxygen will then generate output that is more tailored for +# Fortran. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for +# VHDL. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given extension. +# Doxygen has a built-in mapping, but you can override or extend it using this +# tag. The format is ext=language, where ext is a file extension, and language +# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, +# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make +# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C +# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions +# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. +# func(std::string) {}). 
This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. +# Doxygen will parse them like normal C++ but will assume all classes use public +# instead of private inheritance when no explicit protection keyword is present. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate getter +# and setter methods for a property. Setting this option to YES (the default) +# will make doxygen to replace the get and set methods by a property in the +# documentation. This will only work if the methods are indeed getting or +# setting a simple type. If this is not the case, or you want to show the +# methods anyway, you should set this option to NO. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum +# is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically +# be useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. + +TYPEDEF_HIDES_STRUCT = NO + +# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to +# determine which symbols to keep in memory and which to flush to disk. +# When the cache is full, less often used symbols will be written to disk. +# For small to medium size projects (<1000 input files) the default value is +# probably good enough. For larger projects a too small cache size can cause +# doxygen to be busy swapping symbols to and from disk most of the time +# causing a significant performance penality. +# If the system has enough physical memory increasing the cache will improve the +# performance by keeping more symbols in memory. Note that the value works on +# a logarithmic scale so increasing the size by one will rougly double the +# memory usage. The cache size is given by this formula: +# 2^(16+SYMBOL_CACHE_SIZE). 
The valid range is 0..9, the default is 0, +# corresponding to a cache size of 2^16 = 65536 symbols + +SYMBOL_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base +# name of the file that contains the anonymous namespace. By default +# anonymous namespace are hidden. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. 
This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen +# will list include files with double quotes in the documentation +# rather than with sharp brackets. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen +# will sort the (brief and detailed) documentation of class members so that +# constructors and destructors are listed first. If set to NO (the default) +# the constructors will appear in the respective orders defined by +# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. +# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO +# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the +# hierarchy of group names into alphabetical order. If set to NO (the default) +# the group names will appear in their defined order. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. 
+ +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if sectionname ... \endif. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or define consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and defines in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. + +SHOW_USED_FILES = YES + +# If the sources in your project are distributed over multiple directories +# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy +# in the documentation. The default is NO. + +SHOW_DIRECTORIES = NO + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. +# This will remove the Files entry from the Quick Index and from the +# Folder Tree View (if specified). The default is YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the +# Namespaces page. +# This will remove the Namespaces entry from the Quick Index +# and from the Folder Tree View (if specified). The default is YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command , where is the value of +# the FILE_VERSION_FILTER tag, and is the name of an input file +# provided by doxygen. Whatever the program writes to standard output +# is used as the file version. See the manual for examples. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. The create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. +# You can optionally specify a file name after the option, if omitted +# DoxygenLayout.xml will be used as the name of the layout file. + +LAYOUT_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. 
+ +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be abled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is +# also the default input encoding. Doxygen uses libiconv (or the iconv built +# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for +# the list of possible encodings. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx +# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 + +FILE_PATTERNS = + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = NO + +# The EXCLUDE tag can be used to specify files and/or directories that should +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used select whether or not files or +# directories that are symbolic links (a Unix filesystem feature) are excluded +# from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. 
Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain image that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command , where +# is the value of the INPUT_FILTER tag, and is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. +# If FILTER_PATTERNS is specified, this tag will be +# ignored. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. +# Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. +# The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER +# is applied to all files. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C and C++ comments will always remain visible. 
+ +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES +# then for each documented function all documented entities +# called/used by that function will be listed. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) +# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from +# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will +# link to the source code. +# Otherwise they will link to the documentation. + +REFERENCES_LINK_SOURCE = YES + +# If the USE_HTAGS tag is set to YES then the references to source code +# will point to the HTML generated by the htags(1) tool instead of doxygen +# built-in source browser. The htags tool is part of GNU's global source +# tagging system (see http://www.gnu.org/software/global/global.html). You +# will need version 4.8.6 or higher. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. + +ALPHABETICAL_INDEX = YES + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. 
+ +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If the tag is left blank doxygen +# will generate a default style sheet. Note that doxygen will try to copy +# the style sheet file to the HTML output directory, so don't put your own +# stylesheet in the HTML output directory as well, or it will be erased! + +HTML_STYLESHEET = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. +# Doxygen will adjust the colors in the stylesheet and background images +# according to this color. Hue is specified as an angle on a colorwheel, +# see http://en.wikipedia.org/wiki/Hue for more information. +# For instance the value 0 represents red, 60 is yellow, 120 is green, +# 180 is cyan, 240 is blue, 300 purple, and 360 is red again. +# The allowed range is 0 to 359. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of +# the colors in the HTML output. For a value of 0 the output will use +# grayscales only. A value of 255 will produce the most vivid colors. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to +# the luminance component of the colors in the HTML output. Values below +# 100 gradually make the output lighter, whereas values above 100 make +# the output darker. The value divided by 100 is the actual gamma applied, +# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, +# and 100 does not change the gamma. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting +# this to NO can help when comparing the output of multiple runs. + +HTML_TIMESTAMP = YES + +# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, +# files or namespaces will be aligned in HTML using tables. If set to +# NO a bullet list will be used. + +HTML_ALIGN_MEMBERS = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. For this to work a browser that supports +# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox +# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). + +HTML_DYNAMIC_SECTIONS = NO + +# If the GENERATE_DOCSET tag is set to YES, additional index files +# will be generated that can be used as input for Apple's Xcode 3 +# integrated development environment, introduced with OSX 10.5 (Leopard). +# To create a documentation set, doxygen will generate a Makefile in the +# HTML output directory. Running make will produce the docset in that +# directory and running "make install" will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find +# it at startup. +# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. + +GENERATE_DOCSET = NO + +# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the +# feed. A documentation feed provides an umbrella under which multiple +# documentation sets from a single provider (such as a company or product suite) +# can be grouped. 
+ +DOCSET_FEEDNAME = "Doxygen generated docs" + +# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that +# should uniquely identify the documentation set bundle. This should be a +# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen +# will append .docset to the name. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. + +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). + +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING +# is used to encode HtmlHelp index (hhk), content (hhc) and project file +# content. + +CHM_INDEX_ENCODING = + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated +# that can be used as input for Qt's qhelpgenerator to generate a +# Qt Compressed Help (.qch) of the generated HTML documentation. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can +# be used to specify the file name of the resulting .qch file. +# The path specified is relative to the HTML output folder. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#namespace + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#virtual-folders + +QHP_VIRTUAL_FOLDER = doc + +# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to +# add. 
For more information please see +# http://doc.trolltech.com/qthelpproject.html#custom-filters + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see +# +# Qt Help Project / Custom Filters. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's +# filter section matches. +# +# Qt Help Project / Filter Attributes. + +QHP_SECT_FILTER_ATTRS = + +# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can +# be used to specify the location of Qt's qhelpgenerator. +# If non-empty doxygen will try to run qhelpgenerator on the generated +# .qhp file. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files +# will be generated, which together with the HTML files, form an Eclipse help +# plugin. To install this plugin and make it available under the help contents +# menu in Eclipse, the contents of the directory containing the HTML and XML +# files needs to be copied into the plugins directory of eclipse. The name of +# the directory within the plugins directory should be the same as +# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before +# the help appears. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have +# this name. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index at +# top of each HTML page. The value NO (the default) enables the index and +# the value YES disables it. + +DISABLE_INDEX = NO + +# This tag can be used to set the number of enum values (range [1..20]) +# that doxygen will group on one line in the generated HTML documentation. + +ENUM_VALUES_PER_LINE = 4 + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. +# If the tag value is set to YES, a side panel will be generated +# containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). +# Windows users are probably better off using the HTML help feature. + +GENERATE_TREEVIEW = NO + +# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories, +# and Class Hierarchy pages using a tree view instead of an ordered list. + +USE_INLINE_TREES = NO + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 250 + +# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open +# links to external symbols imported via tag files in a separate window. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of Latex formulas included +# as images in the HTML documentation. The default is 10. Note that +# when you change the font size after a successful doxygen run you need +# to manually remove any form_*.png images from the HTML output directory +# to force them to be regenerated. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. 
Transparent PNGs are +# not supported properly for IE 6.0, but are supported on all modern browsers. +# Note that when changing this option you need to delete any form_*.png files +# in the HTML output before the changes have effect. + +FORMULA_TRANSPARENT = YES + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box +# for the HTML output. The underlying search engine uses javascript +# and DHTML and should work on any modern browser. Note that when using +# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets +# (GENERATE_DOCSET) there is already a search function so this one should +# typically be disabled. For large projects the javascript based search engine +# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. + +SEARCHENGINE = YES + +# When the SERVER_BASED_SEARCH tag is enabled the search engine will be +# implemented using a PHP enabled web server instead of at the web client +# using Javascript. Doxygen will generate the search PHP script and index +# file to put on the web server. The advantage of the server +# based approach is that it scales better to large projects and allows +# full text search. The disadvances is that it is more difficult to setup +# and does not have live searching capabilities. + +SERVER_BASED_SEARCH = NO + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. + +GENERATE_LATEX = YES + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. +# Note that when enabling USE_PDFLATEX this option is only used for +# generating bitmaps for formulas in the HTML output, but not in the +# Makefile that is written to the output directory. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, a4wide, letter, legal and +# executive. If left blank a4wide will be used. + +PAPER_TYPE = a4wide + +# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). 
The pdf file will +# contain links (just like the HTML output) instead of page references +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = YES + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = YES + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = NO + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. + +LATEX_HIDE_INDICES = NO + +# If LATEX_SOURCE_CODE is set to YES then doxygen will include +# source code with syntax highlighting in the LaTeX output. +# Note that which sources are shown also depends on other settings +# such as SOURCE_BROWSER. + +LATEX_SOURCE_CODE = NO + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load stylesheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. + +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. 
+ +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = NO + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. + +XML_OUTPUT = xml + +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_SCHEMA = + +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_DTD = + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = YES + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. +# This is useful +# if you want to understand what is going on. +# On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. 
+# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = NO + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. + +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# in the INCLUDE_PATH (see below) will be search if a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. + +PREDEFINED = + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all function-like macros that are alone +# on a line, have an all uppercase name, and do not end with a semicolon. Such +# function macros are typically used for boiler-plate code, and will confuse +# the parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. +# Optionally an initial location of the external documentation +# can be added for each tagfile. The format of a tag file without +# this location is as follows: +# +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths or +# URLs. 
If a location is present for each tag, the installdox tool +# does not have to be run to correct the links. +# Note that each tag file must have a unique name +# (where the name does NOT include the path) +# If a tag file is not located in the directory in which doxygen +# is run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will +# be listed. + +EXTERNAL_GROUPS = YES + +# The PERL_PATH should be the absolute path and name of the perl script +# interpreter (i.e. the result of `which perl'). + +PERL_PATH = /usr/bin/perl + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base +# or super classes. Setting the tag to NO turns the diagrams off. Note that +# this option is superseded by the HAVE_DOT option below. This is only a +# fallback. It is recommended to install and use dot, since it yields more +# powerful graphs. + +CLASS_DIAGRAMS = YES + +# You can define message sequence charts within doxygen comments using the \msc +# command. Doxygen will then run the mscgen tool (see +# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the +# documentation. The MSCGEN_PATH tag allows you to specify the directory where +# the mscgen tool resides. If left empty the tool is assumed to be found in the +# default search path. + +MSCGEN_PATH = + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# have no effect if this option is set to NO (the default) + +HAVE_DOT = NO + +# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is +# allowed to run in parallel. When set to 0 (the default) doxygen will +# base this on the number of processors available in the system. You can set it +# explicitly to a value larger than 0 to get control over the balance +# between CPU load and processing speed. + +DOT_NUM_THREADS = 0 + +# By default doxygen will write a font called FreeSans.ttf to the output +# directory and reference it in all dot files that doxygen generates. This +# font does not include all possible unicode characters however, so when you need +# these (or just want a differently looking font) you can specify the font name +# using DOT_FONTNAME. You need need to make sure dot is able to find the font, +# which can be done by putting it in a standard location or by setting the +# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory +# containing the font. 
+ +DOT_FONTNAME = FreeSans.ttf + +# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. +# The default size is 10pt. + +DOT_FONTSIZE = 10 + +# By default doxygen will tell dot to use the output directory to look for the +# FreeSans.ttf font (which doxygen will put there itself). If you specify a +# different font using DOT_FONTNAME you can set the path where dot +# can find it using this tag. + +DOT_FONTPATH = + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force the +# the CLASS_DIAGRAMS tag to NO. + +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and +# class references variables) of the class with other documented classes. + +COLLABORATION_GRAPH = YES + +# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for groups, showing the direct groups dependencies + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. + +UML_LOOK = NO + +# If set to YES, the inheritance and collaboration graphs will show the +# relations between templates and their instances. + +TEMPLATE_RELATIONS = NO + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with +# other documented files. + +INCLUDE_GRAPH = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or +# indirectly include this file. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH and HAVE_DOT options are set to YES then +# doxygen will generate a call dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable call graphs +# for selected functions only using the \callgraph command. + +CALL_GRAPH = NO + +# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then +# doxygen will generate a caller dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable caller +# graphs for selected functions only using the \callergraph command. + +CALLER_GRAPH = NO + +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# will graphical hierarchy of all classes instead of a textual one. + +GRAPHICAL_HIERARCHY = YES + +# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES +# then doxygen will show the dependencies a directory has on other directories +# in a graphical way. The dependency relations are determined by the #include +# relations between the files in the directories. + +DIRECTORY_GRAPH = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. Possible values are png, jpg, or gif +# If left blank png will be used. 
+ +DOT_IMAGE_FORMAT = png + +# The tag DOT_PATH can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. + +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS = + +# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of +# nodes that will be shown in the graph. If the number of nodes in a graph +# becomes larger than this value, doxygen will truncate the graph, which is +# visualized by representing a node as a red box. Note that doxygen if the +# number of direct children of the root node in a graph is already larger than +# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note +# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. + +DOT_GRAPH_MAX_NODES = 50 + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the +# graphs generated by dot. A depth value of 3 means that only nodes reachable +# from the root by following a path via at most 3 edges will be shown. Nodes +# that lay further from the root node will be omitted. Note that setting this +# option to 1 or 2 may greatly reduce the computation time needed for large +# code bases. Also note that the size of a graph can be further restricted by +# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, because dot on Windows does not +# seem to support this out of the box. Warning: Depending on the platform used, +# enabling this option may lead to badly anti-aliased labels on the edges of +# a graph (i.e. they become hard to read). + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default. + +DOT_MULTI_TARGETS = YES + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. + +DOT_CLEANUP = YES diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..77552cc --- /dev/null +++ b/Makefile @@ -0,0 +1,39 @@ +# Path to your Snort preprocess directory (default: /usr/lib/snort_dynamicpreprocessor) +# CHANGE THIS LINE IF YOU INSTALLED SNORT SOMEWHERE ELSE!!!!!!!!!! +PREPROC_PATH=/home/blacklight/local/snort/lib/snort_dynamicpreprocessor + +INCLUDES=-I. -I../../.. 
-I../include -I./uthash +DEFINES=-D_GNU_SOURCE -D_XOPEN_SOURCE -DDYNAMIC_PLUGIN -DSUP_IP6 -DENABLE_MYSQL -DHAVE_CONFIG_H +CMDLINE=-g -O2 -fvisibility=hidden -fno-strict-aliasing -Wall -fstack-protector +LIBPATH=-L/usr/lib +LDLINKS=-lpthread +LIBTOOL=./libtool --tag=CC +OUTPUT=libsf_ai_preproc.la +LDOPTIONS=-export-dynamic -rpath ${PREPROC_PATH} + +OBJECTS=\ +sf_dynamic_preproc_lib.lo \ +sfPolicyUserData.lo \ +spp_ai.lo \ +stream.lo \ +alert_parser.lo \ +regex.lo \ +cluster.lo + +all: + /bin/sh ${LIBTOOL} --mode=compile gcc ${CMDLINE} ${INCLUDES} ${DEFINES} -c -o sf_dynamic_preproc_lib.lo sf_dynamic_preproc_lib.c + /bin/sh ${LIBTOOL} --mode=compile gcc ${CMDLINE} ${INCLUDES} ${DEFINES} -c -o sfPolicyUserData.lo sfPolicyUserData.c + /bin/sh ${LIBTOOL} --mode=compile gcc ${CMDLINE} ${INCLUDES} ${DEFINES} -c -o alert_parser.lo alert_parser.c + /bin/sh ${LIBTOOL} --mode=compile gcc ${CMDLINE} ${INCLUDES} ${DEFINES} -c -o regex.lo regex.c + /bin/sh ${LIBTOOL} --mode=compile gcc ${CMDLINE} ${INCLUDES} ${DEFINES} -c -o stream.lo stream.c + /bin/sh ${LIBTOOL} --mode=compile gcc ${CMDLINE} ${INCLUDES} ${DEFINES} -c -o spp_ai.lo spp_ai.c + /bin/sh ${LIBTOOL} --mode=compile gcc ${CMDLINE} ${INCLUDES} ${DEFINES} -c -o cluster.lo cluster.c + /bin/sh ${LIBTOOL} --mode=link gcc ${CMDLINE} ${LDOPTIONS} ${LIBPATH} -o ${OUTPUT} ${OBJECTS} ${LDLINKS} + +clean: + rm -rf .libs _libs + test -z "${OUTPUT}" || rm -f ${OUTPUT} + rm -f "./so_locations" + rm -f *.o + rm -f *.lo + diff --git a/TODO b/TODO new file mode 100644 index 0000000..6a622b7 --- /dev/null +++ b/TODO @@ -0,0 +1,2 @@ +- Managing clusters for addresses, timestamps (and more?) + diff --git a/alert_parser.c b/alert_parser.c new file mode 100644 index 0000000..17f2240 --- /dev/null +++ b/alert_parser.c @@ -0,0 +1,315 @@ +/* + * ===================================================================================== + * + * Filename: alert_parser.c + * + * Description: Managing the parsing of Snort's alert file + * + * Version: 0.1 + * Created: 08/08/2010 09:21:57 + * Revision: none + * Compiler: gcc + * + * Author: BlackLight (http://0x00.ath.cx), + * Licence: GNU GPL v.3 + * Company: DO WHAT YOU WANT CAUSE A PIRATE IS FREE, YOU ARE A PIRATE! 
+ * + * ===================================================================================== + */ + +#include "spp_ai.h" + +#include +#include +#include +#include +#include + + +PRIVATE AI_snort_alert *alerts = NULL; +PRIVATE FILE *alert_fp = NULL; + +/** + * FUNCTION: AI_alertparser_thread + * \brief Thread for parsing Snort's alert file + * \param arg void* pointer to module's configuration + */ + +void* +AI_alertparser_thread ( void* arg ) +{ + struct logtime { + unsigned short day; + unsigned short month; + unsigned short year; + unsigned short hour; + unsigned short min; + unsigned short sec; + }; + + int i; + int ifd; + int wd; + int nmatches = 0; + char line[8192]; + char strtime[256]; + char **matches = NULL; + time_t stamp; + struct tm *_tm; + struct logtime ltime; + struct stat st; + struct pkt_key key; + struct pkt_info *info; + + AI_config *conf = ( AI_config* ) arg; + AI_snort_alert *alert = NULL; + AI_snort_alert *tmp = NULL; + BOOL in_alert = false; + + while ( 1 ) + { + FILE *fp = fopen ( "/home/blacklight/LOG", "a" ); + + if (( ifd = inotify_init() ) < 0 ) + { + _dpd.fatalMsg ( "Could not initialize an inotify object on the alert log file" ); + } + + if ( stat ( conf->alertfile, &st ) < 0 ) + { + if (( wd = inotify_add_watch ( ifd, conf->alertfile, IN_CREATE )) < 0 ) + { + _dpd.fatalMsg ( "Could not initialize a watch descriptor on the alert log file" ); + } + + read ( ifd, line, sizeof(line) ); + inotify_rm_watch ( ifd, wd ); + } else { + if ( !alert_fp ) + { + if ( ! (alert_fp = fopen ( conf->alertfile, "r" )) ) + { + _dpd.fatalMsg ( "Could not open alert log file for reading" ); + } + } + } + + if (( wd = inotify_add_watch ( ifd, conf->alertfile, IN_MODIFY )) < 0 ) + { + _dpd.fatalMsg ( "Could not initialize a watch descriptor on the alert log file" ); + } + + fseek ( alert_fp, 0, SEEK_END ); + read ( ifd, line, sizeof(line) ); + + while ( !feof ( alert_fp )) + { + fgets ( line, sizeof(line), alert_fp ); + + for ( i = strlen(line)-1; + i >= 0 && ( line[i] == '\n' || line[i] == '\r' || line[i] == '\t' || line[i] == ' ' ); + i++ ) + { + line[i] = 0; + } + + if ( strlen(line) == 0 ) + { + if ( in_alert ) + { + if ( alert->ipproto == IPPROTO_TCP ) + { + key.src_ip = alert->src_addr; + key.dst_port = alert->dst_port; + + if (( info = AI_get_stream_by_key ( key ) )) + { + AI_set_stream_observed ( key ); + alert->stream = info; + + if ( alerts == NULL ) + { + alerts = alert; + alerts->next = NULL; + } else { + for ( tmp = alerts; tmp->next; tmp = tmp->next ); + tmp->next = alert; + } + + /* TODO Do something!! 
*/ + } + } + + in_alert = false; + alert = NULL; + } + + continue; + } + + if ( !in_alert ) + { + if ( preg_match ( "^\\[\\*\\*\\]\\s*\\[([0-9]+):([0-9]+):([0-9]+)\\]\\s*(.*)\\s*\\[\\*\\*\\]$", line, &matches, &nmatches ) > 0 ) + { + in_alert = true; + + if ( !( alert = ( AI_snort_alert* ) malloc ( sizeof(AI_snort_alert) )) ) + { + _dpd.fatalMsg ( "\nDynamic memory allocation error at %s:%d\n", __FILE__, __LINE__ ); + } + + memset ( alert, 0, sizeof(AI_snort_alert) ); + + alert->gid = strtoul ( matches[0], NULL, 10 ); + alert->sid = strtoul ( matches[1], NULL, 10 ); + alert->rev = strtoul ( matches[2], NULL, 10 ); + alert->desc = strdup ( matches[3] ); + + for ( i=0; i < nmatches; i++ ) + free ( matches[i] ); + + free ( matches ); + matches = NULL; + } else { + _dpd.fatalMsg ( "Parse error: a line in the alert log cannot be associated to an alert block\n" ); + } + } else if ( preg_match ( "\\[Priority:\\s*([0-9]+)\\]", line, &matches, &nmatches) > 0 ) { + alert->priority = (unsigned short) strtoul ( matches[0], NULL, 10 ); + + for ( i=0; i < nmatches; i++ ) + free ( matches[i] ); + + free ( matches ); + matches = NULL; + + if ( preg_match ( "\\[Classification:\\s*([^\\]]+)\\]", line, &matches, &nmatches) > 0 ) + { + alert->classification = strdup ( matches[0] ); + + for ( i=0; i < nmatches; i++ ) + free ( matches[i] ); + + free ( matches ); + matches = NULL; + } + } else if ( preg_match ( "^([0-9]{2})/([0-9]{2})-([0-9]{2}):([0-9]{2}):([0-9]{2})\\.[0-9]+\\s+([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}):([0-9]{1,5})\\s*" + "->\\s*([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}):([0-9]{1,5})", + line, &matches, &nmatches ) > 0 ) { + stamp = time(NULL); + _tm = localtime ( &stamp ); + + ltime.year = (unsigned short) _tm->tm_year + 1900; + ltime.day = (unsigned short) strtoul ( matches[0], NULL, 10 ); + ltime.month = (unsigned short) strtoul ( matches[1], NULL, 10 ); + ltime.hour = (unsigned short) strtoul ( matches[2], NULL, 10 ); + ltime.min = (unsigned short) strtoul ( matches[3], NULL, 10 ); + ltime.sec = (unsigned short) strtoul ( matches[4], NULL, 10 ); + + snprintf ( strtime, sizeof(strtime), "%02hu/%02hu/%04hu, %02hu:%02hu:%02hu", + ltime.day, ltime.month, ltime.year, ltime.hour, ltime.min, ltime.sec ); + + strptime ( strtime, "%d/%m/%Y, %H:%M:%S", _tm ); + alert->timestamp = mktime ( _tm ); + + alert->src_addr = inet_addr ( matches[5] ); + alert->dst_addr = inet_addr ( matches[7] ); + alert->src_port = htons ( atoi( matches[6] )); + alert->dst_port = htons ( atoi( matches[8] )); + + for ( i=0; i < nmatches; i++ ) + free ( matches[i] ); + + free ( matches ); + matches = NULL; + } else if ( preg_match ( "^([0-9]{2})/([0-9]{2})-([0-9]{2}):([0-9]{2}):([0-9]{2})\\.[0-9]+\\s+([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})\\s*" + "->\\s*([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})", + line, &matches, &nmatches ) > 0 ) { + stamp = time(NULL); + _tm = localtime ( &stamp ); + + ltime.year = (unsigned short) _tm->tm_year + 1900; + ltime.day = (unsigned short) strtoul ( matches[0], NULL, 10 ); + ltime.month = (unsigned short) strtoul ( matches[1], NULL, 10 ); + ltime.hour = (unsigned short) strtoul ( matches[2], NULL, 10 ); + ltime.min = (unsigned short) strtoul ( matches[3], NULL, 10 ); + ltime.sec = (unsigned short) strtoul ( matches[4], NULL, 10 ); + + snprintf ( strtime, sizeof(strtime), "%02hu/%02hu/%04hu, %02hu:%02hu:%02hu", + ltime.day, ltime.month, ltime.year, ltime.hour, ltime.min, ltime.sec ); + + strptime ( strtime, "%d/%m/%Y, %H:%M:%S", _tm ); + alert->timestamp = 
mktime ( _tm ); + + alert->src_addr = inet_addr ( matches[5] ); + alert->dst_addr = inet_addr ( matches[6] ); + + for ( i=0; i < nmatches; i++ ) + free ( matches[i] ); + + free ( matches ); + matches = NULL; + } else if ( preg_match ( "^([^\\s+]+)\\s+TTL:\\s*([0-9]+)\\s+TOS:\\s*0x([0-9A-F]+)\\s+ID:\\s*([0-9]+)\\s+IpLen:\\s*([0-9]+)", + line, &matches, &nmatches ) > 0 ) { + if ( !strcasecmp ( matches[0], "tcp" ) ) { + alert->ipproto = IPPROTO_TCP; + } else if ( !strcasecmp ( matches[0], "udp" ) ) { + alert->ipproto = IPPROTO_UDP; + } else if ( !strcasecmp ( matches[0], "icmp" ) ) { + alert->ipproto = IPPROTO_ICMP; + } else { + alert->ipproto = IPPROTO_NONE; + } + + alert->ttl = htons ( (uint16_t) strtoul ( matches[1], NULL, 10 )); + alert->tos = htons ( (uint16_t) strtoul ( matches[2], NULL, 16 )); + alert->id = htons ( (uint16_t) strtoul ( matches[3], NULL, 10 )); + alert->iplen = htons ( (uint16_t) strtoul ( matches[4], NULL, 10 )); + + for ( i=0; i < nmatches; i++ ) + free ( matches[i] ); + + free ( matches ); + matches = NULL; + } else if ( preg_match ( "^([\\*UAPRSF]{8})\\s+Seq:\\s*0x([0-9A-F]+)\\s+Ack:\\s*0x([0-9A-F]+)\\s+Win:\\s*0x([0-9A-F]+)\\s+TcpLen:\\s*([0-9]+)", + line, &matches, &nmatches ) > 0 ) { + alert->tcp_flags = 0; + alert->tcp_flags |= ( strstr ( matches[0], "C" ) ) ? TCPHEADER_RES1 : 0; + alert->tcp_flags |= ( strstr ( matches[0], "E" ) ) ? TCPHEADER_RES2 : 0; + alert->tcp_flags |= ( strstr ( matches[0], "U" ) ) ? TCPHEADER_URG : 0; + alert->tcp_flags |= ( strstr ( matches[0], "A" ) ) ? TCPHEADER_ACK : 0; + alert->tcp_flags |= ( strstr ( matches[0], "P" ) ) ? TCPHEADER_PUSH : 0; + alert->tcp_flags |= ( strstr ( matches[0], "R" ) ) ? TCPHEADER_RST : 0; + alert->tcp_flags |= ( strstr ( matches[0], "S" ) ) ? TCPHEADER_SYN : 0; + alert->tcp_flags |= ( strstr ( matches[0], "F" ) ) ? TCPHEADER_FIN : 0; + + alert->sequence = htonl ( strtoul ( matches[1], NULL, 16 )); + alert->ack = htonl ( strtoul ( matches[2], NULL, 16 )); + alert->window = htons ( (uint16_t) strtoul ( matches[3], NULL, 16 )); + alert->tcplen = htons ( (uint16_t) strtoul ( matches[4], NULL, 10 )); + + for ( i=0; i < nmatches; i++ ) + free ( matches[i] ); + + free ( matches ); + matches = NULL; + } + } + + fclose ( fp ); + } + + return (void*) 0; +} /* ----- end of function AI_alertparser_thread ----- */ + + +/** + * FUNCTION: AI_get_alerts + * \brief Return the alerts parsed so far as a linked list + * \return An AI_snort_alert pointer identifying the list of alerts + */ +AI_snort_alert* +AI_get_alerts () +{ + return alerts; +} /* ----- end of function AI_get_alerts ----- */ + diff --git a/build.sh b/build.sh new file mode 100755 index 0000000..6cd2a66 --- /dev/null +++ b/build.sh @@ -0,0 +1,21 @@ +#!/bin/sh + +OUTDIR=${HOME}/local/snort/lib/snort_dynamicpreprocessor +LIBSF=libsf_ai_preproc + +make clean +make +chmod +x ./${LIBSF}.la +cp ./${LIBSF}.la ${OUTDIR} +cp .libs/${LIBSF}.a ${OUTDIR} +cp .libs/${LIBSF}.so.0.0.0 ${OUTDIR} +cd ${OUTDIR} + +if [ ! -f ${LIBSF}.so.0 ]; then + ln -sf ${LIBSF}.so.0.0.0 ${LIBSF}.so.0 +fi + +if [ ! 
-f ${LIBSF}.so ]; then + ln -sf ${LIBSF}.so.0.0.0 ${LIBSF}.so +fi + diff --git a/cluster.c b/cluster.c new file mode 100644 index 0000000..b17f04b --- /dev/null +++ b/cluster.c @@ -0,0 +1,345 @@ +/* + * ===================================================================================== + * + * Filename: cluster.c + * + * Description: Module for managing alarm clustering and cluter hierarchies + * + * Version: 1.0 + * Created: 12/08/2010 12:43:28 + * Revision: none + * Compiler: gcc + * + * Author: BlackLight (http://0x00.ath.cx), + * Licence: GNU GPL v.3 + * Company: DO WHAT YOU WANT CAUSE A PIRATE IS FREE, YOU ARE A PIRATE! + * + * ===================================================================================== + */ + +#include "spp_ai.h" +#include +#include +#include + +PRIVATE hierarchy_node *src_port_root = NULL; +PRIVATE hierarchy_node *src_addr_root = NULL; +PRIVATE hierarchy_node *dst_port_root = NULL; +PRIVATE hierarchy_node *dst_addr_root = NULL; +PRIVATE AI_config *_config = NULL; +PRIVATE AI_snort_alert *alert_log = NULL; + + +/** + * FUNCTION: _hierarchy_node_new + * \brief Create a new clustering hierarchy node + * \param label Label for the node + * \param min_val Minimum value for the range represented by the node + * \param max_val Maximum value for the range represented by the node + * \return The brand new node if the allocation was ok, otherwise abort the application + */ + +PRIVATE hierarchy_node* +_hierarchy_node_new ( char *label, int min_val, int max_val ) +{ + hierarchy_node *n = NULL; + + if ( !( n = ( hierarchy_node* ) malloc ( sizeof ( hierarchy_node )) )) + { + _dpd.fatalMsg ( "Dynamic memory allocation failure at %s:%d\n", __FILE__, __LINE__ ); + } + + n->min_val = min_val; + n->max_val = max_val; + n->nchildren = 0; + n->children = NULL; + n->parent = NULL; + strncpy ( n->label, label, sizeof ( n->label )); + + return n; +} /* ----- end of function _hierarchy_node_new ----- */ + + +/** + * FUNCTION: _hierarchy_node_append + * \brief Append a node to a clustering hierarchy node + * \param parent Parent node + * \param child Child node + */ + +PRIVATE void +_hierarchy_node_append ( hierarchy_node *parent, hierarchy_node *child ) +{ + if ( !( parent->children = ( hierarchy_node** ) realloc ( parent->children, (++(parent->nchildren)) * sizeof ( hierarchy_node* )) )) + { + _dpd.fatalMsg ( "Dynamic memory allocation failure at %s:%d\n", __FILE__, __LINE__ ); + } + + parent->children[ parent->nchildren - 1 ] = child; + child->parent = parent; +} /* ----- end of function _hierarchy_node_append ----- */ + + +/* PRIVATE void */ +/* _hierarchy_node_free ( hierarchy_node *n ) */ +/* { */ +/* int i; */ +/* */ +/* if ( !n ) */ +/* return; */ +/* */ +/* for ( i=0; i < n->nchildren; i++ ) */ +/* { */ +/* if ( n->children[i] ) */ +/* _hierarchy_node_free ( n->children[i] ); */ +/* } */ +/* */ +/* free ( n ); */ +/* n = NULL; */ +/* } */ + + +/** + * FUNCTION: _AI_get_min_hierarchy_node + * \brief Get the minimum node in a hierarchy tree that matches a certain value + * \param val Value to be matched in the range + * \param root Root of the hierarchy + * \return The minimum node that matches the value if any, NULL otherwise + */ +PRIVATE hierarchy_node* +_AI_get_min_hierarchy_node ( int val, hierarchy_node *root ) +{ + int i; + hierarchy_node *next = NULL; + + if ( !root ) + { + return NULL; + } + + if ( (unsigned) val < (unsigned) root->min_val || (unsigned) val > (unsigned) root->max_val ) + { + return NULL; + } + + for ( i=0; i < root->nchildren && !next; i++ ) + { + 
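			/* pick the first child whose [min_val, max_val] range still contains val */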
if ( root->children[i]->min_val <= val && root->children[i]->max_val >= val ) + { + next = root->children[i]; + } + } + + if ( !next ) + return root; + + return _AI_get_min_hierarchy_node ( val, next ); +} /* ----- end of function _AI_get_min_hierarchy_node ----- */ + +/** + * FUNCTION: _AI_cluster_thread + * \brief Thread for periodically clustering the log information + */ +PRIVATE void* +_AI_cluster_thread ( void* arg ) +{ + AI_snort_alert *tmp; + hierarchy_node *node, *child; + char label[256]; + + while ( 1 ) + { + sleep ( _config->alertClusteringInterval ); + + if ( !( alert_log = AI_get_alerts() )) + { + continue; + } + + FILE *fp = fopen ( "/home/blacklight/LOG", "a" ); + + for ( tmp = alert_log; tmp; tmp = tmp->next ) + { + if ( src_addr_root && !tmp->src_addr_node ) + { + node = _AI_get_min_hierarchy_node ( ntohl ( tmp->src_addr ), src_addr_root ); + + if ( node ) + { + if ( node->min_val < node->max_val ) + { + inet_ntop ( AF_INET, &(tmp->src_addr), label, INET_ADDRSTRLEN ); + child = _hierarchy_node_new ( label, ntohl ( tmp->src_addr ), ntohl ( tmp->src_addr )); + _hierarchy_node_append ( node, child ); + node = child; + } + + tmp->src_addr_node = node; + fprintf ( fp, "minimum range holding %s: %s (prev: %s)\n", label, tmp->src_addr_node->label, tmp->src_addr_node->parent->label ); + } + } + + if ( dst_addr_root && !tmp->dst_addr_node ) + { + node = _AI_get_min_hierarchy_node ( ntohl ( tmp->dst_addr ), dst_addr_root ); + + if ( node ) + { + if ( node->min_val < node->max_val ) + { + /* snprintf ( label, sizeof(label), "%d", ntohl ( tmp->dst_addr )); */ + inet_ntop ( AF_INET, &(tmp->src_addr), label, INET_ADDRSTRLEN ); + child = _hierarchy_node_new ( label, ntohl ( tmp->dst_addr ), ntohl ( tmp->dst_addr )); + _hierarchy_node_append ( node, child ); + node = child; + } + + tmp->dst_addr_node = node; + } + } + + if ( src_port_root && !tmp->src_port_node ) + { + node = _AI_get_min_hierarchy_node ( ntohs ( tmp->src_port ), src_port_root ); + + if ( node ) + { + if ( node->min_val < node->max_val ) + { + snprintf ( label, sizeof(label), "%d", ntohs ( tmp->src_port )); + child = _hierarchy_node_new ( label, ntohs ( tmp->src_port ), ntohs ( tmp->src_port )); + _hierarchy_node_append ( node, child ); + node = child; + } + + tmp->src_port_node = node; + fprintf ( fp, "minimum range holding %d: %s (prev: %s)\n", ntohs(tmp->src_port), tmp->src_port_node->label, tmp->src_port_node->parent->label ); + } + } + + if ( dst_port_root && !tmp->dst_port_node ) + { + node = _AI_get_min_hierarchy_node ( ntohs ( tmp->dst_port ), dst_port_root ); + + if ( node ) + { + if ( node->min_val < node->max_val ) + { + snprintf ( label, sizeof(label), "%d", ntohs ( tmp->dst_port )); + child = _hierarchy_node_new ( label, ntohs ( tmp->dst_port ), ntohs ( tmp->dst_port )); + _hierarchy_node_append ( node, child ); + node = child; + } + + tmp->dst_port_node = node; + fprintf ( fp, "minimum range holding %d: %s (prev: %s)\n", ntohs(tmp->dst_port), tmp->dst_port_node->label, tmp->dst_port_node->parent->label ); + } + } + } + + fclose ( fp ); + } + + return (void*) 0; +} /* ----- end of function AI_cluster_thread ----- */ + + +/** + * FUNCTION: AI_hierarchies_build + * \brief Build the clustering hierarchy trees + * \param conf Reference to the configuration of the module + * \param nodes Nodes containing the information about the clustering ranges + * \param n_nodes Number of nodes + */ + +void +AI_hierarchies_build ( AI_config *conf, hierarchy_node **nodes, int n_nodes ) +{ + int i, j; + int min_range = 0; + 
pthread_t cluster_thread; + hierarchy_node *root = NULL; + hierarchy_node *cover = NULL; + _config = conf; + + for ( i=0; i < n_nodes; i++ ) + { + switch ( nodes[i]->type ) + { + case src_port: + if ( !src_port_root ) + src_port_root = _hierarchy_node_new ( "1-65535", 1, 65535 ); + + root = src_port_root; + min_range = 65534; + break; + + case dst_port: + if ( !dst_port_root ) + dst_port_root = _hierarchy_node_new ( "1-65535", 1, 65535 ); + + root = dst_port_root; + min_range = 65534; + break; + + case src_addr: + if ( !src_addr_root ) + src_addr_root = _hierarchy_node_new ( "0.0.0.0/0", + 0x0, 0xffffffff ); + + root = src_addr_root; + min_range = 0xffffffff; + break; + + case dst_addr: + if ( !dst_addr_root ) + dst_addr_root = _hierarchy_node_new ( "0.0.0.0/0", + 0x0, 0xffffffff ); + + root = dst_addr_root; + min_range = 0xffffffff; + break; + + /* TODO Manage range for timestamps (and something more?) */ + default: + break; + } + + cover = NULL; + + for ( j=0; j < n_nodes; j++ ) + { + if ( i != j ) + { + if ( (unsigned) nodes[j]->min_val <= (unsigned) nodes[i]->min_val && + (unsigned) nodes[j]->max_val >= (unsigned) nodes[i]->max_val ) + { + if (( (unsigned) nodes[i]->min_val - (unsigned) nodes[j]->min_val + + (unsigned) nodes[j]->max_val - (unsigned) nodes[i]->max_val ) <= min_range ) + { + cover = nodes[j]; + min_range = nodes[i]->min_val - nodes[j]->min_val + + nodes[j]->max_val - nodes[i]->max_val; + } + } + } + } + + if ( cover ) + { + _hierarchy_node_append ( cover, nodes[i] ); + } else { + if ( (unsigned) nodes[i]->min_val >= (unsigned) root->min_val && (unsigned) nodes[i]->max_val <= (unsigned) root->max_val && + ( (unsigned) nodes[i]->min_val != (unsigned) root->min_val || (unsigned) nodes[i]->max_val != (unsigned) root->max_val )) + { + _hierarchy_node_append ( root, nodes[i] ); + } + } + } + + if ( pthread_create ( &cluster_thread, NULL, _AI_cluster_thread, NULL ) != 0 ) + { + _dpd.fatalMsg ( "Failed to create the hash cleanup thread\n" ); + } +} /* ----- end of function AI_hierarchies_build ----- */ + diff --git a/doc/html/annotated.html b/doc/html/annotated.html new file mode 100644 index 0000000..c082cfd --- /dev/null +++ b/doc/html/annotated.html @@ -0,0 +1,78 @@ + + + + +Snort AI preprocessor module: Data Structures + + + + + + + + + +
+
+

Data Structures

+
+
+Here are the data structures with brief descriptions: + + + +
_AI_config
pkt_info
pkt_key
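The index above only names the structures. As a reading aid, the sketch below shows how they plausibly fit together, based on the member names that appear elsewhere in this commit (src_ip and dst_port under pkt_key; key, pkt, timestamp, next and hh under pkt_info; hashCleanupInterval, streamExpireInterval and portToCheck under _AI_config, plus alertfile and alertClusteringInterval referenced from alert_parser.c and cluster.c). Field types, the SFSnortPacket stand-in and the exact layout are assumptions, not taken from the sources:

#include <stdint.h>
#include <time.h>
#include "uthash.h"                          /* the Makefile adds -I./uthash */

typedef struct _SFSnortPacket SFSnortPacket; /* stand-in for the Snort SDK packet type */

struct pkt_key
{
	uint32_t src_ip;     /* source address of the tracked stream */
	uint16_t dst_port;   /* destination port of the tracked stream */
};

struct pkt_info
{
	struct pkt_key   key;        /* hash key: (src_ip, dst_port) */
	time_t           timestamp;  /* when the packet was observed */
	SFSnortPacket   *pkt;        /* saved packet (assumed type) */
	struct pkt_info *next;       /* next packet seen on the same stream */
	UT_hash_handle   hh;         /* makes the structure hashable with uthash */
};

typedef struct _AI_config
{
	unsigned long hashCleanupInterval;     /* seconds between stream-hash cleanups */
	unsigned long streamExpireInterval;    /* seconds before an idle stream expires */
	unsigned long alertClusteringInterval; /* seconds between clustering passes */
	uint16_t      portToCheck;             /* port monitored by the example rule */
	char          alertfile[1024];         /* path to Snort's alert log (assumed size) */
} AI_config;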
+
+ + + + +
+ +
+ + + + diff --git a/doc/html/bc_s.png b/doc/html/bc_s.png new file mode 100644 index 0000000..e401862 Binary files /dev/null and b/doc/html/bc_s.png differ diff --git a/doc/html/classes.html b/doc/html/classes.html new file mode 100644 index 0000000..ed906f1 --- /dev/null +++ b/doc/html/classes.html @@ -0,0 +1,78 @@ + + + + +Snort AI preprocessor module: Alphabetical List + + + + + + + + + +
+
+

Data Structure Index

+
+
+
P | _
+ +
  P  
+
pkt_info   pkt_key   
  _  
+
_AI_config   
P | _
+
+ + + + +
+ +
+ + + + diff --git a/doc/html/closed.png b/doc/html/closed.png new file mode 100644 index 0000000..b7d4bd9 Binary files /dev/null and b/doc/html/closed.png differ diff --git a/doc/html/doxygen.css b/doc/html/doxygen.css new file mode 100644 index 0000000..658686f --- /dev/null +++ b/doc/html/doxygen.css @@ -0,0 +1,656 @@ +/* The standard CSS for doxygen */ + +body, table, div, p, dl { + font-family: Lucida Grande, Verdana, Geneva, Arial, sans-serif; + font-size: 12px; +} + +/* @group Heading Levels */ + +h1 { + font-size: 150%; +} + +h2 { + font-size: 120%; +} + +h3 { + font-size: 100%; +} + +dt { + font-weight: bold; +} + +div.multicol { + -moz-column-gap: 1em; + -webkit-column-gap: 1em; + -moz-column-count: 3; + -webkit-column-count: 3; +} + +p.startli, p.startdd, p.starttd { + margin-top: 2px; +} + +p.endli { + margin-bottom: 0px; +} + +p.enddd { + margin-bottom: 4px; +} + +p.endtd { + margin-bottom: 2px; +} + +/* @end */ + +caption { + font-weight: bold; +} + +span.legend { + font-size: 70%; + text-align: center; +} + +h3.version { + font-size: 90%; + text-align: center; +} + +div.qindex, div.navtab{ + background-color: #EBEFF6; + border: 1px solid #A3B4D7; + text-align: center; + margin: 2px; + padding: 2px; +} + +div.qindex, div.navpath { + width: 100%; + line-height: 140%; +} + +div.navtab { + margin-right: 15px; +} + +/* @group Link Styling */ + +a { + color: #3D578C; + font-weight: normal; + text-decoration: none; +} + +.contents a:visited { + color: #4665A2; +} + +a:hover { + text-decoration: underline; +} + +a.qindex { + font-weight: bold; +} + +a.qindexHL { + font-weight: bold; + background-color: #9CAFD4; + color: #ffffff; + border: 1px double #869DCA; +} + +.contents a.qindexHL:visited { + color: #ffffff; +} + +a.el { + font-weight: bold; +} + +a.elRef { +} + +a.code { + color: #4665A2; +} + +a.codeRef { + color: #4665A2; +} + +/* @end */ + +dl.el { + margin-left: -1cm; +} + +.fragment { + font-family: monospace, fixed; + font-size: 105%; +} + +pre.fragment { + border: 1px solid #C4CFE5; + background-color: #FBFCFD; + padding: 4px 6px; + margin: 4px 8px 4px 2px; + overflow: auto; + word-wrap: break-word; + font-size: 9pt; + line-height: 125%; +} + +div.ah { + background-color: black; + font-weight: bold; + color: #ffffff; + margin-bottom: 3px; + margin-top: 3px; + padding: 0.2em; + border: solid thin #333; + border-radius: 0.5em; + -webkit-border-radius: .5em; + -moz-border-radius: .5em; + -webkit-box-shadow: 2px 2px 3px #999; + -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; + background-image: -webkit-gradient(linear, left top, left bottom, from(#eee), to(#000),color-stop(0.3, #444)); + background-image: -moz-linear-gradient(center top, #eee 0%, #444 40%, #000); +} + +div.groupHeader { + margin-left: 16px; + margin-top: 12px; + margin-bottom: 6px; + font-weight: bold; +} + +div.groupText { + margin-left: 16px; + font-style: italic; +} + +body { + background: white; + color: black; + margin: 0; +} + +div.contents { + margin-top: 10px; + margin-left: 10px; + margin-right: 10px; +} + +td.indexkey { + background-color: #EBEFF6; + font-weight: bold; + border: 1px solid #C4CFE5; + margin: 2px 0px 2px 0; + padding: 2px 10px; +} + +td.indexvalue { + background-color: #EBEFF6; + border: 1px solid #C4CFE5; + padding: 2px 10px; + margin: 2px 0px; +} + +tr.memlist { + background-color: #EEF1F7; +} + +p.formulaDsp { + text-align: center; +} + +img.formulaDsp { + +} + +img.formulaInl { + vertical-align: middle; +} + +div.center { + text-align: center; + margin-top: 0px; + 
margin-bottom: 0px; + padding: 0px; +} + +div.center img { + border: 0px; +} + +address.footer { + text-align: right; + padding-right: 12px; +} + +img.footer { + border: 0px; + vertical-align: middle; +} + +/* @group Code Colorization */ + +span.keyword { + color: #008000 +} + +span.keywordtype { + color: #604020 +} + +span.keywordflow { + color: #e08000 +} + +span.comment { + color: #800000 +} + +span.preprocessor { + color: #806020 +} + +span.stringliteral { + color: #002080 +} + +span.charliteral { + color: #008080 +} + +span.vhdldigit { + color: #ff00ff +} + +span.vhdlchar { + color: #000000 +} + +span.vhdlkeyword { + color: #700070 +} + +span.vhdllogic { + color: #ff0000 +} + +/* @end */ + +/* +.search { + color: #003399; + font-weight: bold; +} + +form.search { + margin-bottom: 0px; + margin-top: 0px; +} + +input.search { + font-size: 75%; + color: #000080; + font-weight: normal; + background-color: #e8eef2; +} +*/ + +td.tiny { + font-size: 75%; +} + +.dirtab { + padding: 4px; + border-collapse: collapse; + border: 1px solid #A3B4D7; +} + +th.dirtab { + background: #EBEFF6; + font-weight: bold; +} + +hr { + height: 0px; + border: none; + border-top: 1px solid #4A6AAA; +} + +hr.footer { + height: 1px; +} + +/* @group Member Descriptions */ + +table.memberdecls { + border-spacing: 0px; + padding: 0px; +} + +.mdescLeft, .mdescRight, +.memItemLeft, .memItemRight, +.memTemplItemLeft, .memTemplItemRight, .memTemplParams { + background-color: #F9FAFC; + border: none; + margin: 4px; + padding: 1px 0 0 8px; +} + +.mdescLeft, .mdescRight { + padding: 0px 8px 4px 8px; + color: #555; +} + +.memItemLeft, .memItemRight, .memTemplParams { + border-top: 1px solid #C4CFE5; +} + +.memItemLeft, .memTemplItemLeft { + white-space: nowrap; +} + +.memTemplParams { + color: #4665A2; + white-space: nowrap; +} + +/* @end */ + +/* @group Member Details */ + +/* Styles for detailed member documentation */ + +.memtemplate { + font-size: 80%; + color: #4665A2; + font-weight: normal; + margin-left: 3px; +} + +.memnav { + background-color: #EBEFF6; + border: 1px solid #A3B4D7; + text-align: center; + margin: 2px; + margin-right: 15px; + padding: 2px; +} + +.memitem { + padding: 0; + margin-bottom: 10px; +} + +.memname { + white-space: nowrap; + font-weight: bold; + margin-left: 6px; +} + +.memproto { + border-top: 1px solid #A8B8D9; + border-left: 1px solid #A8B8D9; + border-right: 1px solid #A8B8D9; + padding: 6px 0px 6px 0px; + color: #253555; + font-weight: bold; + text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); + /* firefox specific markup */ + -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; + -moz-border-radius-topright: 8px; + -moz-border-radius-topleft: 8px; + /* webkit specific markup */ + -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); + -webkit-border-top-right-radius: 8px; + -webkit-border-top-left-radius: 8px; + background-image:url('nav_f.png'); + background-repeat:repeat-x; + background-color: #E2E8F2; + +} + +.memdoc { + border-bottom: 1px solid #A8B8D9; + border-left: 1px solid #A8B8D9; + border-right: 1px solid #A8B8D9; + padding: 2px 5px; + background-color: #FBFCFD; + border-top-width: 0; + /* firefox specific markup */ + -moz-border-radius-bottomleft: 8px; + -moz-border-radius-bottomright: 8px; + -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; + background-image: -moz-linear-gradient(center top, #FFFFFF 0%, #FFFFFF 60%, #F7F8FB 95%, #EEF1F7); + /* webkit specific markup */ + -webkit-border-bottom-left-radius: 8px; + -webkit-border-bottom-right-radius: 8px; + -webkit-box-shadow: 5px 
5px 5px rgba(0, 0, 0, 0.15); + background-image: -webkit-gradient(linear,center top,center bottom,from(#FFFFFF), color-stop(0.6,#FFFFFF), color-stop(0.60,#FFFFFF), color-stop(0.95,#F7F8FB), to(#EEF1F7)); +} + +.paramkey { + text-align: right; +} + +.paramtype { + white-space: nowrap; +} + +.paramname { + color: #602020; + white-space: nowrap; +} +.paramname em { + font-style: normal; +} + +/* @end */ + +/* @group Directory (tree) */ + +/* for the tree view */ + +.ftvtree { + font-family: sans-serif; + margin: 0px; +} + +/* these are for tree view when used as main index */ + +.directory { + font-size: 9pt; + font-weight: bold; + margin: 5px; +} + +.directory h3 { + margin: 0px; + margin-top: 1em; + font-size: 11pt; +} + +/* +The following two styles can be used to replace the root node title +with an image of your choice. Simply uncomment the next two styles, +specify the name of your image and be sure to set 'height' to the +proper pixel height of your image. +*/ + +/* +.directory h3.swap { + height: 61px; + background-repeat: no-repeat; + background-image: url("yourimage.gif"); +} +.directory h3.swap span { + display: none; +} +*/ + +.directory > h3 { + margin-top: 0; +} + +.directory p { + margin: 0px; + white-space: nowrap; +} + +.directory div { + display: none; + margin: 0px; +} + +.directory img { + vertical-align: -30%; +} + +/* these are for tree view when not used as main index */ + +.directory-alt { + font-size: 100%; + font-weight: bold; +} + +.directory-alt h3 { + margin: 0px; + margin-top: 1em; + font-size: 11pt; +} + +.directory-alt > h3 { + margin-top: 0; +} + +.directory-alt p { + margin: 0px; + white-space: nowrap; +} + +.directory-alt div { + display: none; + margin: 0px; +} + +.directory-alt img { + vertical-align: -30%; +} + +/* @end */ + +div.dynheader { + margin-top: 8px; +} + +address { + font-style: normal; + color: #2A3D61; +} + +table.doxtable { + border-collapse:collapse; +} + +table.doxtable td, table.doxtable th { + border: 1px solid #2D4068; + padding: 3px 7px 2px; +} + +table.doxtable th { + background-color: #374F7F; + color: #FFFFFF; + font-size: 110%; + padding-bottom: 4px; + padding-top: 5px; + text-align:left; +} + +.tabsearch { + top: 0px; + left: 10px; + height: 36px; + background-image: url('tab_b.png'); + z-index: 101; + overflow: hidden; + font-size: 13px; +} + +.navpath ul +{ + font-size: 11px; + background-image:url('tab_b.png'); + background-repeat:repeat-x; + height:30px; + line-height:30px; + color:#8AA0CC; + border:solid 1px #C2CDE4; + overflow:hidden; + margin:0px; + padding:0px; +} + +.navpath li +{ + list-style-type:none; + float:left; + padding-left:10px; + padding-right: 15px; + background-image:url('bc_s.png'); + background-repeat:no-repeat; + background-position:right; + color:#364D7C; +} + +.navpath a +{ + height:32px; + display:block; + text-decoration: none; + outline: none; +} + +.navpath a:hover +{ + color:#6884BD; +} + +div.summary +{ + float: right; + font-size: 8pt; + padding-right: 5px; + width: 50%; + text-align: right; +} + +div.summary a +{ + white-space: nowrap; +} + +div.header +{ + background-image:url('nav_h.png'); + background-repeat:repeat-x; + background-color: #F9FAFC; + margin: 0px; + border-bottom: 1px solid #C4CFE5; +} + +div.headertitle +{ + padding: 5px 5px 5px 10px; +} + diff --git a/doc/html/doxygen.png b/doc/html/doxygen.png new file mode 100644 index 0000000..635ed52 Binary files /dev/null and b/doc/html/doxygen.png differ diff --git a/doc/html/files.html b/doc/html/files.html new file mode 100644 index 
0000000..fecb8d8 --- /dev/null +++ b/doc/html/files.html @@ -0,0 +1,80 @@ + + + + +Snort AI preprocessor module: File Index + + + + + + + + + +
+
+

File List

+
+
+Here is a list of all files with brief descriptions: + + + + + + +
sf_dynamic_preproc_lib.c
sf_preproc_info.h [code]
sfPolicyUserData.c
spp_ai.c
spp_ai.h [code]
stream.c
+
+ + + + +
+ +
+ + + + diff --git a/doc/html/functions.html b/doc/html/functions.html new file mode 100644 index 0000000..119b287 --- /dev/null +++ b/doc/html/functions.html @@ -0,0 +1,107 @@ + + + + +Snort AI preprocessor module: Data Fields + + + + + + + + + +
+Here is a list of all struct and union fields with links to the structures/unions they belong to: +
+ + + + +
+ +
+ + + + diff --git a/doc/html/functions_vars.html b/doc/html/functions_vars.html new file mode 100644 index 0000000..c2988a4 --- /dev/null +++ b/doc/html/functions_vars.html @@ -0,0 +1,107 @@ + + + + +Snort AI preprocessor module: Data Fields - Variables + + + + + + + + + +
+
+ + + + +
+ +
+ + + + diff --git a/doc/html/globals.html b/doc/html/globals.html new file mode 100644 index 0000000..a55e9a8 --- /dev/null +++ b/doc/html/globals.html @@ -0,0 +1,275 @@ + + + + +Snort AI preprocessor module: Data Fields + + + + + + + + + +
+Here is a list of all functions, variables, defines, enums, and typedefs with links to the files they belong to: + +

- _ -

+ + +

- a -

+ + +

- b -

+ + +

- d -

+ + +

- e -

+ + +

- f -

+ + +

- g -

+ + +

- h -

+ + +

- i -

+ + +

- l -

+ + +

- m -

+ + +

- p -

+ + +

- r -

+ + +

- s -

+ + +

- t -

+ + +

- u -

+
+ + + + +
+ +
+ + + + diff --git a/doc/html/globals_defs.html b/doc/html/globals_defs.html new file mode 100644 index 0000000..8d4bdf9 --- /dev/null +++ b/doc/html/globals_defs.html @@ -0,0 +1,111 @@ + + + + +Snort AI preprocessor module: Data Fields + + + + + + + + + +
+
+ + + + +
+ +
+ + + + diff --git a/doc/html/globals_enum.html b/doc/html/globals_enum.html new file mode 100644 index 0000000..d58719c --- /dev/null +++ b/doc/html/globals_enum.html @@ -0,0 +1,84 @@ + + + + +Snort AI preprocessor module: Data Fields + + + + + + + + + +
+
+ + + + +
+ +
+ + + + diff --git a/doc/html/globals_eval.html b/doc/html/globals_eval.html new file mode 100644 index 0000000..411e132 --- /dev/null +++ b/doc/html/globals_eval.html @@ -0,0 +1,87 @@ + + + + +Snort AI preprocessor module: Data Fields + + + + + + + + + +
+
+ + + + +
+ +
+ + + + diff --git a/doc/html/globals_func.html b/doc/html/globals_func.html new file mode 100644 index 0000000..c534670 --- /dev/null +++ b/doc/html/globals_func.html @@ -0,0 +1,129 @@ + + + + +Snort AI preprocessor module: Data Fields + + + + + + + + + +
+
+ + + + +
+ +
+ + + + diff --git a/doc/html/globals_type.html b/doc/html/globals_type.html new file mode 100644 index 0000000..177e922 --- /dev/null +++ b/doc/html/globals_type.html @@ -0,0 +1,90 @@ + + + + +Snort AI preprocessor module: Data Fields + + + + + + + + + +
+
+ + + + +
+ +
+ + + + diff --git a/doc/html/globals_vars.html b/doc/html/globals_vars.html new file mode 100644 index 0000000..dda05d3 --- /dev/null +++ b/doc/html/globals_vars.html @@ -0,0 +1,97 @@ + + + + +Snort AI preprocessor module: Data Fields + + + + + + + + + +
+
+ + + + +
+ +
+ + + + diff --git a/doc/html/group__sfPolicyConfig.html b/doc/html/group__sfPolicyConfig.html new file mode 100644 index 0000000..d768f02 --- /dev/null +++ b/doc/html/group__sfPolicyConfig.html @@ -0,0 +1,223 @@ + + + + +Snort AI preprocessor module: Sourcefire policy configuration module + + + + + + + + + +
+ +
+

Sourcefire policy configuration module

+
+
+ + + + + + + +

+Functions

tSfPolicyUserContextId sfPolicyConfigCreate (void)
void sfPolicyConfigDelete (tSfPolicyUserContextId pContext)
int sfPolicyUserDataSet (tSfPolicyUserContextId pContext, tSfPolicyId policyId, void *config)
void * sfPolicyUserDataClear (tSfPolicyUserContextId pContext, tSfPolicyId policyId)
int sfPolicyUserDataIterate (tSfPolicyUserContextId pContext, int(*callback)(tSfPolicyUserContextId pContext, tSfPolicyId policyId, void *config))
+

Detailed Description

+

Create a user policy configuration context. A context provides a facility for creating policy-specific data instances. The user can create as many policy instances as memory resources allow, and can create/delete contexts, set/clear/get user data for a specific policy, the default policy or the current policy, and iterate over the user data of all instances.

+

In the current design, each preprocessor uses this module directly to manage its policy-specific data instances. A future enhancement could extract the policy management code from every preprocessor and move it into a dedicated policy management module. That module would set a single pointer to the user data before calling the appropriate callback function in a preprocessor; for example, it would iterate over all policies and call the CleanExit function of every preprocessor for each policy. This would hide policies from the preprocessors and make them policy agnostic.
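The signatures listed above are enough to sketch the typical life cycle of a policy configuration context. The fragment below is an illustrative sketch only: the ExampleConf structure, the callback, the "sfPolicyUserData.h" header name and the callback return convention (zero = keep iterating) are assumptions, not something this documentation states.

#include <stdlib.h>
#include "sfPolicyUserData.h"   /* assumed header for the declarations above */

typedef struct { int portToCheck; } ExampleConf;   /* hypothetical per-policy data */

/* Callback for sfPolicyUserDataIterate(): detach and free one policy instance. */
static int example_free_cb ( tSfPolicyUserContextId ctx, tSfPolicyId policyId, void *config )
{
	sfPolicyUserDataClear ( ctx, policyId );   /* the module never frees user data itself */
	free ( config );
	return 0;                                  /* assumed: zero means "keep iterating" */
}

static void example_policy_usage ( void )
{
	tSfPolicyUserContextId ctx  = sfPolicyConfigCreate ();
	ExampleConf           *conf = ( ExampleConf* ) calloc ( 1, sizeof ( ExampleConf ));

	sfPolicyUserDataSet ( ctx, 0, conf );              /* policyId is 0 based */
	sfPolicyUserDataIterate ( ctx, example_free_cb );  /* visit every stored instance */
	sfPolicyConfigDelete ( ctx );
}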

+

Function Documentation

+ +
+
+ + + + + + + + + +
tSfPolicyUserContextId sfPolicyConfigCreate (void  ) 
+
+
+

Create a user context. Allocates a new context and returns it to the user. All transactions within a context are independent of transactions in any other context.

+
Returns:
tSfPolicyUserContextId
+ +
+
+ +
+
+ + + + + + + + + +
void sfPolicyConfigDelete (tSfPolicyUserContextId  pContext ) 
+
+
+

Delete a user policy data context.

+
Parameters:
+ + +
pContext 
+
+
+ +
+
+ +
+
+ + + + + + + + + + + + + + + + + + +
void* sfPolicyUserDataClear (tSfPolicyUserContextId  pContext,
tSfPolicyId  policyId 
)
+
+
+

The user is responsible for freeing any memory.

+ +
+
+ +
+
+ + + + + + + + + + + + + + + + + + +
int sfPolicyUserDataIterate (tSfPolicyUserContextId  pContext,
int(*)(tSfPolicyUserContextId pContext, tSfPolicyId policyId, void *config)  callback 
)
+
+
+ +
+
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + +
int sfPolicyUserDataSet (tSfPolicyUserContextId  pContext,
tSfPolicyId  policyId,
void *  config 
)
+
+
+

Store a pointer to user data.

+
Parameters:
+ + + + +
pContext 
policyId is 0 based.
config - pointer to user configuration.
+
+
+ +
+
+
+ + + + +
+ +
+ + + + diff --git a/doc/html/index.html b/doc/html/index.html new file mode 100644 index 0000000..2bd2436 --- /dev/null +++ b/doc/html/index.html @@ -0,0 +1,66 @@ + + + + +Snort AI preprocessor module: Main Page + + + + + + + + + +
+
+

Snort AI preprocessor module Documentation

+
+
+

0.1

+ + + + +
+ +
+ + + + diff --git a/doc/html/installdox b/doc/html/installdox new file mode 100755 index 0000000..2697a81 --- /dev/null +++ b/doc/html/installdox @@ -0,0 +1,117 @@ +#!/usr/bin/perl + +%subst = ( ); +$quiet = 0; + +if (open(F,"search.cfg")) +{ + $_= ; s/[ \t\n]*$//g ; $subst{"_doc"} = $_; + $_= ; s/[ \t\n]*$//g ; $subst{"_cgi"} = $_; +} + +while ( @ARGV ) { + $_ = shift @ARGV; + if ( s/^-// ) { + if ( /^l(.*)/ ) { + $v = ($1 eq "") ? shift @ARGV : $1; + ($v =~ /\/$/) || ($v .= "/"); + $_ = $v; + if ( /(.+)\@(.+)/ ) { + if ( exists $subst{$1} ) { + $subst{$1} = $2; + } else { + print STDERR "Unknown tag file $1 given with option -l\n"; + &usage(); + } + } else { + print STDERR "Argument $_ is invalid for option -l\n"; + &usage(); + } + } + elsif ( /^q/ ) { + $quiet = 1; + } + elsif ( /^\?|^h/ ) { + &usage(); + } + else { + print STDERR "Illegal option -$_\n"; + &usage(); + } + } + else { + push (@files, $_ ); + } +} + +foreach $sub (keys %subst) +{ + if ( $subst{$sub} eq "" ) + { + print STDERR "No substitute given for tag file `$sub'\n"; + &usage(); + } + elsif ( ! $quiet && $sub ne "_doc" && $sub ne "_cgi" ) + { + print "Substituting $subst{$sub} for each occurrence of tag file $sub\n"; + } +} + +if ( ! @files ) { + if (opendir(D,".")) { + foreach $file ( readdir(D) ) { + $match = ".html"; + next if ( $file =~ /^\.\.?$/ ); + ($file =~ /$match/) && (push @files, $file); + ($file =~ "tree.js") && (push @files, $file); + } + closedir(D); + } +} + +if ( ! @files ) { + print STDERR "Warning: No input files given and none found!\n"; +} + +foreach $f (@files) +{ + if ( ! $quiet ) { + print "Editing: $f...\n"; + } + $oldf = $f; + $f .= ".bak"; + unless (rename $oldf,$f) { + print STDERR "Error: cannot rename file $oldf\n"; + exit 1; + } + if (open(F,"<$f")) { + unless (open(G,">$oldf")) { + print STDERR "Error: opening file $oldf for writing\n"; + exit 1; + } + if ($oldf ne "tree.js") { + while () { + s/doxygen\=\"([^ \"\:\t\>\<]*)\:([^ \"\t\>\<]*)\" (href|src)=\"\2/doxygen\=\"$1:$subst{$1}\" \3=\"$subst{$1}/g; + print G "$_"; + } + } + else { + while () { + s/\"([^ \"\:\t\>\<]*)\:([^ \"\t\>\<]*)\", \"\2/\"$1:$subst{$1}\" ,\"$subst{$1}/g; + print G "$_"; + } + } + } + else { + print STDERR "Warning file $f does not exist\n"; + } + unlink $f; +} + +sub usage { + print STDERR "Usage: installdox [options] [html-file [html-file ...]]\n"; + print STDERR "Options:\n"; + print STDERR " -l tagfile\@linkName tag file + URL or directory \n"; + print STDERR " -q Quiet mode\n\n"; + exit 1; +} diff --git a/doc/html/modules.html b/doc/html/modules.html new file mode 100644 index 0000000..b099071 --- /dev/null +++ b/doc/html/modules.html @@ -0,0 +1,69 @@ + + + + +Snort AI preprocessor module: Module Index + + + + + + + + + +
+
+

Modules

+
+
+Here is a list of all modules: +
+ + + + +
+ +
+ + + + diff --git a/doc/html/nav_f.png b/doc/html/nav_f.png new file mode 100644 index 0000000..1b07a16 Binary files /dev/null and b/doc/html/nav_f.png differ diff --git a/doc/html/nav_h.png b/doc/html/nav_h.png new file mode 100644 index 0000000..01f5fa6 Binary files /dev/null and b/doc/html/nav_h.png differ diff --git a/doc/html/open.png b/doc/html/open.png new file mode 100644 index 0000000..7b35d2c Binary files /dev/null and b/doc/html/open.png differ diff --git a/doc/html/search/all_5f.html b/doc/html/search/all_5f.html new file mode 100644 index 0000000..3524402 --- /dev/null +++ b/doc/html/search/all_5f.html @@ -0,0 +1,40 @@ + + + + + + + +
+
Loading...
+
+ +
+
+
+ _AI_stream_free + stream.c +
+
+ +
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/all_61.html b/doc/html/search/all_61.html new file mode 100644 index 0000000..da33e78 --- /dev/null +++ b/doc/html/search/all_61.html @@ -0,0 +1,71 @@ + + + + + + + +
+
Loading...
+
+
+ AI_config + spp_ai.h +
+
+ +
+
+ AI_init + spp_ai.c +
+
+
+
+ AI_parse + spp_ai.c +
+
+ +
+
+ AI_process + spp_ai.c +
+
+ +
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/all_62.html b/doc/html/search/all_62.html new file mode 100644 index 0000000..478a18f --- /dev/null +++ b/doc/html/search/all_62.html @@ -0,0 +1,32 @@ + + + + + + + +
+
Loading...
+
+
+ BOOL + spp_ai.h +
+
+
+
+ BUILD_VERSION + sf_preproc_info.h +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/all_64.html b/doc/html/search/all_64.html new file mode 100644 index 0000000..a2a37bd --- /dev/null +++ b/doc/html/search/all_64.html @@ -0,0 +1,50 @@ + + + + + + + +
+
Loading...
+
+
+ dst_port + pkt_key +
+
+
+
+ DST_PORT_MATCH + spp_ai.c +
+
+
+
+ DST_PORT_MATCH_STR + spp_ai.c +
+
+
+
+ DYNAMIC_PREPROC_SETUP + sf_preproc_info.h +
+
+
+
+ DynamicPreprocessorFatalMessage + sf_dynamic_preproc_lib.c +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/all_65.html b/doc/html/search/all_65.html new file mode 100644 index 0000000..ccff20a --- /dev/null +++ b/doc/html/search/all_65.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ ex_config + spp_ai.c +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/all_66.html b/doc/html/search/all_66.html new file mode 100644 index 0000000..8b9f9ba --- /dev/null +++ b/doc/html/search/all_66.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ false + spp_ai.h +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/all_67.html b/doc/html/search/all_67.html new file mode 100644 index 0000000..5804711 --- /dev/null +++ b/doc/html/search/all_67.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ GENERATOR_EXAMPLE + spp_ai.c +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/all_68.html b/doc/html/search/all_68.html new file mode 100644 index 0000000..46500ff --- /dev/null +++ b/doc/html/search/all_68.html @@ -0,0 +1,38 @@ + + + + + + + +
+
Loading...
+
+
+ hash + stream.c +
+
+
+
+ hashCleanupInterval + _AI_config +
+
+
+
+ hh + pkt_info +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/all_69.html b/doc/html/search/all_69.html new file mode 100644 index 0000000..129334f --- /dev/null +++ b/doc/html/search/all_69.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ InitializePreprocessor + sf_dynamic_preproc_lib.c +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/all_6b.html b/doc/html/search/all_6b.html new file mode 100644 index 0000000..765184f --- /dev/null +++ b/doc/html/search/all_6b.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ key + pkt_info +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/all_6c.html b/doc/html/search/all_6c.html new file mode 100644 index 0000000..a17e561 --- /dev/null +++ b/doc/html/search/all_6c.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ LibVersion + sf_dynamic_preproc_lib.c +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/all_6d.html b/doc/html/search/all_6d.html new file mode 100644 index 0000000..70215d7 --- /dev/null +++ b/doc/html/search/all_6d.html @@ -0,0 +1,32 @@ + + + + + + + +
+
Loading...
+
+
+ MAJOR_VERSION + sf_preproc_info.h +
+
+
+
+ MINOR_VERSION + sf_preproc_info.h +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/all_6e.html b/doc/html/search/all_6e.html new file mode 100644 index 0000000..7db4670 --- /dev/null +++ b/doc/html/search/all_6e.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ next + pkt_info +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/all_70.html b/doc/html/search/all_70.html new file mode 100644 index 0000000..9688127 --- /dev/null +++ b/doc/html/search/all_70.html @@ -0,0 +1,54 @@ + + + + + + + +
+
Loading...
+
+
+ parserPolicyId + sfPolicyUserData.c +
+
+
+
+ pkt + pkt_info +
+
+
+
+ pkt_info +
+
+
+
+ pkt_key +
+
+
+
+ portToCheck + _AI_config +
+
+
+
+ PREPROC_NAME + sf_preproc_info.h +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/all_72.html b/doc/html/search/all_72.html new file mode 100644 index 0000000..02e5703 --- /dev/null +++ b/doc/html/search/all_72.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ runtimePolicyId + sfPolicyUserData.c +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/all_73.html b/doc/html/search/all_73.html new file mode 100644 index 0000000..e55c20a --- /dev/null +++ b/doc/html/search/all_73.html @@ -0,0 +1,104 @@ + + + + + + + +
+
Loading...
+ + +
+
+ sfPolicyConfigCreate + sfPolicyUserData.c +
+
+
+
+ sfPolicyConfigDelete + sfPolicyUserData.c +
+
+ +
+
+ sfPolicyUserDataClear + sfPolicyUserData.c +
+
+
+
+ sfPolicyUserDataIterate + sfPolicyUserData.c +
+
+
+
+ sfPolicyUserDataSet + sfPolicyUserData.c +
+
+
+
+ spp_ai.c +
+
+
+
+ spp_ai.h +
+
+
+
+ src_ip + pkt_key +
+
+
+
+ SRC_PORT_MATCH + spp_ai.c +
+
+
+
+ SRC_PORT_MATCH_STR + spp_ai.c +
+
+
+
+ stream.c +
+
+
+
+ streamExpireInterval + _AI_config +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/all_74.html b/doc/html/search/all_74.html new file mode 100644 index 0000000..d9a5a00 --- /dev/null +++ b/doc/html/search/all_74.html @@ -0,0 +1,32 @@ + + + + + + + +
+
Loading...
+
+
+ timestamp + pkt_info +
+
+
+
+ true + spp_ai.h +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/all_75.html b/doc/html/search/all_75.html new file mode 100644 index 0000000..aa48425 --- /dev/null +++ b/doc/html/search/all_75.html @@ -0,0 +1,32 @@ + + + + + + + +
+
Loading...
+
+
+ uint16_t + spp_ai.h +
+
+
+
+ uint32_t + spp_ai.h +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/classes_5f.html b/doc/html/search/classes_5f.html new file mode 100644 index 0000000..e707a94 --- /dev/null +++ b/doc/html/search/classes_5f.html @@ -0,0 +1,25 @@ + + + + + + + +
+
Loading...
+
+ +
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/classes_70.html b/doc/html/search/classes_70.html new file mode 100644 index 0000000..621bb35 --- /dev/null +++ b/doc/html/search/classes_70.html @@ -0,0 +1,30 @@ + + + + + + + +
+
Loading...
+
+
+ pkt_info +
+
+
+
+ pkt_key +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/close.png b/doc/html/search/close.png new file mode 100644 index 0000000..9342d3d Binary files /dev/null and b/doc/html/search/close.png differ diff --git a/doc/html/search/defines_62.html b/doc/html/search/defines_62.html new file mode 100644 index 0000000..bfead72 --- /dev/null +++ b/doc/html/search/defines_62.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ BUILD_VERSION + sf_preproc_info.h +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/defines_64.html b/doc/html/search/defines_64.html new file mode 100644 index 0000000..79d2e40 --- /dev/null +++ b/doc/html/search/defines_64.html @@ -0,0 +1,38 @@ + + + + + + + +
+
Loading...
+
+
+ DST_PORT_MATCH + spp_ai.c +
+
+
+
+ DST_PORT_MATCH_STR + spp_ai.c +
+
+
+
+ DYNAMIC_PREPROC_SETUP + sf_preproc_info.h +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/defines_67.html b/doc/html/search/defines_67.html new file mode 100644 index 0000000..5804711 --- /dev/null +++ b/doc/html/search/defines_67.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ GENERATOR_EXAMPLE + spp_ai.c +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/defines_6d.html b/doc/html/search/defines_6d.html new file mode 100644 index 0000000..70215d7 --- /dev/null +++ b/doc/html/search/defines_6d.html @@ -0,0 +1,32 @@ + + + + + + + +
+
Loading...
+
+
+ MAJOR_VERSION + sf_preproc_info.h +
+
+
+
+ MINOR_VERSION + sf_preproc_info.h +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/defines_70.html b/doc/html/search/defines_70.html new file mode 100644 index 0000000..0e1d892 --- /dev/null +++ b/doc/html/search/defines_70.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ PREPROC_NAME + sf_preproc_info.h +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/defines_73.html b/doc/html/search/defines_73.html new file mode 100644 index 0000000..285529c --- /dev/null +++ b/doc/html/search/defines_73.html @@ -0,0 +1,32 @@ + + + + + + + +
+
Loading...
+
+
+ SRC_PORT_MATCH + spp_ai.c +
+
+
+
+ SRC_PORT_MATCH_STR + spp_ai.c +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/enums_62.html b/doc/html/search/enums_62.html new file mode 100644 index 0000000..c1c60b3 --- /dev/null +++ b/doc/html/search/enums_62.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ BOOL + spp_ai.h +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/enumvalues_66.html b/doc/html/search/enumvalues_66.html new file mode 100644 index 0000000..8b9f9ba --- /dev/null +++ b/doc/html/search/enumvalues_66.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ false + spp_ai.h +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/enumvalues_74.html b/doc/html/search/enumvalues_74.html new file mode 100644 index 0000000..28c9396 --- /dev/null +++ b/doc/html/search/enumvalues_74.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ true + spp_ai.h +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/files_73.html b/doc/html/search/files_73.html new file mode 100644 index 0000000..be2177b --- /dev/null +++ b/doc/html/search/files_73.html @@ -0,0 +1,50 @@ + + + + + + + +
+
Loading...
+ + + +
+
+ spp_ai.c +
+
+
+
+ spp_ai.h +
+
+
+
+ stream.c +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/functions_5f.html b/doc/html/search/functions_5f.html new file mode 100644 index 0000000..3071030 --- /dev/null +++ b/doc/html/search/functions_5f.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ _AI_stream_free + stream.c +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/functions_61.html b/doc/html/search/functions_61.html new file mode 100644 index 0000000..41619e6 --- /dev/null +++ b/doc/html/search/functions_61.html @@ -0,0 +1,65 @@ + + + + + + + +
+
Loading...
+ +
+
+ AI_init + spp_ai.c +
+
+
+
+ AI_parse + spp_ai.c +
+
+ +
+
+ AI_process + spp_ai.c +
+
+ +
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/functions_64.html b/doc/html/search/functions_64.html new file mode 100644 index 0000000..a9191e9 --- /dev/null +++ b/doc/html/search/functions_64.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ DynamicPreprocessorFatalMessage + sf_dynamic_preproc_lib.c +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/functions_69.html b/doc/html/search/functions_69.html new file mode 100644 index 0000000..129334f --- /dev/null +++ b/doc/html/search/functions_69.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ InitializePreprocessor + sf_dynamic_preproc_lib.c +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/functions_6c.html b/doc/html/search/functions_6c.html new file mode 100644 index 0000000..a17e561 --- /dev/null +++ b/doc/html/search/functions_6c.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ LibVersion + sf_dynamic_preproc_lib.c +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/functions_73.html b/doc/html/search/functions_73.html new file mode 100644 index 0000000..6738e0b --- /dev/null +++ b/doc/html/search/functions_73.html @@ -0,0 +1,50 @@ + + + + + + + +
+
Loading...
+
+
+ sfPolicyConfigCreate + sfPolicyUserData.c +
+
+
+
+ sfPolicyConfigDelete + sfPolicyUserData.c +
+
+
+
+ sfPolicyUserDataClear + sfPolicyUserData.c +
+
+
+
+ sfPolicyUserDataIterate + sfPolicyUserData.c +
+
+
+
+ sfPolicyUserDataSet + sfPolicyUserData.c +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/mag_sel.png b/doc/html/search/mag_sel.png new file mode 100644 index 0000000..81f6040 Binary files /dev/null and b/doc/html/search/mag_sel.png differ diff --git a/doc/html/search/nomatches.html b/doc/html/search/nomatches.html new file mode 100644 index 0000000..b1ded27 --- /dev/null +++ b/doc/html/search/nomatches.html @@ -0,0 +1,12 @@ + + + + + + + +
+
No Matches
+
+ + diff --git a/doc/html/search/search.css b/doc/html/search/search.css new file mode 100644 index 0000000..50249e5 --- /dev/null +++ b/doc/html/search/search.css @@ -0,0 +1,240 @@ +/*---------------- Search Box */ + +#FSearchBox { + float: left; +} + +#searchli { + float: right; + display: block; + width: 170px; + height: 36px; +} + +#MSearchBox { + white-space : nowrap; + position: absolute; + float: none; + display: inline; + margin-top: 8px; + right: 0px; + width: 170px; + z-index: 102; +} + +#MSearchBox .left +{ + display:block; + position:absolute; + left:10px; + width:20px; + height:19px; + background:url('search_l.png') no-repeat; + background-position:right; +} + +#MSearchSelect { + display:block; + position:absolute; + width:20px; + height:19px; +} + +.left #MSearchSelect { + left:4px; +} + +.right #MSearchSelect { + right:5px; +} + +#MSearchField { + display:block; + position:absolute; + height:19px; + background:url('search_m.png') repeat-x; + border:none; + width:116px; + margin-left:20px; + padding-left:4px; + color: #909090; + outline: none; + font: 9pt Arial, Verdana, sans-serif; +} + +#FSearchBox #MSearchField { + margin-left:15px; +} + +#MSearchBox .right { + display:block; + position:absolute; + right:10px; + top:0px; + width:20px; + height:19px; + background:url('search_r.png') no-repeat; + background-position:left; +} + +#MSearchClose { + display: none; + position: absolute; + top: 4px; + background : none; + border: none; + margin: 0px 4px 0px 0px; + padding: 0px 0px; + outline: none; +} + +.left #MSearchClose { + left: 6px; +} + +.right #MSearchClose { + right: 2px; +} + +.MSearchBoxActive #MSearchField { + color: #000000; +} + +/*---------------- Search filter selection */ + +#MSearchSelectWindow { + display: none; + position: absolute; + left: 0; top: 0; + border: 1px solid #90A5CE; + background-color: #F9FAFC; + z-index: 1; + padding-top: 4px; + padding-bottom: 4px; + -moz-border-radius: 4px; + -webkit-border-top-left-radius: 4px; + -webkit-border-top-right-radius: 4px; + -webkit-border-bottom-left-radius: 4px; + -webkit-border-bottom-right-radius: 4px; + -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); +} + +.SelectItem { + font: 8pt Arial, Verdana, sans-serif; + padding-left: 2px; + padding-right: 12px; + border: 0px; +} + +span.SelectionMark { + margin-right: 4px; + font-family: monospace; + outline-style: none; + text-decoration: none; +} + +a.SelectItem { + display: block; + outline-style: none; + color: #000000; + text-decoration: none; + padding-left: 6px; + padding-right: 12px; +} + +a.SelectItem:focus, +a.SelectItem:active { + color: #000000; + outline-style: none; + text-decoration: none; +} + +a.SelectItem:hover { + color: #FFFFFF; + background-color: #3D578C; + outline-style: none; + text-decoration: none; + cursor: pointer; + display: block; +} + +/*---------------- Search results window */ + +iframe#MSearchResults { + width: 60ex; + height: 15em; +} + +#MSearchResultsWindow { + display: none; + position: absolute; + left: 0; top: 0; + border: 1px solid #000; + background-color: #EEF1F7; +} + +/* ----------------------------------- */ + + +#SRIndex { + clear:both; + padding-bottom: 15px; +} + +.SREntry { + font-size: 10pt; + padding-left: 1ex; +} + +.SRPage .SREntry { + font-size: 8pt; + padding: 1px 5px; +} + +body.SRPage { + margin: 5px 2px; +} + +.SRChildren { + padding-left: 3ex; padding-bottom: .5em +} + +.SRPage .SRChildren { + display: none; +} + +.SRSymbol { + font-weight: bold; + color: #425E97; + font-family: Arial, Verdana, sans-serif; 
+ text-decoration: none; + outline: none; +} + +a.SRScope { + display: block; + color: #425E97; + font-family: Arial, Verdana, sans-serif; + text-decoration: none; + outline: none; +} + +a.SRSymbol:focus, a.SRSymbol:active, +a.SRScope:focus, a.SRScope:active { + text-decoration: underline; +} + +.SRPage .SRStatus { + padding: 2px 5px; + font-size: 8pt; + font-style: italic; +} + +.SRResult { + display: none; +} + +DIV.searchresults { + margin-left: 10px; + margin-right: 10px; +} diff --git a/doc/html/search/search.js b/doc/html/search/search.js new file mode 100644 index 0000000..7d8a494 --- /dev/null +++ b/doc/html/search/search.js @@ -0,0 +1,742 @@ +// Search script generated by doxygen +// Copyright (C) 2009 by Dimitri van Heesch. + +// The code in this file is loosly based on main.js, part of Natural Docs, +// which is Copyright (C) 2003-2008 Greg Valure +// Natural Docs is licensed under the GPL. + +var indexSectionsWithContent = +{ + 0: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010110111111011110101111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + 1: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + 2: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + 3: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010100100001001000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + 4: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000110010010010101110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + 5: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + 6: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + 7: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + 8: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010100100000100100100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +}; + +var indexSectionNames = +{ + 0: "all", + 1: "classes", + 2: "files", + 3: "functions", + 4: "variables", + 5: "typedefs", + 6: "enums", + 7: "enumvalues", + 8: "defines" +}; + +function convertToId(search) +{ + 
var result = ''; + for (i=0;i do a search + { + this.Search(); + } + } + + this.OnSearchSelectKey = function(evt) + { + var e = (evt) ? evt : window.event; // for IE + if (e.keyCode==40 && this.searchIndex0) // Up + { + this.searchIndex--; + this.OnSelectItem(this.searchIndex); + } + else if (e.keyCode==13 || e.keyCode==27) + { + this.OnSelectItem(this.searchIndex); + this.CloseSelectionWindow(); + this.DOMSearchField().focus(); + } + return false; + } + + // --------- Actions + + // Closes the results window. + this.CloseResultsWindow = function() + { + this.DOMPopupSearchResultsWindow().style.display = 'none'; + this.DOMSearchClose().style.display = 'none'; + this.Activate(false); + } + + this.CloseSelectionWindow = function() + { + this.DOMSearchSelectWindow().style.display = 'none'; + } + + // Performs a search. + this.Search = function() + { + this.keyTimeout = 0; + + // strip leading whitespace + var searchValue = this.DOMSearchField().value.replace(/^ +/, ""); + + var code = searchValue.toLowerCase().charCodeAt(0); + var hexCode; + if (code<16) + { + hexCode="0"+code.toString(16); + } + else + { + hexCode=code.toString(16); + } + + var resultsPage; + var resultsPageWithSearch; + var hasResultsPage; + + if (indexSectionsWithContent[this.searchIndex].charAt(code) == '1') + { + resultsPage = this.resultsPath + '/' + indexSectionNames[this.searchIndex] + '_' + hexCode + '.html'; + resultsPageWithSearch = resultsPage+'?'+escape(searchValue); + hasResultsPage = true; + } + else // nothing available for this search term + { + resultsPage = this.resultsPath + '/nomatches.html'; + resultsPageWithSearch = resultsPage; + hasResultsPage = false; + } + + window.frames.MSearchResults.location.href = resultsPageWithSearch; + var domPopupSearchResultsWindow = this.DOMPopupSearchResultsWindow(); + + if (domPopupSearchResultsWindow.style.display!='block') + { + var domSearchBox = this.DOMSearchBox(); + this.DOMSearchClose().style.display = 'inline'; + if (this.insideFrame) + { + var domPopupSearchResults = this.DOMPopupSearchResults(); + domPopupSearchResultsWindow.style.position = 'relative'; + domPopupSearchResultsWindow.style.display = 'block'; + var width = document.body.clientWidth - 8; // the -8 is for IE :-( + domPopupSearchResultsWindow.style.width = width + 'px'; + domPopupSearchResults.style.width = width + 'px'; + } + else + { + var domPopupSearchResults = this.DOMPopupSearchResults(); + var left = getXPos(domSearchBox) + 150; // domSearchBox.offsetWidth; + var top = getYPos(domSearchBox) + 20; // domSearchBox.offsetHeight + 1; + domPopupSearchResultsWindow.style.display = 'block'; + left -= domPopupSearchResults.offsetWidth; + domPopupSearchResultsWindow.style.top = top + 'px'; + domPopupSearchResultsWindow.style.left = left + 'px'; + } + } + + this.lastSearchValue = searchValue; + this.lastResultsPage = resultsPage; + } + + // -------- Activation Functions + + // Activates or deactivates the search panel, resetting things to + // their default values if necessary. 
+ this.Activate = function(isActive) + { + if (isActive || // open it + this.DOMPopupSearchResultsWindow().style.display == 'block' + ) + { + this.DOMSearchBox().className = 'MSearchBoxActive'; + + var searchField = this.DOMSearchField(); + + if (searchField.value == this.searchLabel) // clear "Search" term upon entry + { + searchField.value = ''; + this.searchActive = true; + } + } + else if (!isActive) // directly remove the panel + { + this.DOMSearchBox().className = 'MSearchBoxInactive'; + this.DOMSearchField().value = this.searchLabel; + this.searchActive = false; + this.lastSearchValue = '' + this.lastResultsPage = ''; + } + } +} + +// ----------------------------------------------------------------------- + +// The class that handles everything on the search results page. +function SearchResults(name) +{ + // The number of matches from the last run of . + this.lastMatchCount = 0; + this.lastKey = 0; + this.repeatOn = false; + + // Toggles the visibility of the passed element ID. + this.FindChildElement = function(id) + { + var parentElement = document.getElementById(id); + var element = parentElement.firstChild; + + while (element && element!=parentElement) + { + if (element.nodeName == 'DIV' && element.className == 'SRChildren') + { + return element; + } + + if (element.nodeName == 'DIV' && element.hasChildNodes()) + { + element = element.firstChild; + } + else if (element.nextSibling) + { + element = element.nextSibling; + } + else + { + do + { + element = element.parentNode; + } + while (element && element!=parentElement && !element.nextSibling); + + if (element && element!=parentElement) + { + element = element.nextSibling; + } + } + } + } + + this.Toggle = function(id) + { + var element = this.FindChildElement(id); + if (element) + { + if (element.style.display == 'block') + { + element.style.display = 'none'; + } + else + { + element.style.display = 'block'; + } + } + } + + // Searches for the passed string. If there is no parameter, + // it takes it from the URL query. + // + // Always returns true, since other documents may try to call it + // and that may or may not be possible. + this.Search = function(search) + { + if (!search) // get search word from URL + { + search = window.location.search; + search = search.substring(1); // Remove the leading '?' 
+ search = unescape(search); + } + + search = search.replace(/^ +/, ""); // strip leading spaces + search = search.replace(/ +$/, ""); // strip trailing spaces + search = search.toLowerCase(); + search = convertToId(search); + + var resultRows = document.getElementsByTagName("div"); + var matches = 0; + + var i = 0; + while (i < resultRows.length) + { + var row = resultRows.item(i); + if (row.className == "SRResult") + { + var rowMatchName = row.id.toLowerCase(); + rowMatchName = rowMatchName.replace(/^sr\d*_/, ''); // strip 'sr123_' + + if (search.length<=rowMatchName.length && + rowMatchName.substr(0, search.length)==search) + { + row.style.display = 'block'; + matches++; + } + else + { + row.style.display = 'none'; + } + } + i++; + } + document.getElementById("Searching").style.display='none'; + if (matches == 0) // no results + { + document.getElementById("NoMatches").style.display='block'; + } + else // at least one result + { + document.getElementById("NoMatches").style.display='none'; + } + this.lastMatchCount = matches; + return true; + } + + // return the first item with index index or higher that is visible + this.NavNext = function(index) + { + var focusItem; + while (1) + { + var focusName = 'Item'+index; + focusItem = document.getElementById(focusName); + if (focusItem && focusItem.parentNode.parentNode.style.display=='block') + { + break; + } + else if (!focusItem) // last element + { + break; + } + focusItem=null; + index++; + } + return focusItem; + } + + this.NavPrev = function(index) + { + var focusItem; + while (1) + { + var focusName = 'Item'+index; + focusItem = document.getElementById(focusName); + if (focusItem && focusItem.parentNode.parentNode.style.display=='block') + { + break; + } + else if (!focusItem) // last element + { + break; + } + focusItem=null; + index--; + } + return focusItem; + } + + this.ProcessKeys = function(e) + { + if (e.type == "keydown") + { + this.repeatOn = false; + this.lastKey = e.keyCode; + } + else if (e.type == "keypress") + { + if (!this.repeatOn) + { + if (this.lastKey) this.repeatOn = true; + return false; // ignore first keypress after keydown + } + } + else if (e.type == "keyup") + { + this.lastKey = 0; + this.repeatOn = false; + } + return this.lastKey!=0; + } + + this.Nav = function(evt,itemIndex) + { + var e = (evt) ? evt : window.event; // for IE + if (e.keyCode==13) return true; + if (!this.ProcessKeys(e)) return false; + + if (this.lastKey==38) // Up + { + var newIndex = itemIndex-1; + var focusItem = this.NavPrev(newIndex); + if (focusItem) + { + var child = this.FindChildElement(focusItem.parentNode.parentNode.id); + if (child && child.style.display == 'block') // children visible + { + var n=0; + var tmpElem; + while (1) // search for last child + { + tmpElem = document.getElementById('Item'+newIndex+'_c'+n); + if (tmpElem) + { + focusItem = tmpElem; + } + else // found it! 
+ { + break; + } + n++; + } + } + } + if (focusItem) + { + focusItem.focus(); + } + else // return focus to search field + { + parent.document.getElementById("MSearchField").focus(); + } + } + else if (this.lastKey==40) // Down + { + var newIndex = itemIndex+1; + var focusItem; + var item = document.getElementById('Item'+itemIndex); + var elem = this.FindChildElement(item.parentNode.parentNode.id); + if (elem && elem.style.display == 'block') // children visible + { + focusItem = document.getElementById('Item'+itemIndex+'_c0'); + } + if (!focusItem) focusItem = this.NavNext(newIndex); + if (focusItem) focusItem.focus(); + } + else if (this.lastKey==39) // Right + { + var item = document.getElementById('Item'+itemIndex); + var elem = this.FindChildElement(item.parentNode.parentNode.id); + if (elem) elem.style.display = 'block'; + } + else if (this.lastKey==37) // Left + { + var item = document.getElementById('Item'+itemIndex); + var elem = this.FindChildElement(item.parentNode.parentNode.id); + if (elem) elem.style.display = 'none'; + } + else if (this.lastKey==27) // Escape + { + parent.searchBox.CloseResultsWindow(); + parent.document.getElementById("MSearchField").focus(); + } + else if (this.lastKey==13) // Enter + { + return true; + } + return false; + } + + this.NavChild = function(evt,itemIndex,childIndex) + { + var e = (evt) ? evt : window.event; // for IE + if (e.keyCode==13) return true; + if (!this.ProcessKeys(e)) return false; + + if (this.lastKey==38) // Up + { + if (childIndex>0) + { + var newIndex = childIndex-1; + document.getElementById('Item'+itemIndex+'_c'+newIndex).focus(); + } + else // already at first child, jump to parent + { + document.getElementById('Item'+itemIndex).focus(); + } + } + else if (this.lastKey==40) // Down + { + var newIndex = childIndex+1; + var elem = document.getElementById('Item'+itemIndex+'_c'+newIndex); + if (!elem) // last child, jump to parent next parent + { + elem = this.NavNext(itemIndex+1); + } + if (elem) + { + elem.focus(); + } + } + else if (this.lastKey==27) // Escape + { + parent.searchBox.CloseResultsWindow(); + parent.document.getElementById("MSearchField").focus(); + } + else if (this.lastKey==13) // Enter + { + return true; + } + return false; + } +} diff --git a/doc/html/search/search_l.png b/doc/html/search/search_l.png new file mode 100644 index 0000000..c872f4d Binary files /dev/null and b/doc/html/search/search_l.png differ diff --git a/doc/html/search/search_m.png b/doc/html/search/search_m.png new file mode 100644 index 0000000..b429a16 Binary files /dev/null and b/doc/html/search/search_m.png differ diff --git a/doc/html/search/search_r.png b/doc/html/search/search_r.png new file mode 100644 index 0000000..97ee8b4 Binary files /dev/null and b/doc/html/search/search_r.png differ diff --git a/doc/html/search/typedefs_61.html b/doc/html/search/typedefs_61.html new file mode 100644 index 0000000..8093bed --- /dev/null +++ b/doc/html/search/typedefs_61.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ AI_config + spp_ai.h +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/typedefs_75.html b/doc/html/search/typedefs_75.html new file mode 100644 index 0000000..aa48425 --- /dev/null +++ b/doc/html/search/typedefs_75.html @@ -0,0 +1,32 @@ + + + + + + + +
+
Loading...
+
+
+ uint16_t + spp_ai.h +
+
+
+
+ uint32_t + spp_ai.h +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/variables_5f.html b/doc/html/search/variables_5f.html new file mode 100644 index 0000000..be8fa09 --- /dev/null +++ b/doc/html/search/variables_5f.html @@ -0,0 +1,29 @@ + + + + + + + +
+
Loading...
+ +
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/variables_64.html b/doc/html/search/variables_64.html new file mode 100644 index 0000000..9b77f00 --- /dev/null +++ b/doc/html/search/variables_64.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ dst_port + pkt_key +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/variables_65.html b/doc/html/search/variables_65.html new file mode 100644 index 0000000..ccff20a --- /dev/null +++ b/doc/html/search/variables_65.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ ex_config + spp_ai.c +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/variables_68.html b/doc/html/search/variables_68.html new file mode 100644 index 0000000..46500ff --- /dev/null +++ b/doc/html/search/variables_68.html @@ -0,0 +1,38 @@ + + + + + + + +
+
Loading...
+
+
+ hash + stream.c +
+
+
+
+ hashCleanupInterval + _AI_config +
+
+
+
+ hh + pkt_info +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/variables_6b.html b/doc/html/search/variables_6b.html new file mode 100644 index 0000000..765184f --- /dev/null +++ b/doc/html/search/variables_6b.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ key + pkt_info +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/variables_6e.html b/doc/html/search/variables_6e.html new file mode 100644 index 0000000..7db4670 --- /dev/null +++ b/doc/html/search/variables_6e.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ next + pkt_info +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/variables_70.html b/doc/html/search/variables_70.html new file mode 100644 index 0000000..bffc90b --- /dev/null +++ b/doc/html/search/variables_70.html @@ -0,0 +1,38 @@ + + + + + + + +
+
Loading...
+
+
+ parserPolicyId + sfPolicyUserData.c +
+
+
+
+ pkt + pkt_info +
+
+
+
+ portToCheck + _AI_config +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/variables_72.html b/doc/html/search/variables_72.html new file mode 100644 index 0000000..02e5703 --- /dev/null +++ b/doc/html/search/variables_72.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ runtimePolicyId + sfPolicyUserData.c +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/variables_73.html b/doc/html/search/variables_73.html new file mode 100644 index 0000000..6ce91b2 --- /dev/null +++ b/doc/html/search/variables_73.html @@ -0,0 +1,32 @@ + + + + + + + +
+
Loading...
+
+
+ src_ip + pkt_key +
+
+
+
+ streamExpireInterval + _AI_config +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/search/variables_74.html b/doc/html/search/variables_74.html new file mode 100644 index 0000000..08f5c5a --- /dev/null +++ b/doc/html/search/variables_74.html @@ -0,0 +1,26 @@ + + + + + + + +
+
Loading...
+
+
+ timestamp + pkt_info +
+
+
Searching...
+
No Matches
+ +
+ + diff --git a/doc/html/sfPolicyUserData_8c.html b/doc/html/sfPolicyUserData_8c.html new file mode 100644 index 0000000..dcd82e5 --- /dev/null +++ b/doc/html/sfPolicyUserData_8c.html @@ -0,0 +1,119 @@ + + + + +Snort AI preprocessor module: sfPolicyUserData.c File Reference + + + + + + + + + +
+ +
+

sfPolicyUserData.c File Reference

+
+
+#include "stdlib.h"
+#include "string.h"
+#include "sfPolicy.h"
+#include "sfPolicyUserData.h"
+ + + + + + + + + + +

+Functions

tSfPolicyUserContextId sfPolicyConfigCreate (void)
void sfPolicyConfigDelete (tSfPolicyUserContextId pContext)
int sfPolicyUserDataSet (tSfPolicyUserContextId pContext, tSfPolicyId policyId, void *config)
void * sfPolicyUserDataClear (tSfPolicyUserContextId pContext, tSfPolicyId policyId)
int sfPolicyUserDataIterate (tSfPolicyUserContextId pContext, int(*callback)(tSfPolicyUserContextId pContext, tSfPolicyId policyId, void *config))

+Variables

tSfPolicyId runtimePolicyId = 0
tSfPolicyId parserPolicyId = 0
+

Variable Documentation

+ +
+
+ + + + +
tSfPolicyId parserPolicyId = 0
+
+
+ +
+
+ +
+
+ + + + +
tSfPolicyId runtimePolicyId = 0
+
+
+ +
+
+
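To make the sfPolicy* API listed above concrete, here is a minimal usage sketch based only on the signatures documented on this page (sfPolicyConfigCreate, sfPolicyUserDataSet, sfPolicyUserDataIterate, sfPolicyUserDataClear, sfPolicyConfigDelete). The my_config type, the policy id value and the callback body are illustrative placeholders, not part of the module.

    #include <stdlib.h>
    #include "sfPolicy.h"
    #include "sfPolicyUserData.h"

    /* Hypothetical per-policy configuration object (not from the module). */
    typedef struct { int portToCheck; } my_config;

    /* Callback invoked by sfPolicyUserDataIterate for each stored config. */
    static int visit_policy(tSfPolicyUserContextId ctx, tSfPolicyId id, void *cfg)
    {
        (void) ctx; (void) id; (void) cfg;   /* inspect the per-policy data here */
        return 0;
    }

    static void sfpolicy_usage_sketch(void)
    {
        tSfPolicyUserContextId ctx = sfPolicyConfigCreate();
        my_config *cfg = calloc(1, sizeof(my_config));

        sfPolicyUserDataSet(ctx, 0, cfg);        /* policy ids are 0-based */
        sfPolicyUserDataIterate(ctx, visit_policy);
        free(sfPolicyUserDataClear(ctx, 0));     /* caller frees the returned data */
        sfPolicyConfigDelete(ctx);
    }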
+ + + + +
+ +
+ + + + diff --git a/doc/html/sf__dynamic__preproc__lib_8c.html b/doc/html/sf__dynamic__preproc__lib_8c.html new file mode 100644 index 0000000..70a8444 --- /dev/null +++ b/doc/html/sf__dynamic__preproc__lib_8c.html @@ -0,0 +1,175 @@ + + + + +Snort AI preprocessor module: sf_dynamic_preproc_lib.c File Reference + + + + + + + + + +
+ +
+

sf_dynamic_preproc_lib.c File Reference

+
+
+#include "sf_preproc_info.h"
+#include "sf_snort_packet.h"
+#include "sf_dynamic_preproc_lib.h"
+#include "sf_dynamic_meta.h"
+#include "sf_dynamic_preprocessor.h"
+#include "sf_dynamic_common.h"
+#include "sf_dynamic_define.h"
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+#include <stdarg.h>
+#include <stdlib.h>
+ + + + + + + +

+Functions

NORETURN void DynamicPreprocessorFatalMessage (const char *format,...)
PREPROC_LINKAGE int InitializePreprocessor (DynamicPreprocessorData *dpd)
PREPROC_LINKAGE int LibVersion (DynamicPluginMeta *dpm)

+Variables

DynamicPreprocessorData _dpd
+

Function Documentation

+ +
+
+ + + + + + + + + + + + + + + + + + +
NORETURN void DynamicPreprocessorFatalMessage (const char *  format,
  ... 
)
+
+
+ +
+
+ +
+
+ + + + + + + + + +
PREPROC_LINKAGE int InitializePreprocessor (DynamicPreprocessorData *  dpd ) 
+
+
+ +
+
+ +
+
+ + + + + + + + + +
PREPROC_LINKAGE int LibVersion (DynamicPluginMeta *  dpm ) 
+
+
+ +
+
+
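As a hedged illustration of how the version macros from sf_preproc_info.h typically flow into LibVersion(): only the function signature above is documented here, so the DynamicPluginMeta field names (type, major, minor, build, uniqueName) and the TYPE_PREPROCESSOR constant below are assumptions borrowed from the generic Snort dynamic-plugin pattern, not taken from this module.

    #include <string.h>
    #include "sf_preproc_info.h"          /* MAJOR_VERSION, MINOR_VERSION, BUILD_VERSION, PREPROC_NAME */
    #include "sf_dynamic_preproc_lib.h"   /* PREPROC_LINKAGE */
    #include "sf_dynamic_meta.h"

    /* Sketch only: the DynamicPluginMeta field names are assumptions, see above. */
    PREPROC_LINKAGE int LibVersion_sketch(DynamicPluginMeta *dpm)
    {
        dpm->type  = TYPE_PREPROCESSOR;
        dpm->major = MAJOR_VERSION;
        dpm->minor = MINOR_VERSION;
        dpm->build = BUILD_VERSION;
        strncpy(dpm->uniqueName, PREPROC_NAME, sizeof(dpm->uniqueName) - 1);
        return 0;
    }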

Variable Documentation

+ +
+
+ + + + +
DynamicPreprocessorData _dpd
+
+
+ +
+
+
+ + + + +
+ +
+ + + + diff --git a/doc/html/sf__preproc__info_8h.html b/doc/html/sf__preproc__info_8h.html new file mode 100644 index 0000000..a9ccc96 --- /dev/null +++ b/doc/html/sf__preproc__info_8h.html @@ -0,0 +1,178 @@ + + + + +Snort AI preprocessor module: sf_preproc_info.h File Reference + + + + + + + + + +
+ +
+

sf_preproc_info.h File Reference

+
+
+ +

Go to the source code of this file.

+ + + + + + + + + + +

+Defines

#define MAJOR_VERSION   1
#define MINOR_VERSION   0
#define BUILD_VERSION   1
#define PREPROC_NAME   "SF_AI"
#define DYNAMIC_PREPROC_SETUP   AI_setup

+Functions

void AI_setup ()
 Set up the preprocessor module.
+

Define Documentation

+ +
+
+ + + + +
#define BUILD_VERSION   1
+
+
+ +
+
+ +
+
+ + + + +
#define DYNAMIC_PREPROC_SETUP   AI_setup
+
+
+ +
+
+ +
+
+ + + + +
#define MAJOR_VERSION   1
+
+
+ +
+
+ +
+
+ + + + +
#define MINOR_VERSION   0
+
+
+ +
+
+ +
+
+ + + + +
#define PREPROC_NAME   "SF_AI"
+
+
+ +
+
+

Function Documentation

+ +
+
+ + + + + + + + + +
void AI_setup (void  ) 
+
+
+ +

Set up the preprocessor module.

+

FUNCTION: AI_setup

+ +
+
+
+ + + + +
+ +
+ + + + diff --git a/doc/html/sf__preproc__info_8h_source.html b/doc/html/sf__preproc__info_8h_source.html new file mode 100644 index 0000000..f8ff46b --- /dev/null +++ b/doc/html/sf__preproc__info_8h_source.html @@ -0,0 +1,85 @@ + + + + +Snort AI preprocessor module: sf_preproc_info.h Source File + + + + + + + + + + + + + +
+ +
+ + + + diff --git a/doc/html/spp__ai_8c.html b/doc/html/spp__ai_8c.html new file mode 100644 index 0000000..d53f048 --- /dev/null +++ b/doc/html/spp__ai_8c.html @@ -0,0 +1,318 @@ + + + + +Snort AI preprocessor module: spp_ai.c File Reference + + + + + + + + + +
+ +
+

spp_ai.c File Reference

+
+
+#include "spp_ai.h"
+#include "preprocids.h"
+#include "sf_dynamic_preproc_lib.h"
+#include "sf_dynamic_preprocessor.h"
+#include "debug.h"
+#include "sfPolicy.h"
+#include "sfPolicyUserData.h"
+#include <sys/types.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <pthread.h>
+ + + + + + + + + + + + + + + + + + + +

+Defines

#define GENERATOR_EXAMPLE   256
#define SRC_PORT_MATCH   1
#define SRC_PORT_MATCH_STR   "example_preprocessor: src port match"
#define DST_PORT_MATCH   2
#define DST_PORT_MATCH_STR   "example_preprocessor: dest port match"

+Functions

static void AI_init (char *args)
 Initialize the preprocessor module.
static void AI_process (void *pkt, void *context)
 Function executed every time the module receives a packet to be processed.
static AI_configAI_parse (char *args)
Parse the arguments passed to the module, saving them into a valid configuration struct.
void AI_setup (void)
 Set up the preprocessor module.

+Variables

tSfPolicyUserContextId ex_config = NULL
DynamicPreprocessorData _dpd
+

Define Documentation

+ +
+
+ + + + +
#define DST_PORT_MATCH   2
+
+
+ +
+
+ +
+
+ + + + +
#define DST_PORT_MATCH_STR   "example_preprocessor: dest port match"
+
+
+ +
+
+ +
+
+ + + + +
#define GENERATOR_EXAMPLE   256
+
+
+ +
+
+ +
+
+ + + + +
#define SRC_PORT_MATCH   1
+
+
+ +
+
+ +
+
+ + + + +
#define SRC_PORT_MATCH_STR   "example_preprocessor: src port match"
+
+
+ +
+
+

Function Documentation

+ +
+
+ + + + + + + + + +
static void AI_init (char *  args )  [static]
+
+
+ +

Initialize the preprocessor module.

+

FUNCTION: AI_init

+
Parameters:
+ + +
args Configuration arguments passed to the module
+
+
+ +
+
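The page only gives AI_init's prototype and brief description. The sketch below shows how an init function of this shape commonly ties together the other entities documented here (AI_parse, AI_process, ex_config) and AI_hashcleanup_thread from spp_ai.h; it is written as if it lived inside spp_ai.c, where the static AI_parse/AI_process are visible. The _dpd.addPreproc arguments (PRIORITY_TRANSPORT, PP_EXAMPLE, PROTO_BIT__TCP) and the policy id are placeholders taken from the generic Snort dynamic-preprocessor pattern, not from this module.

    #include <pthread.h>
    #include "spp_ai.h"
    #include "preprocids.h"
    #include "sf_dynamic_preprocessor.h"
    #include "sfPolicyUserData.h"

    /* Hedged sketch of an init routine with the documented AI_init shape. */
    static void AI_init_sketch(char *args)
    {
        pthread_t  cleanup_thread;
        AI_config *config = AI_parse(args);          /* documented parser on this page */

        if (ex_config == NULL)
            ex_config = sfPolicyConfigCreate();      /* see sfPolicyUserData.c */
        sfPolicyUserDataSet(ex_config, 0, config);   /* 0 = assumed policy id */

        /* Register the per-packet callback; priority/id/protocol values are placeholders. */
        _dpd.addPreproc(AI_process, PRIORITY_TRANSPORT, PP_EXAMPLE, PROTO_BIT__TCP);

        /* Spawn the cleanup thread documented in spp_ai.h / stream.c. */
        pthread_create(&cleanup_thread, NULL, AI_hashcleanup_thread, config);
    }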
+ +
+
+ + + + + + + + + +
static AI_config * AI_parse (char *  args )  [static]
+
+
+ +

Parse the arguments passed to the module, saving them into a valid configuration struct.

+

FUNCTION: AI_parse

+
Parameters:
+ + +
args Arguments passed to the module
+
+
+
Returns:
Pointer to the AI_config structure holding the configuration for the module
+ +
+
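Since only AI_parse's prototype, brief description and return value are shown, here is a hedged sketch of an argument parser of that shape. The option keywords (port, hashcleanup_interval, stream_expire_interval) are hypothetical; only the three AI_config fields they fill (portToCheck, hashCleanupInterval, streamExpireInterval) are documented, and DynamicPreprocessorFatalMessage comes from the sf_dynamic_preproc_lib.c page earlier in this documentation.

    #include <stdlib.h>
    #include <string.h>
    #include <strings.h>
    #include "spp_ai.h"
    #include "sf_dynamic_preproc_lib.h"   /* DynamicPreprocessorFatalMessage() */

    /* Read the next whitespace-separated token as an unsigned number. */
    static unsigned long next_num(char **saveptr)
    {
        char *val = strtok_r(NULL, " \t", saveptr);
        if (val == NULL)
            DynamicPreprocessorFatalMessage("spp_ai: missing option value\n");
        return strtoul(val, NULL, 10);
    }

    /* Hedged sketch of a parser with the documented AI_parse shape;
     * the keyword names below are hypothetical. */
    static AI_config * AI_parse_sketch(char *args)
    {
        AI_config *config  = calloc(1, sizeof(AI_config));
        char      *saveptr = NULL, *arg;

        if (config == NULL)
            DynamicPreprocessorFatalMessage("spp_ai: could not allocate AI_config\n");

        for (arg = strtok_r(args, " \t", &saveptr); arg != NULL;
             arg = strtok_r(NULL, " \t", &saveptr))
        {
            if (strcasecmp(arg, "port") == 0)
                config->portToCheck = (uint16_t) next_num(&saveptr);
            else if (strcasecmp(arg, "hashcleanup_interval") == 0)
                config->hashCleanupInterval = next_num(&saveptr);
            else if (strcasecmp(arg, "stream_expire_interval") == 0)
                config->streamExpireInterval = next_num(&saveptr);
            else
                DynamicPreprocessorFatalMessage("spp_ai: unknown option '%s'\n", arg);
        }

        return config;
    }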
+ +
+
+ + + + + + + + + + + + + + + + + + +
void AI_process (void *  pkt,
void *  context 
) [static]
+
+
+ +

Function executed every time the module receives a packet to be processed.

+

FUNCTION: AI_process

+
Parameters:
+ + + +
pkt void* pointer to the packet data
context void* pointer to the context
+
+
+ +
+
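For context, a hedged sketch of a per-packet callback with the documented AI_process shape, using the defines from this page and AI_pkt_enqueue from spp_ai.h. The SFSnortPacket field names (src_port, dst_port) and the _dpd.alertAdd argument list are assumptions modelled on Snort's stock example preprocessor, and reading the configuration straight from the context pointer is a simplification.

    #include "spp_ai.h"
    #include "sf_dynamic_preprocessor.h"

    /* Sketch only; see the assumptions called out above. */
    static void AI_process_sketch(void *pkt, void *context)
    {
        SFSnortPacket *p      = (SFSnortPacket *) pkt;
        AI_config     *config = (AI_config *) context;

        if (p == NULL || config == NULL)
            return;

        /* Raise the example alerts declared by the defines above. */
        if (p->src_port == config->portToCheck)
            _dpd.alertAdd(GENERATOR_EXAMPLE, SRC_PORT_MATCH, 1, 0, 3, SRC_PORT_MATCH_STR, 0);
        else if (p->dst_port == config->portToCheck)
            _dpd.alertAdd(GENERATOR_EXAMPLE, DST_PORT_MATCH, 1, 0, 3, DST_PORT_MATCH_STR, 0);

        /* Hand the packet to the stream tracker (spp_ai.h / stream.c). */
        AI_pkt_enqueue(p);
    }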
+ +
+
+ + + + + + + + + +
void AI_setup (void  ) 
+
+
+ +

Set up the preprocessor module.

+

FUNCTION: AI_setup

+ +
+
+
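DYNAMIC_PREPROC_SETUP in sf_preproc_info.h points Snort at AI_setup, and a setup function of this kind conventionally does nothing more than register the preprocessor keyword together with its init callback. The sketch below assumes that convention; the keyword string "ai" is a guess, _dpd.registerPreproc is the standard Snort dynamic-preprocessor registration call, and the code is written as if it lived inside spp_ai.c where the static AI_init is visible.

    #include "sf_dynamic_preprocessor.h"

    extern DynamicPreprocessorData _dpd;    /* documented in the Variables section below */

    /* Minimal sketch of the conventional setup step (keyword name is an assumption). */
    void AI_setup_sketch(void)
    {
        _dpd.registerPreproc("ai", AI_init);
    }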

Variable Documentation

+ +
+
+ + + + +
DynamicPreprocessorData _dpd
+
+
+ +
+
+ +
+
+ + + + +
tSfPolicyUserContextId ex_config = NULL
+
+
+ +
+
+
+ + + + +
+ +
+ + + + diff --git a/doc/html/spp__ai_8h.html b/doc/html/spp__ai_8h.html new file mode 100644 index 0000000..ecbf4f0 --- /dev/null +++ b/doc/html/spp__ai_8h.html @@ -0,0 +1,218 @@ + + + + +Snort AI preprocessor module: spp_ai.h File Reference + + + + + + + + + +
+ +
+

spp_ai.h File Reference

+
+
+#include "sf_snort_packet.h"
+ +

Go to the source code of this file.

+ + + + + + + + + + + + + + +

+Data Structures

struct  _AI_config

+Typedefs

typedef unsigned int uint32_t
typedef unsigned short uint16_t
typedef struct _AI_config AI_config

+Enumerations

enum  BOOL { false, +true + }

+Functions

void AI_pkt_enqueue (SFSnortPacket *)
Append a new packet to the hash table, creating a new stream or adding the packet to an existing one.
void * AI_hashcleanup_thread (void *)
Thread that periodically removes traffic streams older than a configured threshold from the hash table.
+

Typedef Documentation

+ +
+
+ + + + +
typedef struct _AI_config AI_config
+
+
+ +
+
+ +
+
+ + + + +
typedef unsigned short uint16_t
+
+
+ +
+
+ +
+
+ + + + +
typedef unsigned int uint32_t
+
+
+ +
+
+

Enumeration Type Documentation

+ +
+
+ + + + +
enum BOOL
+
+
+
Enumerator:
+ + +
false  +
true  +
+
+
+ +
+
+
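The typedefs and the BOOL enum above act as C89-friendly stand-ins for <stdint.h>/<stdbool.h>. A trivial illustration, using only the types documented on this page:

    #include "spp_ai.h"

    /* Hypothetical helper, just to show the types in use. */
    static BOOL is_interesting(uint32_t src_ip, uint16_t dst_port)
    {
        return (src_ip != 0 && dst_port != 0) ? true : false;
    }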

Function Documentation

+ +
+
+ + + + + + + + + +
void* AI_hashcleanup_thread (void *  arg ) 
+
+
+ +

Thread that periodically removes traffic streams older than a configured threshold from the hash table.

+

FUNCTION: AI_hashcleanup_thread

+
Parameters:
+ + +
arg Pointer to the AI_config struct
+
+
+ +
+
+ +
+
+ + + + + + + + + +
void AI_pkt_enqueue (SFSnortPacket *  pkt ) 
+
+
+ +

Append a new packet to the hash table, creating a new stream or adding the packet to an existing one.

+

FUNCTION: AI_pkt_enqueue

+
Parameters:
+ + +
pkt Packet to be appended
+
+
+ +
+
+
+ + + + +
+ +
+ + + + diff --git a/doc/html/spp__ai_8h_source.html b/doc/html/spp__ai_8h_source.html new file mode 100644 index 0000000..ba2d92c --- /dev/null +++ b/doc/html/spp__ai_8h_source.html @@ -0,0 +1,114 @@ + + + + +Snort AI preprocessor module: spp_ai.h Source File + + + + + + + + + + + + + +
+ +
+ + + + diff --git a/doc/html/stream_8c.html b/doc/html/stream_8c.html new file mode 100644 index 0000000..ad4c66a --- /dev/null +++ b/doc/html/stream_8c.html @@ -0,0 +1,197 @@ + + + + +Snort AI preprocessor module: stream.c File Reference + + + + + + + + + +
+ +
+

stream.c File Reference

+
+
+#include "spp_ai.h"
+#include "uthash.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+ + + + + + + + + + + + + +

+Data Structures

struct  pkt_key
struct  pkt_info

+Functions

static void _AI_stream_free (struct pkt_info *stream)
 Remove a stream from the hash table (private function).
void * AI_hashcleanup_thread (void *arg)
Thread that periodically removes traffic streams older than a configured threshold from the hash table.
void AI_pkt_enqueue (SFSnortPacket *pkt)
Append a new packet to the hash table, creating a new stream or adding the packet to an existing one.

+Variables

static struct pkt_infohash = NULL
+

Function Documentation

+ +
+
+ + + + + + + + + +
static void _AI_stream_free (struct pkt_info stream )  [static]
+
+
+ +

Remove a stream from the hash table (private function).

+

FUNCTION: _AI_stream_free

+
Parameters:
+ + +
stream Stream to be removed
+
+
+ +
+
+ +
+
+ + + + + + + + + +
void* AI_hashcleanup_thread (void *  arg ) 
+
+
+ +

Thread that periodically removes traffic streams older than a configured threshold from the hash table.

+

FUNCTION: AI_hashcleanup_thread

+
Parameters:
+ + +
arg Pointer to the AI_config struct
+
+
+ +
+
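A hedged sketch of the cleanup loop described above, written as if it lived inside stream.c where the static hash table and _AI_stream_free() are visible. It only uses what is documented here (the AI_config intervals, the pkt_info timestamp, uthash's HASH_ITER); locking and the exact division of work with _AI_stream_free() are not documented and are left out.

    #include <time.h>
    #include <unistd.h>
    #include "spp_ai.h"
    #include "uthash.h"

    /* Sketch of a cleanup loop matching the documented AI_hashcleanup_thread shape. */
    void *AI_hashcleanup_thread_sketch(void *arg)
    {
        AI_config *config = (AI_config *) arg;    /* the docs say arg points to AI_config */

        for (;;)
        {
            struct pkt_info *stream, *tmp;
            time_t now = time(NULL);

            /* Walk the stream table and drop entries older than the threshold. */
            HASH_ITER(hh, hash, stream, tmp)
            {
                if (now - stream->timestamp > (time_t) config->streamExpireInterval)
                    _AI_stream_free(stream);
            }

            sleep(config->hashCleanupInterval);
        }

        return NULL;    /* never reached */
    }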
+ +
+
+ + + + + + + + + +
void AI_pkt_enqueue (SFSnortPacket *  pkt ) 
+
+
+ +

Append a new packet to the hash table, creating a new stream or adding the packet to an existing one.

+

FUNCTION: AI_pkt_enqueue

+
Parameters:
+ + +
pkt Packet to be appended
+
+
+ +
+
+
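A hedged sketch of the enqueue logic implied by the structures documented on this page (pkt_key keyed on src_ip/dst_port, pkt_info chained via next and hashed via hh), again written as if inside stream.c where the static hash table is visible. Which SFSnortPacket fields populate the key (ip4_header->source.s_addr and dst_port below) and whether the packet is copied or merely referenced are assumptions.

    #include <stdlib.h>
    #include <string.h>
    #include <time.h>
    #include "spp_ai.h"
    #include "uthash.h"

    /* Sketch of an enqueue routine matching the documented AI_pkt_enqueue shape. */
    void AI_pkt_enqueue_sketch(SFSnortPacket *pkt)
    {
        struct pkt_key   key;
        struct pkt_info *stream = NULL, *node;

        memset(&key, 0, sizeof(key));
        key.src_ip   = pkt->ip4_header->source.s_addr;   /* assumed field names */
        key.dst_port = pkt->dst_port;

        node = calloc(1, sizeof(*node));
        if (node == NULL)
            return;
        node->key       = key;
        node->timestamp = time(NULL);
        node->pkt       = pkt;              /* the real module may copy the packet instead */

        HASH_FIND(hh, hash, &key, sizeof(struct pkt_key), stream);

        if (stream == NULL)
        {
            /* No stream for this key yet: start a new one. */
            HASH_ADD(hh, hash, key, sizeof(struct pkt_key), node);
        }
        else
        {
            /* Append the packet to the existing stream via the next chain. */
            while (stream->next != NULL)
                stream = stream->next;
            stream->next = node;
        }
    }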

Variable Documentation

+ +
+
+ + + + +
struct pkt_info* hash = NULL [static]
+
+
+ +
+
+
+ + + + +
+ +
+ + + + diff --git a/doc/html/struct__AI__config.html b/doc/html/struct__AI__config.html new file mode 100644 index 0000000..cd36787 --- /dev/null +++ b/doc/html/struct__AI__config.html @@ -0,0 +1,127 @@ + + + + +Snort AI preprocessor module: _AI_config Struct Reference + + + + + + + + + +
+ +
+

_AI_config Struct Reference

+
+
+ +

#include <spp_ai.h>

+ + + + + +

+Data Fields

uint16_t portToCheck
unsigned long hashCleanupInterval
unsigned long streamExpireInterval
+

Field Documentation

+ +
+
+ + + + +
unsigned long _AI_config::hashCleanupInterval
+
+
+ +
+
+ +
+ +
+ +
+
+ +
+
+ + + + +
unsigned long _AI_config::streamExpireInterval
+
+
+ +
+
+
The documentation for this struct was generated from the following file: +
+ + + + +
+ +
+ + + + diff --git a/doc/html/structpkt__info.html b/doc/html/structpkt__info.html new file mode 100644 index 0000000..5ff74e3 --- /dev/null +++ b/doc/html/structpkt__info.html @@ -0,0 +1,153 @@ + + + + +Snort AI preprocessor module: pkt_info Struct Reference + + + + + + + + + +
+ +
+

pkt_info Struct Reference

+
+
+ + + + + + + +

+Data Fields

struct pkt_key key
time_t timestamp
SFSnortPacket * pkt
struct pkt_infonext
UT_hash_handle hh
+

Field Documentation

+ +
+
+ + + + +
UT_hash_handle pkt_info::hh
+
+
+ +
+
+ +
+
+ + + + +
struct pkt_key pkt_info::key
+
+
+ +
+
+ +
+
+ + + + +
struct pkt_info* pkt_info::next
+
+
+ +
+
+ +
+
+ + + + +
SFSnortPacket* pkt_info::pkt
+
+
+ +
+
+ +
+
+ + + + +
time_t pkt_info::timestamp
+
+
+ +
+
+
The documentation for this struct was generated from the following file: +
+ + + + +
+ +
+ + + + diff --git a/doc/html/structpkt__key.html b/doc/html/structpkt__key.html new file mode 100644 index 0000000..c785c5c --- /dev/null +++ b/doc/html/structpkt__key.html @@ -0,0 +1,111 @@ + + + + +Snort AI preprocessor module: pkt_key Struct Reference + + + + + + + + + +
+ +
+

pkt_key Struct Reference

+
+
+ + + + +

+Data Fields

uint32_t src_ip
uint16_t dst_port
+

Field Documentation

+ +
+ +
+ +
+
+ +
+ +
+ +
+
+
The documentation for this struct was generated from the following file: +
+ + + + +
+ +
+ + + + diff --git a/doc/html/tab_a.png b/doc/html/tab_a.png new file mode 100644 index 0000000..2d99ef2 Binary files /dev/null and b/doc/html/tab_a.png differ diff --git a/doc/html/tab_b.png b/doc/html/tab_b.png new file mode 100644 index 0000000..b2c3d2b Binary files /dev/null and b/doc/html/tab_b.png differ diff --git a/doc/html/tab_h.png b/doc/html/tab_h.png new file mode 100644 index 0000000..c11f48f Binary files /dev/null and b/doc/html/tab_h.png differ diff --git a/doc/html/tab_s.png b/doc/html/tab_s.png new file mode 100644 index 0000000..978943a Binary files /dev/null and b/doc/html/tab_s.png differ diff --git a/doc/html/tabs.css b/doc/html/tabs.css new file mode 100644 index 0000000..2192056 --- /dev/null +++ b/doc/html/tabs.css @@ -0,0 +1,59 @@ +.tabs, .tabs2, .tabs3 { + background-image: url('tab_b.png'); + width: 100%; + z-index: 101; + font-size: 13px; +} + +.tabs2 { + font-size: 10px; +} +.tabs3 { + font-size: 9px; +} + +.tablist { + margin: 0; + padding: 0; + display: table; +} + +.tablist li { + float: left; + display: table-cell; + background-image: url('tab_b.png'); + line-height: 36px; + list-style: none; +} + +.tablist a { + display: block; + padding: 0 20px; + font-weight: bold; + background-image:url('tab_s.png'); + background-repeat:no-repeat; + background-position:right; + color: #283A5D; + text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); + text-decoration: none; + outline: none; +} + +.tabs3 .tablist a { + padding: 0 10px; +} + +.tablist a:hover { + background-image: url('tab_h.png'); + background-repeat:repeat-x; + color: #fff; + text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0); + text-decoration: none; +} + +.tablist li.current a { + background-image: url('tab_a.png'); + background-repeat:repeat-x; + color: #fff; + text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0); +} diff --git a/doc/latex/Makefile b/doc/latex/Makefile new file mode 100644 index 0000000..8b7c89a --- /dev/null +++ b/doc/latex/Makefile @@ -0,0 +1,19 @@ +all: clean refman.pdf + +pdf: refman.pdf + +refman.pdf: refman.tex + pdflatex refman.tex + makeindex refman.idx + pdflatex refman.tex + latex_count=5 ; \ + while egrep -s 'Rerun (LaTeX|to get cross-references right)' refman.log && [ $$latex_count -gt 0 ] ;\ + do \ + echo "Rerunning latex...." 
;\ + pdflatex refman.tex ;\ + latex_count=`expr $$latex_count - 1` ;\ + done + + +clean: + rm -f *.ps *.dvi *.aux *.toc *.idx *.ind *.ilg *.log *.out refman.pdf diff --git a/doc/latex/annotated.tex b/doc/latex/annotated.tex new file mode 100644 index 0000000..a4eb945 --- /dev/null +++ b/doc/latex/annotated.tex @@ -0,0 +1,6 @@ +\section{Data Structures} +Here are the data structures with brief descriptions:\begin{DoxyCompactList} +\item\contentsline{section}{\hyperlink{struct__AI__config}{\_\-AI\_\-config} }{\pageref{struct__AI__config}}{} +\item\contentsline{section}{\hyperlink{structpkt__info}{pkt\_\-info} }{\pageref{structpkt__info}}{} +\item\contentsline{section}{\hyperlink{structpkt__key}{pkt\_\-key} }{\pageref{structpkt__key}}{} +\end{DoxyCompactList} diff --git a/doc/latex/doxygen.sty b/doc/latex/doxygen.sty new file mode 100644 index 0000000..e048d39 --- /dev/null +++ b/doc/latex/doxygen.sty @@ -0,0 +1,356 @@ +\NeedsTeXFormat{LaTeX2e} +\ProvidesPackage{doxygen} + +% Packages used by this style file +\RequirePackage{alltt} +\RequirePackage{array} +\RequirePackage{calc} +\RequirePackage{color} +\RequirePackage{fancyhdr} +\RequirePackage{verbatim} + +% Setup fancy headings +\pagestyle{fancyplain} +\newcommand{\clearemptydoublepage}{% + \newpage{\pagestyle{empty}\cleardoublepage}% +} +\renewcommand{\chaptermark}[1]{% + \markboth{#1}{}% +} +\renewcommand{\sectionmark}[1]{% + \markright{\thesection\ #1}% +} +\lhead[\fancyplain{}{\bfseries\thepage}]{% + \fancyplain{}{\bfseries\rightmark}% +} +\rhead[\fancyplain{}{\bfseries\leftmark}]{% + \fancyplain{}{\bfseries\thepage}% +} +\rfoot[\fancyplain{}{\bfseries\scriptsize% + Generated on Wed Aug 4 2010 11:30:57 for Snort AI preprocessor module by Doxygen }]{} +\lfoot[]{\fancyplain{}{\bfseries\scriptsize% + Generated on Wed Aug 4 2010 11:30:57 for Snort AI preprocessor module by Doxygen }} +\cfoot{} + +%---------- Internal commands used in this style file ---------------- + +% Generic environment used by all paragraph-based environments defined +% below. Note that the command \title{...} needs to be defined inside +% those environments! +\newenvironment{DoxyDesc}[1]{% + \begin{list}{}% + {% + \settowidth{\labelwidth}{40pt}% + \setlength{\leftmargin}{\labelwidth}% + \setlength{\parsep}{0pt}% + \setlength{\itemsep}{-4pt}% + \renewcommand{\makelabel}{\entrylabel}% + }% + \item[#1]% +}{% + \end{list}% +} + +%---------- Commands used by doxygen LaTeX output generator ---------- + +% Used by
 ... 
+\newenvironment{DoxyPre}{% + \small% + \begin{alltt}% +}{% + \end{alltt}% + \normalsize% +} + +% Used by @code ... @endcode +\newenvironment{DoxyCode}{% + \footnotesize% + \verbatim% +}{% + \endverbatim% + \normalsize% +} + +% Used by @example, @include, @includelineno and @dontinclude +\newenvironment{DoxyCodeInclude}{% + \DoxyCode% +}{% + \endDoxyCode% +} + +% Used by @verbatim ... @endverbatim +\newenvironment{DoxyVerb}{% + \footnotesize% + \verbatim% +}{% + \endverbatim% + \normalsize% +} + +% Used by @verbinclude +\newenvironment{DoxyVerbInclude}{% + \DoxyVerb% +}{% + \endDoxyVerb% +} + +% Used by numbered lists (using '-#' or
    ...
) +\newenvironment{DoxyEnumerate}{% + \enumerate% +}{% + \endenumerate% +} + +% Used by bullet lists (using '-', @li, @arg, or
    ...
) +\newenvironment{DoxyItemize}{% + \itemize% +}{% + \enditemize% +} + +% Used by description lists (using
...
) +\newenvironment{DoxyDescription}{% + \description% +}{% + \enddescription% +} + +% Used by @image, @dotfile, and @dot ... @enddot +% (only if caption is specified) +\newenvironment{DoxyImage}{% + \begin{figure}[H]% + \begin{center}% +}{% + \end{center}% + \end{figure}% +} + +% Used by @image, @dotfile, @dot ... @enddot, and @msc ... @endmsc +% (only if no caption is specified) +\newenvironment{DoxyImageNoCaption}{% +}{% +} + +% Used by @attention +\newenvironment{DoxyAttention}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @author and @authors +\newenvironment{DoxyAuthor}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @date +\newenvironment{DoxyDate}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @invariant +\newenvironment{DoxyInvariant}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @note +\newenvironment{DoxyNote}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @post +\newenvironment{DoxyPostcond}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @pre +\newenvironment{DoxyPrecond}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @remark +\newenvironment{DoxyRemark}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @return +\newenvironment{DoxyReturn}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @since +\newenvironment{DoxySince}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @see +\newenvironment{DoxySeeAlso}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @version +\newenvironment{DoxyVersion}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @warning +\newenvironment{DoxyWarning}[1]{% + \begin{DoxyDesc}{#1}% +}{% + \end{DoxyDesc}% +} + +% Used by @internal +\newenvironment{DoxyInternal}[1]{% + \paragraph*{#1}% +}{% +} + +% Used by @par and @paragraph +\newenvironment{DoxyParagraph}[1]{% + \begin{list}{}% + {% + \settowidth{\labelwidth}{40pt}% + \setlength{\leftmargin}{\labelwidth}% + \setlength{\parsep}{0pt}% + \setlength{\itemsep}{-4pt}% + \renewcommand{\makelabel}{\entrylabel}% + }% + \item[#1]% +}{% + \end{list}% +} + +% Used by parameter lists +\newenvironment{DoxyParams}[1]{% + \begin{DoxyDesc}{#1}% + \begin{description}% +}{% + \end{description}% + \end{DoxyDesc}% +} + +% is used for parameters within a detailed function description +\newenvironment{DoxyParamCaption}{% + \renewcommand{\item}[2][]{##1 {\em ##2}}% + }{% +} + +% Used by return value lists +\newenvironment{DoxyRetVals}[1]{% + \begin{DoxyDesc}{#1}% + \begin{description}% +}{% + \end{description}% + \end{DoxyDesc}% +} + +% Used by exception lists +\newenvironment{DoxyExceptions}[1]{% + \begin{DoxyDesc}{#1}% + \begin{description}% +}{% + \end{description}% + \end{DoxyDesc}% +} + +% Used by template parameter lists +\newenvironment{DoxyTemplParams}[1]{% + \begin{DoxyDesc}{#1}% + \begin{description}% +}{% + \end{description}% + \end{DoxyDesc}% +} + +\newcommand{\doxyref}[3]{\textbf{#1} (\textnormal{#2}\,\pageref{#3})} +\newenvironment{DoxyCompactList} +{\begin{list}{}{ + \setlength{\leftmargin}{0.5cm} + \setlength{\itemsep}{0pt} + \setlength{\parsep}{0pt} + \setlength{\topsep}{0pt} + \renewcommand{\makelabel}{\hfill}}} +{\end{list}} +\newenvironment{DoxyCompactItemize} +{ + \begin{itemize} + \setlength{\itemsep}{-3pt} + \setlength{\parsep}{0pt} + \setlength{\topsep}{0pt} + \setlength{\partopsep}{0pt} +} +{\end{itemize}} +\newcommand{\PBS}[1]{\let\temp=\\#1\let\\=\temp} 
+\newlength{\tmplength} +\newenvironment{TabularC}[1] +{ +\setlength{\tmplength} + {\linewidth/(#1)-\tabcolsep*2-\arrayrulewidth*(#1+1)/(#1)} + \par\begin{tabular*}{\linewidth} + {*{#1}{|>{\PBS\raggedright\hspace{0pt}}p{\the\tmplength}}|} +} +{\end{tabular*}\par} +\newcommand{\entrylabel}[1]{ + {\parbox[b]{\labelwidth-4pt}{\makebox[0pt][l]{\textbf{#1}}\vspace{1.5\baselineskip}}}} +\newenvironment{Desc} +{\begin{list}{} + { + \settowidth{\labelwidth}{40pt} + \setlength{\leftmargin}{\labelwidth} + \setlength{\parsep}{0pt} + \setlength{\itemsep}{-4pt} + \renewcommand{\makelabel}{\entrylabel} + } +} +{\end{list}} +\newenvironment{Indent} + {\begin{list}{}{\setlength{\leftmargin}{0.5cm}} + \item[]\ignorespaces} + {\unskip\end{list}} +\setlength{\parindent}{0cm} +\setlength{\parskip}{0.2cm} +\addtocounter{secnumdepth}{1} +\sloppy +\usepackage[T1]{fontenc} +\makeatletter +\renewcommand{\paragraph}{\@startsection{paragraph}{4}{0ex}% + {-3.25ex plus -1ex minus -0.2ex}% + {1.5ex plus 0.2ex}% + {\normalfont\normalsize\bfseries}} +\makeatother +\stepcounter{secnumdepth} +\stepcounter{tocdepth} +\definecolor{comment}{rgb}{0.5,0.0,0.0} +\definecolor{keyword}{rgb}{0.0,0.5,0.0} +\definecolor{keywordtype}{rgb}{0.38,0.25,0.125} +\definecolor{keywordflow}{rgb}{0.88,0.5,0.0} +\definecolor{preprocessor}{rgb}{0.5,0.38,0.125} +\definecolor{stringliteral}{rgb}{0.0,0.125,0.25} +\definecolor{charliteral}{rgb}{0.0,0.5,0.5} +\definecolor{vhdldigit}{rgb}{1.0,0.0,1.0} +\definecolor{vhdlkeyword}{rgb}{0.43,0.0,0.43} +\definecolor{vhdllogic}{rgb}{1.0,0.0,0.0} +\definecolor{vhdlchar}{rgb}{0.0,0.0,0.0} diff --git a/doc/latex/files.tex b/doc/latex/files.tex new file mode 100644 index 0000000..704242b --- /dev/null +++ b/doc/latex/files.tex @@ -0,0 +1,9 @@ +\section{File List} +Here is a list of all files with brief descriptions:\begin{DoxyCompactList} +\item\contentsline{section}{\hyperlink{sf__dynamic__preproc__lib_8c}{sf\_\-dynamic\_\-preproc\_\-lib.c} }{\pageref{sf__dynamic__preproc__lib_8c}}{} +\item\contentsline{section}{\hyperlink{sf__preproc__info_8h}{sf\_\-preproc\_\-info.h} }{\pageref{sf__preproc__info_8h}}{} +\item\contentsline{section}{\hyperlink{sfPolicyUserData_8c}{sfPolicyUserData.c} }{\pageref{sfPolicyUserData_8c}}{} +\item\contentsline{section}{\hyperlink{spp__ai_8c}{spp\_\-ai.c} }{\pageref{spp__ai_8c}}{} +\item\contentsline{section}{\hyperlink{spp__ai_8h}{spp\_\-ai.h} }{\pageref{spp__ai_8h}}{} +\item\contentsline{section}{\hyperlink{stream_8c}{stream.c} }{\pageref{stream_8c}}{} +\end{DoxyCompactList} diff --git a/doc/latex/group__sfPolicyConfig.tex b/doc/latex/group__sfPolicyConfig.tex new file mode 100644 index 0000000..42e7f90 --- /dev/null +++ b/doc/latex/group__sfPolicyConfig.tex @@ -0,0 +1,85 @@ +\hypertarget{group__sfPolicyConfig}{ +\section{Sourcefire policy configuration module} +\label{group__sfPolicyConfig}\index{Sourcefire policy configuration module@{Sourcefire policy configuration module}} +} +\subsection*{Functions} +\begin{DoxyCompactItemize} +\item +tSfPolicyUserContextId \hyperlink{group__sfPolicyConfig_gac62cd5838bee4a9d3f40561eae920cdd}{sfPolicyConfigCreate} (void) +\item +void \hyperlink{group__sfPolicyConfig_ga189d09ed6d1203ebace6ea2c2aafc1b8}{sfPolicyConfigDelete} (tSfPolicyUserContextId pContext) +\item +int \hyperlink{group__sfPolicyConfig_ga8e14fd83397b9bbb14568070183db80b}{sfPolicyUserDataSet} (tSfPolicyUserContextId pContext, tSfPolicyId policyId, void $\ast$config) +\item +void $\ast$ \hyperlink{group__sfPolicyConfig_gae8f2ae426b1f1a50eabfade6d22c2c85}{sfPolicyUserDataClear} 
(tSfPolicyUserContextId pContext, tSfPolicyId policyId) +\item +int \hyperlink{group__sfPolicyConfig_ga3f3ab9314d29d2ee2a8285289b388f17}{sfPolicyUserDataIterate} (tSfPolicyUserContextId pContext, int($\ast$callback)(tSfPolicyUserContextId pContext, tSfPolicyId policyId, void $\ast$config)) +\end{DoxyCompactItemize} + + +\subsection{Detailed Description} +Create a user policy configuration context. A context provides a facility for creating policy-specific data instances. Users can create as many policy instances as memory resources allow. Users can create/delete a context and set/clear/get user data for a specific policy, the default policy or the current policy. Users can also iterate over the user data of all instances. + +In the current design, preprocessors use this module directly to manage policy-specific data instances. A future enhancement could be to extract the policy management code from each preprocessor and put it in a new policy management module. The policy management module would set a single pointer to user data before calling the appropriate callback function in a preprocessor. As an example, the policy management module would iterate over all policies and call the CleanExit functions of every preprocessor for each policy. This would let the policy management module hide policies from preprocessors and make them policy-agnostic. + +\subsection{Function Documentation} +\hypertarget{group__sfPolicyConfig_gac62cd5838bee4a9d3f40561eae920cdd}{ +\index{sfPolicyConfig@{sfPolicyConfig}!sfPolicyConfigCreate@{sfPolicyConfigCreate}} +\index{sfPolicyConfigCreate@{sfPolicyConfigCreate}!sfPolicyConfig@{sfPolicyConfig}} +\subsubsection[{sfPolicyConfigCreate}]{\setlength{\rightskip}{0pt plus 5cm}tSfPolicyUserContextId sfPolicyConfigCreate ( +\begin{DoxyParamCaption} +\item[{void}]{} +\end{DoxyParamCaption} +)}} +\label{group__sfPolicyConfig_gac62cd5838bee4a9d3f40561eae920cdd} +Create a user context. Allocates a new context and returns it to the user. All transactions within a context are independent of any transactions in a different context. + +\begin{DoxyReturn}{Returns} +tSfPolicyUserContextId +\end{DoxyReturn} +\hypertarget{group__sfPolicyConfig_ga189d09ed6d1203ebace6ea2c2aafc1b8}{ +\index{sfPolicyConfig@{sfPolicyConfig}!sfPolicyConfigDelete@{sfPolicyConfigDelete}} +\index{sfPolicyConfigDelete@{sfPolicyConfigDelete}!sfPolicyConfig@{sfPolicyConfig}} +\subsubsection[{sfPolicyConfigDelete}]{\setlength{\rightskip}{0pt plus 5cm}void sfPolicyConfigDelete ( +\begin{DoxyParamCaption} +\item[{tSfPolicyUserContextId}]{ pContext} +\end{DoxyParamCaption} +)}} +\label{group__sfPolicyConfig_ga189d09ed6d1203ebace6ea2c2aafc1b8} +Delete a user policy data context. +\begin{DoxyParams}{Parameters} +\item[{\em pContext}]\end{DoxyParams} +\hypertarget{group__sfPolicyConfig_gae8f2ae426b1f1a50eabfade6d22c2c85}{ +\index{sfPolicyConfig@{sfPolicyConfig}!sfPolicyUserDataClear@{sfPolicyUserDataClear}} +\index{sfPolicyUserDataClear@{sfPolicyUserDataClear}!sfPolicyConfig@{sfPolicyConfig}} +\subsubsection[{sfPolicyUserDataClear}]{\setlength{\rightskip}{0pt plus 5cm}void$\ast$ sfPolicyUserDataClear ( +\begin{DoxyParamCaption} +\item[{tSfPolicyUserContextId}]{ pContext, } +\item[{tSfPolicyId}]{ policyId} +\end{DoxyParamCaption} +)}} +\label{group__sfPolicyConfig_gae8f2ae426b1f1a50eabfade6d22c2c85} +The user is responsible for freeing any memory.
\hypertarget{group__sfPolicyConfig_ga3f3ab9314d29d2ee2a8285289b388f17}{ +\index{sfPolicyConfig@{sfPolicyConfig}!sfPolicyUserDataIterate@{sfPolicyUserDataIterate}} +\index{sfPolicyUserDataIterate@{sfPolicyUserDataIterate}!sfPolicyConfig@{sfPolicyConfig}} +\subsubsection[{sfPolicyUserDataIterate}]{\setlength{\rightskip}{0pt plus 5cm}int sfPolicyUserDataIterate ( +\begin{DoxyParamCaption} +\item[{tSfPolicyUserContextId}]{ pContext, } +\item[{int($\ast$)(tSfPolicyUserContextId pContext, tSfPolicyId policyId, void $\ast$config)}]{ callback} +\end{DoxyParamCaption} +)}} +\label{group__sfPolicyConfig_ga3f3ab9314d29d2ee2a8285289b388f17} +\hypertarget{group__sfPolicyConfig_ga8e14fd83397b9bbb14568070183db80b}{ +\index{sfPolicyConfig@{sfPolicyConfig}!sfPolicyUserDataSet@{sfPolicyUserDataSet}} +\index{sfPolicyUserDataSet@{sfPolicyUserDataSet}!sfPolicyConfig@{sfPolicyConfig}} +\subsubsection[{sfPolicyUserDataSet}]{\setlength{\rightskip}{0pt plus 5cm}int sfPolicyUserDataSet ( +\begin{DoxyParamCaption} +\item[{tSfPolicyUserContextId}]{ pContext, } +\item[{tSfPolicyId}]{ policyId, } +\item[{void $\ast$}]{ config} +\end{DoxyParamCaption} +)}} +\label{group__sfPolicyConfig_ga8e14fd83397b9bbb14568070183db80b} +Store a pointer to user data. +\begin{DoxyParams}{Parameters} +\item[{\em pContext}]\item[{\em policyId}]is 0 based. \item[{\em config}]-\/ pointer to user configuration. \end{DoxyParams} diff --git a/doc/latex/modules.tex b/doc/latex/modules.tex new file mode 100644 index 0000000..9bbb499 --- /dev/null +++ b/doc/latex/modules.tex @@ -0,0 +1,4 @@ +\section{Modules} +Here is a list of all modules:\begin{DoxyCompactList} +\item \contentsline{section}{Sourcefire policy configuration module}{\pageref{group__sfPolicyConfig}}{} +\end{DoxyCompactList} diff --git a/doc/latex/refman.tex b/doc/latex/refman.tex new file mode 100644 index 0000000..51d4829 --- /dev/null +++ b/doc/latex/refman.tex @@ -0,0 +1,73 @@ +\documentclass[a4paper]{book} +\usepackage{a4wide} +\usepackage{makeidx} +\usepackage{graphicx} +\usepackage{multicol} +\usepackage{float} +\usepackage{listings} +\usepackage{color} +\usepackage{textcomp} +\usepackage{alltt} +\usepackage{times} +\usepackage{ifpdf} +\ifpdf +\usepackage[pdftex, + pagebackref=true, + colorlinks=true, + linkcolor=blue, + unicode + ]{hyperref} +\else +\usepackage[ps2pdf, + pagebackref=true, + colorlinks=true, + linkcolor=blue, + unicode + ]{hyperref} +\usepackage{pspicture} +\fi +\usepackage[utf8]{inputenc} +\usepackage{doxygen} +\lstset{language=C++,inputencoding=utf8,basicstyle=\footnotesize,breaklines=true,breakatwhitespace=true,tabsize=8,numbers=left } +\makeindex +\setcounter{tocdepth}{3} +\renewcommand{\footrulewidth}{0.4pt} +\begin{document} +\hypersetup{pageanchor=false} +\begin{titlepage} +\vspace*{7cm} +\begin{center} +{\Large Snort AI preprocessor module \\[1ex]\large 0.1 }\\ +\vspace*{1cm} +{\large Generated by Doxygen 1.7.1}\\ +\vspace*{0.5cm} +{\small Wed Aug 4 2010 11:30:57}\\ +\end{center} +\end{titlepage} +\clearemptydoublepage +\pagenumbering{roman} +\tableofcontents +\clearemptydoublepage +\pagenumbering{arabic} +\hypersetup{pageanchor=true} +\chapter{Module Index} +\input{modules} +\chapter{Data Structure Index} +\input{annotated} +\chapter{File Index} +\input{files} +\chapter{Module Documentation} +\input{group__sfPolicyConfig} +\chapter{Data Structure Documentation} +\input{struct__AI__config} +\input{structpkt__info} +\input{structpkt__key} +\chapter{File Documentation} +\input{sf__dynamic__preproc__lib_8c} +\input{sf__preproc__info_8h} 
+\input{sfPolicyUserData_8c} +\input{spp__ai_8c} +\input{spp__ai_8h} +\input{stream_8c} +\printindex +\end{document} diff --git a/doc/latex/sfPolicyUserData_8c.tex b/doc/latex/sfPolicyUserData_8c.tex new file mode 100644 index 0000000..fd4fa56 --- /dev/null +++ b/doc/latex/sfPolicyUserData_8c.tex @@ -0,0 +1,41 @@ +\hypertarget{sfPolicyUserData_8c}{ +\section{sfPolicyUserData.c File Reference} +\label{sfPolicyUserData_8c}\index{sfPolicyUserData.c@{sfPolicyUserData.c}} +} +{\ttfamily \#include \char`\"{}stdlib.h\char`\"{}}\par +{\ttfamily \#include \char`\"{}string.h\char`\"{}}\par +{\ttfamily \#include \char`\"{}sfPolicy.h\char`\"{}}\par +{\ttfamily \#include \char`\"{}sfPolicyUserData.h\char`\"{}}\par +\subsection*{Functions} +\begin{DoxyCompactItemize} +\item +tSfPolicyUserContextId \hyperlink{group__sfPolicyConfig_gac62cd5838bee4a9d3f40561eae920cdd}{sfPolicyConfigCreate} (void) +\item +void \hyperlink{group__sfPolicyConfig_ga189d09ed6d1203ebace6ea2c2aafc1b8}{sfPolicyConfigDelete} (tSfPolicyUserContextId pContext) +\item +int \hyperlink{group__sfPolicyConfig_ga8e14fd83397b9bbb14568070183db80b}{sfPolicyUserDataSet} (tSfPolicyUserContextId pContext, tSfPolicyId policyId, void $\ast$config) +\item +void $\ast$ \hyperlink{group__sfPolicyConfig_gae8f2ae426b1f1a50eabfade6d22c2c85}{sfPolicyUserDataClear} (tSfPolicyUserContextId pContext, tSfPolicyId policyId) +\item +int \hyperlink{group__sfPolicyConfig_ga3f3ab9314d29d2ee2a8285289b388f17}{sfPolicyUserDataIterate} (tSfPolicyUserContextId pContext, int($\ast$callback)(tSfPolicyUserContextId pContext, tSfPolicyId policyId, void $\ast$config)) +\end{DoxyCompactItemize} +\subsection*{Variables} +\begin{DoxyCompactItemize} +\item +tSfPolicyId \hyperlink{sfPolicyUserData_8c_a281b418c0dc978a74cd7ab5e46ee0fa4}{runtimePolicyId} = 0 +\item +tSfPolicyId \hyperlink{sfPolicyUserData_8c_a0a415b8e70250b11e64a463134d00b4f}{parserPolicyId} = 0 +\end{DoxyCompactItemize} + + +\subsection{Variable Documentation} +\hypertarget{sfPolicyUserData_8c_a0a415b8e70250b11e64a463134d00b4f}{ +\index{sfPolicyUserData.c@{sfPolicyUserData.c}!parserPolicyId@{parserPolicyId}} +\index{parserPolicyId@{parserPolicyId}!sfPolicyUserData.c@{sfPolicyUserData.c}} +\subsubsection[{parserPolicyId}]{\setlength{\rightskip}{0pt plus 5cm}tSfPolicyId {\bf parserPolicyId} = 0}} +\label{sfPolicyUserData_8c_a0a415b8e70250b11e64a463134d00b4f} +\hypertarget{sfPolicyUserData_8c_a281b418c0dc978a74cd7ab5e46ee0fa4}{ +\index{sfPolicyUserData.c@{sfPolicyUserData.c}!runtimePolicyId@{runtimePolicyId}} +\index{runtimePolicyId@{runtimePolicyId}!sfPolicyUserData.c@{sfPolicyUserData.c}} +\subsubsection[{runtimePolicyId}]{\setlength{\rightskip}{0pt plus 5cm}tSfPolicyId {\bf runtimePolicyId} = 0}} +\label{sfPolicyUserData_8c_a281b418c0dc978a74cd7ab5e46ee0fa4} diff --git a/doc/latex/sf__dynamic__preproc__lib_8c.tex b/doc/latex/sf__dynamic__preproc__lib_8c.tex new file mode 100644 index 0000000..928b774 --- /dev/null +++ b/doc/latex/sf__dynamic__preproc__lib_8c.tex @@ -0,0 +1,69 @@ +\hypertarget{sf__dynamic__preproc__lib_8c}{ +\section{sf\_\-dynamic\_\-preproc\_\-lib.c File Reference} +\label{sf__dynamic__preproc__lib_8c}\index{sf\_\-dynamic\_\-preproc\_\-lib.c@{sf\_\-dynamic\_\-preproc\_\-lib.c}} +} +{\ttfamily \#include \char`\"{}sf\_\-preproc\_\-info.h\char`\"{}}\par +{\ttfamily \#include \char`\"{}sf\_\-snort\_\-packet.h\char`\"{}}\par +{\ttfamily \#include \char`\"{}sf\_\-dynamic\_\-preproc\_\-lib.h\char`\"{}}\par +{\ttfamily \#include \char`\"{}sf\_\-dynamic\_\-meta.h\char`\"{}}\par +{\ttfamily \#include 
\char`\"{}sf\_\-dynamic\_\-preprocessor.h\char`\"{}}\par +{\ttfamily \#include \char`\"{}sf\_\-dynamic\_\-common.h\char`\"{}}\par +{\ttfamily \#include \char`\"{}sf\_\-dynamic\_\-define.h\char`\"{}}\par +{\ttfamily \#include $<$stdio.h$>$}\par +{\ttfamily \#include $<$string.h$>$}\par +{\ttfamily \#include $<$ctype.h$>$}\par +{\ttfamily \#include $<$stdarg.h$>$}\par +{\ttfamily \#include $<$stdlib.h$>$}\par +\subsection*{Functions} +\begin{DoxyCompactItemize} +\item +NORETURN void \hyperlink{sf__dynamic__preproc__lib_8c_a57c853c0f626bde2af6619cdeeb7471b}{DynamicPreprocessorFatalMessage} (const char $\ast$format,...) +\item +PREPROC\_\-LINKAGE int \hyperlink{sf__dynamic__preproc__lib_8c_a16439ea02cc5c66c842c21c5b537b1d9}{InitializePreprocessor} (DynamicPreprocessorData $\ast$dpd) +\item +PREPROC\_\-LINKAGE int \hyperlink{sf__dynamic__preproc__lib_8c_a06d857402af54fb10872f43051e86494}{LibVersion} (DynamicPluginMeta $\ast$dpm) +\end{DoxyCompactItemize} +\subsection*{Variables} +\begin{DoxyCompactItemize} +\item +DynamicPreprocessorData \hyperlink{sf__dynamic__preproc__lib_8c_ab46420126c43c1aac5eabc5db266a71c}{\_\-dpd} +\end{DoxyCompactItemize} + + +\subsection{Function Documentation} +\hypertarget{sf__dynamic__preproc__lib_8c_a57c853c0f626bde2af6619cdeeb7471b}{ +\index{sf\_\-dynamic\_\-preproc\_\-lib.c@{sf\_\-dynamic\_\-preproc\_\-lib.c}!DynamicPreprocessorFatalMessage@{DynamicPreprocessorFatalMessage}} +\index{DynamicPreprocessorFatalMessage@{DynamicPreprocessorFatalMessage}!sf_dynamic_preproc_lib.c@{sf\_\-dynamic\_\-preproc\_\-lib.c}} +\subsubsection[{DynamicPreprocessorFatalMessage}]{\setlength{\rightskip}{0pt plus 5cm}NORETURN void DynamicPreprocessorFatalMessage ( +\begin{DoxyParamCaption} +\item[{const char $\ast$}]{ format, } +\item[{}]{ ...} +\end{DoxyParamCaption} +)}} +\label{sf__dynamic__preproc__lib_8c_a57c853c0f626bde2af6619cdeeb7471b} +\hypertarget{sf__dynamic__preproc__lib_8c_a16439ea02cc5c66c842c21c5b537b1d9}{ +\index{sf\_\-dynamic\_\-preproc\_\-lib.c@{sf\_\-dynamic\_\-preproc\_\-lib.c}!InitializePreprocessor@{InitializePreprocessor}} +\index{InitializePreprocessor@{InitializePreprocessor}!sf_dynamic_preproc_lib.c@{sf\_\-dynamic\_\-preproc\_\-lib.c}} +\subsubsection[{InitializePreprocessor}]{\setlength{\rightskip}{0pt plus 5cm}PREPROC\_\-LINKAGE int InitializePreprocessor ( +\begin{DoxyParamCaption} +\item[{DynamicPreprocessorData $\ast$}]{ dpd} +\end{DoxyParamCaption} +)}} +\label{sf__dynamic__preproc__lib_8c_a16439ea02cc5c66c842c21c5b537b1d9} +\hypertarget{sf__dynamic__preproc__lib_8c_a06d857402af54fb10872f43051e86494}{ +\index{sf\_\-dynamic\_\-preproc\_\-lib.c@{sf\_\-dynamic\_\-preproc\_\-lib.c}!LibVersion@{LibVersion}} +\index{LibVersion@{LibVersion}!sf_dynamic_preproc_lib.c@{sf\_\-dynamic\_\-preproc\_\-lib.c}} +\subsubsection[{LibVersion}]{\setlength{\rightskip}{0pt plus 5cm}PREPROC\_\-LINKAGE int LibVersion ( +\begin{DoxyParamCaption} +\item[{DynamicPluginMeta $\ast$}]{ dpm} +\end{DoxyParamCaption} +)}} +\label{sf__dynamic__preproc__lib_8c_a06d857402af54fb10872f43051e86494} + + +\subsection{Variable Documentation} +\hypertarget{sf__dynamic__preproc__lib_8c_ab46420126c43c1aac5eabc5db266a71c}{ +\index{sf\_\-dynamic\_\-preproc\_\-lib.c@{sf\_\-dynamic\_\-preproc\_\-lib.c}!\_\-dpd@{\_\-dpd}} +\index{\_\-dpd@{\_\-dpd}!sf_dynamic_preproc_lib.c@{sf\_\-dynamic\_\-preproc\_\-lib.c}} +\subsubsection[{\_\-dpd}]{\setlength{\rightskip}{0pt plus 5cm}DynamicPreprocessorData {\bf \_\-dpd}}} +\label{sf__dynamic__preproc__lib_8c_ab46420126c43c1aac5eabc5db266a71c} diff --git 
a/doc/latex/sf__preproc__info_8h.tex b/doc/latex/sf__preproc__info_8h.tex new file mode 100644 index 0000000..fb22540 --- /dev/null +++ b/doc/latex/sf__preproc__info_8h.tex @@ -0,0 +1,67 @@ +\hypertarget{sf__preproc__info_8h}{ +\section{sf\_\-preproc\_\-info.h File Reference} +\label{sf__preproc__info_8h}\index{sf\_\-preproc\_\-info.h@{sf\_\-preproc\_\-info.h}} +} +\subsection*{Defines} +\begin{DoxyCompactItemize} +\item +\#define \hyperlink{sf__preproc__info_8h_aa9e8f3bb466bb421d13913df7aeaa20c}{MAJOR\_\-VERSION}~1 +\item +\#define \hyperlink{sf__preproc__info_8h_a320988aa2655ee094f3a34a52da10831}{MINOR\_\-VERSION}~0 +\item +\#define \hyperlink{sf__preproc__info_8h_ad7a967dd260384e94010b31b1412a0b4}{BUILD\_\-VERSION}~1 +\item +\#define \hyperlink{sf__preproc__info_8h_af5d5329206253ca0c1a3b8d4a43195af}{PREPROC\_\-NAME}~\char`\"{}SF\_\-AI\char`\"{} +\item +\#define \hyperlink{sf__preproc__info_8h_aba4c0d0af324a3861e662ed4650aae44}{DYNAMIC\_\-PREPROC\_\-SETUP}~AI\_\-setup +\end{DoxyCompactItemize} +\subsection*{Functions} +\begin{DoxyCompactItemize} +\item +void \hyperlink{sf__preproc__info_8h_ad81716bc3f0fec4df74198a7cbdbd43c}{AI\_\-setup} () +\begin{DoxyCompactList}\small\item\em Set up the preprocessor module. \item\end{DoxyCompactList}\end{DoxyCompactItemize} + + +\subsection{Define Documentation} +\hypertarget{sf__preproc__info_8h_ad7a967dd260384e94010b31b1412a0b4}{ +\index{sf\_\-preproc\_\-info.h@{sf\_\-preproc\_\-info.h}!BUILD\_\-VERSION@{BUILD\_\-VERSION}} +\index{BUILD\_\-VERSION@{BUILD\_\-VERSION}!sf_preproc_info.h@{sf\_\-preproc\_\-info.h}} +\subsubsection[{BUILD\_\-VERSION}]{\setlength{\rightskip}{0pt plus 5cm}\#define BUILD\_\-VERSION~1}} +\label{sf__preproc__info_8h_ad7a967dd260384e94010b31b1412a0b4} +\hypertarget{sf__preproc__info_8h_aba4c0d0af324a3861e662ed4650aae44}{ +\index{sf\_\-preproc\_\-info.h@{sf\_\-preproc\_\-info.h}!DYNAMIC\_\-PREPROC\_\-SETUP@{DYNAMIC\_\-PREPROC\_\-SETUP}} +\index{DYNAMIC\_\-PREPROC\_\-SETUP@{DYNAMIC\_\-PREPROC\_\-SETUP}!sf_preproc_info.h@{sf\_\-preproc\_\-info.h}} +\subsubsection[{DYNAMIC\_\-PREPROC\_\-SETUP}]{\setlength{\rightskip}{0pt plus 5cm}\#define DYNAMIC\_\-PREPROC\_\-SETUP~AI\_\-setup}} +\label{sf__preproc__info_8h_aba4c0d0af324a3861e662ed4650aae44} +\hypertarget{sf__preproc__info_8h_aa9e8f3bb466bb421d13913df7aeaa20c}{ +\index{sf\_\-preproc\_\-info.h@{sf\_\-preproc\_\-info.h}!MAJOR\_\-VERSION@{MAJOR\_\-VERSION}} +\index{MAJOR\_\-VERSION@{MAJOR\_\-VERSION}!sf_preproc_info.h@{sf\_\-preproc\_\-info.h}} +\subsubsection[{MAJOR\_\-VERSION}]{\setlength{\rightskip}{0pt plus 5cm}\#define MAJOR\_\-VERSION~1}} +\label{sf__preproc__info_8h_aa9e8f3bb466bb421d13913df7aeaa20c} +\hypertarget{sf__preproc__info_8h_a320988aa2655ee094f3a34a52da10831}{ +\index{sf\_\-preproc\_\-info.h@{sf\_\-preproc\_\-info.h}!MINOR\_\-VERSION@{MINOR\_\-VERSION}} +\index{MINOR\_\-VERSION@{MINOR\_\-VERSION}!sf_preproc_info.h@{sf\_\-preproc\_\-info.h}} +\subsubsection[{MINOR\_\-VERSION}]{\setlength{\rightskip}{0pt plus 5cm}\#define MINOR\_\-VERSION~0}} +\label{sf__preproc__info_8h_a320988aa2655ee094f3a34a52da10831} +\hypertarget{sf__preproc__info_8h_af5d5329206253ca0c1a3b8d4a43195af}{ +\index{sf\_\-preproc\_\-info.h@{sf\_\-preproc\_\-info.h}!PREPROC\_\-NAME@{PREPROC\_\-NAME}} +\index{PREPROC\_\-NAME@{PREPROC\_\-NAME}!sf_preproc_info.h@{sf\_\-preproc\_\-info.h}} +\subsubsection[{PREPROC\_\-NAME}]{\setlength{\rightskip}{0pt plus 5cm}\#define PREPROC\_\-NAME~\char`\"{}SF\_\-AI\char`\"{}}} +\label{sf__preproc__info_8h_af5d5329206253ca0c1a3b8d4a43195af} + + +\subsection{Function 
Documentation} +\hypertarget{sf__preproc__info_8h_ad81716bc3f0fec4df74198a7cbdbd43c}{ +\index{sf\_\-preproc\_\-info.h@{sf\_\-preproc\_\-info.h}!AI\_\-setup@{AI\_\-setup}} +\index{AI\_\-setup@{AI\_\-setup}!sf_preproc_info.h@{sf\_\-preproc\_\-info.h}} +\subsubsection[{AI\_\-setup}]{\setlength{\rightskip}{0pt plus 5cm}void AI\_\-setup ( +\begin{DoxyParamCaption} +\item[{void}]{} +\end{DoxyParamCaption} +)}} +\label{sf__preproc__info_8h_ad81716bc3f0fec4df74198a7cbdbd43c} + + +Set up the preprocessor module. + +FUNCTION: AI\_\-setup \ No newline at end of file diff --git a/doc/latex/spp__ai_8c.tex b/doc/latex/spp__ai_8c.tex new file mode 100644 index 0000000..21baed3 --- /dev/null +++ b/doc/latex/spp__ai_8c.tex @@ -0,0 +1,156 @@ +\hypertarget{spp__ai_8c}{ +\section{spp\_\-ai.c File Reference} +\label{spp__ai_8c}\index{spp\_\-ai.c@{spp\_\-ai.c}} +} +{\ttfamily \#include \char`\"{}spp\_\-ai.h\char`\"{}}\par +{\ttfamily \#include \char`\"{}preprocids.h\char`\"{}}\par +{\ttfamily \#include \char`\"{}sf\_\-dynamic\_\-preproc\_\-lib.h\char`\"{}}\par +{\ttfamily \#include \char`\"{}sf\_\-dynamic\_\-preprocessor.h\char`\"{}}\par +{\ttfamily \#include \char`\"{}debug.h\char`\"{}}\par +{\ttfamily \#include \char`\"{}sfPolicy.h\char`\"{}}\par +{\ttfamily \#include \char`\"{}sfPolicyUserData.h\char`\"{}}\par +{\ttfamily \#include $<$sys/types.h$>$}\par +{\ttfamily \#include $<$stdlib.h$>$}\par +{\ttfamily \#include $<$ctype.h$>$}\par +{\ttfamily \#include $<$string.h$>$}\par +{\ttfamily \#include $<$pthread.h$>$}\par +\subsection*{Defines} +\begin{DoxyCompactItemize} +\item +\#define \hyperlink{spp__ai_8c_a9e7d446fc8b40be2cfbb5c69c3e132ca}{GENERATOR\_\-EXAMPLE}~256 +\item +\#define \hyperlink{spp__ai_8c_af4c767ae0346026264c851108f42be63}{SRC\_\-PORT\_\-MATCH}~1 +\item +\#define \hyperlink{spp__ai_8c_a3ec4dd8f1ebed73c13175d9b9c820e2e}{SRC\_\-PORT\_\-MATCH\_\-STR}~\char`\"{}example\_\-preprocessor: src port match\char`\"{} +\item +\#define \hyperlink{spp__ai_8c_a8ab13e8ad1dfd19b9237a99ae6130146}{DST\_\-PORT\_\-MATCH}~2 +\item +\#define \hyperlink{spp__ai_8c_a1f3521b9bcf5daf99190be58473a4110}{DST\_\-PORT\_\-MATCH\_\-STR}~\char`\"{}example\_\-preprocessor: dest port match\char`\"{} +\end{DoxyCompactItemize} +\subsection*{Functions} +\begin{DoxyCompactItemize} +\item +static void \hyperlink{spp__ai_8c_a3524cbdf8fddbcf38c4ed55241002242}{AI\_\-init} (char $\ast$args) +\begin{DoxyCompactList}\small\item\em Initialize the preprocessor module. \item\end{DoxyCompactList}\item +static void \hyperlink{spp__ai_8c_a57c05cda012c443cb4c358dc327cd3d1}{AI\_\-process} (void $\ast$pkt, void $\ast$context) +\begin{DoxyCompactList}\small\item\em Function executed every time the module receives a packet to be processed. \item\end{DoxyCompactList}\item +static \hyperlink{struct__AI__config}{AI\_\-config} $\ast$ \hyperlink{spp__ai_8c_ae1c5c4b38ee2819d427848eb3046373e}{AI\_\-parse} (char $\ast$args) +\begin{DoxyCompactList}\small\item\em Parse the arguments passed to the module saving them to a valid configuration struct. \item\end{DoxyCompactList}\item +void \hyperlink{spp__ai_8c_a1b9ebb5c719c7d9426ddfc1f3da36570}{AI\_\-setup} (void) +\begin{DoxyCompactList}\small\item\em Set up the preprocessor module. 
\item\end{DoxyCompactList}\end{DoxyCompactItemize} +\subsection*{Variables} +\begin{DoxyCompactItemize} +\item +tSfPolicyUserContextId \hyperlink{spp__ai_8c_a3dd75596c540d148643fe6d1fdc02628}{ex\_\-config} = NULL +\item +DynamicPreprocessorData \hyperlink{spp__ai_8c_ab46420126c43c1aac5eabc5db266a71c}{\_\-dpd} +\end{DoxyCompactItemize} + + +\subsection{Define Documentation} +\hypertarget{spp__ai_8c_a8ab13e8ad1dfd19b9237a99ae6130146}{ +\index{spp\_\-ai.c@{spp\_\-ai.c}!DST\_\-PORT\_\-MATCH@{DST\_\-PORT\_\-MATCH}} +\index{DST\_\-PORT\_\-MATCH@{DST\_\-PORT\_\-MATCH}!spp_ai.c@{spp\_\-ai.c}} +\subsubsection[{DST\_\-PORT\_\-MATCH}]{\setlength{\rightskip}{0pt plus 5cm}\#define DST\_\-PORT\_\-MATCH~2}} +\label{spp__ai_8c_a8ab13e8ad1dfd19b9237a99ae6130146} +\hypertarget{spp__ai_8c_a1f3521b9bcf5daf99190be58473a4110}{ +\index{spp\_\-ai.c@{spp\_\-ai.c}!DST\_\-PORT\_\-MATCH\_\-STR@{DST\_\-PORT\_\-MATCH\_\-STR}} +\index{DST\_\-PORT\_\-MATCH\_\-STR@{DST\_\-PORT\_\-MATCH\_\-STR}!spp_ai.c@{spp\_\-ai.c}} +\subsubsection[{DST\_\-PORT\_\-MATCH\_\-STR}]{\setlength{\rightskip}{0pt plus 5cm}\#define DST\_\-PORT\_\-MATCH\_\-STR~\char`\"{}example\_\-preprocessor: dest port match\char`\"{}}} +\label{spp__ai_8c_a1f3521b9bcf5daf99190be58473a4110} +\hypertarget{spp__ai_8c_a9e7d446fc8b40be2cfbb5c69c3e132ca}{ +\index{spp\_\-ai.c@{spp\_\-ai.c}!GENERATOR\_\-EXAMPLE@{GENERATOR\_\-EXAMPLE}} +\index{GENERATOR\_\-EXAMPLE@{GENERATOR\_\-EXAMPLE}!spp_ai.c@{spp\_\-ai.c}} +\subsubsection[{GENERATOR\_\-EXAMPLE}]{\setlength{\rightskip}{0pt plus 5cm}\#define GENERATOR\_\-EXAMPLE~256}} +\label{spp__ai_8c_a9e7d446fc8b40be2cfbb5c69c3e132ca} +\hypertarget{spp__ai_8c_af4c767ae0346026264c851108f42be63}{ +\index{spp\_\-ai.c@{spp\_\-ai.c}!SRC\_\-PORT\_\-MATCH@{SRC\_\-PORT\_\-MATCH}} +\index{SRC\_\-PORT\_\-MATCH@{SRC\_\-PORT\_\-MATCH}!spp_ai.c@{spp\_\-ai.c}} +\subsubsection[{SRC\_\-PORT\_\-MATCH}]{\setlength{\rightskip}{0pt plus 5cm}\#define SRC\_\-PORT\_\-MATCH~1}} +\label{spp__ai_8c_af4c767ae0346026264c851108f42be63} +\hypertarget{spp__ai_8c_a3ec4dd8f1ebed73c13175d9b9c820e2e}{ +\index{spp\_\-ai.c@{spp\_\-ai.c}!SRC\_\-PORT\_\-MATCH\_\-STR@{SRC\_\-PORT\_\-MATCH\_\-STR}} +\index{SRC\_\-PORT\_\-MATCH\_\-STR@{SRC\_\-PORT\_\-MATCH\_\-STR}!spp_ai.c@{spp\_\-ai.c}} +\subsubsection[{SRC\_\-PORT\_\-MATCH\_\-STR}]{\setlength{\rightskip}{0pt plus 5cm}\#define SRC\_\-PORT\_\-MATCH\_\-STR~\char`\"{}example\_\-preprocessor: src port match\char`\"{}}} +\label{spp__ai_8c_a3ec4dd8f1ebed73c13175d9b9c820e2e} + + +\subsection{Function Documentation} +\hypertarget{spp__ai_8c_a3524cbdf8fddbcf38c4ed55241002242}{ +\index{spp\_\-ai.c@{spp\_\-ai.c}!AI\_\-init@{AI\_\-init}} +\index{AI\_\-init@{AI\_\-init}!spp_ai.c@{spp\_\-ai.c}} +\subsubsection[{AI\_\-init}]{\setlength{\rightskip}{0pt plus 5cm}static void AI\_\-init ( +\begin{DoxyParamCaption} +\item[{char $\ast$}]{ args} +\end{DoxyParamCaption} +)\hspace{0.3cm}{\ttfamily \mbox{[}static\mbox{]}}}} +\label{spp__ai_8c_a3524cbdf8fddbcf38c4ed55241002242} + + +Initialize the preprocessor module. 
+ +FUNCTION: AI\_\-init +\begin{DoxyParams}{Parameters} +\item[{\em args}]Configuration arguments passed to the module \end{DoxyParams} +\hypertarget{spp__ai_8c_ae1c5c4b38ee2819d427848eb3046373e}{ +\index{spp\_\-ai.c@{spp\_\-ai.c}!AI\_\-parse@{AI\_\-parse}} +\index{AI\_\-parse@{AI\_\-parse}!spp_ai.c@{spp\_\-ai.c}} +\subsubsection[{AI\_\-parse}]{\setlength{\rightskip}{0pt plus 5cm}static {\bf AI\_\-config} $\ast$ AI\_\-parse ( +\begin{DoxyParamCaption} +\item[{char $\ast$}]{ args} +\end{DoxyParamCaption} +)\hspace{0.3cm}{\ttfamily \mbox{[}static\mbox{]}}}} +\label{spp__ai_8c_ae1c5c4b38ee2819d427848eb3046373e} + + +Parse the arguments passed to the module saving them to a valid configuration struct. + +FUNCTION: AI\_\-config +\begin{DoxyParams}{Parameters} +\item[{\em args}]Arguments passed to the module \end{DoxyParams} +\begin{DoxyReturn}{Returns} +Pointer to AI\_\-config keeping the configuration for the module +\end{DoxyReturn} +\hypertarget{spp__ai_8c_a57c05cda012c443cb4c358dc327cd3d1}{ +\index{spp\_\-ai.c@{spp\_\-ai.c}!AI\_\-process@{AI\_\-process}} +\index{AI\_\-process@{AI\_\-process}!spp_ai.c@{spp\_\-ai.c}} +\subsubsection[{AI\_\-process}]{\setlength{\rightskip}{0pt plus 5cm}void AI\_\-process ( +\begin{DoxyParamCaption} +\item[{void $\ast$}]{ pkt, } +\item[{void $\ast$}]{ context} +\end{DoxyParamCaption} +)\hspace{0.3cm}{\ttfamily \mbox{[}static\mbox{]}}}} +\label{spp__ai_8c_a57c05cda012c443cb4c358dc327cd3d1} + + +Function executed every time the module receives a packet to be processed. + +FUNCTION: AI\_\-process +\begin{DoxyParams}{Parameters} +\item[{\em pkt}]void$\ast$ pointer to the packet data \item[{\em context}]void$\ast$ pointer to the context \end{DoxyParams} +\hypertarget{spp__ai_8c_a1b9ebb5c719c7d9426ddfc1f3da36570}{ +\index{spp\_\-ai.c@{spp\_\-ai.c}!AI\_\-setup@{AI\_\-setup}} +\index{AI\_\-setup@{AI\_\-setup}!spp_ai.c@{spp\_\-ai.c}} +\subsubsection[{AI\_\-setup}]{\setlength{\rightskip}{0pt plus 5cm}void AI\_\-setup ( +\begin{DoxyParamCaption} +\item[{void}]{} +\end{DoxyParamCaption} +)}} +\label{spp__ai_8c_a1b9ebb5c719c7d9426ddfc1f3da36570} + + +Set up the preprocessor module. 
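Since AI_parse is documented only as turning the option string into an AI_config structure, the following sketch shows one plausible shape for that step. The field names portToCheck, hashCleanupInterval and streamExpireInterval come from the _AI_config struct documented later in this file; the option keywords, the default values and the whitespace tokenizing are assumptions of the sketch, not the module's actual configuration grammar.

#include <stdlib.h>
#include <string.h>

/* Local stand-in mirroring the documented _AI_config fields, kept separate
 * so the sketch stays self-contained. */
typedef struct {
    unsigned short portToCheck;
    unsigned long  hashCleanupInterval;
    unsigned long  streamExpireInterval;
} example_AI_config;

/* Hypothetical grammar: "port <n> cleanup_interval <seconds> stream_expire <seconds>" */
static example_AI_config *example_parse_args(char *args)
{
    example_AI_config *cfg = calloc(1, sizeof(*cfg));
    char *save = NULL;
    char *key;

    if (cfg == NULL)
        return NULL;

    /* Assumed defaults; the real module may choose differently. */
    cfg->hashCleanupInterval  = 300;
    cfg->streamExpireInterval = 3600;

    for (key = strtok_r(args, " \t", &save); key != NULL; key = strtok_r(NULL, " \t", &save)) {
        char *value = strtok_r(NULL, " \t", &save);
        if (value == NULL)
            break;
        if (strcmp(key, "port") == 0)
            cfg->portToCheck = (unsigned short) strtoul(value, NULL, 10);
        else if (strcmp(key, "cleanup_interval") == 0)
            cfg->hashCleanupInterval = strtoul(value, NULL, 10);
        else if (strcmp(key, "stream_expire") == 0)
            cfg->streamExpireInterval = strtoul(value, NULL, 10);
    }
    return cfg;
}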
+ +FUNCTION: AI\_\-setup + +\subsection{Variable Documentation} +\hypertarget{spp__ai_8c_ab46420126c43c1aac5eabc5db266a71c}{ +\index{spp\_\-ai.c@{spp\_\-ai.c}!\_\-dpd@{\_\-dpd}} +\index{\_\-dpd@{\_\-dpd}!spp_ai.c@{spp\_\-ai.c}} +\subsubsection[{\_\-dpd}]{\setlength{\rightskip}{0pt plus 5cm}DynamicPreprocessorData {\bf \_\-dpd}}} +\label{spp__ai_8c_ab46420126c43c1aac5eabc5db266a71c} +\hypertarget{spp__ai_8c_a3dd75596c540d148643fe6d1fdc02628}{ +\index{spp\_\-ai.c@{spp\_\-ai.c}!ex\_\-config@{ex\_\-config}} +\index{ex\_\-config@{ex\_\-config}!spp_ai.c@{spp\_\-ai.c}} +\subsubsection[{ex\_\-config}]{\setlength{\rightskip}{0pt plus 5cm}tSfPolicyUserContextId {\bf ex\_\-config} = NULL}} +\label{spp__ai_8c_a3dd75596c540d148643fe6d1fdc02628} diff --git a/doc/latex/spp__ai_8h.tex b/doc/latex/spp__ai_8h.tex new file mode 100644 index 0000000..0430843 --- /dev/null +++ b/doc/latex/spp__ai_8h.tex @@ -0,0 +1,108 @@ +\hypertarget{spp__ai_8h}{ +\section{spp\_\-ai.h File Reference} +\label{spp__ai_8h}\index{spp\_\-ai.h@{spp\_\-ai.h}} +} +{\ttfamily \#include \char`\"{}sf\_\-snort\_\-packet.h\char`\"{}}\par +\subsection*{Data Structures} +\begin{DoxyCompactItemize} +\item +struct \hyperlink{struct__AI__config}{\_\-AI\_\-config} +\end{DoxyCompactItemize} +\subsection*{Typedefs} +\begin{DoxyCompactItemize} +\item +typedef unsigned int \hyperlink{spp__ai_8h_a435d1572bf3f880d55459d9805097f62}{uint32\_\-t} +\item +typedef unsigned short \hyperlink{spp__ai_8h_a273cf69d639a59973b6019625df33e30}{uint16\_\-t} +\item +typedef struct \hyperlink{struct__AI__config}{\_\-AI\_\-config} \hyperlink{spp__ai_8h_a3fc526e5a55f5d137402b1bbd1b6072c}{AI\_\-config} +\end{DoxyCompactItemize} +\subsection*{Enumerations} +\begin{DoxyCompactItemize} +\item +enum \hyperlink{spp__ai_8h_a3e5b8192e7d9ffaf3542f1210aec18dd}{BOOL} \{ \hyperlink{spp__ai_8h_a3e5b8192e7d9ffaf3542f1210aec18ddae9de385ef6fe9bf3360d1038396b884c}{false}, +\hyperlink{spp__ai_8h_a3e5b8192e7d9ffaf3542f1210aec18dda08f175a5505a10b9ed657defeb050e4b}{true} + \} +\end{DoxyCompactItemize} +\subsection*{Functions} +\begin{DoxyCompactItemize} +\item +void \hyperlink{spp__ai_8h_af6f7d167c3623bbc669e8d31c2719b29}{AI\_\-pkt\_\-enqueue} (SFSnortPacket $\ast$) +\begin{DoxyCompactList}\small\item\em Function called for appending a new packet to the hash table, creating a new stream or appending it to an existing stream. \item\end{DoxyCompactList}\item +void $\ast$ \hyperlink{spp__ai_8h_ad56f71be823eead743972274b99c82ff}{AI\_\-hashcleanup\_\-thread} (void $\ast$) +\begin{DoxyCompactList}\small\item\em Thread called for cleaning up the hash table from the traffic streams older than a certain threshold. 
\item\end{DoxyCompactList}\end{DoxyCompactItemize} + + +\subsection{Typedef Documentation} +\hypertarget{spp__ai_8h_a3fc526e5a55f5d137402b1bbd1b6072c}{ +\index{spp\_\-ai.h@{spp\_\-ai.h}!AI\_\-config@{AI\_\-config}} +\index{AI\_\-config@{AI\_\-config}!spp_ai.h@{spp\_\-ai.h}} +\subsubsection[{AI\_\-config}]{\setlength{\rightskip}{0pt plus 5cm}typedef struct {\bf \_\-AI\_\-config} {\bf AI\_\-config}}} +\label{spp__ai_8h_a3fc526e5a55f5d137402b1bbd1b6072c} +\hypertarget{spp__ai_8h_a273cf69d639a59973b6019625df33e30}{ +\index{spp\_\-ai.h@{spp\_\-ai.h}!uint16\_\-t@{uint16\_\-t}} +\index{uint16\_\-t@{uint16\_\-t}!spp_ai.h@{spp\_\-ai.h}} +\subsubsection[{uint16\_\-t}]{\setlength{\rightskip}{0pt plus 5cm}typedef unsigned short {\bf uint16\_\-t}}} +\label{spp__ai_8h_a273cf69d639a59973b6019625df33e30} +\hypertarget{spp__ai_8h_a435d1572bf3f880d55459d9805097f62}{ +\index{spp\_\-ai.h@{spp\_\-ai.h}!uint32\_\-t@{uint32\_\-t}} +\index{uint32\_\-t@{uint32\_\-t}!spp_ai.h@{spp\_\-ai.h}} +\subsubsection[{uint32\_\-t}]{\setlength{\rightskip}{0pt plus 5cm}typedef unsigned int {\bf uint32\_\-t}}} +\label{spp__ai_8h_a435d1572bf3f880d55459d9805097f62} + + +\subsection{Enumeration Type Documentation} +\hypertarget{spp__ai_8h_a3e5b8192e7d9ffaf3542f1210aec18dd}{ +\index{spp\_\-ai.h@{spp\_\-ai.h}!BOOL@{BOOL}} +\index{BOOL@{BOOL}!spp_ai.h@{spp\_\-ai.h}} +\subsubsection[{BOOL}]{\setlength{\rightskip}{0pt plus 5cm}enum {\bf BOOL}}} +\label{spp__ai_8h_a3e5b8192e7d9ffaf3542f1210aec18dd} +\begin{Desc} +\item[Enumerator: ]\par +\begin{description} +\index{false@{false}!spp\_\-ai.h@{spp\_\-ai.h}}\index{spp\_\-ai.h@{spp\_\-ai.h}!false@{false}}\item[{\em +\hypertarget{spp__ai_8h_a3e5b8192e7d9ffaf3542f1210aec18ddae9de385ef6fe9bf3360d1038396b884c}{ +false} +\label{spp__ai_8h_a3e5b8192e7d9ffaf3542f1210aec18ddae9de385ef6fe9bf3360d1038396b884c} +}]\index{true@{true}!spp\_\-ai.h@{spp\_\-ai.h}}\index{spp\_\-ai.h@{spp\_\-ai.h}!true@{true}}\item[{\em +\hypertarget{spp__ai_8h_a3e5b8192e7d9ffaf3542f1210aec18dda08f175a5505a10b9ed657defeb050e4b}{ +true} +\label{spp__ai_8h_a3e5b8192e7d9ffaf3542f1210aec18dda08f175a5505a10b9ed657defeb050e4b} +}]\end{description} +\end{Desc} + + + +\subsection{Function Documentation} +\hypertarget{spp__ai_8h_ad56f71be823eead743972274b99c82ff}{ +\index{spp\_\-ai.h@{spp\_\-ai.h}!AI\_\-hashcleanup\_\-thread@{AI\_\-hashcleanup\_\-thread}} +\index{AI\_\-hashcleanup\_\-thread@{AI\_\-hashcleanup\_\-thread}!spp_ai.h@{spp\_\-ai.h}} +\subsubsection[{AI\_\-hashcleanup\_\-thread}]{\setlength{\rightskip}{0pt plus 5cm}void$\ast$ AI\_\-hashcleanup\_\-thread ( +\begin{DoxyParamCaption} +\item[{void $\ast$}]{ arg} +\end{DoxyParamCaption} +)}} +\label{spp__ai_8h_ad56f71be823eead743972274b99c82ff} + + +Thread called for cleaning up the hash table from the traffic streams older than a certain threshold. + +FUNCTION: AI\_\-hashcleanup\_\-thread +\begin{DoxyParams}{Parameters} +\item[{\em arg}]Pointer to the AI\_\-config struct \end{DoxyParams} +\hypertarget{spp__ai_8h_af6f7d167c3623bbc669e8d31c2719b29}{ +\index{spp\_\-ai.h@{spp\_\-ai.h}!AI\_\-pkt\_\-enqueue@{AI\_\-pkt\_\-enqueue}} +\index{AI\_\-pkt\_\-enqueue@{AI\_\-pkt\_\-enqueue}!spp_ai.h@{spp\_\-ai.h}} +\subsubsection[{AI\_\-pkt\_\-enqueue}]{\setlength{\rightskip}{0pt plus 5cm}void AI\_\-pkt\_\-enqueue ( +\begin{DoxyParamCaption} +\item[{SFSnortPacket $\ast$}]{ pkt} +\end{DoxyParamCaption} +)}} +\label{spp__ai_8h_af6f7d167c3623bbc669e8d31c2719b29} + + +Function called for appending a new packet to the hash table, creating a new stream or appending it to an existing stream. 
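The two functions above are implemented in stream.c with a uthash table keyed by the pkt_key structure (src_ip, dst_port) and storing pkt_info nodes, as documented further below. The following sketch only illustrates the general look-up-or-insert pattern with uthash: the renamed structures, the absence of locking and the way packets are chained inside a stream are assumptions, not the module's code.

#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "spp_ai.h"   /* SFSnortPacket plus the documented uint32_t / uint16_t typedefs */
#include "uthash.h"

/* Mirrors the documented pkt_key / pkt_info layout under different names. */
struct ex_pkt_key {
    uint32_t src_ip;
    uint16_t dst_port;
};

struct ex_pkt_info {
    struct ex_pkt_key   key;        /* hash key: source IP + destination port */
    time_t              timestamp;  /* arrival time of this packet            */
    SFSnortPacket      *pkt;        /* the captured packet itself             */
    struct ex_pkt_info *next;       /* next packet in the same stream         */
    UT_hash_handle      hh;         /* uthash bookkeeping                     */
};

struct ex_pkt_info *ex_hash = NULL;

/* Hypothetical enqueue: find the stream for (src_ip, dst_port) and append,
 * or create a new stream when no entry exists yet. */
static void ex_pkt_enqueue(uint32_t src_ip, uint16_t dst_port, SFSnortPacket *pkt)
{
    struct ex_pkt_key   lookup;
    struct ex_pkt_info *stream;
    struct ex_pkt_info *node;

    memset(&lookup, 0, sizeof(lookup));   /* zero any padding before hashing */
    lookup.src_ip   = src_ip;
    lookup.dst_port = dst_port;

    node = calloc(1, sizeof(*node));      /* calloc keeps key padding zeroed */
    if (node == NULL)
        return;
    node->key.src_ip   = src_ip;
    node->key.dst_port = dst_port;
    node->timestamp    = time(NULL);
    node->pkt          = pkt;

    HASH_FIND(hh, ex_hash, &lookup, sizeof(lookup), stream);
    if (stream == NULL) {
        HASH_ADD(hh, ex_hash, key, sizeof(node->key), node);   /* new stream */
    } else {
        while (stream->next != NULL)        /* append to the existing stream */
            stream = stream->next;
        stream->next = node;
    }
}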
+ +FUNCTION: AI\_\-pkt\_\-enqueue +\begin{DoxyParams}{Parameters} +\item[{\em pkt}]Packet to be appended \end{DoxyParams} diff --git a/doc/latex/stream_8c.tex b/doc/latex/stream_8c.tex new file mode 100644 index 0000000..eeb2e72 --- /dev/null +++ b/doc/latex/stream_8c.tex @@ -0,0 +1,92 @@ +\hypertarget{stream_8c}{ +\section{stream.c File Reference} +\label{stream_8c}\index{stream.c@{stream.c}} +} +{\ttfamily \#include \char`\"{}spp\_\-ai.h\char`\"{}}\par +{\ttfamily \#include \char`\"{}uthash.h\char`\"{}}\par +{\ttfamily \#include $<$stdio.h$>$}\par +{\ttfamily \#include $<$stdlib.h$>$}\par +{\ttfamily \#include $<$string.h$>$}\par +{\ttfamily \#include $<$time.h$>$}\par +{\ttfamily \#include $<$unistd.h$>$}\par +{\ttfamily \#include $<$arpa/inet.h$>$}\par +\subsection*{Data Structures} +\begin{DoxyCompactItemize} +\item +struct \hyperlink{structpkt__key}{pkt\_\-key} +\item +struct \hyperlink{structpkt__info}{pkt\_\-info} +\end{DoxyCompactItemize} +\subsection*{Functions} +\begin{DoxyCompactItemize} +\item +static void \hyperlink{stream_8c_a2a0c295a6828df716311977538cabd4a}{\_\-AI\_\-stream\_\-free} (struct \hyperlink{structpkt__info}{pkt\_\-info} $\ast$stream) +\begin{DoxyCompactList}\small\item\em Remove a stream from the hash table (private function). \item\end{DoxyCompactList}\item +void $\ast$ \hyperlink{stream_8c_a24b1131374e5059564b8a12380c4eb75}{AI\_\-hashcleanup\_\-thread} (void $\ast$arg) +\begin{DoxyCompactList}\small\item\em Thread called for cleaning up the hash table from the traffic streams older than a certain threshold. \item\end{DoxyCompactList}\item +void \hyperlink{stream_8c_a7d71c5645b9baff7b6c4b9a181bf80c5}{AI\_\-pkt\_\-enqueue} (SFSnortPacket $\ast$pkt) +\begin{DoxyCompactList}\small\item\em Function called for appending a new packet to the hash table, creating a new stream or appending it to an existing stream. \item\end{DoxyCompactList}\end{DoxyCompactItemize} +\subsection*{Variables} +\begin{DoxyCompactItemize} +\item +static struct \hyperlink{structpkt__info}{pkt\_\-info} $\ast$ \hyperlink{stream_8c_a96fbc549c67e0d852ced3cb72980e923}{hash} = NULL +\end{DoxyCompactItemize} + + +\subsection{Function Documentation} +\hypertarget{stream_8c_a2a0c295a6828df716311977538cabd4a}{ +\index{stream.c@{stream.c}!\_\-AI\_\-stream\_\-free@{\_\-AI\_\-stream\_\-free}} +\index{\_\-AI\_\-stream\_\-free@{\_\-AI\_\-stream\_\-free}!stream.c@{stream.c}} +\subsubsection[{\_\-AI\_\-stream\_\-free}]{\setlength{\rightskip}{0pt plus 5cm}static void \_\-AI\_\-stream\_\-free ( +\begin{DoxyParamCaption} +\item[{struct {\bf pkt\_\-info} $\ast$}]{ stream} +\end{DoxyParamCaption} +)\hspace{0.3cm}{\ttfamily \mbox{[}static\mbox{]}}}} +\label{stream_8c_a2a0c295a6828df716311977538cabd4a} + + +Remove a stream from the hash table (private function). + +FUNCTION: \_\-AI\_\-stream\_\-free +\begin{DoxyParams}{Parameters} +\item[{\em stream}]Stream to be removed \end{DoxyParams} +\hypertarget{stream_8c_a24b1131374e5059564b8a12380c4eb75}{ +\index{stream.c@{stream.c}!AI\_\-hashcleanup\_\-thread@{AI\_\-hashcleanup\_\-thread}} +\index{AI\_\-hashcleanup\_\-thread@{AI\_\-hashcleanup\_\-thread}!stream.c@{stream.c}} +\subsubsection[{AI\_\-hashcleanup\_\-thread}]{\setlength{\rightskip}{0pt plus 5cm}void$\ast$ AI\_\-hashcleanup\_\-thread ( +\begin{DoxyParamCaption} +\item[{void $\ast$}]{ arg} +\end{DoxyParamCaption} +)}} +\label{stream_8c_a24b1131374e5059564b8a12380c4eb75} + + +Thread called for cleaning up the hash table from the traffic streams older than a certain threshold. 
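Continuing the enqueue sketch given earlier (same file, so struct ex_pkt_info and ex_hash are already in scope), this is one plausible shape for the cleanup thread described here: sleep for hashCleanupInterval seconds, then drop every stream whose newest packet is older than streamExpireInterval. The two interval names come from the documented _AI_config struct; the hard-coded values, the missing locking and the HASH_ITER / HASH_DEL housekeeping are assumptions of the sketch, not the module's implementation.

#include <unistd.h>   /* sleep() */

static void *ex_hashcleanup_thread(void *arg)
{
    /* The documentation says arg points to the AI_config struct; the two
     * values below stand in for cfg->hashCleanupInterval and
     * cfg->streamExpireInterval. */
    unsigned long cleanup_interval = 300;
    unsigned long expire_interval  = 3600;
    (void) arg;

    for (;;) {
        struct ex_pkt_info *stream;
        struct ex_pkt_info *tmp;
        time_t now;

        sleep((unsigned int) cleanup_interval);
        now = time(NULL);

        /* Safe-to-delete iteration over the whole stream table. */
        HASH_ITER(hh, ex_hash, stream, tmp) {
            if (now - stream->timestamp > (time_t) expire_interval) {
                HASH_DEL(ex_hash, stream);
                /* A real implementation would also walk stream->next and
                 * release every queued packet, as _AI_stream_free suggests. */
                free(stream);
            }
        }
    }
    return NULL;
}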
+ +FUNCTION: AI\_\-hashcleanup\_\-thread +\begin{DoxyParams}{Parameters} +\item[{\em arg}]Pointer to the AI\_\-config struct \end{DoxyParams} +\hypertarget{stream_8c_a7d71c5645b9baff7b6c4b9a181bf80c5}{ +\index{stream.c@{stream.c}!AI\_\-pkt\_\-enqueue@{AI\_\-pkt\_\-enqueue}} +\index{AI\_\-pkt\_\-enqueue@{AI\_\-pkt\_\-enqueue}!stream.c@{stream.c}} +\subsubsection[{AI\_\-pkt\_\-enqueue}]{\setlength{\rightskip}{0pt plus 5cm}void AI\_\-pkt\_\-enqueue ( +\begin{DoxyParamCaption} +\item[{SFSnortPacket $\ast$}]{ pkt} +\end{DoxyParamCaption} +)}} +\label{stream_8c_a7d71c5645b9baff7b6c4b9a181bf80c5} + + +Function called for appending a new packet to the hash table, creating a new stream or appending it to an existing stream. + +FUNCTION: AI\_\-pkt\_\-enqueue +\begin{DoxyParams}{Parameters} +\item[{\em pkt}]Packet to be appended \end{DoxyParams} + + +\subsection{Variable Documentation} +\hypertarget{stream_8c_a96fbc549c67e0d852ced3cb72980e923}{ +\index{stream.c@{stream.c}!hash@{hash}} +\index{hash@{hash}!stream.c@{stream.c}} +\subsubsection[{hash}]{\setlength{\rightskip}{0pt plus 5cm}struct {\bf pkt\_\-info}$\ast$ {\bf hash} = NULL\hspace{0.3cm}{\ttfamily \mbox{[}static\mbox{]}}}} +\label{stream_8c_a96fbc549c67e0d852ced3cb72980e923} diff --git a/doc/latex/struct__AI__config.tex b/doc/latex/struct__AI__config.tex new file mode 100644 index 0000000..66cd016 --- /dev/null +++ b/doc/latex/struct__AI__config.tex @@ -0,0 +1,40 @@ +\hypertarget{struct__AI__config}{ +\section{\_\-AI\_\-config Struct Reference} +\label{struct__AI__config}\index{\_\-AI\_\-config@{\_\-AI\_\-config}} +} + + +{\ttfamily \#include $<$spp\_\-ai.h$>$} + +\subsection*{Data Fields} +\begin{DoxyCompactItemize} +\item +\hyperlink{spp__ai_8h_a273cf69d639a59973b6019625df33e30}{uint16\_\-t} \hyperlink{struct__AI__config_ab22e082ad6637f6280134e882bf53b0d}{portToCheck} +\item +unsigned long \hyperlink{struct__AI__config_a890e6756dc637e9d41b7051a4d1ddc93}{hashCleanupInterval} +\item +unsigned long \hyperlink{struct__AI__config_a338358f23bf15f567a015a99d892c8e7}{streamExpireInterval} +\end{DoxyCompactItemize} + + +\subsection{Field Documentation} +\hypertarget{struct__AI__config_a890e6756dc637e9d41b7051a4d1ddc93}{ +\index{\_\-AI\_\-config@{\_\-AI\_\-config}!hashCleanupInterval@{hashCleanupInterval}} +\index{hashCleanupInterval@{hashCleanupInterval}!_AI_config@{\_\-AI\_\-config}} +\subsubsection[{hashCleanupInterval}]{\setlength{\rightskip}{0pt plus 5cm}unsigned long {\bf \_\-AI\_\-config::hashCleanupInterval}}} +\label{struct__AI__config_a890e6756dc637e9d41b7051a4d1ddc93} +\hypertarget{struct__AI__config_ab22e082ad6637f6280134e882bf53b0d}{ +\index{\_\-AI\_\-config@{\_\-AI\_\-config}!portToCheck@{portToCheck}} +\index{portToCheck@{portToCheck}!_AI_config@{\_\-AI\_\-config}} +\subsubsection[{portToCheck}]{\setlength{\rightskip}{0pt plus 5cm}{\bf uint16\_\-t} {\bf \_\-AI\_\-config::portToCheck}}} +\label{struct__AI__config_ab22e082ad6637f6280134e882bf53b0d} +\hypertarget{struct__AI__config_a338358f23bf15f567a015a99d892c8e7}{ +\index{\_\-AI\_\-config@{\_\-AI\_\-config}!streamExpireInterval@{streamExpireInterval}} +\index{streamExpireInterval@{streamExpireInterval}!_AI_config@{\_\-AI\_\-config}} +\subsubsection[{streamExpireInterval}]{\setlength{\rightskip}{0pt plus 5cm}unsigned long {\bf \_\-AI\_\-config::streamExpireInterval}}} +\label{struct__AI__config_a338358f23bf15f567a015a99d892c8e7} + + +The documentation for this struct was generated from the following file:\begin{DoxyCompactItemize} +\item 
+\hyperlink{spp__ai_8h}{spp\_\-ai.h}\end{DoxyCompactItemize} diff --git a/doc/latex/structpkt__info.tex b/doc/latex/structpkt__info.tex new file mode 100644 index 0000000..668d3cb --- /dev/null +++ b/doc/latex/structpkt__info.tex @@ -0,0 +1,50 @@ +\hypertarget{structpkt__info}{ +\section{pkt\_\-info Struct Reference} +\label{structpkt__info}\index{pkt\_\-info@{pkt\_\-info}} +} +\subsection*{Data Fields} +\begin{DoxyCompactItemize} +\item +struct \hyperlink{structpkt__key}{pkt\_\-key} \hyperlink{structpkt__info_a231d4734d3c62292b06eb9ea4b49c339}{key} +\item +time\_\-t \hyperlink{structpkt__info_a7f5090443f21e6290f0439f1bb872e92}{timestamp} +\item +SFSnortPacket $\ast$ \hyperlink{structpkt__info_a8d5ebd04a32067b05387e5c5056fe168}{pkt} +\item +struct \hyperlink{structpkt__info}{pkt\_\-info} $\ast$ \hyperlink{structpkt__info_a5ee3c51f2ca5768b94819182641ef168}{next} +\item +UT\_\-hash\_\-handle \hyperlink{structpkt__info_a264e90d4b5d490de040f38c1072e142f}{hh} +\end{DoxyCompactItemize} + + +\subsection{Field Documentation} +\hypertarget{structpkt__info_a264e90d4b5d490de040f38c1072e142f}{ +\index{pkt\_\-info@{pkt\_\-info}!hh@{hh}} +\index{hh@{hh}!pkt_info@{pkt\_\-info}} +\subsubsection[{hh}]{\setlength{\rightskip}{0pt plus 5cm}UT\_\-hash\_\-handle {\bf pkt\_\-info::hh}}} +\label{structpkt__info_a264e90d4b5d490de040f38c1072e142f} +\hypertarget{structpkt__info_a231d4734d3c62292b06eb9ea4b49c339}{ +\index{pkt\_\-info@{pkt\_\-info}!key@{key}} +\index{key@{key}!pkt_info@{pkt\_\-info}} +\subsubsection[{key}]{\setlength{\rightskip}{0pt plus 5cm}struct {\bf pkt\_\-key} {\bf pkt\_\-info::key}}} +\label{structpkt__info_a231d4734d3c62292b06eb9ea4b49c339} +\hypertarget{structpkt__info_a5ee3c51f2ca5768b94819182641ef168}{ +\index{pkt\_\-info@{pkt\_\-info}!next@{next}} +\index{next@{next}!pkt_info@{pkt\_\-info}} +\subsubsection[{next}]{\setlength{\rightskip}{0pt plus 5cm}struct {\bf pkt\_\-info}$\ast$ {\bf pkt\_\-info::next}}} +\label{structpkt__info_a5ee3c51f2ca5768b94819182641ef168} +\hypertarget{structpkt__info_a8d5ebd04a32067b05387e5c5056fe168}{ +\index{pkt\_\-info@{pkt\_\-info}!pkt@{pkt}} +\index{pkt@{pkt}!pkt_info@{pkt\_\-info}} +\subsubsection[{pkt}]{\setlength{\rightskip}{0pt plus 5cm}SFSnortPacket$\ast$ {\bf pkt\_\-info::pkt}}} +\label{structpkt__info_a8d5ebd04a32067b05387e5c5056fe168} +\hypertarget{structpkt__info_a7f5090443f21e6290f0439f1bb872e92}{ +\index{pkt\_\-info@{pkt\_\-info}!timestamp@{timestamp}} +\index{timestamp@{timestamp}!pkt_info@{pkt\_\-info}} +\subsubsection[{timestamp}]{\setlength{\rightskip}{0pt plus 5cm}time\_\-t {\bf pkt\_\-info::timestamp}}} +\label{structpkt__info_a7f5090443f21e6290f0439f1bb872e92} + + +The documentation for this struct was generated from the following file:\begin{DoxyCompactItemize} +\item +\hyperlink{stream_8c}{stream.c}\end{DoxyCompactItemize} diff --git a/doc/latex/structpkt__key.tex b/doc/latex/structpkt__key.tex new file mode 100644 index 0000000..c0db4da --- /dev/null +++ b/doc/latex/structpkt__key.tex @@ -0,0 +1,29 @@ +\hypertarget{structpkt__key}{ +\section{pkt\_\-key Struct Reference} +\label{structpkt__key}\index{pkt\_\-key@{pkt\_\-key}} +} +\subsection*{Data Fields} +\begin{DoxyCompactItemize} +\item +\hyperlink{spp__ai_8h_a435d1572bf3f880d55459d9805097f62}{uint32\_\-t} \hyperlink{structpkt__key_a3a091c20dafb8b3f689db00c5b2f8ddb}{src\_\-ip} +\item +\hyperlink{spp__ai_8h_a273cf69d639a59973b6019625df33e30}{uint16\_\-t} \hyperlink{structpkt__key_af77f5eb1f4cd88b43fe99fd73553351d}{dst\_\-port} +\end{DoxyCompactItemize} + + +\subsection{Field Documentation} 
+\hypertarget{structpkt__key_af77f5eb1f4cd88b43fe99fd73553351d}{ +\index{pkt\_\-key@{pkt\_\-key}!dst\_\-port@{dst\_\-port}} +\index{dst\_\-port@{dst\_\-port}!pkt_key@{pkt\_\-key}} +\subsubsection[{dst\_\-port}]{\setlength{\rightskip}{0pt plus 5cm}{\bf uint16\_\-t} {\bf pkt\_\-key::dst\_\-port}}} +\label{structpkt__key_af77f5eb1f4cd88b43fe99fd73553351d} +\hypertarget{structpkt__key_a3a091c20dafb8b3f689db00c5b2f8ddb}{ +\index{pkt\_\-key@{pkt\_\-key}!src\_\-ip@{src\_\-ip}} +\index{src\_\-ip@{src\_\-ip}!pkt_key@{pkt\_\-key}} +\subsubsection[{src\_\-ip}]{\setlength{\rightskip}{0pt plus 5cm}{\bf uint32\_\-t} {\bf pkt\_\-key::src\_\-ip}}} +\label{structpkt__key_a3a091c20dafb8b3f689db00c5b2f8ddb} + + +The documentation for this struct was generated from the following file:\begin{DoxyCompactItemize} +\item +\hyperlink{stream_8c}{stream.c}\end{DoxyCompactItemize} diff --git a/libtool b/libtool new file mode 100755 index 0000000..d4e7b7b --- /dev/null +++ b/libtool @@ -0,0 +1,8893 @@ +#! /bin/sh + +# libtool - Provide generalized library-building support services. +# Generated automatically by config.status (snort) 2.8.6.1 +# Libtool was configured on host wintermute: +# NOTE: Changes made to this file will be lost: look at ltmain.sh. +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +# 2006, 2007, 2008 Free Software Foundation, Inc. +# Written by Gordon Matzigkeit, 1996 +# +# This file is part of GNU Libtool. +# +# GNU Libtool is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Libtool; see the file COPYING. If not, a copy +# can be downloaded from http://www.gnu.org/licenses/gpl.html, or +# obtained by writing to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + + +# The names of the tagged configurations supported by this script. +available_tags="" + +# ### BEGIN LIBTOOL CONFIG + +# Which release of libtool.m4 was used? +macro_version=2.2.6b +macro_revision=1.3017 + +# Whether or not to build shared libraries. +build_libtool_libs=yes + +# Whether or not to build static libraries. +build_old_libs=yes + +# What type of objects to build. +pic_mode=default + +# Whether or not to optimize for fast installation. +fast_install=yes + +# The host system. +host_alias= +host=i686-pc-linux-gnu +host_os=linux-gnu + +# The build system. +build_alias= +build=i686-pc-linux-gnu +build_os=linux-gnu + +# A sed program that does not truncate output. +SED="/bin/sed" + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="$SED -e 1s/^X//" + +# A grep program that handles long lines. +GREP="/bin/grep" + +# An ERE matcher. +EGREP="/bin/grep -E" + +# A literal string matcher. +FGREP="/bin/grep -F" + +# A BSD- or MS-compatible name lister. 
+NM="/usr/bin/nm -B" + +# Whether we need soft or hard links. +LN_S="ln -s" + +# What is the maximum length of a command? +max_cmd_len=1572864 + +# Object file suffix (normally "o"). +objext=o + +# Executable file suffix (normally ""). +exeext= + +# whether the shell understands "unset". +lt_unset=unset + +# turn spaces into newlines. +SP2NL="tr \\040 \\012" + +# turn newlines into spaces. +NL2SP="tr \\015\\012 \\040\\040" + +# How to create reloadable object files. +reload_flag=" -r" +reload_cmds="\$LD\$reload_flag -o \$output\$reload_objs" + +# An object symbol dumper. +OBJDUMP="objdump" + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method="pass_all" + +# Command to use when deplibs_check_method == "file_magic". +file_magic_cmd="\$MAGIC_CMD" + +# The archiver. +AR="ar" +AR_FLAGS="cru" + +# A symbol stripping program. +STRIP="strip" + +# Commands used to install an old-style archive. +RANLIB="ranlib" +old_postinstall_cmds="chmod 644 \$oldlib~\$RANLIB \$oldlib" +old_postuninstall_cmds="" + +# A C compiler. +LTCC="gcc" + +# LTCC compiler flags. +LTCFLAGS="-g -O2 -fvisibility=hidden -fno-strict-aliasing -Wall" + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe="sed -n -e 's/^.*[ ]\\([ABCDGIRSTW][ABCDGIRSTW]*\\)[ ][ ]*\\([_A-Za-z][_A-Za-z0-9]*\\)\$/\\1 \\2 \\2/p'" + +# Transform the output of nm in a proper C declaration. +global_symbol_to_cdecl="sed -n -e 's/^T .* \\(.*\\)\$/extern int \\1();/p' -e 's/^[ABCDGIRSTW]* .* \\(.*\\)\$/extern char \\1;/p'" + +# Transform the output of nm in a C name address pair. +global_symbol_to_c_name_address="sed -n -e 's/^: \\([^ ]*\\) \$/ {\\\"\\1\\\", (void *) 0},/p' -e 's/^[ABCDGIRSTW]* \\([^ ]*\\) \\([^ ]*\\)\$/ {\"\\2\", (void *) \\&\\2},/p'" + +# Transform the output of nm in a C name address pair when lib prefix is needed. +global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \\([^ ]*\\) \$/ {\\\"\\1\\\", (void *) 0},/p' -e 's/^[ABCDGIRSTW]* \\([^ ]*\\) \\(lib[^ ]*\\)\$/ {\"\\2\", (void *) \\&\\2},/p' -e 's/^[ABCDGIRSTW]* \\([^ ]*\\) \\([^ ]*\\)\$/ {\"lib\\2\", (void *) \\&\\2},/p'" + +# The name of the directory that contains temporary libtool files. +objdir=.libs + +# Shell to use when invoking shell scripts. +SHELL="/bin/sh" + +# An echo program that does not interpret backslashes. +ECHO="echo" + +# Used to examine libraries when file_magic_cmd begins with "file". +MAGIC_CMD=file + +# Must we lock files when doing compilation? +need_locks="no" + +# Tool to manipulate archived DWARF debug symbol files on Mac OS X. +DSYMUTIL="" + +# Tool to change global to local symbols on Mac OS X. +NMEDIT="" + +# Tool to manipulate fat objects and archives on Mac OS X. +LIPO="" + +# ldd/readelf like tool for Mach-O binaries on Mac OS X. +OTOOL="" + +# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. +OTOOL64="" + +# Old archive suffix (normally "a"). +libext=a + +# Shared library suffix (normally ".so"). +shrext_cmds=".so" + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds="" + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at link time. +variables_saved_for_relink="PATH LD_LIBRARY_PATH LD_RUN_PATH GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" + +# Do we need the "lib" prefix for modules? +need_lib_prefix=no + +# Do we need a version for libraries? +need_version=no + +# Library versioning type. +version_type=linux + +# Shared library runtime path variable. 
+runpath_var=LD_RUN_PATH + +# Shared library path variable. +shlibpath_var=LD_LIBRARY_PATH + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=no + +# Format of library name prefix. +libname_spec="lib\$name" + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME +library_names_spec="\${libname}\${release}\${shared_ext}\$versuffix \${libname}\${release}\${shared_ext}\$major \$libname\${shared_ext}" + +# The coded name of the library, if different from the real name. +soname_spec="\${libname}\${release}\${shared_ext}\$major" + +# Command to use after installation of a shared archive. +postinstall_cmds="" + +# Command to use after uninstallation of a shared archive. +postuninstall_cmds="" + +# Commands used to finish a libtool library installation in a directory. +finish_cmds="PATH=\\\"\\\$PATH:/sbin\\\" ldconfig -n \$libdir" + +# As "finish_cmds", except a single script fragment to be evaled but +# not shown. +finish_eval="" + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=yes + +# Compile-time system search path for libraries. +sys_lib_search_path_spec="/usr/lib/gcc/i486-linux-gnu/4.4.4 /usr/lib /lib /usr/lib/i486-linux-gnu" + +# Run-time system search path for libraries. +sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib /usr/lib/atlas /lib/i486-linux-gnu /usr/lib/i486-linux-gnu /usr/local/lib " + +# Whether dlopen is supported. +dlopen_support=unknown + +# Whether dlopen of programs is supported. +dlopen_self=unknown + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=unknown + +# Commands to strip libraries. +old_striplib="strip --strip-debug" +striplib="strip --strip-unneeded" + + +# The linker used to build libraries. +LD="/usr/bin/ld" + +# Commands used to build an old-style archive. +old_archive_cmds="\$AR \$AR_FLAGS \$oldlib\$oldobjs~\$RANLIB \$oldlib" + +# A language specific compiler. +CC="gcc" + +# Is the compiler the GNU compiler? +with_gcc=yes + +# Compiler flag to turn off builtin functions. +no_builtin_flag=" -fno-builtin" + +# How to pass a linker flag through the compiler. +wl="-Wl," + +# Additional compiler flags for building library objects. +pic_flag=" -fPIC -DPIC" + +# Compiler flag to prevent dynamic linking. +link_static_flag="-static" + +# Does compiler simultaneously support -c and -o options? +compiler_c_o="yes" + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=no + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=no + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec="\${wl}--export-dynamic" + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec="\${wl}--whole-archive\$convenience \${wl}--no-whole-archive" + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object="no" + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds="" + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds="" + +# Commands used to build a shared archive. 
+archive_cmds="\$CC -shared \$libobjs \$deplibs \$compiler_flags \${wl}-soname \$wl\$soname -o \$lib" +archive_expsym_cmds="echo \\\"{ global:\\\" > \$output_objdir/\$libname.ver~ + cat \$export_symbols | sed -e \\\"s/\\\\(.*\\\\)/\\\\1;/\\\" >> \$output_objdir/\$libname.ver~ + echo \\\"local: *; };\\\" >> \$output_objdir/\$libname.ver~ + \$CC -shared \$libobjs \$deplibs \$compiler_flags \${wl}-soname \$wl\$soname \${wl}-version-script \${wl}\$output_objdir/\$libname.ver -o \$lib" + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds="" +module_expsym_cmds="" + +# Whether we are building with GNU ld or not. +with_gnu_ld="yes" + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag="" + +# Flag that enforces no undefined symbols. +no_undefined_flag="" + +# Flag to hardcode $libdir into a binary during linking. +# This must work even if $libdir does not exist +hardcode_libdir_flag_spec="\${wl}-rpath \${wl}\$libdir" + +# If ld is used when linking, flag to hardcode $libdir into a binary +# during linking. This must work even if $libdir does not exist. +hardcode_libdir_flag_spec_ld="" + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator="" + +# Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=no + +# Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting ${shlibpath_var} if the +# library is relocated. +hardcode_direct_absolute=no + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=no + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=unsupported + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=no + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=no + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=unknown + +# Fix the shell variable $srcfile for the compiler. +fix_srcfile_path="" + +# Set to "yes" if exported symbols are required. +always_export_symbols=no + +# The commands to list exported symbols. +export_symbols_cmds="\$NM \$libobjs \$convenience | \$global_symbol_pipe | \$SED 's/.* //' | sort | uniq > \$export_symbols" + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms="_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*" + +# Symbols that must always be exported. +include_expsyms="" + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds="" + +# Specify filename containing input files. +file_list_spec="" + +# How to hardcode a shared library path into an executable. +hardcode_action=immediate + +# ### END LIBTOOL CONFIG + +# Generated from ltmain.m4sh. + +# ltmain.sh (GNU libtool) 2.2.6b +# Written by Gordon Matzigkeit , 1996 + +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, 2007 2008 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Libtool; see the file COPYING. If not, a copy +# can be downloaded from http://www.gnu.org/licenses/gpl.html, +# or obtained by writing to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +# Usage: $progname [OPTION]... [MODE-ARG]... +# +# Provide generalized library-building support services. +# +# --config show all configuration variables +# --debug enable verbose shell tracing +# -n, --dry-run display commands without modifying any files +# --features display basic configuration information and exit +# --mode=MODE use operation mode MODE +# --preserve-dup-deps don't remove duplicate dependency libraries +# --quiet, --silent don't print informational messages +# --tag=TAG use configuration variables from tag TAG +# -v, --verbose print informational messages (default) +# --version print version information +# -h, --help print short or long help message +# +# MODE must be one of the following: +# +# clean remove files from the build directory +# compile compile a source file into a libtool object +# execute automatically set library path, then run a program +# finish complete the installation of libtool libraries +# install install libraries or executables +# link create a library or an executable +# uninstall remove libraries from an installed directory +# +# MODE-ARGS vary depending on the MODE. +# Try `$progname --help --mode=MODE' for a more detailed description of MODE. +# +# When reporting a bug, please describe a test case to reproduce it and +# include the following information: +# +# host-triplet: $host +# shell: $SHELL +# compiler: $LTCC +# compiler flags: $LTCFLAGS +# linker: $LD (gnu? $with_gnu_ld) +# $progname: (GNU libtool) 2.2.6b +# automake: $automake_version +# autoconf: $autoconf_version +# +# Report bugs to . + +PROGRAM=ltmain.sh +PACKAGE=libtool +VERSION=2.2.6b +TIMESTAMP="" +package_revision=1.3017 + +# Be Bourne compatible +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac +fi +BIN_SH=xpg4; export BIN_SH # for Tru64 +DUALCASE=1; export DUALCASE # for MKS sh + +# NLS nuisances: We save the old values to restore during execute mode. +# Only set LANG and LC_ALL to C if already set. +# These must not be set unconditionally because not all systems understand +# e.g. LANG=C (notably SCO). 
+lt_user_locale= +lt_safe_locale= +for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES +do + eval "if test \"\${$lt_var+set}\" = set; then + save_$lt_var=\$$lt_var + $lt_var=C + export $lt_var + lt_user_locale=\"$lt_var=\\\$save_\$lt_var; \$lt_user_locale\" + lt_safe_locale=\"$lt_var=C; \$lt_safe_locale\" + fi" +done + +$lt_unset CDPATH + + + + + +: ${CP="cp -f"} +: ${ECHO="echo"} +: ${EGREP="/bin/grep -E"} +: ${FGREP="/bin/grep -F"} +: ${GREP="/bin/grep"} +: ${LN_S="ln -s"} +: ${MAKE="make"} +: ${MKDIR="mkdir"} +: ${MV="mv -f"} +: ${RM="rm -f"} +: ${SED="/bin/sed"} +: ${SHELL="${CONFIG_SHELL-/bin/sh}"} +: ${Xsed="$SED -e 1s/^X//"} + +# Global variables: +EXIT_SUCCESS=0 +EXIT_FAILURE=1 +EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing. +EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake. + +exit_status=$EXIT_SUCCESS + +# Make sure IFS has a sensible default +lt_nl=' +' +IFS=" $lt_nl" + +dirname="s,/[^/]*$,," +basename="s,^.*/,," + +# func_dirname_and_basename file append nondir_replacement +# perform func_basename and func_dirname in a single function +# call: +# dirname: Compute the dirname of FILE. If nonempty, +# add APPEND to the result, otherwise set result +# to NONDIR_REPLACEMENT. +# value returned in "$func_dirname_result" +# basename: Compute filename of FILE. +# value retuned in "$func_basename_result" +# Implementation must be kept synchronized with func_dirname +# and func_basename. For efficiency, we do not delegate to +# those functions but instead duplicate the functionality here. +func_dirname_and_basename () +{ + # Extract subdirectory from the argument. + func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"` + if test "X$func_dirname_result" = "X${1}"; then + func_dirname_result="${3}" + else + func_dirname_result="$func_dirname_result${2}" + fi + func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"` +} + +# Generated shell functions inserted here. + +# func_dirname file append nondir_replacement +# Compute the dirname of FILE. If nonempty, add APPEND to the result, +# otherwise set result to NONDIR_REPLACEMENT. +func_dirname () +{ + case ${1} in + */*) func_dirname_result="${1%/*}${2}" ;; + * ) func_dirname_result="${3}" ;; + esac +} + +# func_basename file +func_basename () +{ + func_basename_result="${1##*/}" +} + +# func_dirname_and_basename file append nondir_replacement +# perform func_basename and func_dirname in a single function +# call: +# dirname: Compute the dirname of FILE. If nonempty, +# add APPEND to the result, otherwise set result +# to NONDIR_REPLACEMENT. +# value returned in "$func_dirname_result" +# basename: Compute filename of FILE. +# value retuned in "$func_basename_result" +# Implementation must be kept synchronized with func_dirname +# and func_basename. For efficiency, we do not delegate to +# those functions but instead duplicate the functionality here. +func_dirname_and_basename () +{ + case ${1} in + */*) func_dirname_result="${1%/*}${2}" ;; + * ) func_dirname_result="${3}" ;; + esac + func_basename_result="${1##*/}" +} + +# func_stripname prefix suffix name +# strip PREFIX and SUFFIX off of NAME. +# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +func_stripname () +{ + # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are + # positional parameters, so assign one to ordinary parameter first. 
+ func_stripname_result=${3} + func_stripname_result=${func_stripname_result#"${1}"} + func_stripname_result=${func_stripname_result%"${2}"} +} + +# func_opt_split +func_opt_split () +{ + func_opt_split_opt=${1%%=*} + func_opt_split_arg=${1#*=} +} + +# func_lo2o object +func_lo2o () +{ + case ${1} in + *.lo) func_lo2o_result=${1%.lo}.${objext} ;; + *) func_lo2o_result=${1} ;; + esac +} + +# func_xform libobj-or-source +func_xform () +{ + func_xform_result=${1%.*}.lo +} + +# func_arith arithmetic-term... +func_arith () +{ + func_arith_result=$(( $* )) +} + +# func_len string +# STRING may not start with a hyphen. +func_len () +{ + func_len_result=${#1} +} + + +# func_append var value +# Append VALUE to the end of shell variable VAR. +func_append () +{ + eval "$1+=\$2" +} +# Generated shell functions inserted here. + +# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh +# is ksh but when the shell is invoked as "sh" and the current value of +# the _XPG environment variable is not equal to 1 (one), the special +# positional parameter $0, within a function call, is the name of the +# function. +progpath="$0" + +# The name of this program: +# In the unlikely event $progname began with a '-', it would play havoc with +# func_echo (imagine progname=-n), so we prepend ./ in that case: +func_dirname_and_basename "$progpath" +progname=$func_basename_result +case $progname in + -*) progname=./$progname ;; +esac + +# Make sure we have an absolute path for reexecution: +case $progpath in + [\\/]*|[A-Za-z]:\\*) ;; + *[\\/]*) + progdir=$func_dirname_result + progdir=`cd "$progdir" && pwd` + progpath="$progdir/$progname" + ;; + *) + save_IFS="$IFS" + IFS=: + for progdir in $PATH; do + IFS="$save_IFS" + test -x "$progdir/$progname" && break + done + IFS="$save_IFS" + test -n "$progdir" || progdir=`pwd` + progpath="$progdir/$progname" + ;; +esac + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +Xsed="${SED}"' -e 1s/^X//' +sed_quote_subst='s/\([`"$\\]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Re-`\' parameter expansions in output of double_quote_subst that were +# `\'-ed in input to the same. If an odd number of `\' preceded a '$' +# in input to double_quote_subst, that '$' was protected from expansion. +# Since each input `\' is now two `\'s, look for any number of runs of +# four `\'s followed by two `\'s and then a '$'. `\' that '$'. +bs='\\' +bs2='\\\\' +bs4='\\\\\\\\' +dollar='\$' +sed_double_backslash="\ + s/$bs4/&\\ +/g + s/^$bs2$dollar/$bs&/ + s/\\([^$bs]\\)$bs2$dollar/\\1$bs2$bs$dollar/g + s/\n//g" + +# Standard options: +opt_dry_run=false +opt_help=false +opt_quiet=false +opt_verbose=false +opt_warning=: + +# func_echo arg... +# Echo program name prefixed message, along with the current mode +# name if it has been set yet. +func_echo () +{ + $ECHO "$progname${mode+: }$mode: $*" +} + +# func_verbose arg... +# Echo program name prefixed message in verbose mode only. +func_verbose () +{ + $opt_verbose && func_echo ${1+"$@"} + + # A bug in bash halts the script if the last line of a function + # fails when set -e is in force, so we need another command to + # work around that: + : +} + +# func_error arg... +# Echo program name prefixed message to standard error. +func_error () +{ + $ECHO "$progname${mode+: }$mode: "${1+"$@"} 1>&2 +} + +# func_warning arg... 
+# Echo program name prefixed warning message to standard error. +func_warning () +{ + $opt_warning && $ECHO "$progname${mode+: }$mode: warning: "${1+"$@"} 1>&2 + + # bash bug again: + : +} + +# func_fatal_error arg... +# Echo program name prefixed message to standard error, and exit. +func_fatal_error () +{ + func_error ${1+"$@"} + exit $EXIT_FAILURE +} + +# func_fatal_help arg... +# Echo program name prefixed message to standard error, followed by +# a help hint, and exit. +func_fatal_help () +{ + func_error ${1+"$@"} + func_fatal_error "$help" +} +help="Try \`$progname --help' for more information." ## default + + +# func_grep expression filename +# Check whether EXPRESSION matches any line of FILENAME, without output. +func_grep () +{ + $GREP "$1" "$2" >/dev/null 2>&1 +} + + +# func_mkdir_p directory-path +# Make sure the entire path to DIRECTORY-PATH is available. +func_mkdir_p () +{ + my_directory_path="$1" + my_dir_list= + + if test -n "$my_directory_path" && test "$opt_dry_run" != ":"; then + + # Protect directory names starting with `-' + case $my_directory_path in + -*) my_directory_path="./$my_directory_path" ;; + esac + + # While some portion of DIR does not yet exist... + while test ! -d "$my_directory_path"; do + # ...make a list in topmost first order. Use a colon delimited + # list incase some portion of path contains whitespace. + my_dir_list="$my_directory_path:$my_dir_list" + + # If the last portion added has no slash in it, the list is done + case $my_directory_path in */*) ;; *) break ;; esac + + # ...otherwise throw away the child directory and loop + my_directory_path=`$ECHO "X$my_directory_path" | $Xsed -e "$dirname"` + done + my_dir_list=`$ECHO "X$my_dir_list" | $Xsed -e 's,:*$,,'` + + save_mkdir_p_IFS="$IFS"; IFS=':' + for my_dir in $my_dir_list; do + IFS="$save_mkdir_p_IFS" + # mkdir can fail with a `File exist' error if two processes + # try to create one of the directories concurrently. Don't + # stop in that case! + $MKDIR "$my_dir" 2>/dev/null || : + done + IFS="$save_mkdir_p_IFS" + + # Bail out if we (or some other process) failed to create a directory. + test -d "$my_directory_path" || \ + func_fatal_error "Failed to create \`$1'" + fi +} + + +# func_mktempdir [string] +# Make a temporary directory that won't clash with other running +# libtool processes, and avoids race conditions if possible. If +# given, STRING is the basename for that directory. +func_mktempdir () +{ + my_template="${TMPDIR-/tmp}/${1-$progname}" + + if test "$opt_dry_run" = ":"; then + # Return a directory name, but don't create it in dry-run mode + my_tmpdir="${my_template}-$$" + else + + # If mktemp works, use that first and foremost + my_tmpdir=`mktemp -d "${my_template}-XXXXXXXX" 2>/dev/null` + + if test ! -d "$my_tmpdir"; then + # Failing that, at least try and use $RANDOM to avoid a race + my_tmpdir="${my_template}-${RANDOM-0}$$" + + save_mktempdir_umask=`umask` + umask 0077 + $MKDIR "$my_tmpdir" + umask $save_mktempdir_umask + fi + + # If we're not in dry-run mode, bomb out on failure + test -d "$my_tmpdir" || \ + func_fatal_error "cannot create temporary directory \`$my_tmpdir'" + fi + + $ECHO "X$my_tmpdir" | $Xsed +} + + +# func_quote_for_eval arg +# Aesthetically quote ARG to be evaled later. +# This function returns two values: FUNC_QUOTE_FOR_EVAL_RESULT +# is double-quoted, suitable for a subsequent eval, whereas +# FUNC_QUOTE_FOR_EVAL_UNQUOTED_RESULT has merely all characters +# which are still active within double quotes backslashified. 
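+#
+# Illustrative example (hypothetical argument): for the single argument
+# `a b$c' this sets func_quote_for_eval_unquoted_result to `a b\$c' and
+# func_quote_for_eval_result to `"a b\$c"' (quoted because of the space).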
+func_quote_for_eval () +{ + case $1 in + *[\\\`\"\$]*) + func_quote_for_eval_unquoted_result=`$ECHO "X$1" | $Xsed -e "$sed_quote_subst"` ;; + *) + func_quote_for_eval_unquoted_result="$1" ;; + esac + + case $func_quote_for_eval_unquoted_result in + # Double-quote args containing shell metacharacters to delay + # word splitting, command substitution and and variable + # expansion for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + func_quote_for_eval_result="\"$func_quote_for_eval_unquoted_result\"" + ;; + *) + func_quote_for_eval_result="$func_quote_for_eval_unquoted_result" + esac +} + + +# func_quote_for_expand arg +# Aesthetically quote ARG to be evaled later; same as above, +# but do not quote variable references. +func_quote_for_expand () +{ + case $1 in + *[\\\`\"]*) + my_arg=`$ECHO "X$1" | $Xsed \ + -e "$double_quote_subst" -e "$sed_double_backslash"` ;; + *) + my_arg="$1" ;; + esac + + case $my_arg in + # Double-quote args containing shell metacharacters to delay + # word splitting and command substitution for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + my_arg="\"$my_arg\"" + ;; + esac + + func_quote_for_expand_result="$my_arg" +} + + +# func_show_eval cmd [fail_exp] +# Unless opt_silent is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. +func_show_eval () +{ + my_cmd="$1" + my_fail_exp="${2-:}" + + ${opt_silent-false} || { + func_quote_for_expand "$my_cmd" + eval "func_echo $func_quote_for_expand_result" + } + + if ${opt_dry_run-false}; then :; else + eval "$my_cmd" + my_status=$? + if test "$my_status" -eq 0; then :; else + eval "(exit $my_status); $my_fail_exp" + fi + fi +} + + +# func_show_eval_locale cmd [fail_exp] +# Unless opt_silent is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. Use the saved locale for evaluation. +func_show_eval_locale () +{ + my_cmd="$1" + my_fail_exp="${2-:}" + + ${opt_silent-false} || { + func_quote_for_expand "$my_cmd" + eval "func_echo $func_quote_for_expand_result" + } + + if ${opt_dry_run-false}; then :; else + eval "$lt_user_locale + $my_cmd" + my_status=$? + eval "$lt_safe_locale" + if test "$my_status" -eq 0; then :; else + eval "(exit $my_status); $my_fail_exp" + fi + fi +} + + + + + +# func_version +# Echo version message to standard output and exit. +func_version () +{ + $SED -n '/^# '$PROGRAM' (GNU /,/# warranty; / { + s/^# // + s/^# *$// + s/\((C)\)[ 0-9,-]*\( [1-9][0-9]*\)/\1\2/ + p + }' < "$progpath" + exit $? +} + +# func_usage +# Echo short help message to standard output and exit. +func_usage () +{ + $SED -n '/^# Usage:/,/# -h/ { + s/^# // + s/^# *$// + s/\$progname/'$progname'/ + p + }' < "$progpath" + $ECHO + $ECHO "run \`$progname --help | more' for full usage" + exit $? +} + +# func_help +# Echo long help message to standard output and exit. 
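+# The text is extracted with sed from the comment block at the top of this
+# file, substituting placeholders such as $progname, $host, $LD and the
+# automake/autoconf versions with their current values.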
+func_help () +{ + $SED -n '/^# Usage:/,/# Report bugs to/ { + s/^# // + s/^# *$// + s*\$progname*'$progname'* + s*\$host*'"$host"'* + s*\$SHELL*'"$SHELL"'* + s*\$LTCC*'"$LTCC"'* + s*\$LTCFLAGS*'"$LTCFLAGS"'* + s*\$LD*'"$LD"'* + s/\$with_gnu_ld/'"$with_gnu_ld"'/ + s/\$automake_version/'"`(automake --version) 2>/dev/null |$SED 1q`"'/ + s/\$autoconf_version/'"`(autoconf --version) 2>/dev/null |$SED 1q`"'/ + p + }' < "$progpath" + exit $? +} + +# func_missing_arg argname +# Echo program name prefixed message to standard error and set global +# exit_cmd. +func_missing_arg () +{ + func_error "missing argument for $1" + exit_cmd=exit +} + +exit_cmd=: + + + + + +# Check that we have a working $ECHO. +if test "X$1" = X--no-reexec; then + # Discard the --no-reexec flag, and continue. + shift +elif test "X$1" = X--fallback-echo; then + # Avoid inline document here, it may be left over + : +elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t'; then + # Yippee, $ECHO works! + : +else + # Restart under the correct shell, and then maybe $ECHO will work. + exec $SHELL "$progpath" --no-reexec ${1+"$@"} +fi + +if test "X$1" = X--fallback-echo; then + # used as fallback echo + shift + cat </dev/null 2>&1; then + taglist="$taglist $tagname" + + # Evaluate the configuration. Be careful to quote the path + # and the sed script, to avoid splitting on whitespace, but + # also don't use non-portable quotes within backquotes within + # quotes we have to do it in 2 steps: + extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"` + eval "$extractedcf" + else + func_error "ignoring unknown tag $tagname" + fi + ;; + esac +} + +# Parse options once, thoroughly. This comes as soon as possible in +# the script to make things like `libtool --version' happen quickly. +{ + + # Shorthand for --mode=foo, only valid as the first argument + case $1 in + clean|clea|cle|cl) + shift; set dummy --mode clean ${1+"$@"}; shift + ;; + compile|compil|compi|comp|com|co|c) + shift; set dummy --mode compile ${1+"$@"}; shift + ;; + execute|execut|execu|exec|exe|ex|e) + shift; set dummy --mode execute ${1+"$@"}; shift + ;; + finish|finis|fini|fin|fi|f) + shift; set dummy --mode finish ${1+"$@"}; shift + ;; + install|instal|insta|inst|ins|in|i) + shift; set dummy --mode install ${1+"$@"}; shift + ;; + link|lin|li|l) + shift; set dummy --mode link ${1+"$@"}; shift + ;; + uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) + shift; set dummy --mode uninstall ${1+"$@"}; shift + ;; + esac + + # Parse non-mode specific arguments: + while test "$#" -gt 0; do + opt="$1" + shift + + case $opt in + --config) func_config ;; + + --debug) preserve_args="$preserve_args $opt" + func_echo "enabling shell trace mode" + opt_debug='set -x' + $opt_debug + ;; + + -dlopen) test "$#" -eq 0 && func_missing_arg "$opt" && break + execute_dlfiles="$execute_dlfiles $1" + shift + ;; + + --dry-run | -n) opt_dry_run=: ;; + --features) func_features ;; + --finish) mode="finish" ;; + + --mode) test "$#" -eq 0 && func_missing_arg "$opt" && break + case $1 in + # Valid mode arguments: + clean) ;; + compile) ;; + execute) ;; + finish) ;; + install) ;; + link) ;; + relink) ;; + uninstall) ;; + + # Catch anything else as an error + *) func_error "invalid argument for $opt" + exit_cmd=exit + break + ;; + esac + + mode="$1" + shift + ;; + + --preserve-dup-deps) + opt_duplicate_deps=: ;; + + --quiet|--silent) preserve_args="$preserve_args $opt" + opt_silent=: + ;; + + --verbose| -v) preserve_args="$preserve_args $opt" + opt_silent=false + ;; + + --tag) test "$#" -eq 0 && 
func_missing_arg "$opt" && break + preserve_args="$preserve_args $opt $1" + func_enable_tag "$1" # tagname is set here + shift + ;; + + # Separate optargs to long options: + -dlopen=*|--mode=*|--tag=*) + func_opt_split "$opt" + set dummy "$func_opt_split_opt" "$func_opt_split_arg" ${1+"$@"} + shift + ;; + + -\?|-h) func_usage ;; + --help) opt_help=: ;; + --version) func_version ;; + + -*) func_fatal_help "unrecognized option \`$opt'" ;; + + *) nonopt="$opt" + break + ;; + esac + done + + + case $host in + *cygwin* | *mingw* | *pw32* | *cegcc*) + # don't eliminate duplications in $postdeps and $predeps + opt_duplicate_compiler_generated_deps=: + ;; + *) + opt_duplicate_compiler_generated_deps=$opt_duplicate_deps + ;; + esac + + # Having warned about all mis-specified options, bail out if + # anything was wrong. + $exit_cmd $EXIT_FAILURE +} + +# func_check_version_match +# Ensure that we are using m4 macros, and libtool script from the same +# release of libtool. +func_check_version_match () +{ + if test "$package_revision" != "$macro_revision"; then + if test "$VERSION" != "$macro_version"; then + if test -z "$macro_version"; then + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from an older release. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from $PACKAGE $macro_version. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + fi + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision, +$progname: but the definition of this LT_INIT comes from revision $macro_revision. +$progname: You should recreate aclocal.m4 with macros from revision $package_revision +$progname: of $PACKAGE $VERSION and run autoconf again. +_LT_EOF + fi + + exit $EXIT_MISMATCH + fi +} + + +## ----------- ## +## Main. ## +## ----------- ## + +$opt_help || { + # Sanity checks first: + func_check_version_match + + if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then + func_fatal_configuration "not configured to build any kind of library" + fi + + test -z "$mode" && func_fatal_error "error: you must specify a MODE." + + + # Darwin sucks + eval std_shrext=\"$shrext_cmds\" + + + # Only execute mode is allowed to have -dlopen flags. + if test -n "$execute_dlfiles" && test "$mode" != execute; then + func_error "unrecognized option \`-dlopen'" + $ECHO "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Change the help message to a mode-specific one. + generic_help="$help" + help="Try \`$progname --help --mode=$mode' for more information." +} + + +# func_lalib_p file +# True iff FILE is a libtool `.la' library or `.lo' object file. +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_lalib_p () +{ + test -f "$1" && + $SED -e 4q "$1" 2>/dev/null \ + | $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 +} + +# func_lalib_unsafe_p file +# True iff FILE is a libtool `.la' library or `.lo' object file. +# This function implements the same check as func_lalib_p without +# resorting to external programs. To this end, it redirects stdin and +# closes it afterwards, without saving the original file descriptor. 
+# As a safety measure, use it only where a negative result would be +# fatal anyway. Works if `file' does not exist. +func_lalib_unsafe_p () +{ + lalib_p=no + if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then + for lalib_p_l in 1 2 3 4 + do + read lalib_p_line + case "$lalib_p_line" in + \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; + esac + done + exec 0<&5 5<&- + fi + test "$lalib_p" = yes +} + +# func_ltwrapper_script_p file +# True iff FILE is a libtool wrapper script +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_script_p () +{ + func_lalib_p "$1" +} + +# func_ltwrapper_executable_p file +# True iff FILE is a libtool wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_executable_p () +{ + func_ltwrapper_exec_suffix= + case $1 in + *.exe) ;; + *) func_ltwrapper_exec_suffix=.exe ;; + esac + $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 +} + +# func_ltwrapper_scriptname file +# Assumes file is an ltwrapper_executable +# uses $file to determine the appropriate filename for a +# temporary ltwrapper_script. +func_ltwrapper_scriptname () +{ + func_ltwrapper_scriptname_result="" + if func_ltwrapper_executable_p "$1"; then + func_dirname_and_basename "$1" "" "." + func_stripname '' '.exe' "$func_basename_result" + func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper" + fi +} + +# func_ltwrapper_p file +# True iff FILE is a libtool wrapper script or wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_p () +{ + func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" +} + + +# func_execute_cmds commands fail_cmd +# Execute tilde-delimited COMMANDS. +# If FAIL_CMD is given, eval that upon failure. +# FAIL_CMD may read-access the current command in variable CMD! +func_execute_cmds () +{ + $opt_debug + save_ifs=$IFS; IFS='~' + for cmd in $1; do + IFS=$save_ifs + eval cmd=\"$cmd\" + func_show_eval "$cmd" "${2-:}" + done + IFS=$save_ifs +} + + +# func_source file +# Source FILE, adding directory component if necessary. +# Note that it is not necessary on cygwin/mingw to append a dot to +# FILE even if both FILE and FILE.exe exist: automatic-append-.exe +# behavior happens only for exec(3), not for open(2)! Also, sourcing +# `FILE.' does not work on cygwin managed mounts. +func_source () +{ + $opt_debug + case $1 in + */* | *\\*) . "$1" ;; + *) . "./$1" ;; + esac +} + + +# func_infer_tag arg +# Infer tagged configuration to use if any are available and +# if one wasn't chosen via the "--tag" command line option. +# Only attempt this if the compiler in the base compile +# command doesn't match the default compiler. +# arg is usually of the form 'gcc ...' +func_infer_tag () +{ + $opt_debug + if test -n "$available_tags" && test -z "$tagname"; then + CC_quoted= + for arg in $CC; do + func_quote_for_eval "$arg" + CC_quoted="$CC_quoted $func_quote_for_eval_result" + done + case $@ in + # Blanks in the command may have been stripped by the calling shell, + # but not from the CC environment variable when configure was run. + " $CC "* | "$CC "* | " `$ECHO $CC` "* | "`$ECHO $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$ECHO $CC_quoted` "* | "`$ECHO $CC_quoted` "*) ;; + # Blanks at the start of $base_compile will cause this to fail + # if we don't check for them as well. 
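+      #
+      # Illustrative example: if the wrapped compile command starts with
+      # `g++' while $CC is `gcc', the branch below scans the tagged
+      # configurations embedded in this script and, when a tag (typically
+      # CXX) configures `g++' as its compiler, sets tagname to that tag.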
+ *) + for z in $available_tags; do + if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then + # Evaluate the configuration. + eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" + CC_quoted= + for arg in $CC; do + # Double-quote args containing other shell metacharacters. + func_quote_for_eval "$arg" + CC_quoted="$CC_quoted $func_quote_for_eval_result" + done + case "$@ " in + " $CC "* | "$CC "* | " `$ECHO $CC` "* | "`$ECHO $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$ECHO $CC_quoted` "* | "`$ECHO $CC_quoted` "*) + # The compiler in the base compile command matches + # the one in the tagged configuration. + # Assume this is the tagged configuration we want. + tagname=$z + break + ;; + esac + fi + done + # If $tagname still isn't set, then no tagged configuration + # was found and let the user know that the "--tag" command + # line option must be used. + if test -z "$tagname"; then + func_echo "unable to infer tagged configuration" + func_fatal_error "specify a tag with \`--tag'" +# else +# func_verbose "using $tagname tagged configuration" + fi + ;; + esac + fi +} + + + +# func_write_libtool_object output_name pic_name nonpic_name +# Create a libtool object file (analogous to a ".la" file), +# but don't create it if we're doing a dry run. +func_write_libtool_object () +{ + write_libobj=${1} + if test "$build_libtool_libs" = yes; then + write_lobj=\'${2}\' + else + write_lobj=none + fi + + if test "$build_old_libs" = yes; then + write_oldobj=\'${3}\' + else + write_oldobj=none + fi + + $opt_dry_run || { + cat >${write_libobj}T <?"'"'"' &()|`$[]' \ + && func_warning "libobj name \`$libobj' may not contain shell special characters." + func_dirname_and_basename "$obj" "/" "" + objname="$func_basename_result" + xdir="$func_dirname_result" + lobj=${xdir}$objdir/$objname + + test -z "$base_compile" && \ + func_fatal_help "you must specify a compilation command" + + # Delete any leftover library objects. + if test "$build_old_libs" = yes; then + removelist="$obj $lobj $libobj ${libobj}T" + else + removelist="$lobj $libobj ${libobj}T" + fi + + # On Cygwin there's no "real" PIC flag so we must build both object types + case $host_os in + cygwin* | mingw* | pw32* | os2* | cegcc*) + pic_mode=default + ;; + esac + if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then + # non-PIC code in shared libraries is not supported + pic_mode=default + fi + + # Calculate the filename of the output object if compiler does + # not support -o with -c + if test "$compiler_c_o" = no; then + output_obj=`$ECHO "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext} + lockfile="$output_obj.lock" + else + output_obj= + need_locks=no + lockfile= + fi + + # Lock this critical section if it is needed + # We use this script file to make the link, it avoids creating a new file + if test "$need_locks" = yes; then + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + elif test "$need_locks" = warn; then + if test -f "$lockfile"; then + $ECHO "\ +*** ERROR, $lockfile exists and contains: +`cat $lockfile 2>/dev/null` + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. 
If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + removelist="$removelist $output_obj" + $ECHO "$srcfile" > "$lockfile" + fi + + $opt_dry_run || $RM $removelist + removelist="$removelist $lockfile" + trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 + + if test -n "$fix_srcfile_path"; then + eval srcfile=\"$fix_srcfile_path\" + fi + func_quote_for_eval "$srcfile" + qsrcfile=$func_quote_for_eval_result + + # Only build a PIC object if we are building libtool libraries. + if test "$build_libtool_libs" = yes; then + # Without this assignment, base_compile gets emptied. + fbsd_hideous_sh_bug=$base_compile + + if test "$pic_mode" != no; then + command="$base_compile $qsrcfile $pic_flag" + else + # Don't build PIC code + command="$base_compile $qsrcfile" + fi + + func_mkdir_p "$xdir$objdir" + + if test -z "$output_obj"; then + # Place PIC objects in $objdir + command="$command -o $lobj" + fi + + func_show_eval_locale "$command" \ + 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE' + + if test "$need_locks" = warn && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed, then go on to compile the next one + if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then + func_show_eval '$MV "$output_obj" "$lobj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + + # Allow error messages only from the first compilation. + if test "$suppress_opt" = yes; then + suppress_output=' >/dev/null 2>&1' + fi + fi + + # Only build a position-dependent object if we build old libraries. + if test "$build_old_libs" = yes; then + if test "$pic_mode" != yes; then + # Don't build PIC code + command="$base_compile $qsrcfile$pie_flag" + else + command="$base_compile $qsrcfile $pic_flag" + fi + if test "$compiler_c_o" = yes; then + command="$command -o $obj" + fi + + # Suppress compiler output if we already did a PIC compilation. + command="$command$suppress_output" + func_show_eval_locale "$command" \ + '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' + + if test "$need_locks" = warn && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." 
+ + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed + if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then + func_show_eval '$MV "$output_obj" "$obj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + fi + + $opt_dry_run || { + func_write_libtool_object "$libobj" "$objdir/$objname" "$objname" + + # Unlock the critical section if it was locked + if test "$need_locks" != no; then + removelist=$lockfile + $RM "$lockfile" + fi + } + + exit $EXIT_SUCCESS +} + +$opt_help || { +test "$mode" = compile && func_mode_compile ${1+"$@"} +} + +func_mode_help () +{ + # We need to display help for each of the modes. + case $mode in + "") + # Generic help is extracted from the usage comments + # at the start of this file. + func_help + ;; + + clean) + $ECHO \ +"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE... + +Remove files from the build directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed +to RM. + +If FILE is a libtool library, object or program, all the files associated +with it are deleted. Otherwise, only FILE itself is deleted using RM." + ;; + + compile) + $ECHO \ +"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE + +Compile a source file into a libtool library object. + +This mode accepts the following additional options: + + -o OUTPUT-FILE set the output file name to OUTPUT-FILE + -no-suppress do not suppress compiler output for multiple passes + -prefer-pic try to building PIC objects only + -prefer-non-pic try to building non-PIC objects only + -shared do not build a \`.o' file suitable for static linking + -static only build a \`.o' file suitable for static linking + +COMPILE-COMMAND is a command to be used in creating a \`standard' object file +from the given SOURCEFILE. + +The output file name is determined by removing the directory component from +SOURCEFILE, then substituting the C source code suffix \`.c' with the +library object suffix, \`.lo'." + ;; + + execute) + $ECHO \ +"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]... + +Automatically set library path, then run a program. + +This mode accepts the following additional options: + + -dlopen FILE add the directory containing FILE to the library path + +This mode sets the library path environment variable according to \`-dlopen' +flags. + +If any of the ARGS are libtool executable wrappers, then they are translated +into their corresponding uninstalled binary, and any of their required library +directories are added to the library path. + +Then, COMMAND is executed, with ARGS as arguments." + ;; + + finish) + $ECHO \ +"Usage: $progname [OPTION]... --mode=finish [LIBDIR]... + +Complete the installation of libtool libraries. + +Each LIBDIR is a directory that contains libtool libraries. + +The commands that this mode executes may require superuser privileges. Use +the \`--dry-run' option if you just want to see what would be executed." + ;; + + install) + $ECHO \ +"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND... + +Install executables or libraries. + +INSTALL-COMMAND is the installation command. The first component should be +either the \`install' or \`cp' program. 
+ +The following components of INSTALL-COMMAND are treated specially: + + -inst-prefix PREFIX-DIR Use PREFIX-DIR as a staging area for installation + +The rest of the components are interpreted as arguments to that command (only +BSD-compatible install options are recognized)." + ;; + + link) + $ECHO \ +"Usage: $progname [OPTION]... --mode=link LINK-COMMAND... + +Link object files or libraries together to form another library, or to +create an executable program. + +LINK-COMMAND is a command using the C compiler that you would use to create +a program from several object files. + +The following components of LINK-COMMAND are treated specially: + + -all-static do not do any dynamic linking at all + -avoid-version do not add a version suffix if possible + -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime + -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols + -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) + -export-symbols SYMFILE + try to export only the symbols listed in SYMFILE + -export-symbols-regex REGEX + try to export only the symbols matching REGEX + -LLIBDIR search LIBDIR for required installed libraries + -lNAME OUTPUT-FILE requires the installed library libNAME + -module build a library that can dlopened + -no-fast-install disable the fast-install mode + -no-install link a not-installable executable + -no-undefined declare that a library does not refer to external symbols + -o OUTPUT-FILE create OUTPUT-FILE from the specified objects + -objectlist FILE Use a list of object files found in FILE to specify objects + -precious-files-regex REGEX + don't remove output files matching REGEX + -release RELEASE specify package release information + -rpath LIBDIR the created library will eventually be installed in LIBDIR + -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries + -shared only do dynamic linking of libtool libraries + -shrext SUFFIX override the standard shared library file extension + -static do not do any dynamic linking of uninstalled libtool libraries + -static-libtool-libs + do not do any dynamic linking of libtool libraries + -version-info CURRENT[:REVISION[:AGE]] + specify library version info [each variable defaults to 0] + -weak LIBNAME declare that the target provides the LIBNAME interface + +All other options (arguments beginning with \`-') are ignored. + +Every other argument is treated as a filename. Files ending in \`.la' are +treated as uninstalled libtool libraries, other files are standard or library +object files. + +If the OUTPUT-FILE ends in \`.la', then a libtool library is created, +only library objects (\`.lo' files) may be specified, and \`-rpath' is +required, except when creating a convenience library. + +If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created +using \`ar' and \`ranlib', or on Windows using \`lib'. + +If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file +is created, otherwise an executable program is created." + ;; + + uninstall) + $ECHO \ +"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... + +Remove libraries from an installation directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed +to RM. + +If FILE is a libtool library, all the files associated with it are deleted. +Otherwise, only FILE itself is deleted using RM." 
+ ;; + + *) + func_fatal_help "invalid operation mode \`$mode'" + ;; + esac + + $ECHO + $ECHO "Try \`$progname --help' for more information about other modes." + + exit $? +} + + # Now that we've collected a possible --mode arg, show help if necessary + $opt_help && func_mode_help + + +# func_mode_execute arg... +func_mode_execute () +{ + $opt_debug + # The first argument is the command name. + cmd="$nonopt" + test -z "$cmd" && \ + func_fatal_help "you must specify a COMMAND" + + # Handle -dlopen flags immediately. + for file in $execute_dlfiles; do + test -f "$file" \ + || func_fatal_help "\`$file' is not a file" + + dir= + case $file in + *.la) + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "\`$lib' is not a valid libtool archive" + + # Read the libtool library. + dlname= + library_names= + func_source "$file" + + # Skip this library if it cannot be dlopened. + if test -z "$dlname"; then + # Warn if it was a shared library. + test -n "$library_names" && \ + func_warning "\`$file' was not linked with \`-export-dynamic'" + continue + fi + + func_dirname "$file" "" "." + dir="$func_dirname_result" + + if test -f "$dir/$objdir/$dlname"; then + dir="$dir/$objdir" + else + if test ! -f "$dir/$dlname"; then + func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" + fi + fi + ;; + + *.lo) + # Just add the directory containing the .lo file. + func_dirname "$file" "" "." + dir="$func_dirname_result" + ;; + + *) + func_warning "\`-dlopen' is ignored for non-libtool libraries and objects" + continue + ;; + esac + + # Get the absolute pathname. + absdir=`cd "$dir" && pwd` + test -n "$absdir" && dir="$absdir" + + # Now add the directory to shlibpath_var. + if eval "test -z \"\$$shlibpath_var\""; then + eval "$shlibpath_var=\"\$dir\"" + else + eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" + fi + done + + # This variable tells wrapper scripts just to set shlibpath_var + # rather than running their programs. + libtool_execute_magic="$magic" + + # Check if any of the arguments is a wrapper script. + args= + for file + do + case $file in + -*) ;; + *) + # Do a test to see if this is really a libtool program. + if func_ltwrapper_script_p "$file"; then + func_source "$file" + # Transform arg to wrapped name. + file="$progdir/$program" + elif func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + func_source "$func_ltwrapper_scriptname_result" + # Transform arg to wrapped name. + file="$progdir/$program" + fi + ;; + esac + # Quote arguments (to preserve shell metacharacters). + func_quote_for_eval "$file" + args="$args $func_quote_for_eval_result" + done + + if test "X$opt_dry_run" = Xfalse; then + if test -n "$shlibpath_var"; then + # Export the shlibpath_var. + eval "export $shlibpath_var" + fi + + # Restore saved environment variables + for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES + do + eval "if test \"\${save_$lt_var+set}\" = set; then + $lt_var=\$save_$lt_var; export $lt_var + else + $lt_unset $lt_var + fi" + done + + # Now prepare to actually exec the command. + exec_cmd="\$cmd$args" + else + # Display what would be done. + if test -n "$shlibpath_var"; then + eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\"" + $ECHO "export $shlibpath_var" + fi + $ECHO "$cmd$args" + exit $EXIT_SUCCESS + fi +} + +test "$mode" = execute && func_mode_execute ${1+"$@"} + + +# func_mode_finish arg... 
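+# Run the configured $finish_cmds and/or $finish_eval for every LIBDIR given
+# on the command line, then (unless --quiet) print a reminder about how to
+# link against and run programs using the newly installed libraries.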
+func_mode_finish () +{ + $opt_debug + libdirs="$nonopt" + admincmds= + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + for dir + do + libdirs="$libdirs $dir" + done + + for libdir in $libdirs; do + if test -n "$finish_cmds"; then + # Do each command in the finish commands. + func_execute_cmds "$finish_cmds" 'admincmds="$admincmds +'"$cmd"'"' + fi + if test -n "$finish_eval"; then + # Do the single finish_eval. + eval cmds=\"$finish_eval\" + $opt_dry_run || eval "$cmds" || admincmds="$admincmds + $cmds" + fi + done + fi + + # Exit here if they wanted silent mode. + $opt_silent && exit $EXIT_SUCCESS + + $ECHO "X----------------------------------------------------------------------" | $Xsed + $ECHO "Libraries have been installed in:" + for libdir in $libdirs; do + $ECHO " $libdir" + done + $ECHO + $ECHO "If you ever happen to want to link against installed libraries" + $ECHO "in a given directory, LIBDIR, you must either use libtool, and" + $ECHO "specify the full pathname of the library, or use the \`-LLIBDIR'" + $ECHO "flag during linking and do at least one of the following:" + if test -n "$shlibpath_var"; then + $ECHO " - add LIBDIR to the \`$shlibpath_var' environment variable" + $ECHO " during execution" + fi + if test -n "$runpath_var"; then + $ECHO " - add LIBDIR to the \`$runpath_var' environment variable" + $ECHO " during linking" + fi + if test -n "$hardcode_libdir_flag_spec"; then + libdir=LIBDIR + eval flag=\"$hardcode_libdir_flag_spec\" + + $ECHO " - use the \`$flag' linker flag" + fi + if test -n "$admincmds"; then + $ECHO " - have your system administrator run these commands:$admincmds" + fi + if test -f /etc/ld.so.conf; then + $ECHO " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" + fi + $ECHO + + $ECHO "See any operating system documentation about shared libraries for" + case $host in + solaris2.[6789]|solaris2.1[0-9]) + $ECHO "more information, such as the ld(1), crle(1) and ld.so(8) manual" + $ECHO "pages." + ;; + *) + $ECHO "more information, such as the ld(1) and ld.so(8) manual pages." + ;; + esac + $ECHO "X----------------------------------------------------------------------" | $Xsed + exit $EXIT_SUCCESS +} + +test "$mode" = finish && func_mode_finish ${1+"$@"} + + +# func_mode_install arg... +func_mode_install () +{ + $opt_debug + # There may be an optional sh(1) argument at the beginning of + # install_prog (especially on Windows NT). + if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh || + # Allow the use of GNU shtool's install command. + $ECHO "X$nonopt" | $GREP shtool >/dev/null; then + # Aesthetically quote it. + func_quote_for_eval "$nonopt" + install_prog="$func_quote_for_eval_result " + arg=$1 + shift + else + install_prog= + arg=$nonopt + fi + + # The real first argument should be the name of the installation program. + # Aesthetically quote it. + func_quote_for_eval "$arg" + install_prog="$install_prog$func_quote_for_eval_result" + + # We need to accept at least all the BSD install flags. + dest= + files= + opts= + prev= + install_type= + isdir=no + stripme= + for arg + do + if test -n "$dest"; then + files="$files $dest" + dest=$arg + continue + fi + + case $arg in + -d) isdir=yes ;; + -f) + case " $install_prog " in + *[\\\ /]cp\ *) ;; + *) prev=$arg ;; + esac + ;; + -g | -m | -o) + prev=$arg + ;; + -s) + stripme=" -s" + continue + ;; + -*) + ;; + *) + # If the previous option needed an argument, then skip it. 
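+        # Illustrative example (hypothetical command line): for
+        #   install -m 644 libfoo.la /usr/lib
+        # `-m' sets $prev above, so `644' is consumed here as its argument
+        # and appended to $install_prog rather than taken as a destination.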
+ if test -n "$prev"; then + prev= + else + dest=$arg + continue + fi + ;; + esac + + # Aesthetically quote the argument. + func_quote_for_eval "$arg" + install_prog="$install_prog $func_quote_for_eval_result" + done + + test -z "$install_prog" && \ + func_fatal_help "you must specify an install program" + + test -n "$prev" && \ + func_fatal_help "the \`$prev' option requires an argument" + + if test -z "$files"; then + if test -z "$dest"; then + func_fatal_help "no file or destination specified" + else + func_fatal_help "you must specify a destination" + fi + fi + + # Strip any trailing slash from the destination. + func_stripname '' '/' "$dest" + dest=$func_stripname_result + + # Check to see that the destination is a directory. + test -d "$dest" && isdir=yes + if test "$isdir" = yes; then + destdir="$dest" + destname= + else + func_dirname_and_basename "$dest" "" "." + destdir="$func_dirname_result" + destname="$func_basename_result" + + # Not a directory, so check to see that there is only one file specified. + set dummy $files; shift + test "$#" -gt 1 && \ + func_fatal_help "\`$dest' is not a directory" + fi + case $destdir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + for file in $files; do + case $file in + *.lo) ;; + *) + func_fatal_help "\`$destdir' must be an absolute directory name" + ;; + esac + done + ;; + esac + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic="$magic" + + staticlibs= + future_libdirs= + current_libdirs= + for file in $files; do + + # Do each installation. + case $file in + *.$libext) + # Do the static libraries later. + staticlibs="$staticlibs $file" + ;; + + *.la) + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "\`$file' is not a valid libtool archive" + + library_names= + old_library= + relink_command= + func_source "$file" + + # Add the libdir to current_libdirs if it is the destination. + if test "X$destdir" = "X$libdir"; then + case "$current_libdirs " in + *" $libdir "*) ;; + *) current_libdirs="$current_libdirs $libdir" ;; + esac + else + # Note the libdir as a future libdir. + case "$future_libdirs " in + *" $libdir "*) ;; + *) future_libdirs="$future_libdirs $libdir" ;; + esac + fi + + func_dirname "$file" "/" "" + dir="$func_dirname_result" + dir="$dir$objdir" + + if test -n "$relink_command"; then + # Determine the prefix the user has applied to our future dir. + inst_prefix_dir=`$ECHO "X$destdir" | $Xsed -e "s%$libdir\$%%"` + + # Don't allow the user to place us outside of our expected + # location b/c this prevents finding dependent libraries that + # are installed to the same prefix. + # At present, this check doesn't affect windows .dll's that + # are installed into $libdir/../bin (currently, that works fine) + # but it's something to keep an eye on. + test "$inst_prefix_dir" = "$destdir" && \ + func_fatal_error "error: cannot install \`$file' to a directory not ending in $libdir" + + if test -n "$inst_prefix_dir"; then + # Stick the inst_prefix_dir data into the link command. + relink_command=`$ECHO "X$relink_command" | $Xsed -e "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` + else + relink_command=`$ECHO "X$relink_command" | $Xsed -e "s%@inst_prefix_dir@%%"` + fi + + func_warning "relinking \`$file'" + func_show_eval "$relink_command" \ + 'func_fatal_error "error: relink \`$file'\'' with the above command before installing it"' + fi + + # See the names of the shared library. 
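+          # Illustrative values (a typical .la file): library_names may be
+          # `libfoo.so.1.2.3 libfoo.so.1 libfoo.so'; the first entry is
+          # installed below as the real file, the rest as symlinks to it.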
+ set dummy $library_names; shift + if test -n "$1"; then + realname="$1" + shift + + srcname="$realname" + test -n "$relink_command" && srcname="$realname"T + + # Install the shared library and build the symlinks. + func_show_eval "$install_prog $dir/$srcname $destdir/$realname" \ + 'exit $?' + tstripme="$stripme" + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + case $realname in + *.dll.a) + tstripme="" + ;; + esac + ;; + esac + if test -n "$tstripme" && test -n "$striplib"; then + func_show_eval "$striplib $destdir/$realname" 'exit $?' + fi + + if test "$#" -gt 0; then + # Delete the old symlinks, and create new ones. + # Try `ln -sf' first, because the `ln' binary might depend on + # the symlink we replace! Solaris /bin/ln does not understand -f, + # so we also need to try rm && ln -s. + for linkname + do + test "$linkname" != "$realname" \ + && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" + done + fi + + # Do each command in the postinstall commands. + lib="$destdir/$realname" + func_execute_cmds "$postinstall_cmds" 'exit $?' + fi + + # Install the pseudo-library for information purposes. + func_basename "$file" + name="$func_basename_result" + instname="$dir/$name"i + func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' + + # Maybe install the static library, too. + test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library" + ;; + + *.lo) + # Install (i.e. copy) a libtool object. + + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile="$destdir/$destname" + else + func_basename "$file" + destfile="$func_basename_result" + destfile="$destdir/$destfile" + fi + + # Deduce the name of the destination old-style object file. + case $destfile in + *.lo) + func_lo2o "$destfile" + staticdest=$func_lo2o_result + ;; + *.$objext) + staticdest="$destfile" + destfile= + ;; + *) + func_fatal_help "cannot copy a libtool object to \`$destfile'" + ;; + esac + + # Install the libtool object if requested. + test -n "$destfile" && \ + func_show_eval "$install_prog $file $destfile" 'exit $?' + + # Install the old object if enabled. + if test "$build_old_libs" = yes; then + # Deduce the name of the old-style object file. + func_lo2o "$file" + staticobj=$func_lo2o_result + func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' + fi + exit $EXIT_SUCCESS + ;; + + *) + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile="$destdir/$destname" + else + func_basename "$file" + destfile="$func_basename_result" + destfile="$destdir/$destfile" + fi + + # If the file is missing, and there is a .exe on the end, strip it + # because it is most likely a libtool script we actually want to + # install + stripped_ext="" + case $file in + *.exe) + if test ! -f "$file"; then + func_stripname '' '.exe' "$file" + file=$func_stripname_result + stripped_ext=".exe" + fi + ;; + esac + + # Do a test to see if this is really a libtool program. + case $host in + *cygwin* | *mingw*) + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + wrapper=$func_ltwrapper_scriptname_result + else + func_stripname '' '.exe' "$file" + wrapper=$func_stripname_result + fi + ;; + *) + wrapper=$file + ;; + esac + if func_ltwrapper_script_p "$wrapper"; then + notinst_deplibs= + relink_command= + + func_source "$wrapper" + + # Check the variables that should have been set. 
+ test -z "$generated_by_libtool_version" && \ + func_fatal_error "invalid libtool wrapper script \`$wrapper'" + + finalize=yes + for lib in $notinst_deplibs; do + # Check to see that each library is installed. + libdir= + if test -f "$lib"; then + func_source "$lib" + fi + libfile="$libdir/"`$ECHO "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test + if test -n "$libdir" && test ! -f "$libfile"; then + func_warning "\`$lib' has not been installed in \`$libdir'" + finalize=no + fi + done + + relink_command= + func_source "$wrapper" + + outputname= + if test "$fast_install" = no && test -n "$relink_command"; then + $opt_dry_run || { + if test "$finalize" = yes; then + tmpdir=`func_mktempdir` + func_basename "$file$stripped_ext" + file="$func_basename_result" + outputname="$tmpdir/$file" + # Replace the output file specification. + relink_command=`$ECHO "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'` + + $opt_silent || { + func_quote_for_expand "$relink_command" + eval "func_echo $func_quote_for_expand_result" + } + if eval "$relink_command"; then : + else + func_error "error: relink \`$file' with the above command before installing it" + $opt_dry_run || ${RM}r "$tmpdir" + continue + fi + file="$outputname" + else + func_warning "cannot relink \`$file'" + fi + } + else + # Install the binary that we compiled earlier. + file=`$ECHO "X$file$stripped_ext" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"` + fi + fi + + # remove .exe since cygwin /usr/bin/install will append another + # one anyway + case $install_prog,$host in + */usr/bin/install*,*cygwin*) + case $file:$destfile in + *.exe:*.exe) + # this is ok + ;; + *.exe:*) + destfile=$destfile.exe + ;; + *:*.exe) + func_stripname '' '.exe' "$destfile" + destfile=$func_stripname_result + ;; + esac + ;; + esac + func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' + $opt_dry_run || if test -n "$outputname"; then + ${RM}r "$tmpdir" + fi + ;; + esac + done + + for file in $staticlibs; do + func_basename "$file" + name="$func_basename_result" + + # Set up the ranlib parameters. + oldlib="$destdir/$name" + + func_show_eval "$install_prog \$file \$oldlib" 'exit $?' + + if test -n "$stripme" && test -n "$old_striplib"; then + func_show_eval "$old_striplib $oldlib" 'exit $?' + fi + + # Do each command in the postinstall commands. + func_execute_cmds "$old_postinstall_cmds" 'exit $?' + done + + test -n "$future_libdirs" && \ + func_warning "remember to run \`$progname --finish$future_libdirs'" + + if test -n "$current_libdirs"; then + # Maybe just do a dry run. + $opt_dry_run && current_libdirs=" -n$current_libdirs" + exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' + else + exit $EXIT_SUCCESS + fi +} + +test "$mode" = install && func_mode_install ${1+"$@"} + + +# func_generate_dlsyms outputname originator pic_p +# Extract symbols from dlprefiles and create ${outputname}S.o with +# a dlpreopen symbol table. +func_generate_dlsyms () +{ + $opt_debug + my_outputname="$1" + my_originator="$2" + my_pic_p="${3-no}" + my_prefix=`$ECHO "$my_originator" | sed 's%[^a-zA-Z0-9]%_%g'` + my_dlsyms= + + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + if test -n "$NM" && test -n "$global_symbol_pipe"; then + my_dlsyms="${my_outputname}S.c" + else + func_error "not configured to extract global symbols from dlpreopened files" + fi + fi + + if test -n "$my_dlsyms"; then + case $my_dlsyms in + "") ;; + *.c) + # Discover the nlist of each of the dlfiles. 
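+      # The ${my_outputname}S.c file generated below declares each preloaded
+      # symbol and defines the lt_${my_prefix}_LTX_preloaded_symbols[] table
+      # used for the dlsym emulation described in its header comment.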
+ nlist="$output_objdir/${my_outputname}.nm" + + func_show_eval "$RM $nlist ${nlist}S ${nlist}T" + + # Parse the name list into a source file. + func_verbose "creating $output_objdir/$my_dlsyms" + + $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\ +/* $my_dlsyms - symbol resolution table for \`$my_outputname' dlsym emulation. */ +/* Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION */ + +#ifdef __cplusplus +extern \"C\" { +#endif + +/* External symbol declarations for the compiler. */\ +" + + if test "$dlself" = yes; then + func_verbose "generating symbol list for \`$output'" + + $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist" + + # Add our own program objects to the symbol list. + progfiles=`$ECHO "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + for progfile in $progfiles; do + func_verbose "extracting global C symbols from \`$progfile'" + $opt_dry_run || eval "$NM $progfile | $global_symbol_pipe >> '$nlist'" + done + + if test -n "$exclude_expsyms"; then + $opt_dry_run || { + eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + if test -n "$export_symbols_regex"; then + $opt_dry_run || { + eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + export_symbols="$output_objdir/$outputname.exp" + $opt_dry_run || { + $RM $export_symbols + eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' + ;; + esac + } + else + $opt_dry_run || { + eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' + eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + case $host in + *cygwin | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' + ;; + esac + } + fi + fi + + for dlprefile in $dlprefiles; do + func_verbose "extracting global C symbols from \`$dlprefile'" + func_basename "$dlprefile" + name="$func_basename_result" + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + eval "$NM $dlprefile 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + done + + $opt_dry_run || { + # Make sure we have at least an empty file. + test -f "$nlist" || : > "$nlist" + + if test -n "$exclude_expsyms"; then + $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T + $MV "$nlist"T "$nlist" + fi + + # Try sorting and uniquifying the output. + if $GREP -v "^: " < "$nlist" | + if sort -k 3 /dev/null 2>&1; then + sort -k 3 + else + sort +2 + fi | + uniq > "$nlist"S; then + : + else + $GREP -v "^: " < "$nlist" > "$nlist"S + fi + + if test -f "$nlist"S; then + eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"' + else + $ECHO '/* NONE */' >> "$output_objdir/$my_dlsyms" + fi + + $ECHO >> "$output_objdir/$my_dlsyms" "\ + +/* The mapping between symbol names and symbols. */ +typedef struct { + const char *name; + void *address; +} lt_dlsymlist; +" + case $host in + *cygwin* | *mingw* | *cegcc* ) + $ECHO >> "$output_objdir/$my_dlsyms" "\ +/* DATA imports from DLLs on WIN32 con't be const, because + runtime relocations are performed -- see ld's documentation + on pseudo-relocs. 
*/" + lt_dlsym_const= ;; + *osf5*) + echo >> "$output_objdir/$my_dlsyms" "\ +/* This system does not cope well with relocations in const data */" + lt_dlsym_const= ;; + *) + lt_dlsym_const=const ;; + esac + + $ECHO >> "$output_objdir/$my_dlsyms" "\ +extern $lt_dlsym_const lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[]; +$lt_dlsym_const lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[] = +{\ + { \"$my_originator\", (void *) 0 }," + + case $need_lib_prefix in + no) + eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + *) + eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + esac + $ECHO >> "$output_objdir/$my_dlsyms" "\ + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt_${my_prefix}_LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif\ +" + } # !$opt_dry_run + + pic_flag_for_symtable= + case "$compile_command " in + *" -static "*) ;; + *) + case $host in + # compiling the symbol table file with pic_flag works around + # a FreeBSD bug that causes programs to crash when -lm is + # linked before any other PIC object. But we must not use + # pic_flag when linking with -static. The problem exists in + # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. + *-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) + pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;; + *-*-hpux*) + pic_flag_for_symtable=" $pic_flag" ;; + *) + if test "X$my_pic_p" != Xno; then + pic_flag_for_symtable=" $pic_flag" + fi + ;; + esac + ;; + esac + symtab_cflags= + for arg in $LTCFLAGS; do + case $arg in + -pie | -fpie | -fPIE) ;; + *) symtab_cflags="$symtab_cflags $arg" ;; + esac + done + + # Now compile the dynamic symbol file. + func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?' + + # Clean up the generated files. + func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T"' + + # Transform the symbol file into the correct name. + symfileobj="$output_objdir/${my_outputname}S.$objext" + case $host in + *cygwin* | *mingw* | *cegcc* ) + if test -f "$output_objdir/$my_outputname.def"; then + compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + else + compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"` + fi + ;; + *) + compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"` + ;; + esac + ;; + *) + func_fatal_error "unknown suffix for \`$my_dlsyms'" + ;; + esac + else + # We keep going just in case the user didn't refer to + # lt_preloaded_symbols. The linker will fail if global_symbol_pipe + # really was required. + + # Nullify the symbol file. 
+ compile_command=`$ECHO "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"` + finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"` + fi +} + +# func_win32_libid arg +# return the library type of file 'arg' +# +# Need a lot of goo to handle *both* DLLs and import libs +# Has to be a shell function in order to 'eat' the argument +# that is supplied when $file_magic_command is called. +func_win32_libid () +{ + $opt_debug + win32_libid_type="unknown" + win32_fileres=`file -L $1 2>/dev/null` + case $win32_fileres in + *ar\ archive\ import\ library*) # definitely import + win32_libid_type="x86 archive import" + ;; + *ar\ archive*) # could be an import, or static + if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | + $EGREP 'file format pe-i386(.*architecture: i386)?' >/dev/null ; then + win32_nmres=`eval $NM -f posix -A $1 | + $SED -n -e ' + 1,100{ + / I /{ + s,.*,import, + p + q + } + }'` + case $win32_nmres in + import*) win32_libid_type="x86 archive import";; + *) win32_libid_type="x86 archive static";; + esac + fi + ;; + *DLL*) + win32_libid_type="x86 DLL" + ;; + *executable*) # but shell scripts are "executable" too... + case $win32_fileres in + *MS\ Windows\ PE\ Intel*) + win32_libid_type="x86 DLL" + ;; + esac + ;; + esac + $ECHO "$win32_libid_type" +} + + + +# func_extract_an_archive dir oldlib +func_extract_an_archive () +{ + $opt_debug + f_ex_an_ar_dir="$1"; shift + f_ex_an_ar_oldlib="$1" + func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" 'exit $?' + if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then + : + else + func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" + fi +} + + +# func_extract_archives gentop oldlib ... +func_extract_archives () +{ + $opt_debug + my_gentop="$1"; shift + my_oldlibs=${1+"$@"} + my_oldobjs="" + my_xlib="" + my_xabs="" + my_xdir="" + + for my_xlib in $my_oldlibs; do + # Extract the objects. + case $my_xlib in + [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;; + *) my_xabs=`pwd`"/$my_xlib" ;; + esac + func_basename "$my_xlib" + my_xlib="$func_basename_result" + my_xlib_u=$my_xlib + while :; do + case " $extracted_archives " in + *" $my_xlib_u "*) + func_arith $extracted_serial + 1 + extracted_serial=$func_arith_result + my_xlib_u=lt$extracted_serial-$my_xlib ;; + *) break ;; + esac + done + extracted_archives="$extracted_archives $my_xlib_u" + my_xdir="$my_gentop/$my_xlib_u" + + func_mkdir_p "$my_xdir" + + case $host in + *-darwin*) + func_verbose "Extracting $my_xabs" + # Do not bother doing anything if just a dry run + $opt_dry_run || { + darwin_orig_dir=`pwd` + cd $my_xdir || exit $? 
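+      # Illustrative note (not part of the upstream script; it restates the
+      # steps that follow): a fat (universal) Darwin archive is first split
+      # per architecture with `$LIPO -thin', each thin slice is unpacked via
+      # func_extract_an_archive, and the extracted objects are merged back
+      # into fat objects with `$LIPO -create' before the temporary
+      # unfat-$$ tree is removed.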
+ darwin_archive=$my_xabs + darwin_curdir=`pwd` + darwin_base_archive=`basename "$darwin_archive"` + darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` + if test -n "$darwin_arches"; then + darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` + darwin_arch= + func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" + for darwin_arch in $darwin_arches ; do + func_mkdir_p "unfat-$$/${darwin_base_archive}-${darwin_arch}" + $LIPO -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}" + cd "unfat-$$/${darwin_base_archive}-${darwin_arch}" + func_extract_an_archive "`pwd`" "${darwin_base_archive}" + cd "$darwin_curdir" + $RM "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" + done # $darwin_arches + ## Okay now we've a bunch of thin objects, gotta fatten them up :) + darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$basename" | sort -u` + darwin_file= + darwin_files= + for darwin_file in $darwin_filelist; do + darwin_files=`find unfat-$$ -name $darwin_file -print | $NL2SP` + $LIPO -create -output "$darwin_file" $darwin_files + done # $darwin_filelist + $RM -rf unfat-$$ + cd "$darwin_orig_dir" + else + cd $darwin_orig_dir + func_extract_an_archive "$my_xdir" "$my_xabs" + fi # $darwin_arches + } # !$opt_dry_run + ;; + *) + func_extract_an_archive "$my_xdir" "$my_xabs" + ;; + esac + my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP` + done + + func_extract_archives_result="$my_oldobjs" +} + + + +# func_emit_wrapper_part1 [arg=no] +# +# Emit the first part of a libtool wrapper script on stdout. +# For more information, see the description associated with +# func_emit_wrapper(), below. +func_emit_wrapper_part1 () +{ + func_emit_wrapper_part1_arg1=no + if test -n "$1" ; then + func_emit_wrapper_part1_arg1=$1 + fi + + $ECHO "\ +#! $SHELL + +# $output - temporary wrapper script for $objdir/$outputname +# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION +# +# The $output program cannot be directly executed until all the libtool +# libraries that it depends on are installed. +# +# This wrapper script should never be moved out of the build directory. +# If it is, it will not operate correctly. + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +Xsed='${SED} -e 1s/^X//' +sed_quote_subst='$sed_quote_subst' + +# Be Bourne compatible +if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac +fi +BIN_SH=xpg4; export BIN_SH # for Tru64 +DUALCASE=1; export DUALCASE # for MKS sh + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +relink_command=\"$relink_command\" + +# This environment variable determines our operation mode. +if test \"\$libtool_install_magic\" = \"$magic\"; then + # install mode needs the following variables: + generated_by_libtool_version='$macro_version' + notinst_deplibs='$notinst_deplibs' +else + # When we are sourced in execute mode, \$file and \$ECHO are already set. 
+ if test \"\$libtool_execute_magic\" != \"$magic\"; then + ECHO=\"$qecho\" + file=\"\$0\" + # Make sure echo works. + if test \"X\$1\" = X--no-reexec; then + # Discard the --no-reexec flag, and continue. + shift + elif test \"X\`{ \$ECHO '\t'; } 2>/dev/null\`\" = 'X\t'; then + # Yippee, \$ECHO works! + : + else + # Restart under the correct shell, and then maybe \$ECHO will work. + exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"} + fi + fi\ +" + $ECHO "\ + + # Find the directory that this script lives in. + thisdir=\`\$ECHO \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\` + test \"x\$thisdir\" = \"x\$file\" && thisdir=. + + # Follow symbolic links until we get to the real thisdir. + file=\`ls -ld \"\$file\" | ${SED} -n 's/.*-> //p'\` + while test -n \"\$file\"; do + destdir=\`\$ECHO \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\` + + # If there was a directory component, then change thisdir. + if test \"x\$destdir\" != \"x\$file\"; then + case \"\$destdir\" in + [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; + *) thisdir=\"\$thisdir/\$destdir\" ;; + esac + fi + + file=\`\$ECHO \"X\$file\" | \$Xsed -e 's%^.*/%%'\` + file=\`ls -ld \"\$thisdir/\$file\" | ${SED} -n 's/.*-> //p'\` + done +" +} +# end: func_emit_wrapper_part1 + +# func_emit_wrapper_part2 [arg=no] +# +# Emit the second part of a libtool wrapper script on stdout. +# For more information, see the description associated with +# func_emit_wrapper(), below. +func_emit_wrapper_part2 () +{ + func_emit_wrapper_part2_arg1=no + if test -n "$1" ; then + func_emit_wrapper_part2_arg1=$1 + fi + + $ECHO "\ + + # Usually 'no', except on cygwin/mingw when embedded into + # the cwrapper. + WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_part2_arg1 + if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then + # special case for '.' + if test \"\$thisdir\" = \".\"; then + thisdir=\`pwd\` + fi + # remove .libs from thisdir + case \"\$thisdir\" in + *[\\\\/]$objdir ) thisdir=\`\$ECHO \"X\$thisdir\" | \$Xsed -e 's%[\\\\/][^\\\\/]*$%%'\` ;; + $objdir ) thisdir=. ;; + esac + fi + + # Try to get the absolute directory name. + absdir=\`cd \"\$thisdir\" && pwd\` + test -n \"\$absdir\" && thisdir=\"\$absdir\" +" + + if test "$fast_install" = yes; then + $ECHO "\ + program=lt-'$outputname'$exeext + progdir=\"\$thisdir/$objdir\" + + if test ! -f \"\$progdir/\$program\" || + { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\ + test \"X\$file\" != \"X\$progdir/\$program\"; }; then + + file=\"\$\$-\$program\" + + if test ! -d \"\$progdir\"; then + $MKDIR \"\$progdir\" + else + $RM \"\$progdir/\$file\" + fi" + + $ECHO "\ + + # relink executable if necessary + if test -n \"\$relink_command\"; then + if relink_command_output=\`eval \$relink_command 2>&1\`; then : + else + $ECHO \"\$relink_command_output\" >&2 + $RM \"\$progdir/\$file\" + exit 1 + fi + fi + + $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || + { $RM \"\$progdir/\$program\"; + $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } + $RM \"\$progdir/\$file\" + fi" + else + $ECHO "\ + program='$outputname' + progdir=\"\$thisdir/$objdir\" +" + fi + + $ECHO "\ + + if test -f \"\$progdir/\$program\"; then" + + # Export our shlibpath_var if we have one. 
+ if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then + $ECHO "\ + # Add our own library path to $shlibpath_var + $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" + + # Some systems cannot cope with colon-terminated $shlibpath_var + # The second colon is a workaround for a bug in BeOS R4 sed + $shlibpath_var=\`\$ECHO \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\` + + export $shlibpath_var +" + fi + + # fixup the dll searchpath if we need to. + if test -n "$dllsearchpath"; then + $ECHO "\ + # Add the dll search path components to the executable PATH + PATH=$dllsearchpath:\$PATH +" + fi + + $ECHO "\ + if test \"\$libtool_execute_magic\" != \"$magic\"; then + # Run the actual program with our arguments. +" + case $host in + # Backslashes separate directories on plain windows + *-*-mingw | *-*-os2* | *-cegcc*) + $ECHO "\ + exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} +" + ;; + + *) + $ECHO "\ + exec \"\$progdir/\$program\" \${1+\"\$@\"} +" + ;; + esac + $ECHO "\ + \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 + exit 1 + fi + else + # The program doesn't exist. + \$ECHO \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2 + \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 + $ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 + exit 1 + fi +fi\ +" +} +# end: func_emit_wrapper_part2 + + +# func_emit_wrapper [arg=no] +# +# Emit a libtool wrapper script on stdout. +# Don't directly open a file because we may want to +# incorporate the script contents within a cygwin/mingw +# wrapper executable. Must ONLY be called from within +# func_mode_link because it depends on a number of variables +# set therein. +# +# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR +# variable will take. If 'yes', then the emitted script +# will assume that the directory in which it is stored is +# the $objdir directory. This is a cygwin/mingw-specific +# behavior. +func_emit_wrapper () +{ + func_emit_wrapper_arg1=no + if test -n "$1" ; then + func_emit_wrapper_arg1=$1 + fi + + # split this up so that func_emit_cwrapperexe_src + # can call each part independently. + func_emit_wrapper_part1 "${func_emit_wrapper_arg1}" + func_emit_wrapper_part2 "${func_emit_wrapper_arg1}" +} + + +# func_to_host_path arg +# +# Convert paths to host format when used with build tools. +# Intended for use with "native" mingw (where libtool itself +# is running under the msys shell), or in the following cross- +# build environments: +# $build $host +# mingw (msys) mingw [e.g. native] +# cygwin mingw +# *nix + wine mingw +# where wine is equipped with the `winepath' executable. +# In the native mingw case, the (msys) shell automatically +# converts paths for any non-msys applications it launches, +# but that facility isn't available from inside the cwrapper. +# Similar accommodations are necessary for $host mingw and +# $build cygwin. Calling this function does no harm for other +# $host/$build combinations not listed above. +# +# ARG is the path (on $build) that should be converted to +# the proper representation for $host. The result is stored +# in $func_to_host_path_result. 
+func_to_host_path () +{ + func_to_host_path_result="$1" + if test -n "$1" ; then + case $host in + *mingw* ) + lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' + case $build in + *mingw* ) # actually, msys + # awkward: cmd appends spaces to result + lt_sed_strip_trailing_spaces="s/[ ]*\$//" + func_to_host_path_tmp1=`( cmd //c echo "$1" |\ + $SED -e "$lt_sed_strip_trailing_spaces" ) 2>/dev/null || echo ""` + func_to_host_path_result=`echo "$func_to_host_path_tmp1" |\ + $SED -e "$lt_sed_naive_backslashify"` + ;; + *cygwin* ) + func_to_host_path_tmp1=`cygpath -w "$1"` + func_to_host_path_result=`echo "$func_to_host_path_tmp1" |\ + $SED -e "$lt_sed_naive_backslashify"` + ;; + * ) + # Unfortunately, winepath does not exit with a non-zero + # error code, so we are forced to check the contents of + # stdout. On the other hand, if the command is not + # found, the shell will set an exit code of 127 and print + # *an error message* to stdout. So we must check for both + # error code of zero AND non-empty stdout, which explains + # the odd construction: + func_to_host_path_tmp1=`winepath -w "$1" 2>/dev/null` + if test "$?" -eq 0 && test -n "${func_to_host_path_tmp1}"; then + func_to_host_path_result=`echo "$func_to_host_path_tmp1" |\ + $SED -e "$lt_sed_naive_backslashify"` + else + # Allow warning below. + func_to_host_path_result="" + fi + ;; + esac + if test -z "$func_to_host_path_result" ; then + func_error "Could not determine host path corresponding to" + func_error " '$1'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback: + func_to_host_path_result="$1" + fi + ;; + esac + fi +} +# end: func_to_host_path + +# func_to_host_pathlist arg +# +# Convert pathlists to host format when used with build tools. +# See func_to_host_path(), above. This function supports the +# following $build/$host combinations (but does no harm for +# combinations not listed here): +# $build $host +# mingw (msys) mingw [e.g. native] +# cygwin mingw +# *nix + wine mingw +# +# Path separators are also converted from $build format to +# $host format. If ARG begins or ends with a path separator +# character, it is preserved (but converted to $host format) +# on output. +# +# ARG is a pathlist (on $build) that should be converted to +# the proper representation on $host. The result is stored +# in $func_to_host_pathlist_result. +func_to_host_pathlist () +{ + func_to_host_pathlist_result="$1" + if test -n "$1" ; then + case $host in + *mingw* ) + lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' + # Remove leading and trailing path separator characters from + # ARG. msys behavior is inconsistent here, cygpath turns them + # into '.;' and ';.', and winepath ignores them completely. + func_to_host_pathlist_tmp2="$1" + # Once set for this call, this variable should not be + # reassigned. It is used in tha fallback case. + func_to_host_pathlist_tmp1=`echo "$func_to_host_pathlist_tmp2" |\ + $SED -e 's|^:*||' -e 's|:*$||'` + case $build in + *mingw* ) # Actually, msys. + # Awkward: cmd appends spaces to result. 
+ lt_sed_strip_trailing_spaces="s/[ ]*\$//" + func_to_host_pathlist_tmp2=`( cmd //c echo "$func_to_host_pathlist_tmp1" |\ + $SED -e "$lt_sed_strip_trailing_spaces" ) 2>/dev/null || echo ""` + func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp2" |\ + $SED -e "$lt_sed_naive_backslashify"` + ;; + *cygwin* ) + func_to_host_pathlist_tmp2=`cygpath -w -p "$func_to_host_pathlist_tmp1"` + func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp2" |\ + $SED -e "$lt_sed_naive_backslashify"` + ;; + * ) + # unfortunately, winepath doesn't convert pathlists + func_to_host_pathlist_result="" + func_to_host_pathlist_oldIFS=$IFS + IFS=: + for func_to_host_pathlist_f in $func_to_host_pathlist_tmp1 ; do + IFS=$func_to_host_pathlist_oldIFS + if test -n "$func_to_host_pathlist_f" ; then + func_to_host_path "$func_to_host_pathlist_f" + if test -n "$func_to_host_path_result" ; then + if test -z "$func_to_host_pathlist_result" ; then + func_to_host_pathlist_result="$func_to_host_path_result" + else + func_to_host_pathlist_result="$func_to_host_pathlist_result;$func_to_host_path_result" + fi + fi + fi + IFS=: + done + IFS=$func_to_host_pathlist_oldIFS + ;; + esac + if test -z "$func_to_host_pathlist_result" ; then + func_error "Could not determine the host path(s) corresponding to" + func_error " '$1'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback. This may break if $1 contains DOS-style drive + # specifications. The fix is not to complicate the expression + # below, but for the user to provide a working wine installation + # with winepath so that path translation in the cross-to-mingw + # case works properly. + lt_replace_pathsep_nix_to_dos="s|:|;|g" + func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp1" |\ + $SED -e "$lt_replace_pathsep_nix_to_dos"` + fi + # Now, add the leading and trailing path separators back + case "$1" in + :* ) func_to_host_pathlist_result=";$func_to_host_pathlist_result" + ;; + esac + case "$1" in + *: ) func_to_host_pathlist_result="$func_to_host_pathlist_result;" + ;; + esac + ;; + esac + fi +} +# end: func_to_host_pathlist + +# func_emit_cwrapperexe_src +# emit the source code for a wrapper executable on stdout +# Must ONLY be called from within func_mode_link because +# it depends on a number of variable set therein. 
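+# Illustrative note (an assumption about code outside this hunk, with
+# hypothetical file names): in link mode the output of this function is
+# typically captured into a C source file and built with the libtool
+# compiler, roughly
+#
+#   func_emit_cwrapperexe_src > "$output_objdir/$outputname.c"
+#   $LTCC $LTCFLAGS -o "$output_objdir/$outputname" "$output_objdir/$outputname.c"
+#
+# The emitted program embeds the shell wrapper text produced by
+# func_emit_wrapper_part1/part2 and re-launches the real binary via
+# execv (or _spawnv on mingw).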
+func_emit_cwrapperexe_src () +{ + cat < +#include +#ifdef _MSC_VER +# include +# include +# include +# define setmode _setmode +#else +# include +# include +# ifdef __CYGWIN__ +# include +# define HAVE_SETENV +# ifdef __STRICT_ANSI__ +char *realpath (const char *, char *); +int putenv (char *); +int setenv (const char *, const char *, int); +# endif +# endif +#endif +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(PATH_MAX) +# define LT_PATHMAX PATH_MAX +#elif defined(MAXPATHLEN) +# define LT_PATHMAX MAXPATHLEN +#else +# define LT_PATHMAX 1024 +#endif + +#ifndef S_IXOTH +# define S_IXOTH 0 +#endif +#ifndef S_IXGRP +# define S_IXGRP 0 +#endif + +#ifdef _MSC_VER +# define S_IXUSR _S_IEXEC +# define stat _stat +# ifndef _INTPTR_T_DEFINED +# define intptr_t int +# endif +#endif + +#ifndef DIR_SEPARATOR +# define DIR_SEPARATOR '/' +# define PATH_SEPARATOR ':' +#endif + +#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \ + defined (__OS2__) +# define HAVE_DOS_BASED_FILE_SYSTEM +# define FOPEN_WB "wb" +# ifndef DIR_SEPARATOR_2 +# define DIR_SEPARATOR_2 '\\' +# endif +# ifndef PATH_SEPARATOR_2 +# define PATH_SEPARATOR_2 ';' +# endif +#endif + +#ifndef DIR_SEPARATOR_2 +# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) +#else /* DIR_SEPARATOR_2 */ +# define IS_DIR_SEPARATOR(ch) \ + (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) +#endif /* DIR_SEPARATOR_2 */ + +#ifndef PATH_SEPARATOR_2 +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) +#else /* PATH_SEPARATOR_2 */ +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) +#endif /* PATH_SEPARATOR_2 */ + +#ifdef __CYGWIN__ +# define FOPEN_WB "wb" +#endif + +#ifndef FOPEN_WB +# define FOPEN_WB "w" +#endif +#ifndef _O_BINARY +# define _O_BINARY 0 +#endif + +#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) +#define XFREE(stale) do { \ + if (stale) { free ((void *) stale); stale = 0; } \ +} while (0) + +#undef LTWRAPPER_DEBUGPRINTF +#if defined DEBUGWRAPPER +# define LTWRAPPER_DEBUGPRINTF(args) ltwrapper_debugprintf args +static void +ltwrapper_debugprintf (const char *fmt, ...) +{ + va_list args; + va_start (args, fmt); + (void) vfprintf (stderr, fmt, args); + va_end (args); +} +#else +# define LTWRAPPER_DEBUGPRINTF(args) +#endif + +const char *program_name = NULL; + +void *xmalloc (size_t num); +char *xstrdup (const char *string); +const char *base_name (const char *name); +char *find_executable (const char *wrapper); +char *chase_symlinks (const char *pathspec); +int make_executable (const char *path); +int check_executable (const char *path); +char *strendzap (char *str, const char *pat); +void lt_fatal (const char *message, ...); +void lt_setenv (const char *name, const char *value); +char *lt_extend_str (const char *orig_value, const char *add, int to_end); +void lt_opt_process_env_set (const char *arg); +void lt_opt_process_env_prepend (const char *arg); +void lt_opt_process_env_append (const char *arg); +int lt_split_name_value (const char *arg, char** name, char** value); +void lt_update_exe_path (const char *name, const char *value); +void lt_update_lib_path (const char *name, const char *value); + +static const char *script_text_part1 = +EOF + + func_emit_wrapper_part1 yes | + $SED -e 's/\([\\"]\)/\\\1/g' \ + -e 's/^/ "/' -e 's/$/\\n"/' + echo ";" + cat <"))); + for (i = 0; i < newargc; i++) + { + LTWRAPPER_DEBUGPRINTF (("(main) newargz[%d] : %s\n", i, (newargz[i] ? 
newargz[i] : ""))); + } + +EOF + + case $host_os in + mingw*) + cat <<"EOF" + /* execv doesn't actually work on mingw as expected on unix */ + rval = _spawnv (_P_WAIT, lt_argv_zero, (const char * const *) newargz); + if (rval == -1) + { + /* failed to start process */ + LTWRAPPER_DEBUGPRINTF (("(main) failed to launch target \"%s\": errno = %d\n", lt_argv_zero, errno)); + return 127; + } + return rval; +EOF + ;; + *) + cat <<"EOF" + execv (lt_argv_zero, newargz); + return rval; /* =127, but avoids unused variable warning */ +EOF + ;; + esac + + cat <<"EOF" +} + +void * +xmalloc (size_t num) +{ + void *p = (void *) malloc (num); + if (!p) + lt_fatal ("Memory exhausted"); + + return p; +} + +char * +xstrdup (const char *string) +{ + return string ? strcpy ((char *) xmalloc (strlen (string) + 1), + string) : NULL; +} + +const char * +base_name (const char *name) +{ + const char *base; + +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + /* Skip over the disk name in MSDOS pathnames. */ + if (isalpha ((unsigned char) name[0]) && name[1] == ':') + name += 2; +#endif + + for (base = name; *name; name++) + if (IS_DIR_SEPARATOR (*name)) + base = name + 1; + return base; +} + +int +check_executable (const char *path) +{ + struct stat st; + + LTWRAPPER_DEBUGPRINTF (("(check_executable) : %s\n", + path ? (*path ? path : "EMPTY!") : "NULL!")); + if ((!path) || (!*path)) + return 0; + + if ((stat (path, &st) >= 0) + && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) + return 1; + else + return 0; +} + +int +make_executable (const char *path) +{ + int rval = 0; + struct stat st; + + LTWRAPPER_DEBUGPRINTF (("(make_executable) : %s\n", + path ? (*path ? path : "EMPTY!") : "NULL!")); + if ((!path) || (!*path)) + return 0; + + if (stat (path, &st) >= 0) + { + rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); + } + return rval; +} + +/* Searches for the full path of the wrapper. Returns + newly allocated full path name if found, NULL otherwise + Does not chase symlinks, even on platforms that support them. +*/ +char * +find_executable (const char *wrapper) +{ + int has_slash = 0; + const char *p; + const char *p_next; + /* static buffer for getcwd */ + char tmp[LT_PATHMAX + 1]; + int tmp_len; + char *concat_name; + + LTWRAPPER_DEBUGPRINTF (("(find_executable) : %s\n", + wrapper ? (*wrapper ? wrapper : "EMPTY!") : "NULL!")); + + if ((wrapper == NULL) || (*wrapper == '\0')) + return NULL; + + /* Absolute path? */ +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + else + { +#endif + if (IS_DIR_SEPARATOR (wrapper[0])) + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + } +#endif + + for (p = wrapper; *p; p++) + if (*p == '/') + { + has_slash = 1; + break; + } + if (!has_slash) + { + /* no slashes; search PATH */ + const char *path = getenv ("PATH"); + if (path != NULL) + { + for (p = path; *p; p = p_next) + { + const char *q; + size_t p_len; + for (q = p; *q; q++) + if (IS_PATH_SEPARATOR (*q)) + break; + p_len = q - p; + p_next = (*q == '\0' ? 
q : q + 1); + if (p_len == 0) + { + /* empty path: current directory */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal ("getcwd failed"); + tmp_len = strlen (tmp); + concat_name = + XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + } + else + { + concat_name = + XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, p, p_len); + concat_name[p_len] = '/'; + strcpy (concat_name + p_len + 1, wrapper); + } + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + } + /* not found in PATH; assume curdir */ + } + /* Relative path | not found in path: prepend cwd */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal ("getcwd failed"); + tmp_len = strlen (tmp); + concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + return NULL; +} + +char * +chase_symlinks (const char *pathspec) +{ +#ifndef S_ISLNK + return xstrdup (pathspec); +#else + char buf[LT_PATHMAX]; + struct stat s; + char *tmp_pathspec = xstrdup (pathspec); + char *p; + int has_symlinks = 0; + while (strlen (tmp_pathspec) && !has_symlinks) + { + LTWRAPPER_DEBUGPRINTF (("checking path component for symlinks: %s\n", + tmp_pathspec)); + if (lstat (tmp_pathspec, &s) == 0) + { + if (S_ISLNK (s.st_mode) != 0) + { + has_symlinks = 1; + break; + } + + /* search backwards for last DIR_SEPARATOR */ + p = tmp_pathspec + strlen (tmp_pathspec) - 1; + while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + p--; + if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + { + /* no more DIR_SEPARATORS left */ + break; + } + *p = '\0'; + } + else + { + char *errstr = strerror (errno); + lt_fatal ("Error accessing file %s (%s)", tmp_pathspec, errstr); + } + } + XFREE (tmp_pathspec); + + if (!has_symlinks) + { + return xstrdup (pathspec); + } + + tmp_pathspec = realpath (pathspec, buf); + if (tmp_pathspec == 0) + { + lt_fatal ("Could not follow symlinks for %s", pathspec); + } + return xstrdup (tmp_pathspec); +#endif +} + +char * +strendzap (char *str, const char *pat) +{ + size_t len, patlen; + + assert (str != NULL); + assert (pat != NULL); + + len = strlen (str); + patlen = strlen (pat); + + if (patlen <= len) + { + str += len - patlen; + if (strcmp (str, pat) == 0) + *str = '\0'; + } + return str; +} + +static void +lt_error_core (int exit_status, const char *mode, + const char *message, va_list ap) +{ + fprintf (stderr, "%s: %s: ", program_name, mode); + vfprintf (stderr, message, ap); + fprintf (stderr, ".\n"); + + if (exit_status >= 0) + exit (exit_status); +} + +void +lt_fatal (const char *message, ...) +{ + va_list ap; + va_start (ap, message); + lt_error_core (EXIT_FAILURE, "FATAL", message, ap); + va_end (ap); +} + +void +lt_setenv (const char *name, const char *value) +{ + LTWRAPPER_DEBUGPRINTF (("(lt_setenv) setting '%s' to '%s'\n", + (name ? name : ""), + (value ? 
value : ""))); + { +#ifdef HAVE_SETENV + /* always make a copy, for consistency with !HAVE_SETENV */ + char *str = xstrdup (value); + setenv (name, str, 1); +#else + int len = strlen (name) + 1 + strlen (value) + 1; + char *str = XMALLOC (char, len); + sprintf (str, "%s=%s", name, value); + if (putenv (str) != EXIT_SUCCESS) + { + XFREE (str); + } +#endif + } +} + +char * +lt_extend_str (const char *orig_value, const char *add, int to_end) +{ + char *new_value; + if (orig_value && *orig_value) + { + int orig_value_len = strlen (orig_value); + int add_len = strlen (add); + new_value = XMALLOC (char, add_len + orig_value_len + 1); + if (to_end) + { + strcpy (new_value, orig_value); + strcpy (new_value + orig_value_len, add); + } + else + { + strcpy (new_value, add); + strcpy (new_value + add_len, orig_value); + } + } + else + { + new_value = xstrdup (add); + } + return new_value; +} + +int +lt_split_name_value (const char *arg, char** name, char** value) +{ + const char *p; + int len; + if (!arg || !*arg) + return 1; + + p = strchr (arg, (int)'='); + + if (!p) + return 1; + + *value = xstrdup (++p); + + len = strlen (arg) - strlen (*value); + *name = XMALLOC (char, len); + strncpy (*name, arg, len-1); + (*name)[len - 1] = '\0'; + + return 0; +} + +void +lt_opt_process_env_set (const char *arg) +{ + char *name = NULL; + char *value = NULL; + + if (lt_split_name_value (arg, &name, &value) != 0) + { + XFREE (name); + XFREE (value); + lt_fatal ("bad argument for %s: '%s'", env_set_opt, arg); + } + + lt_setenv (name, value); + XFREE (name); + XFREE (value); +} + +void +lt_opt_process_env_prepend (const char *arg) +{ + char *name = NULL; + char *value = NULL; + char *new_value = NULL; + + if (lt_split_name_value (arg, &name, &value) != 0) + { + XFREE (name); + XFREE (value); + lt_fatal ("bad argument for %s: '%s'", env_prepend_opt, arg); + } + + new_value = lt_extend_str (getenv (name), value, 0); + lt_setenv (name, new_value); + XFREE (new_value); + XFREE (name); + XFREE (value); +} + +void +lt_opt_process_env_append (const char *arg) +{ + char *name = NULL; + char *value = NULL; + char *new_value = NULL; + + if (lt_split_name_value (arg, &name, &value) != 0) + { + XFREE (name); + XFREE (value); + lt_fatal ("bad argument for %s: '%s'", env_append_opt, arg); + } + + new_value = lt_extend_str (getenv (name), value, 1); + lt_setenv (name, new_value); + XFREE (new_value); + XFREE (name); + XFREE (value); +} + +void +lt_update_exe_path (const char *name, const char *value) +{ + LTWRAPPER_DEBUGPRINTF (("(lt_update_exe_path) modifying '%s' by prepending '%s'\n", + (name ? name : ""), + (value ? value : ""))); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + /* some systems can't cope with a ':'-terminated path #' */ + int len = strlen (new_value); + while (((len = strlen (new_value)) > 0) && IS_PATH_SEPARATOR (new_value[len-1])) + { + new_value[len-1] = '\0'; + } + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +void +lt_update_lib_path (const char *name, const char *value) +{ + LTWRAPPER_DEBUGPRINTF (("(lt_update_lib_path) modifying '%s' by prepending '%s'\n", + (name ? name : ""), + (value ? value : ""))); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + lt_setenv (name, new_value); + XFREE (new_value); + } +} + + +EOF +} +# end: func_emit_cwrapperexe_src + +# func_mode_link arg... 
+func_mode_link () +{ + $opt_debug + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + # It is impossible to link a dll without this setting, and + # we shouldn't force the makefile maintainer to figure out + # which system we are compiling for in order to pass an extra + # flag for every libtool invocation. + # allow_undefined=no + + # FIXME: Unfortunately, there are problems with the above when trying + # to make a dll which has undefined symbols, in which case not + # even a static library is built. For now, we need to specify + # -no-undefined on the libtool link line when we can be certain + # that all symbols are satisfied, otherwise we get a static library. + allow_undefined=yes + ;; + *) + allow_undefined=yes + ;; + esac + libtool_args=$nonopt + base_compile="$nonopt $@" + compile_command=$nonopt + finalize_command=$nonopt + + compile_rpath= + finalize_rpath= + compile_shlibpath= + finalize_shlibpath= + convenience= + old_convenience= + deplibs= + old_deplibs= + compiler_flags= + linker_flags= + dllsearchpath= + lib_search_path=`pwd` + inst_prefix_dir= + new_inherited_linker_flags= + + avoid_version=no + dlfiles= + dlprefiles= + dlself=no + export_dynamic=no + export_symbols= + export_symbols_regex= + generated= + libobjs= + ltlibs= + module=no + no_install=no + objs= + non_pic_objects= + precious_files_regex= + prefer_static_libs=no + preload=no + prev= + prevarg= + release= + rpath= + xrpath= + perm_rpath= + temp_rpath= + thread_safe=no + vinfo= + vinfo_number=no + weak_libs= + single_module="${wl}-single_module" + func_infer_tag $base_compile + + # We need to know -static, to get the right output filenames. + for arg + do + case $arg in + -shared) + test "$build_libtool_libs" != yes && \ + func_fatal_configuration "can not build a shared library" + build_old_libs=no + break + ;; + -all-static | -static | -static-libtool-libs) + case $arg in + -all-static) + if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then + func_warning "complete static linking is impossible in this configuration" + fi + if test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + -static) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=built + ;; + -static-libtool-libs) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + esac + build_libtool_libs=no + build_old_libs=yes + break + ;; + esac + done + + # See if our shared archives depend on static archives. + test -n "$old_archive_from_new_cmds" && build_old_libs=yes + + # Go through the arguments, transforming them on the way. + while test "$#" -gt 0; do + arg="$1" + shift + func_quote_for_eval "$arg" + qarg=$func_quote_for_eval_unquoted_result + func_append libtool_args " $func_quote_for_eval_result" + + # If the previous option needs an argument, assign it. + if test -n "$prev"; then + case $prev in + output) + func_append compile_command " @OUTPUT@" + func_append finalize_command " @OUTPUT@" + ;; + esac + + case $prev in + dlfiles|dlprefiles) + if test "$preload" = no; then + # Add the symbol object into the linking commands. + func_append compile_command " @SYMFILE@" + func_append finalize_command " @SYMFILE@" + preload=yes + fi + case $arg in + *.la | *.lo) ;; # We handle these cases below. 
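+	  # Illustrative note (not part of the upstream script): the argument
+	  # previously given to -dlopen or -dlpreopen may be `force', `self', a
+	  # libtool object/library, or a plain file; the arms below handle each.
+	  # For example, `-dlopen self' sets dlself so that the program's own
+	  # symbols are added to the dlsym-emulation table generated earlier in
+	  # this file.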
+ force) + if test "$dlself" = no; then + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + self) + if test "$prev" = dlprefiles; then + dlself=yes + elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then + dlself=yes + else + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + *) + if test "$prev" = dlfiles; then + dlfiles="$dlfiles $arg" + else + dlprefiles="$dlprefiles $arg" + fi + prev= + continue + ;; + esac + ;; + expsyms) + export_symbols="$arg" + test -f "$arg" \ + || func_fatal_error "symbol file \`$arg' does not exist" + prev= + continue + ;; + expsyms_regex) + export_symbols_regex="$arg" + prev= + continue + ;; + framework) + case $host in + *-*-darwin*) + case "$deplibs " in + *" $qarg.ltframework "*) ;; + *) deplibs="$deplibs $qarg.ltframework" # this is fixed later + ;; + esac + ;; + esac + prev= + continue + ;; + inst_prefix) + inst_prefix_dir="$arg" + prev= + continue + ;; + objectlist) + if test -f "$arg"; then + save_arg=$arg + moreargs= + for fil in `cat "$save_arg"` + do +# moreargs="$moreargs $fil" + arg=$fil + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test "$pic_object" = none && + test "$non_pic_object" = none; then + func_fatal_error "cannot find name of object for \`$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + if test "$pic_object" != none; then + # Prepend the subdirectory the object is found in. + pic_object="$xdir$pic_object" + + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then + dlfiles="$dlfiles $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. + dlprefiles="$dlprefiles $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg="$pic_object" + fi + + # Non-PIC object. + if test "$non_pic_object" != none; then + # Prepend the subdirectory the object is found in. + non_pic_object="$xdir$non_pic_object" + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test "$pic_object" = none ; then + arg="$non_pic_object" + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object="$pic_object" + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "\`$arg' is not a valid libtool object" + fi + fi + done + else + func_fatal_error "link input file \`$arg' does not exist" + fi + arg=$save_arg + prev= + continue + ;; + precious_regex) + precious_files_regex="$arg" + prev= + continue + ;; + release) + release="-$arg" + prev= + continue + ;; + rpath | xrpath) + # We need an absolute path. 
+ case $arg in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + if test "$prev" = rpath; then + case "$rpath " in + *" $arg "*) ;; + *) rpath="$rpath $arg" ;; + esac + else + case "$xrpath " in + *" $arg "*) ;; + *) xrpath="$xrpath $arg" ;; + esac + fi + prev= + continue + ;; + shrext) + shrext_cmds="$arg" + prev= + continue + ;; + weak) + weak_libs="$weak_libs $arg" + prev= + continue + ;; + xcclinker) + linker_flags="$linker_flags $qarg" + compiler_flags="$compiler_flags $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xcompiler) + compiler_flags="$compiler_flags $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xlinker) + linker_flags="$linker_flags $qarg" + compiler_flags="$compiler_flags $wl$qarg" + prev= + func_append compile_command " $wl$qarg" + func_append finalize_command " $wl$qarg" + continue + ;; + *) + eval "$prev=\"\$arg\"" + prev= + continue + ;; + esac + fi # test -n "$prev" + + prevarg="$arg" + + case $arg in + -all-static) + if test -n "$link_static_flag"; then + # See comment for -static flag below, for more details. + func_append compile_command " $link_static_flag" + func_append finalize_command " $link_static_flag" + fi + continue + ;; + + -allow-undefined) + # FIXME: remove this flag sometime in the future. + func_fatal_error "\`-allow-undefined' must not be used because it is the default" + ;; + + -avoid-version) + avoid_version=yes + continue + ;; + + -dlopen) + prev=dlfiles + continue + ;; + + -dlpreopen) + prev=dlprefiles + continue + ;; + + -export-dynamic) + export_dynamic=yes + continue + ;; + + -export-symbols | -export-symbols-regex) + if test -n "$export_symbols" || test -n "$export_symbols_regex"; then + func_fatal_error "more than one -exported-symbols argument is not allowed" + fi + if test "X$arg" = "X-export-symbols"; then + prev=expsyms + else + prev=expsyms_regex + fi + continue + ;; + + -framework) + prev=framework + continue + ;; + + -inst-prefix-dir) + prev=inst_prefix + continue + ;; + + # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* + # so, if we see these flags be careful not to treat them like -L + -L[A-Z][A-Z]*:*) + case $with_gcc/$host in + no/*-*-irix* | /*-*-irix*) + func_append compile_command " $arg" + func_append finalize_command " $arg" + ;; + esac + continue + ;; + + -L*) + func_stripname '-L' '' "$arg" + dir=$func_stripname_result + if test -z "$dir"; then + if test "$#" -gt 0; then + func_fatal_error "require no space between \`-L' and \`$1'" + else + func_fatal_error "need path for \`-L' option" + fi + fi + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + absdir=`cd "$dir" && pwd` + test -z "$absdir" && \ + func_fatal_error "cannot determine absolute directory name of \`$dir'" + dir="$absdir" + ;; + esac + case "$deplibs " in + *" -L$dir "*) ;; + *) + deplibs="$deplibs -L$dir" + lib_search_path="$lib_search_path $dir" + ;; + esac + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`$ECHO "X$dir" | $Xsed -e 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$dir:"*) ;; + ::) dllsearchpath=$dir;; + *) dllsearchpath="$dllsearchpath:$dir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) dllsearchpath="$dllsearchpath:$testbindir";; + esac + ;; + esac + continue + ;; + + -l*) + if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc*) + # These systems don't actually have a C or math library (as such) + continue + ;; + *-*-os2*) + # These systems don't actually have a C library (as such) + test "X$arg" = "X-lc" && continue + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + test "X$arg" = "X-lc" && continue + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C and math libraries are in the System framework + deplibs="$deplibs System.ltframework" + continue + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + test "X$arg" = "X-lc" && continue + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + test "X$arg" = "X-lc" && continue + ;; + esac + elif test "X$arg" = "X-lc_r"; then + case $host in + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc_r directly, use -pthread flag. + continue + ;; + esac + fi + deplibs="$deplibs $arg" + continue + ;; + + -module) + module=yes + continue + ;; + + # Tru64 UNIX uses -model [arg] to determine the layout of C++ + # classes, name mangling, and exception handling. + # Darwin uses the -arch flag to determine output architecture. + -model|-arch|-isysroot) + compiler_flags="$compiler_flags $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + prev=xcompiler + continue + ;; + + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads) + compiler_flags="$compiler_flags $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case "$new_inherited_linker_flags " in + *" $arg "*) ;; + * ) new_inherited_linker_flags="$new_inherited_linker_flags $arg" ;; + esac + continue + ;; + + -multi_module) + single_module="${wl}-multi_module" + continue + ;; + + -no-fast-install) + fast_install=no + continue + ;; + + -no-install) + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) + # The PATH hackery in wrapper scripts is required on Windows + # and Darwin in order for the loader to find any dlls it needs. 
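+	  # Illustrative note (the example invocation is hypothetical): on these
+	  # hosts a request such as
+	  #
+	  #   libtool --mode=link $CC -no-install -o foo foo.lo libbar.la
+	  #
+	  # is downgraded to -no-fast-install (see the warnings below), so a
+	  # wrapper script is still produced and can perform the PATH/dll search
+	  # path adjustments the loader needs.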
+ func_warning "\`-no-install' is ignored for $host" + func_warning "assuming \`-no-fast-install' instead" + fast_install=no + ;; + *) no_install=yes ;; + esac + continue + ;; + + -no-undefined) + allow_undefined=no + continue + ;; + + -objectlist) + prev=objectlist + continue + ;; + + -o) prev=output ;; + + -precious-files-regex) + prev=precious_regex + continue + ;; + + -release) + prev=release + continue + ;; + + -rpath) + prev=rpath + continue + ;; + + -R) + prev=xrpath + continue + ;; + + -R*) + func_stripname '-R' '' "$arg" + dir=$func_stripname_result + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + case "$xrpath " in + *" $dir "*) ;; + *) xrpath="$xrpath $dir" ;; + esac + continue + ;; + + -shared) + # The effects of -shared are defined in a previous loop. + continue + ;; + + -shrext) + prev=shrext + continue + ;; + + -static | -static-libtool-libs) + # The effects of -static are defined in a previous loop. + # We used to do the same as -all-static on platforms that + # didn't have a PIC flag, but the assumption that the effects + # would be equivalent was wrong. It would break on at least + # Digital Unix and AIX. + continue + ;; + + -thread-safe) + thread_safe=yes + continue + ;; + + -version-info) + prev=vinfo + continue + ;; + + -version-number) + prev=vinfo + vinfo_number=yes + continue + ;; + + -weak) + prev=weak + continue + ;; + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs="$IFS"; IFS=',' + for flag in $args; do + IFS="$save_ifs" + func_quote_for_eval "$flag" + arg="$arg $wl$func_quote_for_eval_result" + compiler_flags="$compiler_flags $func_quote_for_eval_result" + done + IFS="$save_ifs" + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Wl,*) + func_stripname '-Wl,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs="$IFS"; IFS=',' + for flag in $args; do + IFS="$save_ifs" + func_quote_for_eval "$flag" + arg="$arg $wl$func_quote_for_eval_result" + compiler_flags="$compiler_flags $wl$func_quote_for_eval_result" + linker_flags="$linker_flags $func_quote_for_eval_result" + done + IFS="$save_ifs" + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Xcompiler) + prev=xcompiler + continue + ;; + + -Xlinker) + prev=xlinker + continue + ;; + + -XCClinker) + prev=xcclinker + continue + ;; + + # -msg_* for osf cc + -msg_*) + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + ;; + + # -64, -mips[0-9] enable 64-bit mode on the SGI compiler + # -r[0-9][0-9]* specifies the processor on the SGI compiler + # -xarch=*, -xtarget=* enable 64-bit mode on the Sun compiler + # +DA*, +DD* enable 64-bit mode on the HP compiler + # -q* pass through compiler args for the IBM compiler + # -m*, -t[45]*, -txscale* pass through architecture-specific + # compiler args for GCC + # -F/path gives path to uninstalled frameworks, gcc on darwin + # -p, -pg, --coverage, -fprofile-* pass through profiling flag for GCC + # @file GCC response files + -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ + -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*) + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + func_append compile_command " $arg" + func_append finalize_command " $arg" + compiler_flags="$compiler_flags $arg" + continue + ;; + + # Some other compiler flag. 
+ -* | +*) + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + ;; + + *.$objext) + # A standard object. + objs="$objs $arg" + ;; + + *.lo) + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test "$pic_object" = none && + test "$non_pic_object" = none; then + func_fatal_error "cannot find name of object for \`$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + if test "$pic_object" != none; then + # Prepend the subdirectory the object is found in. + pic_object="$xdir$pic_object" + + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then + dlfiles="$dlfiles $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. + dlprefiles="$dlprefiles $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg="$pic_object" + fi + + # Non-PIC object. + if test "$non_pic_object" != none; then + # Prepend the subdirectory the object is found in. + non_pic_object="$xdir$non_pic_object" + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test "$pic_object" = none ; then + arg="$non_pic_object" + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object="$pic_object" + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "\`$arg' is not a valid libtool object" + fi + fi + ;; + + *.$libext) + # An archive. + deplibs="$deplibs $arg" + old_deplibs="$old_deplibs $arg" + continue + ;; + + *.la) + # A libtool-controlled library. + + if test "$prev" = dlfiles; then + # This library was specified with -dlopen. + dlfiles="$dlfiles $arg" + prev= + elif test "$prev" = dlprefiles; then + # The library was specified with -dlpreopen. + dlprefiles="$dlprefiles $arg" + prev= + else + deplibs="$deplibs $arg" + fi + continue + ;; + + # Some other compiler argument. + *) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + ;; + esac # arg + + # Now actually substitute the argument into the commands. 
+ if test -n "$arg"; then + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + done # argument parsing loop + + test -n "$prev" && \ + func_fatal_help "the \`$prevarg' option requires an argument" + + if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then + eval arg=\"$export_dynamic_flag_spec\" + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + + oldlibs= + # calculate the name of the file, without its directory + func_basename "$output" + outputname="$func_basename_result" + libobjs_save="$libobjs" + + if test -n "$shlibpath_var"; then + # get the directories listed in $shlibpath_var + eval shlib_search_path=\`\$ECHO \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\` + else + shlib_search_path= + fi + eval sys_lib_search_path=\"$sys_lib_search_path_spec\" + eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" + + func_dirname "$output" "/" "" + output_objdir="$func_dirname_result$objdir" + # Create the object directory. + func_mkdir_p "$output_objdir" + + # Determine the type of output + case $output in + "") + func_fatal_help "you must specify an output file" + ;; + *.$libext) linkmode=oldlib ;; + *.lo | *.$objext) linkmode=obj ;; + *.la) linkmode=lib ;; + *) linkmode=prog ;; # Anything else should be a program. + esac + + specialdeplibs= + + libs= + # Find all interdependent deplibs by searching for libraries + # that are linked more than once (e.g. -la -lb -la) + for deplib in $deplibs; do + if $opt_duplicate_deps ; then + case "$libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + libs="$libs $deplib" + done + + if test "$linkmode" = lib; then + libs="$predeps $libs $compiler_lib_search_path $postdeps" + + # Compute libraries that are listed more than once in $predeps + # $postdeps and mark them as special (i.e., whose duplicates are + # not to be eliminated). + pre_post_deps= + if $opt_duplicate_compiler_generated_deps; then + for pre_post_dep in $predeps $postdeps; do + case "$pre_post_deps " in + *" $pre_post_dep "*) specialdeplibs="$specialdeplibs $pre_post_deps" ;; + esac + pre_post_deps="$pre_post_deps $pre_post_dep" + done + fi + pre_post_deps= + fi + + deplibs= + newdependency_libs= + newlib_search_path= + need_relink=no # whether we're linking any uninstalled libtool libraries + notinst_deplibs= # not-installed libtool libraries + notinst_path= # paths that contain not-installed libtool libraries + + case $linkmode in + lib) + passes="conv dlpreopen link" + for file in $dlfiles $dlprefiles; do + case $file in + *.la) ;; + *) + func_fatal_help "libraries can \`-dlopen' only libtool libraries: $file" + ;; + esac + done + ;; + prog) + compile_deplibs= + finalize_deplibs= + alldeplibs=no + newdlfiles= + newdlprefiles= + passes="conv scan dlopen dlpreopen link" + ;; + *) passes="conv" + ;; + esac + + for pass in $passes; do + # The preopen pass in lib mode reverses $deplibs; put it back here + # so that -L comes before libs that need it for instance... 
+ if test "$linkmode,$pass" = "lib,link"; then + ## FIXME: Find the place where the list is rebuilt in the wrong + ## order, and fix it there properly + tmp_deplibs= + for deplib in $deplibs; do + tmp_deplibs="$deplib $tmp_deplibs" + done + deplibs="$tmp_deplibs" + fi + + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan"; then + libs="$deplibs" + deplibs= + fi + if test "$linkmode" = prog; then + case $pass in + dlopen) libs="$dlfiles" ;; + dlpreopen) libs="$dlprefiles" ;; + link) libs="$deplibs %DEPLIBS% $dependency_libs" ;; + esac + fi + if test "$linkmode,$pass" = "lib,dlpreopen"; then + # Collect and forward deplibs of preopened libtool libs + for lib in $dlprefiles; do + # Ignore non-libtool-libs + dependency_libs= + case $lib in + *.la) func_source "$lib" ;; + esac + + # Collect preopened libtool deplibs, except any this library + # has declared as weak libs + for deplib in $dependency_libs; do + deplib_base=`$ECHO "X$deplib" | $Xsed -e "$basename"` + case " $weak_libs " in + *" $deplib_base "*) ;; + *) deplibs="$deplibs $deplib" ;; + esac + done + done + libs="$dlprefiles" + fi + if test "$pass" = dlopen; then + # Collect dlpreopened libraries + save_deplibs="$deplibs" + deplibs= + fi + + for deplib in $libs; do + lib= + found=no + case $deplib in + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads) + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + compiler_flags="$compiler_flags $deplib" + if test "$linkmode" = lib ; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;; + esac + fi + fi + continue + ;; + -l*) + if test "$linkmode" != lib && test "$linkmode" != prog; then + func_warning "\`-l' is ignored for archives/objects" + continue + fi + func_stripname '-l' '' "$deplib" + name=$func_stripname_result + if test "$linkmode" = lib; then + searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" + else + searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" + fi + for searchdir in $searchdirs; do + for search_ext in .la $std_shrext .so .a; do + # Search the libtool library + lib="$searchdir/lib${name}${search_ext}" + if test -f "$lib"; then + if test "$search_ext" = ".la"; then + found=yes + else + found=no + fi + break 2 + fi + done + done + if test "$found" != yes; then + # deplib doesn't seem to be a libtool library + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" + fi + continue + else # deplib is a libtool library + # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, + # We need to do some special things here, and not later. + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $deplib "*) + if func_lalib_p "$lib"; then + library_names= + old_library= + func_source "$lib" + for l in $old_library $library_names; do + ll="$l" + done + if test "X$ll" = "X$old_library" ; then # only static version available + found=no + func_dirname "$lib" "" "." 
+ ladir="$func_dirname_result" + lib=$ladir/$old_library + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + fi + ;; + *) ;; + esac + fi + fi + ;; # -l + *.ltframework) + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + if test "$linkmode" = lib ; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;; + esac + fi + fi + continue + ;; + -L*) + case $linkmode in + lib) + deplibs="$deplib $deplibs" + test "$pass" = conv && continue + newdependency_libs="$deplib $newdependency_libs" + func_stripname '-L' '' "$deplib" + newlib_search_path="$newlib_search_path $func_stripname_result" + ;; + prog) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + continue + fi + if test "$pass" = scan; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + func_stripname '-L' '' "$deplib" + newlib_search_path="$newlib_search_path $func_stripname_result" + ;; + *) + func_warning "\`-L' is ignored for archives/objects" + ;; + esac # linkmode + continue + ;; # -L + -R*) + if test "$pass" = link; then + func_stripname '-R' '' "$deplib" + dir=$func_stripname_result + # Make sure the xrpath contains only unique directories. + case "$xrpath " in + *" $dir "*) ;; + *) xrpath="$xrpath $dir" ;; + esac + fi + deplibs="$deplib $deplibs" + continue + ;; + *.la) lib="$deplib" ;; + *.$libext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + continue + fi + case $linkmode in + lib) + # Linking convenience modules into shared libraries is allowed, + # but linking other static libraries is non-portable. + case " $dlpreconveniencelibs " in + *" $deplib "*) ;; + *) + valid_a_lib=no + case $deplibs_check_method in + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + if eval "\$ECHO \"X$deplib\"" 2>/dev/null | $Xsed -e 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + valid_a_lib=yes + fi + ;; + pass_all) + valid_a_lib=yes + ;; + esac + if test "$valid_a_lib" != yes; then + $ECHO + $ECHO "*** Warning: Trying to link with static lib archive $deplib." + $ECHO "*** I have the capability to make that library automatically link in when" + $ECHO "*** you link to this library. But I can only do this if you have a" + $ECHO "*** shared version of the library, which you do not appear to have" + $ECHO "*** because the file extensions .$libext of this argument makes me believe" + $ECHO "*** that it is just a static archive that I should not use here." + else + $ECHO + $ECHO "*** Warning: Linking the shared library $output against the" + $ECHO "*** static library $deplib is not portable!" 
+ deplibs="$deplib $deplibs" + fi + ;; + esac + continue + ;; + prog) + if test "$pass" != link; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + continue + ;; + esac # linkmode + ;; # *.$libext + *.lo | *.$objext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + elif test "$linkmode" = prog; then + if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then + # If there is no dlopen support or we're linking statically, + # we need to preload. + newdlprefiles="$newdlprefiles $deplib" + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + newdlfiles="$newdlfiles $deplib" + fi + fi + continue + ;; + %DEPLIBS%) + alldeplibs=yes + continue + ;; + esac # case $deplib + + if test "$found" = yes || test -f "$lib"; then : + else + func_fatal_error "cannot find the library \`$lib' or unhandled argument \`$deplib'" + fi + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$lib" \ + || func_fatal_error "\`$lib' is not a valid libtool archive" + + func_dirname "$lib" "" "." + ladir="$func_dirname_result" + + dlname= + dlopen= + dlpreopen= + libdir= + library_names= + old_library= + inherited_linker_flags= + # If the library was installed with an old release of libtool, + # it will not redefine variables installed, or shouldnotlink + installed=yes + shouldnotlink=no + avoidtemprpath= + + + # Read the .la file + func_source "$lib" + + # Convert "-framework foo" to "foo.ltframework" + if test -n "$inherited_linker_flags"; then + tmp_inherited_linker_flags=`$ECHO "X$inherited_linker_flags" | $Xsed -e 's/-framework \([^ $]*\)/\1.ltframework/g'` + for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do + case " $new_inherited_linker_flags " in + *" $tmp_inherited_linker_flag "*) ;; + *) new_inherited_linker_flags="$new_inherited_linker_flags $tmp_inherited_linker_flag";; + esac + done + fi + dependency_libs=`$ECHO "X $dependency_libs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan" || + { test "$linkmode" != prog && test "$linkmode" != lib; }; then + test -n "$dlopen" && dlfiles="$dlfiles $dlopen" + test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen" + fi + + if test "$pass" = conv; then + # Only check for convenience libraries + deplibs="$lib $deplibs" + if test -z "$libdir"; then + if test -z "$old_library"; then + func_fatal_error "cannot find name of link library for \`$lib'" + fi + # It is a libtool convenience library, so add in its objects. + convenience="$convenience $ladir/$objdir/$old_library" + old_convenience="$old_convenience $ladir/$objdir/$old_library" + elif test "$linkmode" != prog && test "$linkmode" != lib; then + func_fatal_error "\`$lib' is not a convenience library" + fi + tmp_libs= + for deplib in $dependency_libs; do + deplibs="$deplib $deplibs" + if $opt_duplicate_deps ; then + case "$tmp_libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + tmp_libs="$tmp_libs $deplib" + done + continue + fi # $pass = conv + + + # Get the name of the library we link against. + linklib= + for l in $old_library $library_names; do + linklib="$l" + done + if test -z "$linklib"; then + func_fatal_error "cannot find name of link library for \`$lib'" + fi + + # This library was specified with -dlopen. 
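# [Editor's annotation -- illustrative comment, not part of the committed ltmain.sh]
# The branch that follows implements the `-dlopen' link flag: if the host has
# no usable dlopen() support, the link is static, or the .la file carries no
# dlname, the module cannot be loaded at run time, so it is "dlpreopened" --
# linked in now, together with its dependency_libs, for libltdl to find later.
# A hedged usage sketch (file names are hypothetical):
#
#   libtool --mode=link $CC -o myprog main.o -dlopen libplugin.la
#
# With dlopen support the plugin lands in $newdlfiles (loaded at run time);
# without it, it is appended to $dlprefiles instead.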
+ if test "$pass" = dlopen; then + if test -z "$libdir"; then + func_fatal_error "cannot -dlopen a convenience library: \`$lib'" + fi + if test -z "$dlname" || + test "$dlopen_support" != yes || + test "$build_libtool_libs" = no; then + # If there is no dlname, no dlopen support or we're linking + # statically, we need to preload. We also need to preload any + # dependent libraries so libltdl's deplib preloader doesn't + # bomb out in the load deplibs phase. + dlprefiles="$dlprefiles $lib $dependency_libs" + else + newdlfiles="$newdlfiles $lib" + fi + continue + fi # $pass = dlopen + + # We need an absolute path. + case $ladir in + [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;; + *) + abs_ladir=`cd "$ladir" && pwd` + if test -z "$abs_ladir"; then + func_warning "cannot determine absolute directory name of \`$ladir'" + func_warning "passing it literally to the linker, although it might fail" + abs_ladir="$ladir" + fi + ;; + esac + func_basename "$lib" + laname="$func_basename_result" + + # Find the relevant object directory and library name. + if test "X$installed" = Xyes; then + if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then + func_warning "library \`$lib' was moved." + dir="$ladir" + absdir="$abs_ladir" + libdir="$abs_ladir" + else + dir="$libdir" + absdir="$libdir" + fi + test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes + else + if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then + dir="$ladir" + absdir="$abs_ladir" + # Remove this search path later + notinst_path="$notinst_path $abs_ladir" + else + dir="$ladir/$objdir" + absdir="$abs_ladir/$objdir" + # Remove this search path later + notinst_path="$notinst_path $abs_ladir" + fi + fi # $installed = yes + func_stripname 'lib' '.la' "$laname" + name=$func_stripname_result + + # This library was specified with -dlpreopen. + if test "$pass" = dlpreopen; then + if test -z "$libdir" && test "$linkmode" = prog; then + func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'" + fi + # Prefer using a static library (so that no silly _DYNAMIC symbols + # are required to link). + if test -n "$old_library"; then + newdlprefiles="$newdlprefiles $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + dlpreconveniencelibs="$dlpreconveniencelibs $dir/$old_library" + # Otherwise, use the dlname, so that lt_dlopen finds it. + elif test -n "$dlname"; then + newdlprefiles="$newdlprefiles $dir/$dlname" + else + newdlprefiles="$newdlprefiles $dir/$linklib" + fi + fi # $pass = dlpreopen + + if test -z "$libdir"; then + # Link the convenience library + if test "$linkmode" = lib; then + deplibs="$dir/$old_library $deplibs" + elif test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$dir/$old_library $compile_deplibs" + finalize_deplibs="$dir/$old_library $finalize_deplibs" + else + deplibs="$lib $deplibs" # used for prog,scan pass + fi + continue + fi + + + if test "$linkmode" = prog && test "$pass" != link; then + newlib_search_path="$newlib_search_path $ladir" + deplibs="$lib $deplibs" + + linkalldeplibs=no + if test "$link_all_deplibs" != no || test -z "$library_names" || + test "$build_libtool_libs" = no; then + linkalldeplibs=yes + fi + + tmp_libs= + for deplib in $dependency_libs; do + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + newlib_search_path="$newlib_search_path $func_stripname_result" + ;; + esac + # Need to link against all dependency_libs? 
+ if test "$linkalldeplibs" = yes; then + deplibs="$deplib $deplibs" + else + # Need to hardcode shared library paths + # or/and link against static libraries + newdependency_libs="$deplib $newdependency_libs" + fi + if $opt_duplicate_deps ; then + case "$tmp_libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + tmp_libs="$tmp_libs $deplib" + done # for deplib + continue + fi # $linkmode = prog... + + if test "$linkmode,$pass" = "prog,link"; then + if test -n "$library_names" && + { { test "$prefer_static_libs" = no || + test "$prefer_static_libs,$installed" = "built,yes"; } || + test -z "$old_library"; }; then + # We need to hardcode the library path + if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then + # Make sure the rpath contains only unique directories. + case "$temp_rpath:" in + *"$absdir:"*) ;; + *) temp_rpath="$temp_rpath$absdir:" ;; + esac + fi + + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) compile_rpath="$compile_rpath $absdir" + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" + esac + ;; + esac + fi # $linkmode,$pass = prog,link... + + if test "$alldeplibs" = yes && + { test "$deplibs_check_method" = pass_all || + { test "$build_libtool_libs" = yes && + test -n "$library_names"; }; }; then + # We only need to search for static libraries + continue + fi + fi + + link_static=no # Whether the deplib will be linked statically + use_static_libs=$prefer_static_libs + if test "$use_static_libs" = built && test "$installed" = yes; then + use_static_libs=no + fi + if test -n "$library_names" && + { test "$use_static_libs" = no || test -z "$old_library"; }; then + case $host in + *cygwin* | *mingw* | *cegcc*) + # No point in relinking DLLs because paths are not encoded + notinst_deplibs="$notinst_deplibs $lib" + need_relink=no + ;; + *) + if test "$installed" = no; then + notinst_deplibs="$notinst_deplibs $lib" + need_relink=yes + fi + ;; + esac + # This is a shared library + + # Warn about portability, can't link against -module's on some + # systems (darwin). Don't bleat about dlopened modules though! + dlopenmodule="" + for dlpremoduletest in $dlprefiles; do + if test "X$dlpremoduletest" = "X$lib"; then + dlopenmodule="$dlpremoduletest" + break + fi + done + if test -z "$dlopenmodule" && test "$shouldnotlink" = yes && test "$pass" = link; then + $ECHO + if test "$linkmode" = prog; then + $ECHO "*** Warning: Linking the executable $output against the loadable module" + else + $ECHO "*** Warning: Linking the shared library $output against the loadable module" + fi + $ECHO "*** $linklib is not portable!" + fi + if test "$linkmode" = lib && + test "$hardcode_into_libs" = yes; then + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. 
+ case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) compile_rpath="$compile_rpath $absdir" + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" + esac + ;; + esac + fi + + if test -n "$old_archive_from_expsyms_cmds"; then + # figure out the soname + set dummy $library_names + shift + realname="$1" + shift + libname=`eval "\\$ECHO \"$libname_spec\""` + # use dlname if we got it. it's perfectly good, no? + if test -n "$dlname"; then + soname="$dlname" + elif test -n "$soname_spec"; then + # bleh windows + case $host in + *cygwin* | mingw* | *cegcc*) + func_arith $current - $age + major=$func_arith_result + versuffix="-$major" + ;; + esac + eval soname=\"$soname_spec\" + else + soname="$realname" + fi + + # Make a new name for the extract_expsyms_cmds to use + soroot="$soname" + func_basename "$soroot" + soname="$func_basename_result" + func_stripname 'lib' '.dll' "$soname" + newlib=libimp-$func_stripname_result.a + + # If the library has no export list, then create one now + if test -f "$output_objdir/$soname-def"; then : + else + func_verbose "extracting exported symbol list from \`$soname'" + func_execute_cmds "$extract_expsyms_cmds" 'exit $?' + fi + + # Create $newlib + if test -f "$output_objdir/$newlib"; then :; else + func_verbose "generating import library for \`$soname'" + func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' + fi + # make sure the library variables are pointing to the new library + dir=$output_objdir + linklib=$newlib + fi # test -n "$old_archive_from_expsyms_cmds" + + if test "$linkmode" = prog || test "$mode" != relink; then + add_shlibpath= + add_dir= + add= + lib_linked=yes + case $hardcode_action in + immediate | unsupported) + if test "$hardcode_direct" = no; then + add="$dir/$linklib" + case $host in + *-*-sco3.2v5.0.[024]*) add_dir="-L$dir" ;; + *-*-sysv4*uw2*) add_dir="-L$dir" ;; + *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ + *-*-unixware7*) add_dir="-L$dir" ;; + *-*-darwin* ) + # if the lib is a (non-dlopened) module then we can not + # link against it, someone is ignoring the earlier warnings + if /usr/bin/file -L $add 2> /dev/null | + $GREP ": [^:]* bundle" >/dev/null ; then + if test "X$dlopenmodule" != "X$lib"; then + $ECHO "*** Warning: lib $linklib is a module, not a shared library" + if test -z "$old_library" ; then + $ECHO + $ECHO "*** And there doesn't seem to be a static archive available" + $ECHO "*** The link will probably fail, sorry" + else + add="$dir/$old_library" + fi + elif test -n "$old_library"; then + add="$dir/$old_library" + fi + fi + esac + elif test "$hardcode_minus_L" = no; then + case $host in + *-*-sunos*) add_shlibpath="$dir" ;; + esac + add_dir="-L$dir" + add="-l$name" + elif test "$hardcode_shlibpath_var" = no; then + add_shlibpath="$dir" + add="-l$name" + else + lib_linked=no + fi + ;; + relink) + if test "$hardcode_direct" = yes && + test "$hardcode_direct_absolute" = no; then + add="$dir/$linklib" + elif test "$hardcode_minus_L" = yes; then + add_dir="-L$dir" + # Try looking first in the location we're being installed to. 
+ if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + add_dir="$add_dir -L$inst_prefix_dir$libdir" + ;; + esac + fi + add="-l$name" + elif test "$hardcode_shlibpath_var" = yes; then + add_shlibpath="$dir" + add="-l$name" + else + lib_linked=no + fi + ;; + *) lib_linked=no ;; + esac + + if test "$lib_linked" != yes; then + func_fatal_configuration "unsupported hardcode properties" + fi + + if test -n "$add_shlibpath"; then + case :$compile_shlibpath: in + *":$add_shlibpath:"*) ;; + *) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;; + esac + fi + if test "$linkmode" = prog; then + test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" + test -n "$add" && compile_deplibs="$add $compile_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + if test "$hardcode_direct" != yes && + test "$hardcode_minus_L" != yes && + test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; + esac + fi + fi + fi + + if test "$linkmode" = prog || test "$mode" = relink; then + add_shlibpath= + add_dir= + add= + # Finalize command for both is simple: just hardcode it. + if test "$hardcode_direct" = yes && + test "$hardcode_direct_absolute" = no; then + add="$libdir/$linklib" + elif test "$hardcode_minus_L" = yes; then + add_dir="-L$libdir" + add="-l$name" + elif test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; + esac + add="-l$name" + elif test "$hardcode_automatic" = yes; then + if test -n "$inst_prefix_dir" && + test -f "$inst_prefix_dir$libdir/$linklib" ; then + add="$inst_prefix_dir$libdir/$linklib" + else + add="$libdir/$linklib" + fi + else + # We cannot seem to hardcode it, guess we'll fake it. + add_dir="-L$libdir" + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + add_dir="$add_dir -L$inst_prefix_dir$libdir" + ;; + esac + fi + add="-l$name" + fi + + if test "$linkmode" = prog; then + test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" + test -n "$add" && finalize_deplibs="$add $finalize_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + fi + fi + elif test "$linkmode" = prog; then + # Here we assume that one of hardcode_direct or hardcode_minus_L + # is not unsupported. This is valid on all known static and + # shared platforms. + if test "$hardcode_direct" != unsupported; then + test -n "$old_library" && linklib="$old_library" + compile_deplibs="$dir/$linklib $compile_deplibs" + finalize_deplibs="$dir/$linklib $finalize_deplibs" + else + compile_deplibs="-l$name -L$dir $compile_deplibs" + finalize_deplibs="-l$name -L$dir $finalize_deplibs" + fi + elif test "$build_libtool_libs" = yes; then + # Not a shared library + if test "$deplibs_check_method" != pass_all; then + # We're trying link a shared library against a static one + # but the system doesn't support it. + + # Just print a warning and add the library to dependency_libs so + # that the program can be linked against the static library. + $ECHO + $ECHO "*** Warning: This system can not link to static lib archive $lib." + $ECHO "*** I have the capability to make that library automatically link in when" + $ECHO "*** you link to this library. 
But I can only do this if you have a" + $ECHO "*** shared version of the library, which you do not appear to have." + if test "$module" = yes; then + $ECHO "*** But as you try to build a module library, libtool will still create " + $ECHO "*** a static module, that should work as long as the dlopening application" + $ECHO "*** is linked with the -dlopen flag to resolve symbols at runtime." + if test -z "$global_symbol_pipe"; then + $ECHO + $ECHO "*** However, this would only work if libtool was able to extract symbol" + $ECHO "*** lists from a program, using \`nm' or equivalent, but libtool could" + $ECHO "*** not find such a program. So, this module is probably useless." + $ECHO "*** \`nm' from GNU binutils and a full rebuild may help." + fi + if test "$build_old_libs" = no; then + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + else + deplibs="$dir/$old_library $deplibs" + link_static=yes + fi + fi # link shared/static library? + + if test "$linkmode" = lib; then + if test -n "$dependency_libs" && + { test "$hardcode_into_libs" != yes || + test "$build_old_libs" = yes || + test "$link_static" = yes; }; then + # Extract -R from dependency_libs + temp_deplibs= + for libdir in $dependency_libs; do + case $libdir in + -R*) func_stripname '-R' '' "$libdir" + temp_xrpath=$func_stripname_result + case " $xrpath " in + *" $temp_xrpath "*) ;; + *) xrpath="$xrpath $temp_xrpath";; + esac;; + *) temp_deplibs="$temp_deplibs $libdir";; + esac + done + dependency_libs="$temp_deplibs" + fi + + newlib_search_path="$newlib_search_path $absdir" + # Link against this library + test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs" + # ... and its dependency_libs + tmp_libs= + for deplib in $dependency_libs; do + newdependency_libs="$deplib $newdependency_libs" + if $opt_duplicate_deps ; then + case "$tmp_libs " in + *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; + esac + fi + tmp_libs="$tmp_libs $deplib" + done + + if test "$link_all_deplibs" != no; then + # Add the search paths of all dependency libraries + for deplib in $dependency_libs; do + case $deplib in + -L*) path="$deplib" ;; + *.la) + func_dirname "$deplib" "" "." + dir="$func_dirname_result" + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;; + *) + absdir=`cd "$dir" && pwd` + if test -z "$absdir"; then + func_warning "cannot determine absolute directory name of \`$dir'" + absdir="$dir" + fi + ;; + esac + if $GREP "^installed=no" $deplib > /dev/null; then + case $host in + *-*-darwin*) + depdepl= + eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` + if test -n "$deplibrary_names" ; then + for tmp in $deplibrary_names ; do + depdepl=$tmp + done + if test -f "$absdir/$objdir/$depdepl" ; then + depdepl="$absdir/$objdir/$depdepl" + darwin_install_name=`${OTOOL} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + if test -z "$darwin_install_name"; then + darwin_install_name=`${OTOOL64} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + fi + compiler_flags="$compiler_flags ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}" + linker_flags="$linker_flags -dylib_file ${darwin_install_name}:${depdepl}" + path= + fi + fi + ;; + *) + path="-L$absdir/$objdir" + ;; + esac + else + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + test -z "$libdir" && \ + func_fatal_error "\`$deplib' is not a valid libtool archive" + test "$absdir" != "$libdir" && \ + func_warning "\`$deplib' seems to be moved" + + path="-L$absdir" + fi + ;; + esac + case " $deplibs " in + *" $path "*) ;; + *) deplibs="$path $deplibs" ;; + esac + done + fi # link_all_deplibs != no + fi # linkmode = lib + done # for deplib in $libs + if test "$pass" = link; then + if test "$linkmode" = "prog"; then + compile_deplibs="$new_inherited_linker_flags $compile_deplibs" + finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" + else + compiler_flags="$compiler_flags "`$ECHO "X $new_inherited_linker_flags" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` + fi + fi + dependency_libs="$newdependency_libs" + if test "$pass" = dlpreopen; then + # Link the dlpreopened libraries before other libraries + for deplib in $save_deplibs; do + deplibs="$deplib $deplibs" + done + fi + if test "$pass" != dlopen; then + if test "$pass" != conv; then + # Make sure lib_search_path contains only unique directories. + lib_search_path= + for dir in $newlib_search_path; do + case "$lib_search_path " in + *" $dir "*) ;; + *) lib_search_path="$lib_search_path $dir" ;; + esac + done + newlib_search_path= + fi + + if test "$linkmode,$pass" != "prog,link"; then + vars="deplibs" + else + vars="compile_deplibs finalize_deplibs" + fi + for var in $vars dependency_libs; do + # Add libraries to $var in reverse order + eval tmp_libs=\"\$$var\" + new_libs= + for deplib in $tmp_libs; do + # FIXME: Pedantically, this is the right thing to do, so + # that some nasty dependency loop isn't accidentally + # broken: + #new_libs="$deplib $new_libs" + # Pragmatically, this seems to cause very few problems in + # practice: + case $deplib in + -L*) new_libs="$deplib $new_libs" ;; + -R*) ;; + *) + # And here is the reason: when a library appears more + # than once as an explicit dependence of a library, or + # is implicitly linked in more than once by the + # compiler, it is considered special, and multiple + # occurrences thereof are not removed. Compare this + # with having the same library being listed as a + # dependency of multiple other libraries: in this case, + # we know (pedantically, we assume) the library does not + # need to be listed more than once, so we keep only the + # last copy. 
This is not always right, but it is rare + # enough that we require users that really mean to play + # such unportable linking tricks to link the library + # using -Wl,-lname, so that libtool does not consider it + # for duplicate removal. + case " $specialdeplibs " in + *" $deplib "*) new_libs="$deplib $new_libs" ;; + *) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$deplib $new_libs" ;; + esac + ;; + esac + ;; + esac + done + tmp_libs= + for deplib in $new_libs; do + case $deplib in + -L*) + case " $tmp_libs " in + *" $deplib "*) ;; + *) tmp_libs="$tmp_libs $deplib" ;; + esac + ;; + *) tmp_libs="$tmp_libs $deplib" ;; + esac + done + eval $var=\"$tmp_libs\" + done # for var + fi + # Last step: remove runtime libs from dependency_libs + # (they stay in deplibs) + tmp_libs= + for i in $dependency_libs ; do + case " $predeps $postdeps $compiler_lib_search_path " in + *" $i "*) + i="" + ;; + esac + if test -n "$i" ; then + tmp_libs="$tmp_libs $i" + fi + done + dependency_libs=$tmp_libs + done # for pass + if test "$linkmode" = prog; then + dlfiles="$newdlfiles" + fi + if test "$linkmode" = prog || test "$linkmode" = lib; then + dlprefiles="$newdlprefiles" + fi + + case $linkmode in + oldlib) + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + func_warning "\`-dlopen' is ignored for archives" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "\`-l' and \`-L' are ignored for archives" ;; + esac + + test -n "$rpath" && \ + func_warning "\`-rpath' is ignored for archives" + + test -n "$xrpath" && \ + func_warning "\`-R' is ignored for archives" + + test -n "$vinfo" && \ + func_warning "\`-version-info/-version-number' is ignored for archives" + + test -n "$release" && \ + func_warning "\`-release' is ignored for archives" + + test -n "$export_symbols$export_symbols_regex" && \ + func_warning "\`-export-symbols' is ignored for archives" + + # Now set the variables for building old libraries. + build_libtool_libs=no + oldlibs="$output" + objs="$objs$old_deplibs" + ;; + + lib) + # Make sure we only generate libraries of the form `libNAME.la'. + case $outputname in + lib*) + func_stripname 'lib' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + ;; + *) + test "$module" = no && \ + func_fatal_help "libtool library \`$output' must begin with \`lib'" + + if test "$need_lib_prefix" != no; then + # Add the "lib" prefix for modules if required + func_stripname '' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + else + func_stripname '' '.la' "$outputname" + libname=$func_stripname_result + fi + ;; + esac + + if test -n "$objs"; then + if test "$deplibs_check_method" != pass_all; then + func_fatal_error "cannot build libtool library \`$output' from non-libtool objects on this host:$objs" + else + $ECHO + $ECHO "*** Warning: Linking the shared library $output against the non-libtool" + $ECHO "*** objects $objs is not portable!" + libobjs="$libobjs $objs" + fi + fi + + test "$dlself" != no && \ + func_warning "\`-dlopen self' is ignored for libtool libraries" + + set dummy $rpath + shift + test "$#" -gt 1 && \ + func_warning "ignoring multiple \`-rpath's for a libtool library" + + install_libdir="$1" + + oldlibs= + if test -z "$rpath"; then + if test "$build_libtool_libs" = yes; then + # Building a libtool convenience library. 
+ # Some compilers have problems with a `.al' extension so + # convenience libraries should have the same extension an + # archive normally would. + oldlibs="$output_objdir/$libname.$libext $oldlibs" + build_libtool_libs=convenience + build_old_libs=yes + fi + + test -n "$vinfo" && \ + func_warning "\`-version-info/-version-number' is ignored for convenience libraries" + + test -n "$release" && \ + func_warning "\`-release' is ignored for convenience libraries" + else + + # Parse the version information argument. + save_ifs="$IFS"; IFS=':' + set dummy $vinfo 0 0 0 + shift + IFS="$save_ifs" + + test -n "$7" && \ + func_fatal_help "too many parameters to \`-version-info'" + + # convert absolute version numbers to libtool ages + # this retains compatibility with .la files and attempts + # to make the code below a bit more comprehensible + + case $vinfo_number in + yes) + number_major="$1" + number_minor="$2" + number_revision="$3" + # + # There are really only two kinds -- those that + # use the current revision as the major version + # and those that subtract age and use age as + # a minor version. But, then there is irix + # which has an extra 1 added just for fun + # + case $version_type in + darwin|linux|osf|windows|none) + func_arith $number_major + $number_minor + current=$func_arith_result + age="$number_minor" + revision="$number_revision" + ;; + freebsd-aout|freebsd-elf|sunos) + current="$number_major" + revision="$number_minor" + age="0" + ;; + irix|nonstopux) + func_arith $number_major + $number_minor + current=$func_arith_result + age="$number_minor" + revision="$number_minor" + lt_irix_increment=no + ;; + esac + ;; + no) + current="$1" + revision="$2" + age="$3" + ;; + esac + + # Check that each of the things are valid numbers. + case $current in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "CURRENT \`$current' must be a nonnegative integer" + func_fatal_error "\`$vinfo' is not valid version information" + ;; + esac + + case $revision in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "REVISION \`$revision' must be a nonnegative integer" + func_fatal_error "\`$vinfo' is not valid version information" + ;; + esac + + case $age in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "AGE \`$age' must be a nonnegative integer" + func_fatal_error "\`$vinfo' is not valid version information" + ;; + esac + + if test "$age" -gt "$current"; then + func_error "AGE \`$age' is greater than the current interface number \`$current'" + func_fatal_error "\`$vinfo' is not valid version information" + fi + + # Calculate the version variables. + major= + versuffix= + verstring= + case $version_type in + none) ;; + + darwin) + # Like Linux, but with the current version available in + # verstring for coding it into the library header + func_arith $current - $age + major=.$func_arith_result + versuffix="$major.$age.$revision" + # Darwin ld doesn't like 0 for these options... 
+ func_arith $current + 1 + minor_current=$func_arith_result + xlcverstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision" + verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + ;; + + freebsd-aout) + major=".$current" + versuffix=".$current.$revision"; + ;; + + freebsd-elf) + major=".$current" + versuffix=".$current" + ;; + + irix | nonstopux) + if test "X$lt_irix_increment" = "Xno"; then + func_arith $current - $age + else + func_arith $current - $age + 1 + fi + major=$func_arith_result + + case $version_type in + nonstopux) verstring_prefix=nonstopux ;; + *) verstring_prefix=sgi ;; + esac + verstring="$verstring_prefix$major.$revision" + + # Add in all the interfaces that we are compatible with. + loop=$revision + while test "$loop" -ne 0; do + func_arith $revision - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring="$verstring_prefix$major.$iface:$verstring" + done + + # Before this point, $major must not contain `.'. + major=.$major + versuffix="$major.$revision" + ;; + + linux) + func_arith $current - $age + major=.$func_arith_result + versuffix="$major.$age.$revision" + ;; + + osf) + func_arith $current - $age + major=.$func_arith_result + versuffix=".$current.$age.$revision" + verstring="$current.$age.$revision" + + # Add in all the interfaces that we are compatible with. + loop=$age + while test "$loop" -ne 0; do + func_arith $current - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring="$verstring:${iface}.0" + done + + # Make executables depend on our current version. + verstring="$verstring:${current}.0" + ;; + + qnx) + major=".$current" + versuffix=".$current" + ;; + + sunos) + major=".$current" + versuffix=".$current.$revision" + ;; + + windows) + # Use '-' rather than '.', since we only want one + # extension on DOS 8.3 filesystems. + func_arith $current - $age + major=$func_arith_result + versuffix="-$major" + ;; + + *) + func_fatal_configuration "unknown library version type \`$version_type'" + ;; + esac + + # Clear the version info if we defaulted, and they specified a release. + if test -z "$vinfo" && test -n "$release"; then + major= + case $version_type in + darwin) + # we can't check for "0.0" in archive_cmds due to quoting + # problems, so we reset it completely + verstring= + ;; + *) + verstring="0.0" + ;; + esac + if test "$need_version" = no; then + versuffix= + else + versuffix=".0.0" + fi + fi + + # Remove version info from name if versioning should be avoided + if test "$avoid_version" = yes && test "$need_version" = no; then + major= + versuffix= + verstring="" + fi + + # Check to see if the archive will have undefined symbols. + if test "$allow_undefined" = yes; then + if test "$allow_undefined_flag" = unsupported; then + func_warning "undefined symbols not allowed in $host shared libraries" + build_libtool_libs=no + build_old_libs=yes + fi + else + # Don't allow undefined symbols. + allow_undefined_flag="$no_undefined_flag" + fi + + fi + + func_generate_dlsyms "$libname" "$libname" "yes" + libobjs="$libobjs $symfileobj" + test "X$libobjs" = "X " && libobjs= + + if test "$mode" != relink; then + # Remove our outputs, but don't remove object files since they + # may have been created when compiling PIC objects. 
+ removelist= + tempremovelist=`$ECHO "$output_objdir/*"` + for p in $tempremovelist; do + case $p in + *.$objext | *.gcno) + ;; + $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*) + if test "X$precious_files_regex" != "X"; then + if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 + then + continue + fi + fi + removelist="$removelist $p" + ;; + *) ;; + esac + done + test -n "$removelist" && \ + func_show_eval "${RM}r \$removelist" + fi + + # Now set the variables for building old libraries. + if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then + oldlibs="$oldlibs $output_objdir/$libname.$libext" + + # Transform .lo files to .o files. + oldobjs="$objs "`$ECHO "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP` + fi + + # Eliminate all temporary directories. + #for path in $notinst_path; do + # lib_search_path=`$ECHO "X$lib_search_path " | $Xsed -e "s% $path % %g"` + # deplibs=`$ECHO "X$deplibs " | $Xsed -e "s% -L$path % %g"` + # dependency_libs=`$ECHO "X$dependency_libs " | $Xsed -e "s% -L$path % %g"` + #done + + if test -n "$xrpath"; then + # If the user specified any rpath flags, then add them. + temp_xrpath= + for libdir in $xrpath; do + temp_xrpath="$temp_xrpath -R$libdir" + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" ;; + esac + done + if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then + dependency_libs="$temp_xrpath $dependency_libs" + fi + fi + + # Make sure dlfiles contains only unique files that won't be dlpreopened + old_dlfiles="$dlfiles" + dlfiles= + for lib in $old_dlfiles; do + case " $dlprefiles $dlfiles " in + *" $lib "*) ;; + *) dlfiles="$dlfiles $lib" ;; + esac + done + + # Make sure dlprefiles contains only unique files + old_dlprefiles="$dlprefiles" + dlprefiles= + for lib in $old_dlprefiles; do + case "$dlprefiles " in + *" $lib "*) ;; + *) dlprefiles="$dlprefiles $lib" ;; + esac + done + + if test "$build_libtool_libs" = yes; then + if test -n "$rpath"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc*) + # these systems don't actually have a c library (as such)! + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C library is in the System framework + deplibs="$deplibs System.ltframework" + ;; + *-*-netbsd*) + # Don't link with libc until the a.out ld.so is fixed. + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + ;; + *) + # Add libc to deplibs on all other systems if necessary. + if test "$build_libtool_need_lc" = "yes"; then + deplibs="$deplibs -lc" + fi + ;; + esac + fi + + # Transform deplibs into only deplibs that can be linked in shared. + name_save=$name + libname_save=$libname + release_save=$release + versuffix_save=$versuffix + major_save=$major + # I'm not sure if I'm treating the release correctly. I think + # release should show up in the -l (ie -lgmp5) so we don't want to + # add it in twice. Is that correct? + release="" + versuffix="" + major="" + newdeplibs= + droppeddeps=no + case $deplibs_check_method in + pass_all) + # Don't check for shared/static. Everything works. + # This might be a little naive. 
We might want to check + # whether the library exists or not. But this is on + # osf3 & osf4 and I'm not really sure... Just + # implementing what was already the behavior. + newdeplibs=$deplibs + ;; + test_compile) + # This code stresses the "libraries are programs" paradigm to its + # limits. Maybe even breaks it. We compile a program, linking it + # against the deplibs as a proxy for the library. Then we can check + # whether they linked in statically or dynamically with ldd. + $opt_dry_run || $RM conftest.c + cat > conftest.c </dev/null` + for potent_lib in $potential_libs; do + # Follow soft links. + if ls -lLd "$potent_lib" 2>/dev/null | + $GREP " -> " >/dev/null; then + continue + fi + # The statement above tries to avoid entering an + # endless loop below, in case of cyclic links. + # We might still enter an endless loop, since a link + # loop can be closed while we follow links, + # but so what? + potlib="$potent_lib" + while test -h "$potlib" 2>/dev/null; do + potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'` + case $potliblink in + [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";; + *) potlib=`$ECHO "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";; + esac + done + if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | + $SED -e 10q | + $EGREP "$file_magic_regex" > /dev/null; then + newdeplibs="$newdeplibs $a_deplib" + a_deplib="" + break 2 + fi + done + done + fi + if test -n "$a_deplib" ; then + droppeddeps=yes + $ECHO + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + $ECHO "*** I have the capability to make that library automatically link in when" + $ECHO "*** you link to this library. But I can only do this if you have a" + $ECHO "*** shared version of the library, which you do not appear to have" + $ECHO "*** because I did check the linker path looking for a file starting" + if test -z "$potlib" ; then + $ECHO "*** with $libname but no candidates were found. (...for file magic test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a file magic. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + newdeplibs="$newdeplibs $a_deplib" + ;; + esac + done # Gone through all deplibs. + ;; + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + for a_deplib in $deplibs; do + case $a_deplib in + -l*) + func_stripname -l '' "$a_deplib" + name=$func_stripname_result + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $a_deplib "*) + newdeplibs="$newdeplibs $a_deplib" + a_deplib="" + ;; + esac + fi + if test -n "$a_deplib" ; then + libname=`eval "\\$ECHO \"$libname_spec\""` + for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do + potential_libs=`ls $i/$libname[.-]* 2>/dev/null` + for potent_lib in $potential_libs; do + potlib="$potent_lib" # see symlink-check above in file_magic test + if eval "\$ECHO \"X$potent_lib\"" 2>/dev/null | $Xsed -e 10q | \ + $EGREP "$match_pattern_regex" > /dev/null; then + newdeplibs="$newdeplibs $a_deplib" + a_deplib="" + break 2 + fi + done + done + fi + if test -n "$a_deplib" ; then + droppeddeps=yes + $ECHO + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + $ECHO "*** I have the capability to make that library automatically link in when" + $ECHO "*** you link to this library. 
But I can only do this if you have a" + $ECHO "*** shared version of the library, which you do not appear to have" + $ECHO "*** because I did check the linker path looking for a file starting" + if test -z "$potlib" ; then + $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a regex pattern. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + newdeplibs="$newdeplibs $a_deplib" + ;; + esac + done # Gone through all deplibs. + ;; + none | unknown | *) + newdeplibs="" + tmp_deplibs=`$ECHO "X $deplibs" | $Xsed \ + -e 's/ -lc$//' -e 's/ -[LR][^ ]*//g'` + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + for i in $predeps $postdeps ; do + # can't use Xsed below, because $i might contain '/' + tmp_deplibs=`$ECHO "X $tmp_deplibs" | $Xsed -e "s,$i,,"` + done + fi + if $ECHO "X $tmp_deplibs" | $Xsed -e 's/[ ]//g' | + $GREP . >/dev/null; then + $ECHO + if test "X$deplibs_check_method" = "Xnone"; then + $ECHO "*** Warning: inter-library dependencies are not supported in this platform." + else + $ECHO "*** Warning: inter-library dependencies are not known to be supported." + fi + $ECHO "*** All declared inter-library dependencies are being dropped." + droppeddeps=yes + fi + ;; + esac + versuffix=$versuffix_save + major=$major_save + release=$release_save + libname=$libname_save + name=$name_save + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library with the System framework + newdeplibs=`$ECHO "X $newdeplibs" | $Xsed -e 's/ -lc / System.ltframework /'` + ;; + esac + + if test "$droppeddeps" = yes; then + if test "$module" = yes; then + $ECHO + $ECHO "*** Warning: libtool could not satisfy all declared inter-library" + $ECHO "*** dependencies of module $libname. Therefore, libtool will create" + $ECHO "*** a static module, that should work as long as the dlopening" + $ECHO "*** application is linked with the -dlopen flag." + if test -z "$global_symbol_pipe"; then + $ECHO + $ECHO "*** However, this would only work if libtool was able to extract symbol" + $ECHO "*** lists from a program, using \`nm' or equivalent, but libtool could" + $ECHO "*** not find such a program. So, this module is probably useless." + $ECHO "*** \`nm' from GNU binutils and a full rebuild may help." + fi + if test "$build_old_libs" = no; then + oldlibs="$output_objdir/$libname.$libext" + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + else + $ECHO "*** The inter-library dependencies that have been dropped here will be" + $ECHO "*** automatically added whenever a program is linked with this library" + $ECHO "*** or is declared to -dlopen it." + + if test "$allow_undefined" = no; then + $ECHO + $ECHO "*** Since this library must not contain undefined symbols," + $ECHO "*** because either the platform does not support them or" + $ECHO "*** it was explicitly requested with -no-undefined," + $ECHO "*** libtool will only create a static version of it." + if test "$build_old_libs" = no; then + oldlibs="$output_objdir/$libname.$libext" + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + fi + fi + # Done checking deplibs! 
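# [Editor's annotation -- illustrative comment, not part of the committed ltmain.sh]
# "Done checking deplibs" closes the $deplibs_check_method dispatch above:
# pass_all keeps everything, file_magic and match_pattern probe each candidate
# file, test_compile tries a trial link, and none/unknown drops all declared
# inter-library dependencies (hence the "*** Warning" blocks).  The file_magic
# probe is essentially (sketch; the path is hypothetical and
# $file_magic_regex is host-specific):
#
#   file /usr/lib/libfoo.so.1 | sed 10q | grep -E "$file_magic_regex"
#
# A -l dependency for which no candidate passes is left out of $newdeplibs and
# reported as a dropped dependency.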
+ deplibs=$newdeplibs + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + case $host in + *-*-darwin*) + newdeplibs=`$ECHO "X $newdeplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` + new_inherited_linker_flags=`$ECHO "X $new_inherited_linker_flags" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` + deplibs=`$ECHO "X $deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $deplibs " in + *" -L$path/$objdir "*) + new_libs="$new_libs -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$new_libs $deplib" ;; + esac + ;; + *) new_libs="$new_libs $deplib" ;; + esac + done + deplibs="$new_libs" + + # All the library-specific variables (install_libdir is set above). + library_names= + old_library= + dlname= + + # Test again, we may have decided not to build it any more + if test "$build_libtool_libs" = yes; then + if test "$hardcode_into_libs" = yes; then + # Hardcode the library paths + hardcode_libdirs= + dep_rpath= + rpath="$finalize_rpath" + test "$mode" != relink && rpath="$compile_rpath$rpath" + for libdir in $rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + dep_rpath="$dep_rpath $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) perm_rpath="$perm_rpath $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + if test -n "$hardcode_libdir_flag_spec_ld"; then + eval dep_rpath=\"$hardcode_libdir_flag_spec_ld\" + else + eval dep_rpath=\"$hardcode_libdir_flag_spec\" + fi + fi + if test -n "$runpath_var" && test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + rpath="$rpath$dir:" + done + eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" + fi + test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" + fi + + shlibpath="$finalize_shlibpath" + test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath" + if test -n "$shlibpath"; then + eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" + fi + + # Get the real and link names of the library. 
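# [Editor's annotation -- illustrative comment, not part of the committed ltmain.sh]
# $library_names_spec turns the version numbers computed above into file
# names.  A worked example for a hypothetical libfoo built with
# `-version-info 3:2:1' on a linux-style host (current=3, revision=2, age=1,
# so major=".$((current - age))" = .2 and versuffix=".2.1.2"):
#
#   library_names = "libfoo.so.2.1.2 libfoo.so.2 libfoo.so"
#   realname      =  libfoo.so.2.1.2    (the file actually created)
#   soname        =  libfoo.so.2        (recorded in DT_SONAME)
#   linknames     =  libfoo.so.2 libfoo.so   (made as symlinks further below)
#
# Other $version_type values change only this arithmetic, e.g. the windows
# case uses "-$major" so that DOS 8.3 names keep a single extension.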
+ eval shared_ext=\"$shrext_cmds\" + eval library_names=\"$library_names_spec\" + set dummy $library_names + shift + realname="$1" + shift + + if test -n "$soname_spec"; then + eval soname=\"$soname_spec\" + else + soname="$realname" + fi + if test -z "$dlname"; then + dlname=$soname + fi + + lib="$output_objdir/$realname" + linknames= + for link + do + linknames="$linknames $link" + done + + # Use standard objects if they are pic + test -z "$pic_flag" && libobjs=`$ECHO "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + test "X$libobjs" = "X " && libobjs= + + delfiles= + if test -n "$export_symbols" && test -n "$include_expsyms"; then + $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" + export_symbols="$output_objdir/$libname.uexp" + delfiles="$delfiles $export_symbols" + fi + + orig_export_symbols= + case $host_os in + cygwin* | mingw* | cegcc*) + if test -n "$export_symbols" && test -z "$export_symbols_regex"; then + # exporting using user supplied symfile + if test "x`$SED 1q $export_symbols`" != xEXPORTS; then + # and it's NOT already a .def file. Must figure out + # which of the given symbols are data symbols and tag + # them as such. So, trigger use of export_symbols_cmds. + # export_symbols gets reassigned inside the "prepare + # the list of exported symbols" if statement, so the + # include_expsyms logic still works. + orig_export_symbols="$export_symbols" + export_symbols= + always_export_symbols=yes + fi + fi + ;; + esac + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then + func_verbose "generating symbol list for \`$libname.la'" + export_symbols="$output_objdir/$libname.exp" + $opt_dry_run || $RM $export_symbols + cmds=$export_symbols_cmds + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + func_len " $cmd" + len=$func_len_result + if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + func_show_eval "$cmd" 'exit $?' + skipped_export=false + else + # The command line is too long to execute in one step. + func_verbose "using reloadable object file for export list..." + skipped_export=: + # Break out early, otherwise skipped_export may be + # set to false by a later but shorter cmd. + break + fi + done + IFS="$save_ifs" + if test -n "$export_symbols_regex" && test "X$skipped_export" != "X:"; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + fi + + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols="$export_symbols" + test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" + $opt_dry_run || eval '$ECHO "X$include_expsyms" | $Xsed | $SP2NL >> "$tmp_export_symbols"' + fi + + if test "X$skipped_export" != "X:" && test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. 
+ $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + delfiles="$delfiles $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + + tmp_deplibs= + for test_deplib in $deplibs; do + case " $convenience " in + *" $test_deplib "*) ;; + *) + tmp_deplibs="$tmp_deplibs $test_deplib" + ;; + esac + done + deplibs="$tmp_deplibs" + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec" && + test "$compiler_needs_object" = yes && + test -z "$libobjs"; then + # extract the archives, so we have objects to list. + # TODO: could optimize this to just extract one archive. + whole_archive_flag_spec= + fi + if test -n "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + else + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" + + func_extract_archives $gentop $convenience + libobjs="$libobjs $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + fi + + if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then + eval flag=\"$thread_safe_flag_spec\" + linker_flags="$linker_flags $flag" + fi + + # Make a backup of the uninstalled library when relinking + if test "$mode" = relink; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? + fi + + # Do each of the archive commands. + if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + eval test_cmds=\"$module_expsym_cmds\" + cmds=$module_expsym_cmds + else + eval test_cmds=\"$module_cmds\" + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + eval test_cmds=\"$archive_expsym_cmds\" + cmds=$archive_expsym_cmds + else + eval test_cmds=\"$archive_cmds\" + cmds=$archive_cmds + fi + fi + + if test "X$skipped_export" != "X:" && + func_len " $test_cmds" && + len=$func_len_result && + test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + : + else + # The command line is too long to link in one step, link piecewise + # or, if using GNU ld and skipped_export is not :, use a linker + # script. + + # Save the value of $output and $libobjs because we want to + # use them later. If we have whole_archive_flag_spec, we + # want to use save_libobjs as it was before + # whole_archive_flag_spec was expanded, because we can't + # assume the linker understands whole_archive_flag_spec. + # This may have to be revisited, in case too many + # convenience libraries get linked in and end up exceeding + # the spec. + if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + fi + save_output=$output + output_la=`$ECHO "X$output" | $Xsed -e "$basename"` + + # Clear the reloadable object creation command queue and + # initialize k to one. 
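# [Editor's annotation -- illustrative comment, not part of the committed ltmain.sh]
# The fallback below runs only when the archive command would exceed
# $max_cmd_len.  With GNU ld (or a linker accepting $file_list_spec) the
# object list is written to a script or list file that is passed in place of
# the objects, roughly (hypothetical contents):
#
#   INPUT (
#   foo1.o
#   foo2.o
#   )
#
# Otherwise libtool builds intermediate reloadable objects ($output_la-1.o,
# $output_la-2.o, ...), each absorbing the previous one, and links the final
# library from the last of them.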
+ test_cmds= + concat_cmds= + objlist= + last_robj= + k=1 + + if test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "$with_gnu_ld" = yes; then + output=${output_objdir}/${output_la}.lnkscript + func_verbose "creating GNU ld script: $output" + $ECHO 'INPUT (' > $output + for obj in $save_libobjs + do + $ECHO "$obj" >> $output + done + $ECHO ')' >> $output + delfiles="$delfiles $output" + elif test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X; then + output=${output_objdir}/${output_la}.lnk + func_verbose "creating linker input file list: $output" + : > $output + set x $save_libobjs + shift + firstobj= + if test "$compiler_needs_object" = yes; then + firstobj="$1 " + shift + fi + for obj + do + $ECHO "$obj" >> $output + done + delfiles="$delfiles $output" + output=$firstobj\"$file_list_spec$output\" + else + if test -n "$save_libobjs"; then + func_verbose "creating reloadable object files..." + output=$output_objdir/$output_la-${k}.$objext + eval test_cmds=\"$reload_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + + # Loop over the list of objects to be linked. + for obj in $save_libobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + if test "X$objlist" = X || + test "$len" -lt "$max_cmd_len"; then + func_append objlist " $obj" + else + # The command $test_cmds is almost too long, add a + # command to the queue. + if test "$k" -eq 1 ; then + # The first file doesn't have a previous command to add. + eval concat_cmds=\"$reload_cmds $objlist $last_robj\" + else + # All subsequent reloadable object files will link in + # the last one created. + eval concat_cmds=\"\$concat_cmds~$reload_cmds $objlist $last_robj~\$RM $last_robj\" + fi + last_robj=$output_objdir/$output_la-${k}.$objext + func_arith $k + 1 + k=$func_arith_result + output=$output_objdir/$output_la-${k}.$objext + objlist=$obj + func_len " $last_robj" + func_arith $len0 + $func_len_result + len=$func_arith_result + fi + done + # Handle the remaining objects by creating one last + # reloadable object file. All subsequent reloadable object + # files will link in the last one created. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\${concat_cmds}$reload_cmds $objlist $last_robj\" + if test -n "$last_robj"; then + eval concat_cmds=\"\${concat_cmds}~\$RM $last_robj\" + fi + delfiles="$delfiles $output" + + else + output= + fi + + if ${skipped_export-false}; then + func_verbose "generating symbol list for \`$libname.la'" + export_symbols="$output_objdir/$libname.exp" + $opt_dry_run || $RM $export_symbols + libobjs=$output + # Append the command to create the export file. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + fi + + test -n "$save_libobjs" && + func_verbose "creating a temporary reloadable object file: $output" + + # Loop through the commands generated above and execute them. + save_ifs="$IFS"; IFS='~' + for cmd in $concat_cmds; do + IFS="$save_ifs" + $opt_silent || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? 
+ + # Restore the uninstalled library and exit + if test "$mode" = relink; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS="$save_ifs" + + if test -n "$export_symbols_regex" && ${skipped_export-false}; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + + if ${skipped_export-false}; then + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols="$export_symbols" + test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" + $opt_dry_run || eval '$ECHO "X$include_expsyms" | $Xsed | $SP2NL >> "$tmp_export_symbols"' + fi + + if test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + delfiles="$delfiles $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + fi + + libobjs=$output + # Restore the value of output. + output=$save_output + + if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + fi + # Expand the library linking commands again to reset the + # value of $libobjs for piecewise linking. + + # Do each of the archive commands. + if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + cmds=$module_expsym_cmds + else + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + cmds=$archive_expsym_cmds + else + cmds=$archive_cmds + fi + fi + fi + + if test -n "$delfiles"; then + # Append the command to remove temporary files to $cmds. + eval cmds=\"\$cmds~\$RM $delfiles\" + fi + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" + + func_extract_archives $gentop $dlprefiles + libobjs="$libobjs $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $opt_silent || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test "$mode" = relink; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS="$save_ifs" + + # Restore the uninstalled library and exit + if test "$mode" = relink; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? 
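When the archive commands would exceed $max_cmd_len, the loop above links piecewise: it keeps appending object names to $objlist until the projected command length would cross the limit, emits one reloadable object for that batch, and starts the next batch with the previous intermediate object chained in. The same accumulate-and-flush idea, reduced to a minimal C sketch (the object names and the 12-character limit are made up for illustration, and printing stands in for running a link command):

    #include <stdio.h>
    #include <string.h>

    /* Flush one batch: in the script above this corresponds to one
     * $reload_cmds invocation producing $output_objdir/$output_la-<k>.<objext>. */
    static void flush_batch(const char *batch, int k)
    {
        printf("batch %d:%s\n", k, batch);
    }

    int main(void)
    {
        const char *objs[] = { "a.o", "b.o", "c.o", "d.o", "e.o" };
        const size_t max_len = 12;        /* stand-in for $max_cmd_len */
        char batch[256] = "";
        int k = 1;
        size_t i;

        for (i = 0; i < sizeof(objs) / sizeof(objs[0]); i++) {
            /* Would adding this object make the command too long? */
            if (batch[0] != '\0' &&
                strlen(batch) + strlen(objs[i]) + 1 > max_len) {
                flush_batch(batch, k++);
                batch[0] = '\0';
            }
            strcat(batch, " ");
            strcat(batch, objs[i]);
        }
        if (batch[0] != '\0')
            flush_batch(batch, k);
        return 0;
    }

Each flushed batch corresponds to one intermediate reloadable object in the script, and the final link then only has to name those intermediates.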
+ + if test -n "$convenience"; then + if test -z "$whole_archive_flag_spec"; then + func_show_eval '${RM}r "$gentop"' + fi + fi + + exit $EXIT_SUCCESS + fi + + # Create links to the real library. + for linkname in $linknames; do + if test "$realname" != "$linkname"; then + func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' + fi + done + + # If -module or -export-dynamic was specified, set the dlname. + if test "$module" = yes || test "$export_dynamic" = yes; then + # On all known operating systems, these are identical. + dlname="$soname" + fi + fi + ;; + + obj) + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + func_warning "\`-dlopen' is ignored for objects" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "\`-l' and \`-L' are ignored for objects" ;; + esac + + test -n "$rpath" && \ + func_warning "\`-rpath' is ignored for objects" + + test -n "$xrpath" && \ + func_warning "\`-R' is ignored for objects" + + test -n "$vinfo" && \ + func_warning "\`-version-info' is ignored for objects" + + test -n "$release" && \ + func_warning "\`-release' is ignored for objects" + + case $output in + *.lo) + test -n "$objs$old_deplibs" && \ + func_fatal_error "cannot build library object \`$output' from non-libtool objects" + + libobj=$output + func_lo2o "$libobj" + obj=$func_lo2o_result + ;; + *) + libobj= + obj="$output" + ;; + esac + + # Delete the old objects. + $opt_dry_run || $RM $obj $libobj + + # Objects from convenience libraries. This assumes + # single-version convenience libraries. Whenever we create + # different ones for PIC/non-PIC, this we'll have to duplicate + # the extraction. + reload_conv_objs= + gentop= + # reload_cmds runs $LD directly, so let us get rid of + # -Wl from whole_archive_flag_spec and hope we can get by with + # turning comma into space.. + wl= + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec"; then + eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" + reload_conv_objs=$reload_objs\ `$ECHO "X$tmp_whole_archive_flags" | $Xsed -e 's|,| |g'` + else + gentop="$output_objdir/${obj}x" + generated="$generated $gentop" + + func_extract_archives $gentop $convenience + reload_conv_objs="$reload_objs $func_extract_archives_result" + fi + fi + + # Create the old-style object. + reload_objs="$objs$old_deplibs "`$ECHO "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test + + output="$obj" + func_execute_cmds "$reload_cmds" 'exit $?' + + # Exit if we aren't doing a library object file. + if test -z "$libobj"; then + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + fi + + if test "$build_libtool_libs" != yes; then + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + # Create an invalid libtool object if no PIC, so that we don't + # accidentally link it into a program. + # $show "echo timestamp > $libobj" + # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? + exit $EXIT_SUCCESS + fi + + if test -n "$pic_flag" || test "$pic_mode" != default; then + # Only do commands if we really have different PIC objects. + reload_objs="$libobjs $reload_conv_objs" + output="$libobj" + func_execute_cmds "$reload_cmds" 'exit $?' 
+ fi + + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + ;; + + prog) + case $host in + *cygwin*) func_stripname '' '.exe' "$output" + output=$func_stripname_result.exe;; + esac + test -n "$vinfo" && \ + func_warning "\`-version-info' is ignored for programs" + + test -n "$release" && \ + func_warning "\`-release' is ignored for programs" + + test "$preload" = yes \ + && test "$dlopen_support" = unknown \ + && test "$dlopen_self" = unknown \ + && test "$dlopen_self_static" = unknown && \ + func_warning "\`LT_INIT([dlopen])' not used. Assuming no dlopen support." + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library is the System framework + compile_deplibs=`$ECHO "X $compile_deplibs" | $Xsed -e 's/ -lc / System.ltframework /'` + finalize_deplibs=`$ECHO "X $finalize_deplibs" | $Xsed -e 's/ -lc / System.ltframework /'` + ;; + esac + + case $host in + *-*-darwin*) + # Don't allow lazy linking, it breaks C++ global constructors + # But is supposedly fixed on 10.4 or later (yay!). + if test "$tagname" = CXX ; then + case ${MACOSX_DEPLOYMENT_TARGET-10.0} in + 10.[0123]) + compile_command="$compile_command ${wl}-bind_at_load" + finalize_command="$finalize_command ${wl}-bind_at_load" + ;; + esac + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + compile_deplibs=`$ECHO "X $compile_deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` + finalize_deplibs=`$ECHO "X $finalize_deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $compile_deplibs " in + *" -L$path/$objdir "*) + new_libs="$new_libs -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $compile_deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$new_libs $deplib" ;; + esac + ;; + *) new_libs="$new_libs $deplib" ;; + esac + done + compile_deplibs="$new_libs" + + + compile_command="$compile_command $compile_deplibs" + finalize_command="$finalize_command $finalize_deplibs" + + if test -n "$rpath$xrpath"; then + # If the user specified any rpath flags, then add them. + for libdir in $rpath $xrpath; do + # This is the magic to use -rpath. + case "$finalize_rpath " in + *" $libdir "*) ;; + *) finalize_rpath="$finalize_rpath $libdir" ;; + esac + done + fi + + # Now hardcode the library paths + rpath= + hardcode_libdirs= + for libdir in $compile_rpath $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. 
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + rpath="$rpath $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) perm_rpath="$perm_rpath $libdir" ;; + esac + fi + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`${ECHO} "$libdir" | ${SED} -e 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$libdir:"*) ;; + ::) dllsearchpath=$libdir;; + *) dllsearchpath="$dllsearchpath:$libdir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) dllsearchpath="$dllsearchpath:$testbindir";; + esac + ;; + esac + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + compile_rpath="$rpath" + + rpath= + hardcode_libdirs= + for libdir in $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + rpath="$rpath $flag" + fi + elif test -n "$runpath_var"; then + case "$finalize_perm_rpath " in + *" $libdir "*) ;; + *) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + finalize_rpath="$rpath" + + if test -n "$libobjs" && test "$build_old_libs" = yes; then + # Transform all the library objects into standard objects. + compile_command=`$ECHO "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + finalize_command=`$ECHO "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP` + fi + + func_generate_dlsyms "$outputname" "@PROGRAM@" "no" + + # template prelinking step + if test -n "$prelink_cmds"; then + func_execute_cmds "$prelink_cmds" 'exit $?' + fi + + wrappers_required=yes + case $host in + *cygwin* | *mingw* ) + if test "$build_libtool_libs" != yes; then + wrappers_required=no + fi + ;; + *cegcc) + # Disable wrappers for cegcc, we are cross compiling anyway. + wrappers_required=no + ;; + *) + if test "$need_relink" = no || test "$build_libtool_libs" != yes; then + wrappers_required=no + fi + ;; + esac + if test "$wrappers_required" = no; then + # Replace the output file specification. + compile_command=`$ECHO "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'` + link_command="$compile_command$compile_rpath" + + # We have no uninstalled library dependencies, so finalize right now. + exit_status=0 + func_show_eval "$link_command" 'exit_status=$?' + + # Delete the generated files. 
+ if test -f "$output_objdir/${outputname}S.${objext}"; then + func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"' + fi + + exit $exit_status + fi + + if test -n "$compile_shlibpath$finalize_shlibpath"; then + compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" + fi + if test -n "$finalize_shlibpath"; then + finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" + fi + + compile_var= + finalize_var= + if test -n "$runpath_var"; then + if test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + rpath="$rpath$dir:" + done + compile_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + if test -n "$finalize_perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $finalize_perm_rpath; do + rpath="$rpath$dir:" + done + finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + fi + + if test "$no_install" = yes; then + # We don't need to create a wrapper script. + link_command="$compile_var$compile_command$compile_rpath" + # Replace the output file specification. + link_command=`$ECHO "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'` + # Delete the old output file. + $opt_dry_run || $RM $output + # Link the executable and exit + func_show_eval "$link_command" 'exit $?' + exit $EXIT_SUCCESS + fi + + if test "$hardcode_action" = relink; then + # Fast installation is not supported + link_command="$compile_var$compile_command$compile_rpath" + relink_command="$finalize_var$finalize_command$finalize_rpath" + + func_warning "this platform does not like uninstalled shared libraries" + func_warning "\`$output' will be relinked during installation" + else + if test "$fast_install" != no; then + link_command="$finalize_var$compile_command$finalize_rpath" + if test "$fast_install" = yes; then + relink_command=`$ECHO "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'` + else + # fast_install is set to needless + relink_command= + fi + else + link_command="$compile_var$compile_command$compile_rpath" + relink_command="$finalize_var$finalize_command$finalize_rpath" + fi + fi + + # Replace the output file specification. + link_command=`$ECHO "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` + + # Delete the old output files. + $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname + + func_show_eval "$link_command" 'exit $?' + + # Now create the wrapper script. + func_verbose "creating $output" + + # Quote the relink command for shipping. + if test -n "$relink_command"; then + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + relink_command="(cd `pwd`; $relink_command)" + relink_command=`$ECHO "X$relink_command" | $Xsed -e "$sed_quote_subst"` + fi + + # Quote $ECHO for shipping. 
+ if test "X$ECHO" = "X$SHELL $progpath --fallback-echo"; then + case $progpath in + [\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $progpath --fallback-echo";; + *) qecho="$SHELL `pwd`/$progpath --fallback-echo";; + esac + qecho=`$ECHO "X$qecho" | $Xsed -e "$sed_quote_subst"` + else + qecho=`$ECHO "X$ECHO" | $Xsed -e "$sed_quote_subst"` + fi + + # Only actually do things if not in dry run mode. + $opt_dry_run || { + # win32 will think the script is a binary if it has + # a .exe suffix, so we strip it off here. + case $output in + *.exe) func_stripname '' '.exe' "$output" + output=$func_stripname_result ;; + esac + # test for cygwin because mv fails w/o .exe extensions + case $host in + *cygwin*) + exeext=.exe + func_stripname '' '.exe' "$outputname" + outputname=$func_stripname_result ;; + *) exeext= ;; + esac + case $host in + *cygwin* | *mingw* ) + func_dirname_and_basename "$output" "" "." + output_name=$func_basename_result + output_path=$func_dirname_result + cwrappersource="$output_path/$objdir/lt-$output_name.c" + cwrapper="$output_path/$output_name.exe" + $RM $cwrappersource $cwrapper + trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 + + func_emit_cwrapperexe_src > $cwrappersource + + # The wrapper executable is built using the $host compiler, + # because it contains $host paths and files. If cross- + # compiling, it, like the target executable, must be + # executed on the $host or under an emulation environment. + $opt_dry_run || { + $LTCC $LTCFLAGS -o $cwrapper $cwrappersource + $STRIP $cwrapper + } + + # Now, create the wrapper script for func_source use: + func_ltwrapper_scriptname $cwrapper + $RM $func_ltwrapper_scriptname_result + trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 + $opt_dry_run || { + # note: this script will not be executed, so do not chmod. + if test "x$build" = "x$host" ; then + $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result + else + func_emit_wrapper no > $func_ltwrapper_scriptname_result + fi + } + ;; + * ) + $RM $output + trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 + + func_emit_wrapper no > $output + chmod +x $output + ;; + esac + } + exit $EXIT_SUCCESS + ;; + esac + + # See if we need to build an old-fashioned archive. + for oldlib in $oldlibs; do + + if test "$build_libtool_libs" = convenience; then + oldobjs="$libobjs_save $symfileobj" + addlibs="$convenience" + build_libtool_libs=no + else + if test "$build_libtool_libs" = module; then + oldobjs="$libobjs_save" + build_libtool_libs=no + else + oldobjs="$old_deplibs $non_pic_objects" + if test "$preload" = yes && test -f "$symfileobj"; then + oldobjs="$oldobjs $symfileobj" + fi + fi + addlibs="$old_convenience" + fi + + if test -n "$addlibs"; then + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" + + func_extract_archives $gentop $addlibs + oldobjs="$oldobjs $func_extract_archives_result" + fi + + # Do each command in the archive commands. + if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then + cmds=$old_archive_from_new_cmds + else + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" + + func_extract_archives $gentop $dlprefiles + oldobjs="$oldobjs $func_extract_archives_result" + fi + + # POSIX demands no paths to be encoded in archives. 
We have + # to avoid creating archives with duplicate basenames if we + # might have to extract them afterwards, e.g., when creating a + # static archive out of a convenience library, or when linking + # the entirety of a libtool archive into another (currently + # not supported by libtool). + if (for obj in $oldobjs + do + func_basename "$obj" + $ECHO "$func_basename_result" + done | sort | sort -uc >/dev/null 2>&1); then + : + else + $ECHO "copying selected object files to avoid basename conflicts..." + gentop="$output_objdir/${outputname}x" + generated="$generated $gentop" + func_mkdir_p "$gentop" + save_oldobjs=$oldobjs + oldobjs= + counter=1 + for obj in $save_oldobjs + do + func_basename "$obj" + objbase="$func_basename_result" + case " $oldobjs " in + " ") oldobjs=$obj ;; + *[\ /]"$objbase "*) + while :; do + # Make sure we don't pick an alternate name that also + # overlaps. + newobj=lt$counter-$objbase + func_arith $counter + 1 + counter=$func_arith_result + case " $oldobjs " in + *[\ /]"$newobj "*) ;; + *) if test ! -f "$gentop/$newobj"; then break; fi ;; + esac + done + func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" + oldobjs="$oldobjs $gentop/$newobj" + ;; + *) oldobjs="$oldobjs $obj" ;; + esac + done + fi + eval cmds=\"$old_archive_cmds\" + + func_len " $cmds" + len=$func_len_result + if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + cmds=$old_archive_cmds + else + # the command line is too long to link in one step, link in parts + func_verbose "using piecewise archive linking..." + save_RANLIB=$RANLIB + RANLIB=: + objlist= + concat_cmds= + save_oldobjs=$oldobjs + oldobjs= + # Is there a better way of finding the last object in the list? + for obj in $save_oldobjs + do + last_oldobj=$obj + done + eval test_cmds=\"$old_archive_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + for obj in $save_oldobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + func_append objlist " $obj" + if test "$len" -lt "$max_cmd_len"; then + : + else + # the above command should be used before it gets too long + oldobjs=$objlist + if test "$obj" = "$last_oldobj" ; then + RANLIB=$save_RANLIB + fi + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\" + objlist= + len=$len0 + fi + done + RANLIB=$save_RANLIB + oldobjs=$objlist + if test "X$oldobjs" = "X" ; then + eval cmds=\"\$concat_cmds\" + else + eval cmds=\"\$concat_cmds~\$old_archive_cmds\" + fi + fi + fi + func_execute_cmds "$cmds" 'exit $?' + done + + test -n "$generated" && \ + func_show_eval "${RM}r$generated" + + # Now create the libtool archive. + case $output in + *.la) + old_library= + test "$build_old_libs" = yes && old_library="$libname.$libext" + func_verbose "creating $output" + + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + # Quote the link command for shipping. 
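The old-archive branch above also has to work around the fact that archives store members by basename only: because the objects may need to be extracted again later, two inputs such as sub1/util.o and sub2/util.o must not land in one archive under the same name, so a colliding object is first copied under a fresh lt<counter>-<basename> name. A minimal C sketch of that renaming policy (paths are hypothetical and printing stands in for the ln/cp and archive steps):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *objs[] = { "sub1/util.o", "sub2/util.o", "main.o" };
        char seen[16][64];              /* basenames already scheduled */
        int nseen = 0, counter = 1;
        size_t i;

        for (i = 0; i < sizeof(objs) / sizeof(objs[0]); i++) {
            const char *base = strrchr(objs[i], '/');
            int j, collides = 0;

            base = base ? base + 1 : objs[i];
            for (j = 0; j < nseen; j++)
                if (strcmp(seen[j], base) == 0)
                    collides = 1;

            if (collides)
                /* same trick as the script: copy under an lt<N>- name */
                printf("cp %s lt%d-%s\n", objs[i], counter++, base);
            else
                printf("add %s\n", objs[i]);

            snprintf(seen[nseen], sizeof(seen[0]), "%s", base);
            nseen++;
        }
        return 0;
    }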
+ relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" + relink_command=`$ECHO "X$relink_command" | $Xsed -e "$sed_quote_subst"` + if test "$hardcode_automatic" = yes ; then + relink_command= + fi + + # Only create the output if not a dry run. + $opt_dry_run || { + for installed in no yes; do + if test "$installed" = yes; then + if test -z "$install_libdir"; then + break + fi + output="$output_objdir/$outputname"i + # Replace all uninstalled libtool libraries with the installed ones + newdependency_libs= + for deplib in $dependency_libs; do + case $deplib in + *.la) + func_basename "$deplib" + name="$func_basename_result" + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + test -z "$libdir" && \ + func_fatal_error "\`$deplib' is not a valid libtool archive" + newdependency_libs="$newdependency_libs $libdir/$name" + ;; + *) newdependency_libs="$newdependency_libs $deplib" ;; + esac + done + dependency_libs="$newdependency_libs" + newdlfiles= + + for lib in $dlfiles; do + case $lib in + *.la) + func_basename "$lib" + name="$func_basename_result" + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "\`$lib' is not a valid libtool archive" + newdlfiles="$newdlfiles $libdir/$name" + ;; + *) newdlfiles="$newdlfiles $lib" ;; + esac + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + *.la) + # Only pass preopened files to the pseudo-archive (for + # eventual linking with the app. that links it) if we + # didn't already link the preopened objects directly into + # the library: + func_basename "$lib" + name="$func_basename_result" + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "\`$lib' is not a valid libtool archive" + newdlprefiles="$newdlprefiles $libdir/$name" + ;; + esac + done + dlprefiles="$newdlprefiles" + else + newdlfiles= + for lib in $dlfiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + newdlfiles="$newdlfiles $abs" + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + newdlprefiles="$newdlprefiles $abs" + done + dlprefiles="$newdlprefiles" + fi + $RM $output + # place dlname in correct position for cygwin + tdlname=$dlname + case $host,$output,$installed,$module,$dlname in + *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) tdlname=../bin/$dlname ;; + esac + $ECHO > $output "\ +# $outputname - a libtool library file +# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='$tdlname' + +# Names of this library. +library_names='$library_names' + +# The name of the static archive. +old_library='$old_library' + +# Linker flags that can not go in dependency_libs. +inherited_linker_flags='$new_inherited_linker_flags' + +# Libraries that this one depends upon. +dependency_libs='$dependency_libs' + +# Names of additional weak libraries provided by this library +weak_library_names='$weak_libs' + +# Version information for $libname. +current=$current +age=$age +revision=$revision + +# Is this an already installed library? +installed=$installed + +# Should we warn about portability when linking against -modules? 
+shouldnotlink=$module + +# Files to dlopen/dlpreopen +dlopen='$dlfiles' +dlpreopen='$dlprefiles' + +# Directory that this library needs to be installed in: +libdir='$install_libdir'" + if test "$installed" = no && test "$need_relink" = yes; then + $ECHO >> $output "\ +relink_command=\"$relink_command\"" + fi + done + } + + # Do a symbolic link so that the libtool archive can be found in + # LD_LIBRARY_PATH before the program is installed. + func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' + ;; + esac + exit $EXIT_SUCCESS +} + +{ test "$mode" = link || test "$mode" = relink; } && + func_mode_link ${1+"$@"} + + +# func_mode_uninstall arg... +func_mode_uninstall () +{ + $opt_debug + RM="$nonopt" + files= + rmforce= + exit_status=0 + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic="$magic" + + for arg + do + case $arg in + -f) RM="$RM $arg"; rmforce=yes ;; + -*) RM="$RM $arg" ;; + *) files="$files $arg" ;; + esac + done + + test -z "$RM" && \ + func_fatal_help "you must specify an RM program" + + rmdirs= + + origobjdir="$objdir" + for file in $files; do + func_dirname "$file" "" "." + dir="$func_dirname_result" + if test "X$dir" = X.; then + objdir="$origobjdir" + else + objdir="$dir/$origobjdir" + fi + func_basename "$file" + name="$func_basename_result" + test "$mode" = uninstall && objdir="$dir" + + # Remember objdir for removal later, being careful to avoid duplicates + if test "$mode" = clean; then + case " $rmdirs " in + *" $objdir "*) ;; + *) rmdirs="$rmdirs $objdir" ;; + esac + fi + + # Don't error if the file doesn't exist and rm -f was used. + if { test -L "$file"; } >/dev/null 2>&1 || + { test -h "$file"; } >/dev/null 2>&1 || + test -f "$file"; then + : + elif test -d "$file"; then + exit_status=1 + continue + elif test "$rmforce" = yes; then + continue + fi + + rmfiles="$file" + + case $name in + *.la) + # Possibly a libtool archive, so verify it. + if func_lalib_p "$file"; then + func_source $dir/$name + + # Delete the libtool libraries and symlinks. + for n in $library_names; do + rmfiles="$rmfiles $objdir/$n" + done + test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library" + + case "$mode" in + clean) + case " $library_names " in + # " " in the beginning catches empty $dlname + *" $dlname "*) ;; + *) rmfiles="$rmfiles $objdir/$dlname" ;; + esac + test -n "$libdir" && rmfiles="$rmfiles $objdir/$name $objdir/${name}i" + ;; + uninstall) + if test -n "$library_names"; then + # Do each command in the postuninstall commands. + func_execute_cmds "$postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' + fi + + if test -n "$old_library"; then + # Do each command in the old_postuninstall commands. + func_execute_cmds "$old_postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' + fi + # FIXME: should reinstall the best remaining shared library. + ;; + esac + fi + ;; + + *.lo) + # Possibly a libtool object, so verify it. + if func_lalib_p "$file"; then + + # Read the .lo file + func_source $dir/$name + + # Add PIC object to the list of files to remove. + if test -n "$pic_object" && + test "$pic_object" != none; then + rmfiles="$rmfiles $dir/$pic_object" + fi + + # Add non-PIC object to the list of files to remove. 
+ if test -n "$non_pic_object" && + test "$non_pic_object" != none; then + rmfiles="$rmfiles $dir/$non_pic_object" + fi + fi + ;; + + *) + if test "$mode" = clean ; then + noexename=$name + case $file in + *.exe) + func_stripname '' '.exe' "$file" + file=$func_stripname_result + func_stripname '' '.exe' "$name" + noexename=$func_stripname_result + # $file with .exe has already been added to rmfiles, + # add $file without .exe + rmfiles="$rmfiles $file" + ;; + esac + # Do a test to see if this is a libtool program. + if func_ltwrapper_p "$file"; then + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + relink_command= + func_source $func_ltwrapper_scriptname_result + rmfiles="$rmfiles $func_ltwrapper_scriptname_result" + else + relink_command= + func_source $dir/$noexename + fi + + # note $name still contains .exe if it was in $file originally + # as does the version of $file that was added into $rmfiles + rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}" + if test "$fast_install" = yes && test -n "$relink_command"; then + rmfiles="$rmfiles $objdir/lt-$name" + fi + if test "X$noexename" != "X$name" ; then + rmfiles="$rmfiles $objdir/lt-${noexename}.c" + fi + fi + fi + ;; + esac + func_show_eval "$RM $rmfiles" 'exit_status=1' + done + objdir="$origobjdir" + + # Try to remove the ${objdir}s in the directories where we deleted files + for dir in $rmdirs; do + if test -d "$dir"; then + func_show_eval "rmdir $dir >/dev/null 2>&1" + fi + done + + exit $exit_status +} + +{ test "$mode" = uninstall || test "$mode" = clean; } && + func_mode_uninstall ${1+"$@"} + +test -z "$mode" && { + help="$generic_help" + func_fatal_help "you must specify a MODE" +} + +test -z "$exec_cmd" && \ + func_fatal_help "invalid operation mode \`$mode'" + +if test -n "$exec_cmd"; then + eval exec "$exec_cmd" + exit $EXIT_FAILURE +fi + +exit $exit_status + + +# The TAGs below are defined such that we never get into a situation +# in which we disable both kinds of libraries. Given conflicting +# choices, we go for a static library, that is the most portable, +# since we can't tell whether shared libraries were disabled because +# the user asked for that or because the platform doesn't support +# them. This is particularly important on AIX, because we don't +# support having both static and shared libraries enabled at the same +# time on that platform, so we default to a shared-only configuration. +# If a disable-shared tag is given, we'll fallback to a static-only +# configuration. But we'll never go from static-only to shared-only. + +# ### BEGIN LIBTOOL TAG CONFIG: disable-shared +build_libtool_libs=no +build_old_libs=yes +# ### END LIBTOOL TAG CONFIG: disable-shared + +# ### BEGIN LIBTOOL TAG CONFIG: disable-static +build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` +# ### END LIBTOOL TAG CONFIG: disable-static + +# Local Variables: +# mode:shell-script +# sh-indentation:2 +# End: +# vi:sw=2 + diff --git a/regex.c b/regex.c new file mode 100644 index 0000000..14da9cc --- /dev/null +++ b/regex.c @@ -0,0 +1,101 @@ +/* + * ===================================================================================== + * + * Filename: regex.c + * + * Description: Regex management for the module + * + * Version: 0.1 + * Created: 08/08/2010 11:01:01 + * Revision: none + * Compiler: gcc + * + * Author: BlackLight (http://0x00.ath.cx), + * Licence: GNU GPL v.3 + * Company: DO WHAT YOU WANT CAUSE A PIRATE IS FREE, YOU ARE A PIRATE! 
+ *
+ * =====================================================================================
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <regex.h>
+
+/**
+ * FUNCTION: preg_match
+ * \brief Check if a string matches a regular expression
+ * \param expr Regular expression to be matched
+ * \param str String to be checked
+ * \param matches Reference to a char** that will contain the submatches (NULL if you don't need it)
+ * \param nmatches Reference to an int containing the number of submatches found (NULL if you don't need it)
+ * \return -1 if the regex is wrong, 0 if no match was found, 1 otherwise
+ */
+
+int
+preg_match ( const char* expr, char* str, char*** matches, int *nmatches )
+{
+ int i;
+ regex_t regex;
+ regmatch_t *m = NULL;
+ *nmatches = 0;
+
+ if ( regcomp ( &regex, expr, REG_EXTENDED | REG_ICASE ) != 0 ) {
+ return -1;
+ }
+
+ if ( regex.re_nsub > 0 )
+ {
+ if ( !(m = (regmatch_t*) malloc ( (regex.re_nsub+1) * sizeof(regmatch_t) )) )
+ {
+ regfree ( &regex );
+ fprintf ( stderr, "\nDynamic memory allocation failure at %s:%d\n", __FILE__, __LINE__ );
+ exit ( EXIT_FAILURE );
+ }
+
+ if ( !( *matches = (char**) malloc ( (regex.re_nsub+1) * sizeof(char*) )) )
+ {
+ regfree ( &regex );
+ free ( m );
+ m = NULL;
+ fprintf ( stderr, "\nDynamic memory allocation failure at %s:%d\n", __FILE__, __LINE__ );
+ exit ( EXIT_FAILURE );
+ }
+
+ if ( regexec ( &regex, str, regex.re_nsub+1, m, 0 ) == REG_NOMATCH ) {
+ regfree ( &regex );
+ free ( m );
+ m = NULL;
+ return 0;
+ }
+ } else {
+ if ( regexec ( &regex, str, 0, NULL, 0 ) == REG_NOMATCH ) {
+ regfree ( &regex );
+ free ( m );
+ m = NULL;
+ return 0;
+ }
+ }
+
+ *nmatches = regex.re_nsub;
+
+ for ( i=0; i < regex.re_nsub; i++ ) {
+ if ( !( (*matches)[i] = (char*) malloc ( m[i+1].rm_eo - m[i+1].rm_so + 1 )) )
+ {
+ regfree ( &regex );
+ free ( m );
+ m = NULL;
+ fprintf ( stderr, "\nDynamic memory allocation failure at %s:%d\n", __FILE__, __LINE__ );
+ exit ( EXIT_FAILURE );
+ }
+
+ memset ( (*matches)[i], 0, m[i+1].rm_eo - m[i+1].rm_so + 1 );
+ strncpy ( (*matches)[i], str + m[i+1].rm_so, m[i+1].rm_eo - m[i+1].rm_so );
+ }
+
+ regfree ( &regex );
+ free ( m );
+ m = NULL;
+ return 1;
+} /* ----- end of function preg_match ----- */
+
diff --git a/sfPolicyUserData.c b/sfPolicyUserData.c
new file mode 100644
index 0000000..30303c2
--- /dev/null
+++ b/sfPolicyUserData.c
@@ -0,0 +1,165 @@
+/****************************************************************************
+ * Copyright (C) 2008-2010 Sourcefire, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License Version 2 as
+ * published by the Free Software Foundation. You may not use, modify or
+ * distribute this program under any other version of the GNU General
+ * Public License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
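preg_match() above compiles the pattern with REG_EXTENDED | REG_ICASE and, on a match, hands back one heap-allocated string per capture group; the caller owns both the strings and the array, and nmatches must point to a real int even when no submatches are wanted, since it is written unconditionally. A hypothetical caller, not part of this commit (pattern and input are invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    /* Prototype as defined in regex.c above. */
    int preg_match ( const char* expr, char* str, char*** matches, int *nmatches );

    int main ( void )
    {
        char   input[] = "dst_port 8080";
        char **sub     = NULL;
        int    nsub    = 0, i;

        if ( preg_match ( "^([a-z_]+)[[:space:]]+([0-9]+)$", input, &sub, &nsub ) > 0 )
        {
            for ( i = 0; i < nsub; i++ )
            {
                printf ( "submatch %d: %s\n", i, sub[i] );
                free ( sub[i] );          /* caller frees every submatch... */
            }
            free ( sub );                 /* ...and the array itself        */
        }

        return 0;
    }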
+ * + ****************************************************************************/ + +#include "stdlib.h" +#include "string.h" +#include "sfPolicy.h" +#include "sfPolicyUserData.h" + +tSfPolicyId runtimePolicyId = 0; +tSfPolicyId parserPolicyId = 0; + +/** @defgroup sfPolicyConfig Sourcefire policy configuration module + * + * Create a user policy configuration context. A context provides facility for creating + * policy specific data instances. User can create as many policy instances as memory + * resources will allow. User can create/delete context, set/clear/get user date for a + * specific policy, default policy or current policy. User can also iterate over all instances + * user data. + * + * In current design, preprocessor use this module directly to manage policy specific data + * instances. A future enhancement can be to extract policy management code from each processor + * and put it in a new policy management module. Policy management module will set a single + * pointer to user data before calling appropriate callback function in a preprocessor. As + * an example, policy module will iterate over all policies and call CleanExit functions in every + * preprocessor for each policy. This will make policy management module will hide policies from + * preprocessors and make them policy agnostic. + * @{ + */ + +/**Create a user context. + * Allocates a new context and return it to user. All transactions within a context are independent from + * any other transactions in a different context. + * + * @returns tSfPolicyUserContextId +*/ +tSfPolicyUserContextId sfPolicyConfigCreate(void) +{ + tSfPolicyUserContext *pTmp = NULL; + + pTmp = calloc(1, sizeof(tSfPolicyUserContext)); + + return pTmp; +} + +/**Delete a user policy data context. + * @param pContext + */ +void sfPolicyConfigDelete( + tSfPolicyUserContextId pContext + ) +{ + if (pContext == NULL) + return; + + if (pContext->userConfig != NULL) + free(pContext->userConfig); + + free(pContext); +} + +/**Store a pointer to user data. + * @param pContext + * @param policyId is 0 based. + * @param config - pointer to user configuration. + */ +int sfPolicyUserDataSet ( + tSfPolicyUserContextId pContext, + tSfPolicyId policyId, + void *config + ) +{ + void **ppTmp; + + if (policyId >= pContext->numAllocatedPolicies) + { + //expand the array + ppTmp = (void **)calloc(policyId+POLICY_ALLOCATION_CHUNK, sizeof(void *)); + if (!(ppTmp)) + { + return -1; + } + + if (pContext->numAllocatedPolicies) + { + memcpy(ppTmp, pContext->userConfig, sizeof(void*)*(pContext->numAllocatedPolicies)); + free(pContext->userConfig); + } + + pContext->userConfig = ppTmp; + pContext->numAllocatedPolicies = policyId + POLICY_ALLOCATION_CHUNK; + } + + if (pContext->userConfig[policyId]) + { + //dont overwrite existing configuration + return -1; + } + + pContext->userConfig[policyId] = config; + pContext->numActivePolicies++; + + return 0; +} + +/**user is responsible for freeing any memory. 
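The context returned by sfPolicyConfigCreate() is essentially a growable array of per-policy pointers: sfPolicyUserDataSet() above grows it in POLICY_ALLOCATION_CHUNK steps and refuses to overwrite a slot that is already populated. A minimal, hypothetical user of this API (the MyConfig type and the policy id are invented; sfPolicyUserDataClear() and sfPolicyUserDataIterate() are defined just below):

    #include <stdio.h>
    #include <stdlib.h>
    #include "sfPolicyUserData.h"

    typedef struct { int sensor_id; } MyConfig;

    /* Iteration callback: free the per-policy data and clear its slot.
     * Returning non-zero would stop the iteration early. */
    static int free_one ( tSfPolicyUserContextId ctx, tSfPolicyId id, void *data )
    {
        free ( data );
        sfPolicyUserDataClear ( ctx, id );
        return 0;
    }

    void example ( void )
    {
        tSfPolicyUserContextId ctx = sfPolicyConfigCreate();
        MyConfig *cfg;

        if ( ctx == NULL )
            return;

        cfg = calloc ( 1, sizeof ( MyConfig ) );
        if ( cfg != NULL )
        {
            cfg->sensor_id = 42;
            if ( sfPolicyUserDataSet ( ctx, 0, cfg ) != 0 )   /* policy id 0 */
            {
                fprintf ( stderr, "slot in use or allocation failed\n" );
                free ( cfg );
            }
        }

        sfPolicyUserDataIterate ( ctx, free_one );            /* tear down */
        sfPolicyConfigDelete ( ctx );
    }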
+ */ +void * sfPolicyUserDataClear ( + tSfPolicyUserContextId pContext, + tSfPolicyId policyId + ) +{ + void *pTmp = NULL; + + if (policyId < pContext->numAllocatedPolicies) + { + pTmp = pContext->userConfig[policyId]; + pContext->userConfig[policyId] = NULL; + pContext->numActivePolicies--; + } + + return pTmp; +} + +int sfPolicyUserDataIterate ( + tSfPolicyUserContextId pContext, + int (*callback)(tSfPolicyUserContextId pContext, tSfPolicyId policyId, void* config) + ) +{ + tSfPolicyId policyId; + int ret = 0; + + //must not use numActivePolicies because the callback may delete a policy + for (policyId = 0; policyId < pContext->numAllocatedPolicies; policyId++) + { + if (pContext->userConfig[policyId]) + { + ret = callback(pContext, policyId, pContext->userConfig[policyId]); + if (ret != 0) + break; + } + } + + return ret; +} + + +/** @} */ // + diff --git a/sf_dynamic_preproc_lib.c b/sf_dynamic_preproc_lib.c new file mode 100644 index 0000000..bb410cf --- /dev/null +++ b/sf_dynamic_preproc_lib.c @@ -0,0 +1,180 @@ +/* $Id$ */ +/* + ** Copyright (C) 2005-2010 Sourcefire, Inc. + ** + ** This program is free software; you can redistribute it and/or modify + ** it under the terms of the GNU General Public License Version 2 as + ** published by the Free Software Foundation. You may not use, modify or + ** distribute this program under any other version of the GNU General + ** Public License. + ** + ** This program is distributed in the hope that it will be useful, + ** but WITHOUT ANY WARRANTY; without even the implied warranty of + ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + ** GNU General Public License for more details. + ** + ** You should have received a copy of the GNU General Public License + ** along with this program; if not, write to the Free Software + ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include "sf_preproc_info.h" +#include "sf_snort_packet.h" +#include "sf_dynamic_preproc_lib.h" +#include "sf_dynamic_meta.h" +#include "sf_dynamic_preprocessor.h" +#include "sf_dynamic_common.h" +#include "sf_dynamic_define.h" +#include +#include +#include +#include +#include + +DynamicPreprocessorData _dpd; + +NORETURN void DynamicPreprocessorFatalMessage(const char *format, ...) 
+{
+ char buf[STD_BUF];
+ va_list ap;
+
+ va_start(ap, format);
+ vsnprintf(buf, STD_BUF, format, ap);
+ va_end(ap);
+
+ buf[STD_BUF - 1] = '\0';
+
+ _dpd.fatalMsg("%s", buf);
+
+ exit(1);
+}
+
+PREPROC_LINKAGE int InitializePreprocessor(DynamicPreprocessorData *dpd)
+{
+ int i;
+ if (dpd->version < PREPROCESSOR_DATA_VERSION)
+ {
+ return -1;
+ }
+
+ if (dpd->size != sizeof(DynamicPreprocessorData))
+ {
+ return -1;
+ }
+
+
+ _dpd.version = dpd->version;
+ _dpd.size = dpd->size;
+
+ _dpd.altBuffer = dpd->altBuffer;
+ _dpd.altBufferLen = dpd->altBufferLen;
+ for (i=0;i<MAX_URIINFOS;i++)
+ {
+ _dpd.uriBuffers[i] = dpd->uriBuffers[i];
+ }
+ _dpd.logMsg = dpd->logMsg;
+ _dpd.errMsg = dpd->errMsg;
+ _dpd.fatalMsg = dpd->fatalMsg;
+ _dpd.debugMsg = dpd->debugMsg;
+
+ _dpd.registerPreproc = dpd->registerPreproc;
+ _dpd.addPreproc = dpd->addPreproc;
+ _dpd.addPreprocRestart = dpd->addPreprocRestart;
+ _dpd.addPreprocExit = dpd->addPreprocExit;
+ _dpd.addPreprocConfCheck = dpd->addPreprocConfCheck;
+ _dpd.preprocOptRegister = dpd->preprocOptRegister;
+ _dpd.addPreprocProfileFunc = dpd->addPreprocProfileFunc;
+ _dpd.profilingPreprocsFunc = dpd->profilingPreprocsFunc;
+ _dpd.totalPerfStats = dpd->totalPerfStats;
+
+ _dpd.alertAdd = dpd->alertAdd;
+ _dpd.genSnortEvent = dpd->genSnortEvent;
+ _dpd.thresholdCheck = dpd->thresholdCheck;
+
+ _dpd.inlineMode = dpd->inlineMode;
+ _dpd.inlineDrop = dpd->inlineDrop;
+
+ _dpd.detect = dpd->detect;
+ _dpd.disableDetect = dpd->disableDetect;
+ _dpd.disableAllDetect = dpd->disableAllDetect;
+ _dpd.setPreprocBit = dpd->setPreprocBit;
+
+ _dpd.streamAPI = dpd->streamAPI;
+ _dpd.searchAPI = dpd->searchAPI;
+
+ _dpd.config_file = dpd->config_file;
+ _dpd.config_line = dpd->config_line;
+ _dpd.printfappend = dpd->printfappend;
+ _dpd.tokenSplit = dpd->tokenSplit;
+ _dpd.tokenFree = dpd->tokenFree;
+
+ _dpd.getRuleInfoByName = dpd->getRuleInfoByName;
+ _dpd.getRuleInfoById = dpd->getRuleInfoById;
+
+ _dpd.preprocess = dpd->preprocess;
+
+ _dpd.debugMsgFile = dpd->debugMsgFile;
+ _dpd.debugMsgLine = dpd->debugMsgLine;
+
+ _dpd.registerPreprocStats = dpd->registerPreprocStats;
+ _dpd.addPreprocReset = dpd->addPreprocReset;
+ _dpd.addPreprocResetStats = dpd->addPreprocResetStats;
+ _dpd.addPreprocReassemblyPkt = dpd->addPreprocReassemblyPkt;
+ _dpd.setPreprocReassemblyPktBit = dpd->setPreprocReassemblyPktBit;
+ _dpd.disablePreprocessors = dpd->disablePreprocessors;
+
+#ifdef SUP_IP6
+ _dpd.ip6Build = dpd->ip6Build;
+ _dpd.ip6SetCallbacks = dpd->ip6SetCallbacks;
+#endif
+
+ _dpd.logAlerts = dpd->logAlerts;
+ _dpd.resetAlerts = dpd->resetAlerts;
+ _dpd.pushAlerts = dpd->pushAlerts;
+ _dpd.popAlerts = dpd->popAlerts;
+
+#ifdef TARGET_BASED
+ _dpd.findProtocolReference = dpd->findProtocolReference;
+ _dpd.addProtocolReference = dpd->addProtocolReference;
+ _dpd.isAdaptiveConfigured = dpd->isAdaptiveConfigured;
+#endif
+
+ _dpd.preprocOptOverrideKeyword = dpd->preprocOptOverrideKeyword;
+ _dpd.isPreprocEnabled = dpd->isPreprocEnabled;
+
+#ifdef SNORT_RELOAD
+ _dpd.addPreprocReloadVerify = dpd->addPreprocReloadVerify;
+#endif
+
+ _dpd.getRuntimePolicy = dpd->getRuntimePolicy;
+ _dpd.getParserPolicy = dpd->getParserPolicy;
+ _dpd.getDefaultPolicy = dpd->getDefaultPolicy;
+ _dpd.setParserPolicy = dpd->setParserPolicy;
+ _dpd.setFileDataPtr = dpd->setFileDataPtr;
+ _dpd.SnortStrtol = dpd->SnortStrtol;
+ _dpd.SnortStrtoul = dpd->SnortStrtoul;
+
+ _dpd.fpEvalRTN = dpd->fpEvalRTN;
+ _dpd.portObjectCharPortArray = dpd->portObjectCharPortArray;
+
+ _dpd.obApi = dpd->obApi;
+
+ DYNAMIC_PREPROC_SETUP();
+ return 0;
+}
+
+PREPROC_LINKAGE 
int LibVersion(DynamicPluginMeta *dpm) +{ + + dpm->type = TYPE_PREPROCESSOR; + dpm->major = MAJOR_VERSION; + dpm->minor = MINOR_VERSION; + dpm->build = BUILD_VERSION; + strncpy(dpm->uniqueName, PREPROC_NAME, MAX_NAME_LEN); + return 0; +} + +/* Variables to check type of InitializeEngine and LibVersion */ +//PREPROC_LINKAGE InitEngineLibFunc initEngineFunc = &InitializeEngine; +//PREPROC_LINKAGE LibVersionFunc libVersionFunc = &LibVersion; + diff --git a/sf_preproc_info.h b/sf_preproc_info.h new file mode 100644 index 0000000..1fd115a --- /dev/null +++ b/sf_preproc_info.h @@ -0,0 +1,13 @@ +#ifndef SF_PREPROC_INFO_H_ +#define SF_PREPROC_INFO_H_ + +#define MAJOR_VERSION 1 +#define MINOR_VERSION 0 +#define BUILD_VERSION 1 +#define PREPROC_NAME "SF_AI" + +#define DYNAMIC_PREPROC_SETUP AI_setup +extern void AI_setup(); + +#endif /* SF_PREPROC_INFO_H_ */ + diff --git a/spp_ai.c b/spp_ai.c new file mode 100644 index 0000000..3a2cc5a --- /dev/null +++ b/spp_ai.c @@ -0,0 +1,617 @@ +/* + * ===================================================================================== + * + * Filename: spp_ai.c + * + * Description: Main file for the spp_ai Snort preprocessor module + * + * Version: 0.1 + * Created: 26/07/2010 11:00:41 + * Revision: none + * Compiler: gcc + * + * Author: BlackLight (http://0x00.ath.cx), + * Licence: GNU GPL v.3 + * Company: DO WHAT YOU WANT CAUSE A PIRATE IS FREE, YOU ARE A PIRATE! + * + * ===================================================================================== + */ + +#include "spp_ai.h" +#include "sfPolicyUserData.h" + +#include +#include +#include + +tSfPolicyUserContextId ex_config = NULL; + +#ifdef SNORT_RELOAD +tSfPolicyUserContextId ex_swap_config = NULL; +#endif + +static void AI_init(char *); +static void AI_process(void *, void *); +static AI_config * AI_parse(char *); +#ifdef SNORT_RELOAD +static void AI_reload(char *); +static int AI_reloadSwapPolicyFree(tSfPolicyUserContextId, tSfPolicyId, void *); +static void * AI_reloadSwap(void); +static void AI_reloadSwapFree(void *); +#endif + + +/** + * FUNCTION: AI_setup + * \brief Set up the preprocessor module + */ + +void AI_setup(void) +{ +#ifndef SNORT_RELOAD + _dpd.registerPreproc("ai", AI_init); +#else + _dpd.registerPreproc("ai", AI_init, AI_reload, + AI_reloadSwap, AI_reloadSwapFree); +#endif + + DEBUG_WRAP(_dpd.debugMsg(DEBUG_PLUGIN, "Preprocessor: AI is setup\n");); +} /* ----- end of function AI_setup ----- */ + + +/** + * FUNCTION: AI_init + * \brief Initialize the preprocessor module + * \param args Configuration arguments passed to the module + */ + +static void AI_init(char *args) +{ + AI_config *config; + + pthread_t cleanup_thread, + logparse_thread; + + tSfPolicyId policy_id = _dpd.getParserPolicy(); + + _dpd.logMsg("AI dynamic preprocessor configuration\n"); + + if (ex_config == NULL) + { + ex_config = sfPolicyConfigCreate(); + if (ex_config == NULL) + _dpd.fatalMsg("Could not allocate configuration struct.\n"); + } + + config = AI_parse(args); + sfPolicyUserPolicySet(ex_config, policy_id); + sfPolicyUserDataSetCurrent(ex_config, config); + + /* If the hash_cleanup_interval of stream_expire_interval options are set to zero, + * no cleanup will be made on the streams */ + if ( config->hashCleanupInterval != 0 && config->streamExpireInterval != 0 ) + { + if ( pthread_create ( &cleanup_thread, NULL, AI_hashcleanup_thread, config ) != 0 ) + { + _dpd.fatalMsg ( "Failed to create the hash cleanup thread\n" ); + } + } + + if ( strlen ( config->alertfile ) > 0 ) + { + if ( pthread_create ( 
&logparse_thread, NULL, AI_alertparser_thread, config ) != 0 ) + { + _dpd.fatalMsg ( "Failed to create the alert parser thread\n" ); + } + } + + /* Register the preprocessor function, Transport layer, ID 10000 */ + _dpd.addPreproc(AI_process, PRIORITY_TRANSPORT, 10000, PROTO_BIT__TCP | PROTO_BIT__UDP); + DEBUG_WRAP(_dpd.debugMsg(DEBUG_PLUGIN, "Preprocessor: AI is initialized\n");); +} /* ----- end of function AI_init ----- */ + +/** + * FUNCTION: AI_config + * \brief Parse the arguments passed to the module saving them to a valid configuration struct + * \param args Arguments passed to the module + * \return Pointer to AI_config keeping the configuration for the module + */ + +static AI_config * AI_parse(char *args) +{ + char *arg; + char *match; + char alertfile[1024] = { 0 }; + char clusterfile[1024] = { 0 }; + + char **matches = NULL; + int nmatches = 0; + + int i; + int offset; + int len; + uint32_t netmask; + + int min_val; + int max_val; + char label[256]; + cluster_type type; + + hierarchy_node **hierarchy_nodes = NULL; + int n_hierarchy_nodes = 0; + + unsigned long cleanup_interval = 0, + stream_expire_interval = 0, + alertfile_len = 0, + clusterfile_len = 0, + alert_clustering_interval = 0; + + BOOL has_cleanup_interval = false, + has_stream_expire_interval = false, + has_alertfile = false, + has_clusterfile = false; + + AI_config *config = NULL; + + if ( !( config = ( AI_config* ) malloc ( sizeof( AI_config )) )) + _dpd.fatalMsg("Could not allocate configuration struct.\n"); + memset ( config, 0, sizeof ( AI_config )); + + /* Parsing the hashtable_cleanup_interval option */ + if (( arg = (char*) strcasestr( args, "hashtable_cleanup_interval" ) )) + { + has_cleanup_interval = true; + + for ( arg += strlen("hashtable_cleanup_interval"); + *arg && (*arg < '0' || *arg > '9'); + arg++ ); + + if ( !(*arg) ) + { + _dpd.fatalMsg("AIPreproc: hashtable_cleanup_interval option used but " + "no value specified\n"); + } + + cleanup_interval = strtoul(arg, NULL, 10); + config->hashCleanupInterval = cleanup_interval; + _dpd.logMsg(" Hash table cleanup interval: %d\n", config->hashCleanupInterval); + } + + /* Parsing the tcp_stream_expire_interval option */ + if (( arg = (char*) strcasestr( args, "tcp_stream_expire_interval" ) )) + { + has_stream_expire_interval = true; + + for ( arg += strlen("tcp_stream_expire_interval"); + *arg && (*arg < '0' || *arg > '9'); + arg++ ); + + if ( !(*arg) ) + { + _dpd.fatalMsg("AIPreproc: tcp_stream_expire_interval option used but " + "no value specified\n"); + } + + stream_expire_interval = strtoul(arg, NULL, 10); + config->streamExpireInterval = stream_expire_interval; + _dpd.logMsg(" TCP stream expire interval: %d\n", config->streamExpireInterval); + } + + /* Parsing the alert_clustering_interval option */ + if (( arg = (char*) strcasestr( args, "alert_clustering_interval" ) )) + { + for ( arg += strlen("alert_clustering_interval"); + *arg && (*arg < '0' || *arg > '9'); + arg++ ); + + if ( !(*arg) ) + { + _dpd.fatalMsg("AIPreproc: alert_clustering_interval option used but " + "no value specified\n"); + } + + alert_clustering_interval = strtoul(arg, NULL, 10); + config->alertClusteringInterval = alert_clustering_interval; + _dpd.logMsg(" Alert clustering interval: %d\n", config->alertClusteringInterval); + } + + /* Parsing the alertfile option */ + if (( arg = (char*) strcasestr( args, "alertfile" ) )) + { + for ( arg += strlen("alertfile"); + *arg && *arg != '"'; + arg++ ); + + if ( !(*(arg++)) ) + { + _dpd.fatalMsg("AIPreproc: alertfile option used but no 
filename specified\n"); + } + + for ( alertfile[ (++alertfile_len)-1 ] = *arg; + *arg && *arg != '"' && alertfile_len < 1024; + arg++, alertfile[ (++alertfile_len)-1 ] = *arg ); + + if ( alertfile[0] == 0 || alertfile_len <= 1 ) { + has_alertfile = false; + } else { + if ( alertfile_len >= 1024 ) { + _dpd.fatalMsg("AIPreproc: alertfile path too long ( >= 1024 )\n"); + } else if ( strlen( alertfile ) == 0 ) { + has_alertfile = false; + } else { + has_alertfile = true; + alertfile[ alertfile_len-1 ] = 0; + strncpy ( config->alertfile, alertfile, alertfile_len ); + _dpd.logMsg(" alertfile path: %s\n", config->alertfile); + } + } + } + + /* Parsing the clusterfile option */ + if (( arg = (char*) strcasestr( args, "clusterfile" ) )) + { + for ( arg += strlen("clusterfile"); + *arg && *arg != '"'; + arg++ ); + + if ( !(*(arg++)) ) + { + _dpd.fatalMsg("AIPreproc: clusterfile option used but no filename specified\n"); + } + + for ( clusterfile[ (++clusterfile_len)-1 ] = *arg; + *arg && *arg != '"' && clusterfile_len < 1024; + arg++, clusterfile[ (++clusterfile_len)-1 ] = *arg ); + + if ( clusterfile[0] == 0 || clusterfile_len <= 1 ) { + has_clusterfile = false; + } else { + if ( clusterfile_len >= 1024 ) { + _dpd.fatalMsg("AIPreproc: clusterfile path too long ( >= 1024 )\n"); + } else if ( strlen( clusterfile ) == 0 ) { + has_clusterfile = false; + } else { + has_clusterfile = true; + clusterfile[ clusterfile_len-1 ] = 0; + strncpy ( config->clusterfile, clusterfile, clusterfile_len ); + _dpd.logMsg(" clusterfile path: %s\n", config->clusterfile); + } + } + } + + /* Parsing cluster options */ + while ( preg_match ( "\\s*(cluster\\s*\\(\\s*)([^\\)]+)\\)", args, &matches, &nmatches ) > 0 ) + { + if ( !has_clusterfile ) + { + _dpd.fatalMsg ( "AIPreproc: cluster option specified in configuration: '%s'\nBut no 'clusterfile' option was specified", matches[1] ); + } + + memset ( label, 0, sizeof(label) ); + min_val = -1; + max_val = -1; + type = none; + + match = strdup ( matches[1] ); + offset = (int) strcasestr ( args, matches[0] ) - (int) args; + len = strlen ( matches[0] ); + + for ( i=0; i < nmatches; i++ ) + free ( matches[i] ); + + free ( matches ); + matches = NULL; + + if ( preg_match ( "class\\s*=\\s*\"([^\"]+)\"", match, &matches, &nmatches ) > 0 ) + { + if ( !strcasecmp ( matches[0], "src_port" )) + type = src_port; + else if ( !strcasecmp ( matches[0], "dst_port" )) + type = dst_port; + else if ( !strcasecmp ( matches[0], "src_addr" )) + type = src_addr; + else if ( !strcasecmp ( matches[0], "dst_addr" )) + type = dst_addr; + else if ( !strcasecmp ( matches[0], "time" )) + type = timestamp; + else + _dpd.fatalMsg ( "AIPreproc: Unknown class type in configuration: '%s'\n", matches[0] ); + + for ( i=0; i < nmatches; i++ ) + free ( matches[i] ); + + free ( matches ); + matches = NULL; + } + + if ( preg_match ( "name\\s*=\\s*\"([^\"]+)\"", match, &matches, &nmatches ) > 0 ) + { + if ( strlen( matches[0] ) > sizeof(label) ) + _dpd.fatalMsg ( "AIPreproc: Label name too long in configuration: '%s' (maximum allowed length: %d)\n", + matches[0], sizeof(label) ); + + strncpy ( label, matches[0], sizeof(label) ); + + for ( i=0; i < nmatches; i++ ) + free ( matches[i] ); + + free ( matches ); + matches = NULL; + } + + if ( preg_match ( "range\\s*=\\s*\"([^\"]+)\"", match, &matches, &nmatches ) > 0 ) + { + arg = strdup ( matches[0] ); + + for ( i=0; i < nmatches; i++ ) + free ( matches[i] ); + + free ( matches ); + matches = NULL; + + switch ( type ) + { + case src_port: + case dst_port: + if ( 
preg_match ( "^([0-9]+)-([0-9]+)$", arg, &matches, &nmatches ) > 0 ) + { + min_val = strtoul ( matches[0], NULL, 10 ); + max_val = strtoul ( matches[1], NULL, 10 ); + + for ( i=0; i < nmatches; i++ ) + free ( matches[i] ); + + free ( matches ); + matches = NULL; + } else if ( preg_match ( "^([0-9]+)$", arg, &matches, &nmatches ) > 0 ) { + min_val = strtoul ( matches[0], NULL, 10 ); + max_val = min_val; + + for ( i=0; i < nmatches; i++ ) + free ( matches[i] ); + + free ( matches ); + matches = NULL; + } else { + _dpd.fatalMsg ( "AIPreproc: Unallowed format for a port range in configuration file: '%s'\n", arg ); + } + + break; + + case src_addr: + case dst_addr: + if ( preg_match ( "^([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})/([0-9]{1,2})$", arg, &matches, &nmatches ) > 0 ) + { + if (( min_val = inet_addr ( matches[0] )) == INADDR_NONE ) + { + _dpd.fatalMsg ( "AIPreproc: Unallowed IPv4 format in configuration: '%s'\n", matches[0] ); + + for ( i=0; i < nmatches; i++ ) + free ( matches[i] ); + + free ( matches ); + matches = NULL; + } + + netmask = strtoul ( matches[1], NULL, 10 ); + + for ( i=0; i < nmatches; i++ ) + free ( matches[i] ); + + free ( matches ); + matches = NULL; + + if ( netmask > 32 ) + { + _dpd.fatalMsg ( "AIPreproc: The netmask number of bits should be in [0,32] in '%s'\n", arg ); + } + + netmask = 1 << (( 8*sizeof ( uint32_t )) - netmask ); + min_val = ntohl ( min_val ) & (~(netmask - 1)); + max_val = min_val | (netmask - 1); + } else if ( preg_match ( "^([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})$", arg, &matches, &nmatches ) > 0 ) { + if (( min_val = inet_addr ( matches[0] )) == INADDR_NONE ) + { + _dpd.fatalMsg ( "AIPreproc: Unallowed IPv4 format in configuration: '%s'\n", matches[0] ); + + for ( i=0; i < nmatches; i++ ) + free ( matches[i] ); + + free ( matches ); + matches = NULL; + } + + for ( i=0; i < nmatches; i++ ) + free ( matches[i] ); + + free ( matches ); + matches = NULL; + + min_val = ntohl ( min_val ); + max_val = min_val; + } else { + _dpd.fatalMsg ( "AIPreproc: Invalid value for an IP address or a subnet in configuration: '%s'\n", arg ); + } + + break; + + /* TODO Manage ranges and clusters for timestamps (and more?) 
here */ + default: + break; + } + + if ( matches ) + { + for ( i=0; i < nmatches; i++ ) + free ( matches[i] ); + + free ( matches ); + matches = NULL; + } + + if ( arg ) + { + free ( arg ); + arg = NULL; + } + } + + for ( i=offset; i <= strlen(args); i++ ) + args[i] = args[ i+len ]; + + if ( min_val == -1 || max_val == -1 || type == none || strlen ( label ) == 0 ) + { + _dpd.fatalMsg ( "AIPreproc: Invalid cluster in configuration: '%s'\nAll of the following fields are required: class, range, name\n", match ); + free ( match ); + match = NULL; + } + + if ( !( hierarchy_nodes = ( hierarchy_node** ) realloc ( hierarchy_nodes, (++n_hierarchy_nodes) * sizeof(hierarchy_node) )) ) + { + _dpd.fatalMsg ( "Fatal dynamic memory allocation failure at %s:%d\n", __FILE__, __LINE__ ); + free ( match ); + match = NULL; + } + + if ( !( hierarchy_nodes[ n_hierarchy_nodes - 1 ] = ( hierarchy_node* ) malloc ( sizeof(hierarchy_node) ) )) + { + _dpd.fatalMsg ( "Fatal dynamic memory allocation failure at %s:%d\n", __FILE__, __LINE__ ); + free ( match ); + match = NULL; + } + + hierarchy_nodes[ n_hierarchy_nodes - 1 ]->type = type; + hierarchy_nodes[ n_hierarchy_nodes - 1 ]->min_val = min_val; + hierarchy_nodes[ n_hierarchy_nodes - 1 ]->max_val = max_val; + hierarchy_nodes[ n_hierarchy_nodes - 1 ]->nchildren = 0; + hierarchy_nodes[ n_hierarchy_nodes - 1 ]->children = NULL; + hierarchy_nodes[ n_hierarchy_nodes - 1 ]->parent = NULL; + + strncpy ( hierarchy_nodes[ n_hierarchy_nodes - 1 ]->label, + label, + sizeof ( hierarchy_nodes[ n_hierarchy_nodes - 1 ]->label )); + + free ( match ); + match = NULL; + } + + if ( ! has_cleanup_interval ) + { + config->hashCleanupInterval = 60; + } + + if ( ! has_stream_expire_interval ) + { + config->streamExpireInterval = 600; + } + + if ( ! has_alertfile ) + { + strcpy ( config->alertfile, "/var/log/snort/alert" ); + } + + if ( has_clusterfile ) + { + if ( ! hierarchy_nodes ) + { + _dpd.fatalMsg ( "AIPreproc: cluster file specified in the configuration but no clusters were specified\n" ); + } + + if ( ! 
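+		/* as with the defaults applied just above, fall back to 600 seconds
+		 * when no alert clustering interval was configured */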
alert_clustering_interval ) + { + config->alertClusteringInterval = 600; + } + + AI_hierarchies_build ( config, hierarchy_nodes, n_hierarchy_nodes ); + } + + return config; +} /* ----- end of function AI_config ----- */ + + +/** + * FUNCTION: AI_process + * \brief Function executed every time the module receives a packet to be processed + * \param pkt void* pointer to the packet data + * \param context void* pointer to the context + */ + +void AI_process(void *pkt, void *context) +{ + SFSnortPacket *p = (SFSnortPacket *) pkt; + AI_config *config; + + sfPolicyUserPolicySet(ex_config, _dpd.getRuntimePolicy()); + config = (AI_config * ) sfPolicyUserDataGetCurrent (ex_config); + + if (config == NULL) + return; + + if (!p->ip4_header || p->ip4_header->proto != IPPROTO_TCP || !p->tcp_header) + { + /* Not for me, return */ + return; + } + + AI_pkt_enqueue ( pkt ); +} /* ----- end of function AI_process ----- */ + +#ifdef SNORT_RELOAD +static void AI_reload(char *args) +{ + AI_config *config; + tSfPolicyId policy_id = _dpd.getParserPolicy(); + + _dpd.logMsg("AI dynamic preprocessor configuration\n"); + + if (ex_swap_config == NULL) + { + ex_swap_config = sfPolicyConfigCreate(); + if (ex_swap_config == NULL) + _dpd.fatalMsg("Could not allocate configuration struct.\n"); + } + + config = AI_parse(args); + sfPolicyUserPolicySet(ex_swap_config, policy_id); + sfPolicyUserDataSetCurrent(ex_swap_config, config); + + /* Register the preprocessor function, Transport layer, ID 10000 */ + _dpd.addPreproc(AI_process, PRIORITY_TRANSPORT, 10000, PROTO_BIT__TCP | PROTO_BIT__UDP); + + DEBUG_WRAP(_dpd.debugMsg(DEBUG_PLUGIN, "Preprocessor: AI is initialized\n");); +} + +static int AI_reloadSwapPolicyFree(tSfPolicyUserContextId config, tSfPolicyId policyId, void *data) +{ + AI_config *policy_config = (AI_config *)data; + + sfPolicyUserDataClear(config, policyId); + free(policy_config); + return 0; +} + +static void * AI_reloadSwap(void) +{ + tSfPolicyUserContextId old_config = ex_config; + + if (ex_swap_config == NULL) + return NULL; + + ex_config = ex_swap_config; + ex_swap_config = NULL; + + return (void *)old_config; +} + +static void AI_reloadSwapFree(void *data) +{ + tSfPolicyUserContextId config = (tSfPolicyUserContextId)data; + + if (data == NULL) + return; + + sfPolicyUserDataIterate(config, AI_reloadSwapPolicyFree); + sfPolicyConfigDelete(config); +} +#endif + diff --git a/spp_ai.h b/spp_ai.h new file mode 100644 index 0000000..416451e --- /dev/null +++ b/spp_ai.h @@ -0,0 +1,146 @@ +/* + * ===================================================================================== + * + * Filename: spp_ai.h + * + * Description: Header file for the preprocessor + * + * Version: 1.0 + * Created: 30/07/2010 15:47:12 + * Revision: none + * Compiler: gcc + * + * Author: BlackLight (http://0x00.ath.cx), + * Licence: GNU GPL v.3 + * Company: DO WHAT YOU WANT CAUSE A PIRATE IS FREE, YOU ARE A PIRATE! 
+ * + * ===================================================================================== + */ + +#ifndef _SPP_AI_H +#define _SPP_AI_H + +#include "sf_snort_packet.h" +#include "sf_dynamic_preprocessor.h" +#include "uthash.h" + +#define PRIVATE static + +extern DynamicPreprocessorData _dpd; +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; + +typedef enum { false, true } BOOL; + +typedef enum { + none, src_port, dst_port, src_addr, dst_addr, timestamp +} cluster_type; + +/* Each stream in the hash table is identified by the couple (src_ip, dst_port) */ +struct pkt_key +{ + uint32_t src_ip; + uint16_t dst_port; +}; + +/* Identifier of a packet in a stream */ +struct pkt_info +{ + struct pkt_key key; /* Key of the packet (src_ip, dst_port) */ + time_t timestamp; /* Timestamp */ + SFSnortPacket* pkt; /* Reference to SFSnortPacket containing packet's information */ + struct pkt_info* next; /* Pointer to the next packet in the stream */ + BOOL observed; /* Flag set if the packet is observed, i.e. associated to a security alert */ + UT_hash_handle hh; /* Make the struct 'hashable' */ +}; + +/* Data type containing the configuration of the module */ +typedef struct +{ + unsigned long hashCleanupInterval; + unsigned long streamExpireInterval; + unsigned long alertClusteringInterval; + char alertfile[1024]; + char clusterfile[1024]; +} AI_config; + +/* Data type for hierarchies used for clustering */ +typedef struct _hierarchy_node +{ + cluster_type type; + char label[256]; + int min_val; + int max_val; + int nchildren; + struct _hierarchy_node *parent; + struct _hierarchy_node **children; +} hierarchy_node; + +/* Data type for Snort alerts */ +typedef struct _AI_snort_alert { + /* Identifiers of the alert */ + unsigned int gid; + unsigned int sid; + unsigned int rev; + + /* Snort priority, description, + * classification and timestamp + * of the alert */ + unsigned short priority; + char *desc; + char *classification; + time_t timestamp; + + /* IP header information */ + uint8_t tos; + uint16_t iplen; + uint16_t id; + uint8_t ttl; + uint8_t ipproto; + uint32_t src_addr; + uint32_t dst_addr; + + /* TCP header information */ + uint16_t src_port; + uint16_t dst_port; + uint32_t sequence; + uint32_t ack; + uint8_t tcp_flags; + uint16_t window; + uint16_t tcplen; + + /* Reference to the TCP stream + * associated to the alert, if any */ + struct pkt_info *stream; + + /* Pointer to the next alert in + * the log, if any*/ + struct _AI_snort_alert *next; + + /* Hierarchies for addresses and ports, + * if the clustering algorithm is used */ + hierarchy_node *src_addr_node; + hierarchy_node *dst_addr_node; + hierarchy_node *src_port_node; + hierarchy_node *dst_port_node; + + /* If the clustering algorithm is used, + * we also count how many alerts this + * single alert groups */ + unsigned int grouped_alarms_count; +} AI_snort_alert; + +int preg_match ( const char*, char*, char***, int* ); + +void* AI_hashcleanup_thread ( void* ); +void* AI_alertparser_thread ( void* ); + +void AI_pkt_enqueue ( SFSnortPacket* ); +void AI_set_stream_observed ( struct pkt_key key ); +void AI_hierarchies_build ( AI_config*, hierarchy_node**, int ); +struct pkt_info* AI_get_stream_by_key ( struct pkt_key ); +AI_snort_alert* AI_get_alerts ( void ); + +#endif /* _SPP_AI_H */ + diff --git a/stream.c b/stream.c new file mode 100644 index 0000000..69838c5 --- /dev/null +++ b/stream.c @@ -0,0 +1,261 @@ +/* + * 
=====================================================================================
+ *
+ *       Filename:  stream.c
+ *
+ *    Description:  It manages the streams of TCP packets, keeping them in a hashtable
+ *
+ *        Version:  0.1
+ *        Created:  30/07/2010 15:02:54
+ *       Revision:  none
+ *       Compiler:  gcc
+ *
+ *         Author:  BlackLight (http://0x00.ath.cx), <blacklight@autistici.org>
+ *        Licence:  GNU GPL v.3
+ *        Company:  DO WHAT YOU WANT CAUSE A PIRATE IS FREE, YOU ARE A PIRATE!
+ *
+ * =====================================================================================
+ */
+
+#include "spp_ai.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+
+PRIVATE struct pkt_info *hash = NULL;
+PRIVATE time_t start_time = 0;
+
+
+/**
+ * FUNCTION: _AI_stream_free
+ * \brief Remove a stream from the hash table (private function)
+ * \param stream Stream to be removed
+ */
+
+PRIVATE void _AI_stream_free ( struct pkt_info* stream ) {
+	struct pkt_info *tmp = NULL;
+
+	if ( !stream || !hash || HASH_COUNT(hash) == 0 )
+		return;
+
+	HASH_FIND ( hh, hash, &(stream->key), sizeof(struct pkt_key), tmp );
+
+	if ( !tmp )
+		return;
+
+	HASH_DEL ( hash, stream );
+
+	while ( stream ) {
+		tmp = stream->next;
+
+		if ( stream->pkt ) {
+			free ( stream->pkt );
+			stream->pkt = NULL;
+		}
+
+		free ( stream );
+		stream = tmp;
+	}
+} /* ----- end of function _AI_stream_free ----- */
+
+
+/**
+ * FUNCTION: AI_hashcleanup_thread
+ * \brief Thread that periodically removes from the hash table the traffic streams
+ *        older than a certain threshold
+ * \param arg Pointer to the AI_config struct
+ */
+
+void* AI_hashcleanup_thread ( void* arg ) {
+	struct pkt_info *h, *stream;
+	time_t max_timestamp;
+	AI_config* conf = (AI_config*) arg;
+
+	while ( 1 ) {
+		/* Sleep for the specified number of seconds */
+		sleep ( conf->hashCleanupInterval );
+
+		/* If the hash is empty, go back to sleep */
+		if ( !hash || !HASH_COUNT(hash) )
+			continue;
+
+		/* Check all the streams in the hash */
+		for ( h = hash; h; h = (struct pkt_info*) h->next ) {
+			if ( h->observed ) continue;
+			max_timestamp = 0;
+
+			/* Find the maximum timestamp in the flow */
+			for ( stream = h; stream; stream = stream->next ) {
+				if ( stream->timestamp > max_timestamp )
+					max_timestamp = stream->timestamp;
+			}
+
+			/* If the most recent packet in the stream is older than the specified threshold, remove that stream */
+			if ( time(NULL) - max_timestamp > conf->streamExpireInterval ) {
+				stream = h;
+				_AI_stream_free ( stream );
+			}
+		}
+	}
+
+	/* Hey, we'll never reach this point unless 1 becomes != 1, but the return
+	 * has to be here to keep gcc from complaining */
+	return (void*) 0;
+} /* ----- end of function AI_hashcleanup_thread ----- */
+
+
+/**
+ * FUNCTION: AI_pkt_enqueue
+ * \brief Function called for appending a new packet to the hash table,
+ *        creating a new stream or appending it to an existing stream
+ * \param pkt Packet to be appended
+ */
+
+void AI_pkt_enqueue ( SFSnortPacket* pkt )
+{
+	struct pkt_key key;
+	struct pkt_info *info;
+	struct pkt_info *tmp;
+	struct pkt_info *found = NULL;
+
+	if ( start_time == 0 )
+		start_time = time (NULL);
+
+	/* If this is not an IP and/or TCP packet, it's not for me */
+	if ( !( pkt->ip4_header && pkt->tcp_header ))
+		return;
+
+	if ( !( info = (struct pkt_info*) malloc( sizeof(struct pkt_info) )) )
+	{
+		_dpd.fatalMsg ( "\nDynamic memory allocation failure at %s:%d\n", __FILE__, __LINE__ );
+	}
+
+	memset ( &key, 0, sizeof(struct pkt_key));
+	key.src_ip = pkt->ip4_header->source.s_addr;
+	key.dst_port = pkt->tcp_header->destination_port;
+
+	info->key = key;
+
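+	/* Every packet sharing this (src_ip, dst_port) key is treated as part of
+	 * the same stream: below it is either chained into the existing flow in
+	 * TCP sequence number order, or it becomes a new entry in the hash table */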
info->timestamp = time(NULL); + info->observed = false; + info->next = NULL; + + if ( !( info->pkt = (SFSnortPacket*) malloc ( sizeof (SFSnortPacket) )) ) + { + _dpd.fatalMsg ( "\nDynamic memory allocation failure at %s:%d\n", __FILE__, __LINE__ ); + } + + memcpy ( info->pkt, pkt, sizeof (SFSnortPacket) ); + + if ( hash ) { + HASH_FIND ( hh, hash, &key, sizeof(struct pkt_key), found ); + } + + /* If there is already an element of this traffic stream in my hash table, + * append the packet just received to this stream*/ + if ( found ) { + /* If the current packet contains a RST, just deallocate the stream */ + if ( info->pkt->tcp_header->flags & TCPHEADER_RST ) { + HASH_FIND ( hh, hash, &key, sizeof(struct pkt_key), found ); + + if ( found ) { + if ( !found->observed ) { + _AI_stream_free ( found ); + } + } + } else { + tmp = NULL; + + for ( ; found->next; found = found->next ) { + /* If the sequence number of the next packet in the stream + * is bigger than the sequence number of the current packet, + * place the current packet before that */ + if ( ntohl( found->next->pkt->tcp_header->sequence ) > + ntohl( info->pkt->tcp_header->sequence ) ) { + tmp = found->next; + found->next = info; + info->next = tmp; + break; + } + } + + if ( !tmp ) { + found->next = info; + } + + /* If the current packet contains an ACK and the latest one + * in this stream contained a FIN, then the communication + * on this stream is over */ + if ( found->pkt->tcp_header->flags & TCPHEADER_FIN ) { + if ( info->pkt->tcp_header->flags & TCPHEADER_ACK ) { + HASH_FIND ( hh, hash, &key, sizeof(struct pkt_key), found ); + + if ( found ) { + if ( !found->observed ) { + _AI_stream_free ( found ); + } + } + } + } + } + } else { + /* If the packet contains the ACK flag, no payload and it is + * associated to no active stream, just ignore it */ + /* if ( pkt->tcp_header->flags & TCPHEADER_ACK ) { */ + /* return; */ + /* } */ + + /* If there is no stream associated to this packet, create + * a new node in the hash table */ + HASH_ADD ( hh, hash, key, sizeof(struct pkt_key), info ); + } + + return; +} /* ----- end of function AI_pkt_enqueue ----- */ + + +/** + * FUNCTION: AI_get_stream_by_key + * \brief Get a TCP stream by key + * \param key Key of the stream to be picked up (struct pkt_key) + * \return A pkt_info pointer to the stream if found, NULL otherwise + */ + +struct pkt_info* +AI_get_stream_by_key ( struct pkt_key key ) +{ + struct pkt_info *info = NULL; + HASH_FIND ( hh, hash, &key, sizeof (struct pkt_key), info ); + + /* If no stream was found with that key, return */ + if ( info == NULL ) + return NULL; + + /* If the timestamp of the stream is older than the start time, return */ + if ( info->timestamp < start_time ) + return NULL; + + return info; +} /* ----- end of function AI_get_stream_by_key ----- */ + + +/** + * FUNCTION: AI_set_stream_observed + * \brief Set the flag "observed" on a stream associated to a security alert, so that it won't be removed from the hash table + * \param key Key of the stream to be set as "observed" + */ + +void +AI_set_stream_observed ( struct pkt_key key ) +{ + struct pkt_info *info = NULL; + HASH_FIND ( hh, hash, &key, sizeof (struct pkt_key), info ); + + if ( info == NULL ) + return; + + info->observed = true; +} /* ----- end of function AI_set_stream_observed ----- */ + diff --git a/tags b/tags new file mode 100644 index 0000000..b4461c4 --- /dev/null +++ b/tags @@ -0,0 +1,690 @@ +!_TAG_FILE_FORMAT 2 /extended format; --format=1 will not append ;" to lines/ +!_TAG_FILE_SORTED 1 
/0=unsorted, 1=sorted, 2=foldcase/ +!_TAG_PROGRAM_AUTHOR Darren Hiebert /dhiebert@users.sourceforge.net/ +!_TAG_PROGRAM_NAME Exuberant Ctags // +!_TAG_PROGRAM_URL http://ctags.sourceforge.net /official site/ +!_TAG_PROGRAM_VERSION 5.8 // +AI_alertparser_thread alert_parser.c /^AI_alertparser_thread ( void* arg )$/;" f signature:( void* arg ) +AI_alertparser_thread spp_ai.h /^void* AI_alertparser_thread ( void* );$/;" p signature:( void* ) +AI_config spp_ai.h /^} AI_config;$/;" t typeref:struct:__anon3 +AI_get_alerts alert_parser.c /^AI_get_alerts ()$/;" f +AI_get_alerts spp_ai.h /^AI_snort_alert* AI_get_alerts ( void );$/;" p signature:( void ) +AI_get_stream_by_key spp_ai.h /^struct pkt_info* AI_get_stream_by_key ( struct pkt_key );$/;" p signature:( struct pkt_key ) +AI_get_stream_by_key stream.c /^AI_get_stream_by_key ( struct pkt_key key )$/;" f signature:( struct pkt_key key ) +AI_hashcleanup_thread spp_ai.h /^void* AI_hashcleanup_thread ( void* );$/;" p signature:( void* ) +AI_hashcleanup_thread stream.c /^void* AI_hashcleanup_thread ( void* arg ) {$/;" f signature:( void* arg ) +AI_hierarchies_build cluster.c /^AI_hierarchies_build ( AI_config *conf, hierarchy_node **nodes, int n_nodes )$/;" f signature:( AI_config *conf, hierarchy_node **nodes, int n_nodes ) +AI_hierarchies_build spp_ai.h /^void AI_hierarchies_build ( AI_config*, hierarchy_node**, int );$/;" p signature:( AI_config*, hierarchy_node**, int ) +AI_init spp_ai.c /^static void AI_init(char *);$/;" p file: signature:(char *) +AI_init spp_ai.c /^static void AI_init(char *args)$/;" f file: signature:(char *args) +AI_parse spp_ai.c /^static AI_config * AI_parse(char *);$/;" p file: signature:(char *) +AI_parse spp_ai.c /^static AI_config * AI_parse(char *args)$/;" f file: signature:(char *args) +AI_pkt_enqueue spp_ai.h /^void AI_pkt_enqueue ( SFSnortPacket* );$/;" p signature:( SFSnortPacket* ) +AI_pkt_enqueue stream.c /^void AI_pkt_enqueue ( SFSnortPacket* pkt )$/;" f signature:( SFSnortPacket* pkt ) +AI_process spp_ai.c /^static void AI_process(void *, void *);$/;" p file: signature:(void *, void *) +AI_process spp_ai.c /^void AI_process(void *pkt, void *context)$/;" f signature:(void *pkt, void *context) +AI_reload spp_ai.c /^static void AI_reload(char *);$/;" p file: signature:(char *) +AI_reload spp_ai.c /^static void AI_reload(char *args)$/;" f file: signature:(char *args) +AI_reloadSwap spp_ai.c /^static void * AI_reloadSwap(void)$/;" f file: signature:(void) +AI_reloadSwap spp_ai.c /^static void * AI_reloadSwap(void);$/;" p file: signature:(void) +AI_reloadSwapFree spp_ai.c /^static void AI_reloadSwapFree(void *);$/;" p file: signature:(void *) +AI_reloadSwapFree spp_ai.c /^static void AI_reloadSwapFree(void *data)$/;" f file: signature:(void *data) +AI_reloadSwapPolicyFree spp_ai.c /^static int AI_reloadSwapPolicyFree(tSfPolicyUserContextId config, tSfPolicyId policyId, void *data)$/;" f file: signature:(tSfPolicyUserContextId config, tSfPolicyId policyId, void *data) +AI_reloadSwapPolicyFree spp_ai.c /^static int AI_reloadSwapPolicyFree(tSfPolicyUserContextId, tSfPolicyId, void *);$/;" p file: signature:(tSfPolicyUserContextId, tSfPolicyId, void *) +AI_set_stream_observed spp_ai.h /^void AI_set_stream_observed ( struct pkt_key key );$/;" p signature:( struct pkt_key key ) +AI_set_stream_observed stream.c /^AI_set_stream_observed ( struct pkt_key key )$/;" f signature:( struct pkt_key key ) +AI_setup sf_preproc_info.h /^extern void AI_setup();$/;" p signature:() +AI_setup spp_ai.c /^void AI_setup(void)$/;" f 
signature:(void) +AI_snort_alert spp_ai.h /^} AI_snort_alert;$/;" t typeref:struct:_AI_snort_alert +BOOL spp_ai.h /^typedef enum { false, true } BOOL;$/;" t typeref:enum:__anon1 +BUILD_VERSION sf_preproc_info.h 6;" d +CDL_DELETE uthash/utlist.h 456;" d +CDL_FOREACH uthash/utlist.h 467;" d +CDL_FOREACH_SAFE uthash/utlist.h 470;" d +CDL_PREPEND uthash/utlist.h 442;" d +CDL_SEARCH uthash/utlist.h 482;" d +CDL_SEARCH_SCALAR uthash/utlist.h 475;" d +CDL_SORT uthash/utlist.h 214;" d +CMDLINE Makefile /^CMDLINE=-g -O2 -fvisibility=hidden -fno-strict-aliasing -Wall -fstack-protector$/;" m +DECLTYPE uthash/uthash.h 36;" d +DECLTYPE uthash/uthash.h 39;" d +DECLTYPE uthash/uthash.h 42;" d +DECLTYPE_ASSIGN uthash/uthash.h 46;" d +DECLTYPE_ASSIGN uthash/uthash.h 52;" d +DEFINES Makefile /^DEFINES=-D_GNU_SOURCE -D_XOPEN_SOURCE -DDYNAMIC_PLUGIN -DSUP_IP6 -DENABLE_MYSQL -DHAVE_CONFIG_H$/;" m +DL_APPEND uthash/utlist.h 396;" d +DL_DELETE uthash/utlist.h 410;" d +DL_FOREACH uthash/utlist.h 428;" d +DL_FOREACH_SAFE uthash/utlist.h 432;" d +DL_PREPEND uthash/utlist.h 384;" d +DL_SEARCH uthash/utlist.h 437;" d +DL_SEARCH_SCALAR uthash/utlist.h 436;" d +DL_SORT uthash/utlist.h 156;" d +DYNAMIC_PREPROC_SETUP sf_preproc_info.h 9;" d +Data Fields doc/latex/struct__AI__config.tex /^\\subsection*{Data Fields}$/;" b +Data Fields doc/latex/structpkt__info.tex /^\\subsection*{Data Fields}$/;" b +Data Fields doc/latex/structpkt__key.tex /^\\subsection*{Data Fields}$/;" b +Data Structure Documentation doc/latex/refman.tex /^\\chapter{Data Structure Documentation}$/;" c +Data Structure Index doc/latex/refman.tex /^\\chapter{Data Structure Index}$/;" c +Data Structures doc/latex/annotated.tex /^\\section{Data Structures}$/;" s +Defines doc/latex/sf__preproc__info_8h.tex /^\\subsection*{Defines}$/;" b +Detailed Description doc/latex/group__sfPolicyConfig.tex /^\\subsection{Detailed Description}$/;" b +DynamicPreprocessorFatalMessage sf_dynamic_preproc_lib.c /^NORETURN void DynamicPreprocessorFatalMessage(const char *format, ...)$/;" f signature:(const char *format, ...) 
+ELMT_FROM_HH uthash/uthash.h 80;" d +Field Documentation doc/latex/struct__AI__config.tex /^\\subsection{Field Documentation}$/;" b +Field Documentation doc/latex/structpkt__info.tex /^\\subsection{Field Documentation}$/;" b +Field Documentation doc/latex/structpkt__key.tex /^\\subsection{Field Documentation}$/;" b +File Documentation doc/latex/refman.tex /^\\chapter{File Documentation}$/;" c +File Index doc/latex/refman.tex /^\\chapter{File Index}$/;" c +File List doc/latex/files.tex /^\\section{File List}$/;" s +Function Documentation doc/latex/group__sfPolicyConfig.tex /^\\subsection{Function Documentation}$/;" b +Functions doc/latex/group__sfPolicyConfig.tex /^\\subsection*{Functions}$/;" b +HASH_ADD uthash/uthash.h 147;" d +HASH_ADD_INT uthash/uthash.h 234;" d +HASH_ADD_KEYPTR uthash/uthash.h 150;" d +HASH_ADD_PTR uthash/uthash.h 238;" d +HASH_ADD_STR uthash/uthash.h 230;" d +HASH_ADD_TO_BKT uthash/uthash.h 649;" d +HASH_BER uthash/uthash.h 325;" d +HASH_BKT_CAPACITY_THRESH uthash/uthash.h 77;" d +HASH_BLOOM_ADD uthash/uthash.h 115;" d +HASH_BLOOM_ADD uthash/uthash.h 124;" d +HASH_BLOOM_BITLEN uthash/uthash.h 96;" d +HASH_BLOOM_BITSET uthash/uthash.h 112;" d +HASH_BLOOM_BITTEST uthash/uthash.h 113;" d +HASH_BLOOM_BYTELEN uthash/uthash.h 97;" d +HASH_BLOOM_FREE uthash/uthash.h 107;" d +HASH_BLOOM_FREE uthash/uthash.h 123;" d +HASH_BLOOM_MAKE uthash/uthash.h 122;" d +HASH_BLOOM_MAKE uthash/uthash.h 98;" d +HASH_BLOOM_SIGNATURE uthash/uthash.h 914;" d +HASH_BLOOM_TEST uthash/uthash.h 118;" d +HASH_BLOOM_TEST uthash/uthash.h 125;" d +HASH_CLEAR uthash/uthash.h 879;" d +HASH_CNT uthash/uthash.h 890;" d +HASH_COUNT uthash/uthash.h 889;" d +HASH_DEL uthash/uthash.h 240;" d +HASH_DELETE uthash/uthash.h 192;" d +HASH_DEL_IN_BKT uthash/uthash.h 663;" d +HASH_EMIT_KEY uthash/uthash.h 307;" d +HASH_EMIT_KEY uthash/uthash.h 314;" d +HASH_EXPAND_BUCKETS uthash/uthash.h 704;" d +HASH_FCN uthash/uthash.h 319;" d +HASH_FCN uthash/uthash.h 321;" d +HASH_FIND uthash/uthash.h 82;" d +HASH_FIND_INT uthash/uthash.h 232;" d +HASH_FIND_IN_BKT uthash/uthash.h 635;" d +HASH_FIND_PTR uthash/uthash.h 236;" d +HASH_FIND_STR uthash/uthash.h 228;" d +HASH_FNV uthash/uthash.h 347;" d +HASH_FSCK uthash/uthash.h 248;" d +HASH_FSCK uthash/uthash.h 300;" d +HASH_INITIAL_NUM_BUCKETS uthash/uthash.h 75;" d +HASH_INITIAL_NUM_BUCKETS_LOG2 uthash/uthash.h 76;" d +HASH_JEN uthash/uthash.h 386;" d +HASH_JEN_MIX uthash/uthash.h 373;" d +HASH_KEYCMP uthash/uthash.h 632;" d +HASH_MAKE_TABLE uthash/uthash.h 128;" d +HASH_MUR uthash/uthash.h 494;" d +HASH_MUR uthash/uthash.h 496;" d +HASH_MUR_ALIGNED uthash/uthash.h 534;" d +HASH_MUR_UNALIGNED uthash/uthash.h 500;" d +HASH_OAT uthash/uthash.h 357;" d +HASH_OOPS uthash/uthash.h 247;" d +HASH_SAX uthash/uthash.h 337;" d +HASH_SELECT uthash/uthash.h 841;" d +HASH_SFH uthash/uthash.h 438;" d +HASH_SIGNATURE uthash/uthash.h 913;" d +HASH_SORT uthash/uthash.h 756;" d +HASH_SRT uthash/uthash.h 757;" d +HASH_TO_BKT uthash/uthash.h 175;" d +INCLUDES Makefile /^INCLUDES=-I. -I..\/..\/.. 
-I..\/include -I.\/uthash$/;" m +InitializePreprocessor sf_dynamic_preproc_lib.c /^PREPROC_LINKAGE int InitializePreprocessor(DynamicPreprocessorData *dpd)$/;" f signature:(DynamicPreprocessorData *dpd) +LDECLTYPE uthash/utlist.h 66;" d +LDECLTYPE uthash/utlist.h 69;" d +LDECLTYPE uthash/utlist.h 72;" d +LDLINKS Makefile /^LDLINKS=-lpthread$/;" m +LDOPTIONS Makefile /^LDOPTIONS=-export-dynamic -rpath ${PREPROC_PATH}$/;" m +LIBPATH Makefile /^LIBPATH=-L\/usr\/lib$/;" m +LIBTOOL Makefile /^LIBTOOL=.\/libtool --tag=CC $/;" m +LL_APPEND uthash/utlist.h 293;" d +LL_APPEND uthash/utlist.h 354;" d +LL_APPEND uthash/utlist.h 355;" d +LL_APPEND_VS2008 uthash/utlist.h 323;" d +LL_DELETE uthash/utlist.h 306;" d +LL_DELETE uthash/utlist.h 356;" d +LL_DELETE uthash/utlist.h 357;" d +LL_DELETE_VS2008 uthash/utlist.h 335;" d +LL_FOREACH uthash/utlist.h 361;" d +LL_FOREACH_SAFE uthash/utlist.h 364;" d +LL_PREPEND uthash/utlist.h 287;" d +LL_SEARCH uthash/utlist.h 374;" d +LL_SEARCH_SCALAR uthash/utlist.h 367;" d +LL_SORT uthash/utlist.h 100;" d +LibVersion sf_dynamic_preproc_lib.c /^PREPROC_LINKAGE int LibVersion(DynamicPluginMeta *dpm)$/;" f signature:(DynamicPluginMeta *dpm) +MAJOR_VERSION sf_preproc_info.h 4;" d +MINOR_VERSION sf_preproc_info.h 5;" d +Module Documentation doc/latex/refman.tex /^\\chapter{Module Documentation}$/;" c +Module Index doc/latex/refman.tex /^\\chapter{Module Index}$/;" c +Modules doc/latex/modules.tex /^\\section{Modules}$/;" s +NO_DECLTYPE uthash/uthash.h 38;" d +NO_DECLTYPE uthash/utlist.h 68;" d +OBJECTS Makefile /^OBJECTS=\\$/;" m +OUTPUT Makefile /^OUTPUT=libsf_ai_preproc.la$/;" m +PREPROC_NAME sf_preproc_info.h 7;" d +PREPROC_PATH Makefile /^PREPROC_PATH=\/home\/blacklight\/local\/snort\/lib\/snort_dynamicpreprocessor$/;" m +PRIVATE spp_ai.h 27;" d +SF_PREPROC_INFO_H_ sf_preproc_info.h 2;" d +SearchBox doc/html/search/search.js /^function SearchBox(name, resultsPath, inFrame, label)$/;" c +SearchBox.Activate doc/html/search/search.js /^ this.Activate = function(isActive)$/;" m +SearchBox.CloseResultsWindow doc/html/search/search.js /^ this.CloseResultsWindow = function()$/;" m +SearchBox.CloseSelectionWindow doc/html/search/search.js /^ this.CloseSelectionWindow = function()$/;" m +SearchBox.DOMPopupSearchResults doc/html/search/search.js /^ this.DOMPopupSearchResults = function()$/;" m +SearchBox.DOMPopupSearchResultsWindow doc/html/search/search.js /^ this.DOMPopupSearchResultsWindow = function()$/;" m +SearchBox.DOMSearchBox doc/html/search/search.js /^ this.DOMSearchBox = function()$/;" m +SearchBox.DOMSearchClose doc/html/search/search.js /^ this.DOMSearchClose = function()$/;" m +SearchBox.DOMSearchField doc/html/search/search.js /^ this.DOMSearchField = function()$/;" m +SearchBox.DOMSearchSelect doc/html/search/search.js /^ this.DOMSearchSelect = function()$/;" m +SearchBox.DOMSearchSelectWindow doc/html/search/search.js /^ this.DOMSearchSelectWindow = function()$/;" m +SearchBox.OnSearchFieldChange doc/html/search/search.js /^ this.OnSearchFieldChange = function(evt)$/;" m +SearchBox.OnSearchFieldFocus doc/html/search/search.js /^ this.OnSearchFieldFocus = function(isActive)$/;" m +SearchBox.OnSearchSelectHide doc/html/search/search.js /^ this.OnSearchSelectHide = function()$/;" m +SearchBox.OnSearchSelectKey doc/html/search/search.js /^ this.OnSearchSelectKey = function(evt)$/;" m +SearchBox.OnSearchSelectShow doc/html/search/search.js /^ this.OnSearchSelectShow = function()$/;" m +SearchBox.OnSelectItem doc/html/search/search.js /^ this.OnSelectItem = 
function(id)$/;" m +SearchBox.Search doc/html/search/search.js /^ this.Search = function()$/;" m +SearchBox.SelectItemCount doc/html/search/search.js /^ this.SelectItemCount = function(id)$/;" m +SearchBox.SelectItemSet doc/html/search/search.js /^ this.SelectItemSet = function(id)$/;" m +SearchResults doc/html/search/search.js /^function SearchResults(name)$/;" c +SearchResults.FindChildElement doc/html/search/search.js /^ this.FindChildElement = function(id)$/;" m +SearchResults.Nav doc/html/search/search.js /^ this.Nav = function(evt,itemIndex) $/;" m +SearchResults.NavChild doc/html/search/search.js /^ this.NavChild = function(evt,itemIndex,childIndex)$/;" m +SearchResults.NavNext doc/html/search/search.js /^ this.NavNext = function(index)$/;" m +SearchResults.NavPrev doc/html/search/search.js /^ this.NavPrev = function(index)$/;" m +SearchResults.ProcessKeys doc/html/search/search.js /^ this.ProcessKeys = function(e)$/;" m +SearchResults.Search doc/html/search/search.js /^ this.Search = function(search)$/;" m +SearchResults.Toggle doc/html/search/search.js /^ this.Toggle = function(id)$/;" m +Sourcefire policy configuration module doc/latex/group__sfPolicyConfig.tex /^\\section{Sourcefire policy configuration module}$/;" s +UTARRAY_H uthash/utarray.h 28;" d +UTARRAY_VERSION uthash/utarray.h 30;" d +UTHASH_H uthash/uthash.h 25;" d +UTHASH_VERSION uthash/uthash.h 65;" d +UTLIST_H uthash/utlist.h 25;" d +UTLIST_VERSION uthash/utlist.h 27;" d +UTSTRING_H uthash/utstring.h 28;" d +UTSTRING_VERSION uthash/utstring.h 30;" d +UT_array uthash/utarray.h /^} UT_array;$/;" t typeref:struct:__anon6 +UT_hash_bucket uthash/uthash.h /^typedef struct UT_hash_bucket {$/;" s +UT_hash_bucket uthash/uthash.h /^} UT_hash_bucket;$/;" t typeref:struct:UT_hash_bucket +UT_hash_bucket::count uthash/uthash.h /^ unsigned count;$/;" m struct:UT_hash_bucket access:public +UT_hash_bucket::expand_mult uthash/uthash.h /^ unsigned expand_mult;$/;" m struct:UT_hash_bucket access:public +UT_hash_bucket::hh_head uthash/uthash.h /^ struct UT_hash_handle *hh_head;$/;" m struct:UT_hash_bucket typeref:struct:UT_hash_bucket::UT_hash_handle access:public +UT_hash_handle uthash/uthash.h /^typedef struct UT_hash_handle {$/;" s +UT_hash_handle uthash/uthash.h /^} UT_hash_handle;$/;" t typeref:struct:UT_hash_handle +UT_hash_handle::hashv uthash/uthash.h /^ unsigned hashv; \/* result of hash-fcn(key) *\/$/;" m struct:UT_hash_handle access:public +UT_hash_handle::hh_next uthash/uthash.h /^ struct UT_hash_handle *hh_next; \/* next hh in bucket order *\/$/;" m struct:UT_hash_handle typeref:struct:UT_hash_handle::UT_hash_handle access:public +UT_hash_handle::hh_prev uthash/uthash.h /^ struct UT_hash_handle *hh_prev; \/* previous hh in bucket order *\/$/;" m struct:UT_hash_handle typeref:struct:UT_hash_handle::UT_hash_handle access:public +UT_hash_handle::key uthash/uthash.h /^ void *key; \/* ptr to enclosing struct's key *\/$/;" m struct:UT_hash_handle access:public +UT_hash_handle::keylen uthash/uthash.h /^ unsigned keylen; \/* enclosing struct's key len *\/$/;" m struct:UT_hash_handle access:public +UT_hash_handle::next uthash/uthash.h /^ void *next; \/* next element in app order *\/$/;" m struct:UT_hash_handle access:public +UT_hash_handle::prev uthash/uthash.h /^ void *prev; \/* prev element in app order *\/$/;" m struct:UT_hash_handle access:public +UT_hash_handle::tbl uthash/uthash.h /^ struct UT_hash_table *tbl;$/;" m struct:UT_hash_handle typeref:struct:UT_hash_handle::UT_hash_table access:public +UT_hash_table uthash/uthash.h 
/^typedef struct UT_hash_table {$/;" s +UT_hash_table uthash/uthash.h /^} UT_hash_table;$/;" t typeref:struct:UT_hash_table +UT_hash_table::bloom_bv uthash/uthash.h /^ uint8_t *bloom_bv;$/;" m struct:UT_hash_table access:public +UT_hash_table::bloom_nbits uthash/uthash.h /^ char bloom_nbits;$/;" m struct:UT_hash_table access:public +UT_hash_table::bloom_sig uthash/uthash.h /^ uint32_t bloom_sig; \/* used only to test bloom exists in external analysis *\/$/;" m struct:UT_hash_table access:public +UT_hash_table::buckets uthash/uthash.h /^ UT_hash_bucket *buckets;$/;" m struct:UT_hash_table access:public +UT_hash_table::hho uthash/uthash.h /^ ptrdiff_t hho; \/* hash handle offset (byte pos of hash handle in element *\/$/;" m struct:UT_hash_table access:public +UT_hash_table::ideal_chain_maxlen uthash/uthash.h /^ unsigned ideal_chain_maxlen;$/;" m struct:UT_hash_table access:public +UT_hash_table::ineff_expands uthash/uthash.h /^ unsigned ineff_expands, noexpand;$/;" m struct:UT_hash_table access:public +UT_hash_table::log2_num_buckets uthash/uthash.h /^ unsigned num_buckets, log2_num_buckets;$/;" m struct:UT_hash_table access:public +UT_hash_table::noexpand uthash/uthash.h /^ unsigned ineff_expands, noexpand;$/;" m struct:UT_hash_table access:public +UT_hash_table::nonideal_items uthash/uthash.h /^ unsigned nonideal_items;$/;" m struct:UT_hash_table access:public +UT_hash_table::num_buckets uthash/uthash.h /^ unsigned num_buckets, log2_num_buckets;$/;" m struct:UT_hash_table access:public +UT_hash_table::num_items uthash/uthash.h /^ unsigned num_items;$/;" m struct:UT_hash_table access:public +UT_hash_table::signature uthash/uthash.h /^ uint32_t signature; \/* used only to find hash tables in external analysis *\/$/;" m struct:UT_hash_table access:public +UT_hash_table::tail uthash/uthash.h /^ struct UT_hash_handle *tail; \/* tail hh in app order, for fast append *\/$/;" m struct:UT_hash_table typeref:struct:UT_hash_table::UT_hash_handle access:public +UT_icd uthash/utarray.h /^} UT_icd;$/;" t typeref:struct:__anon5 +UT_string uthash/utstring.h /^} UT_string;$/;" t typeref:struct:__anon4 +_ AI _ config Struct Reference doc/latex/struct__AI__config.tex /^\\section{\\_\\-AI\\_\\-config Struct Reference}$/;" s +_AI_cluster_thread cluster.c /^_AI_cluster_thread ( void* arg )$/;" f signature:( void* arg ) +_AI_snort_alert spp_ai.h /^typedef struct _AI_snort_alert {$/;" s +_AI_snort_alert::ack spp_ai.h /^ uint32_t ack;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::classification spp_ai.h /^ char *classification;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::desc spp_ai.h /^ char *desc;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::dst_addr spp_ai.h /^ uint32_t dst_addr;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::dst_addr_node spp_ai.h /^ hierarchy_node *dst_addr_node;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::dst_port spp_ai.h /^ uint16_t dst_port;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::dst_port_node spp_ai.h /^ hierarchy_node *dst_port_node;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::gid spp_ai.h /^ unsigned int gid;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::grouped_alarms_count spp_ai.h /^ unsigned int grouped_alarms_count;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::id spp_ai.h /^ uint16_t id;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::iplen spp_ai.h /^ uint16_t iplen;$/;" m struct:_AI_snort_alert access:public 
+_AI_snort_alert::ipproto spp_ai.h /^ uint8_t ipproto;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::next spp_ai.h /^ struct _AI_snort_alert *next;$/;" m struct:_AI_snort_alert typeref:struct:_AI_snort_alert::_AI_snort_alert access:public +_AI_snort_alert::priority spp_ai.h /^ unsigned short priority;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::rev spp_ai.h /^ unsigned int rev;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::sequence spp_ai.h /^ uint32_t sequence;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::sid spp_ai.h /^ unsigned int sid;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::src_addr spp_ai.h /^ uint32_t src_addr;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::src_addr_node spp_ai.h /^ hierarchy_node *src_addr_node;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::src_port spp_ai.h /^ uint16_t src_port;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::src_port_node spp_ai.h /^ hierarchy_node *src_port_node;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::stream spp_ai.h /^ struct pkt_info *stream;$/;" m struct:_AI_snort_alert typeref:struct:_AI_snort_alert::pkt_info access:public +_AI_snort_alert::tcp_flags spp_ai.h /^ uint8_t tcp_flags;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::tcplen spp_ai.h /^ uint16_t tcplen;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::timestamp spp_ai.h /^ time_t timestamp;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::tos spp_ai.h /^ uint8_t tos;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::ttl spp_ai.h /^ uint8_t ttl;$/;" m struct:_AI_snort_alert access:public +_AI_snort_alert::window spp_ai.h /^ uint16_t window;$/;" m struct:_AI_snort_alert access:public +_AI_stream_free stream.c /^PRIVATE void _AI_stream_free ( struct pkt_info* stream ) {$/;" f signature:( struct pkt_info* stream ) +_CASTASGN uthash/utlist.h 85;" d +_CASTASGN uthash/utlist.h 93;" d +_NEXT uthash/utlist.h 80;" d +_NEXT uthash/utlist.h 88;" d +_NEXTASGN uthash/utlist.h 81;" d +_NEXTASGN uthash/utlist.h 89;" d +_PREV uthash/utlist.h 82;" d +_PREV uthash/utlist.h 90;" d +_PREVASGN uthash/utlist.h 83;" d +_PREVASGN uthash/utlist.h 91;" d +_RS uthash/utlist.h 84;" d +_RS uthash/utlist.h 92;" d +_SPP_AI_H spp_ai.h 21;" d +_SV uthash/utlist.h 79;" d +_SV uthash/utlist.h 87;" d +_UNUSED_ uthash/utarray.h /^static const UT_icd ut_int_icd _UNUSED_ = {sizeof(int),NULL,NULL,NULL};$/;" v +_UNUSED_ uthash/utarray.h /^static const UT_icd ut_str_icd _UNUSED_ = {sizeof(char*),NULL,utarray_str_cpy,utarray_str_dtor};$/;" v +_UNUSED_ uthash/utarray.h 33;" d +_UNUSED_ uthash/utarray.h 35;" d +_UNUSED_ uthash/utstring.h 33;" d +_UNUSED_ uthash/utstring.h 35;" d +__anon3::alertClusteringInterval spp_ai.h /^ unsigned long alertClusteringInterval;$/;" m struct:__anon3 access:public +__anon3::alertfile spp_ai.h /^ char alertfile[1024];$/;" m struct:__anon3 access:public +__anon3::clusterfile spp_ai.h /^ char clusterfile[1024];$/;" m struct:__anon3 access:public +__anon3::hashCleanupInterval spp_ai.h /^ unsigned long hashCleanupInterval;$/;" m struct:__anon3 access:public +__anon3::streamExpireInterval spp_ai.h /^ unsigned long streamExpireInterval;$/;" m struct:__anon3 access:public +__anon4::d uthash/utstring.h /^ char *d;$/;" m struct:__anon4 access:public +__anon4::i uthash/utstring.h /^ size_t i; \/* index of first unused byte *\/$/;" m struct:__anon4 access:public +__anon4::n uthash/utstring.h /^ size_t n; \/* 
allocd size *\/$/;" m struct:__anon4 access:public +__anon5::copy uthash/utarray.h /^ ctor_f *copy;$/;" m struct:__anon5 access:public +__anon5::dtor uthash/utarray.h /^ dtor_f *dtor;$/;" m struct:__anon5 access:public +__anon5::init uthash/utarray.h /^ init_f *init;$/;" m struct:__anon5 access:public +__anon5::sz uthash/utarray.h /^ size_t sz;$/;" m struct:__anon5 access:public +__anon6::d uthash/utarray.h /^ char *d; \/* n slots of size icd->sz*\/$/;" m struct:__anon6 access:public +__anon6::i uthash/utarray.h /^ unsigned i,n;\/* i: index of next available slot, n: num slots *\/$/;" m struct:__anon6 access:public +__anon6::icd uthash/utarray.h /^ const UT_icd *icd; \/* initializer, copy and destructor functions *\/$/;" m struct:__anon6 access:public +__anon6::n uthash/utarray.h /^ unsigned i,n;\/* i: index of next available slot, n: num slots *\/$/;" m struct:__anon6 access:public +_config cluster.c /^PRIVATE AI_config *_config = NULL;$/;" v +_details doc/html/group__sfPolicyConfig.html /^<\/a>

Detailed Description<\/h2>$/;" a +_dpd sf_dynamic_preproc_lib.c /^DynamicPreprocessorData _dpd;$/;" v +_hierarchy_node spp_ai.h /^typedef struct _hierarchy_node$/;" s +_hierarchy_node::children spp_ai.h /^ struct _hierarchy_node **children;$/;" m struct:_hierarchy_node typeref:struct:_hierarchy_node::_hierarchy_node access:public +_hierarchy_node::label spp_ai.h /^ char label[256];$/;" m struct:_hierarchy_node access:public +_hierarchy_node::max_val spp_ai.h /^ int max_val;$/;" m struct:_hierarchy_node access:public +_hierarchy_node::min_val spp_ai.h /^ int min_val;$/;" m struct:_hierarchy_node access:public +_hierarchy_node::nchildren spp_ai.h /^ int nchildren;$/;" m struct:_hierarchy_node access:public +_hierarchy_node::parent spp_ai.h /^ struct _hierarchy_node *parent;$/;" m struct:_hierarchy_node typeref:struct:_hierarchy_node::_hierarchy_node access:public +_hierarchy_node::type spp_ai.h /^ cluster_type type;$/;" m struct:_hierarchy_node access:public +_hierarchy_node_append cluster.c /^_hierarchy_node_append ( hierarchy_node *parent, hierarchy_node *child )$/;" f signature:( hierarchy_node *parent, hierarchy_node *child ) +_hierarchy_node_new cluster.c /^_hierarchy_node_new ( char *label, int min_val, int max_val )$/;" f signature:( char *label, int min_val, int max_val ) +_utarray_eltptr uthash/utarray.h 116;" d +ack spp_ai.h /^ uint32_t ack;$/;" m struct:_AI_snort_alert access:public +alertClusteringInterval spp_ai.h /^ unsigned long alertClusteringInterval;$/;" m struct:__anon3 access:public +alert_fp alert_parser.c /^PRIVATE FILE *alert_fp = NULL;$/;" v +alert_log cluster.c /^PRIVATE AI_snort_alert *alert_log = NULL;$/;" v +alertfile spp_ai.h /^ char alertfile[1024];$/;" m struct:__anon3 access:public +alerts alert_parser.c /^PRIVATE AI_snort_alert *alerts = NULL;$/;" v +bloom_bv uthash/uthash.h /^ uint8_t *bloom_bv;$/;" m struct:UT_hash_table access:public +bloom_nbits uthash/uthash.h /^ char bloom_nbits;$/;" m struct:UT_hash_table access:public +bloom_sig uthash/uthash.h /^ uint32_t bloom_sig; \/* used only to test bloom exists in external analysis *\/$/;" m struct:UT_hash_table access:public +buckets uthash/uthash.h /^ UT_hash_bucket *buckets;$/;" m struct:UT_hash_table access:public +children spp_ai.h /^ struct _hierarchy_node **children;$/;" m struct:_hierarchy_node typeref:struct:_hierarchy_node::_hierarchy_node access:public +classification spp_ai.h /^ char *classification;$/;" m struct:_AI_snort_alert access:public +cluster_type spp_ai.h /^} cluster_type;$/;" t typeref:enum:__anon2 +clusterfile spp_ai.h /^ char clusterfile[1024];$/;" m struct:__anon3 access:public +convertToId doc/html/search/search.js /^function convertToId(search)$/;" f +copy uthash/utarray.h /^ ctor_f *copy;$/;" m struct:__anon5 access:public +count uthash/uthash.h /^ unsigned count;$/;" m struct:UT_hash_bucket access:public +ctor_f uthash/utarray.h /^typedef void (ctor_f)(void *dst, const void *src);$/;" t +d uthash/utarray.h /^ char *d; \/* n slots of size icd->sz*\/$/;" m struct:__anon6 access:public +d uthash/utstring.h /^ char *d;$/;" m struct:__anon4 access:public +define-members doc/html/sf__preproc__info_8h.html /^

<\/a>$/;" a +define-members doc/html/spp__ai_8c.html /^

<\/a>$/;" a +desc spp_ai.h /^ char *desc;$/;" m struct:_AI_snort_alert access:public +dst _ port doc/latex/structpkt__key.tex /^\\subsubsection[{dst\\_\\-port}]{\\setlength{\\rightskip}{0pt plus 5cm}{\\bf uint16\\_\\-t} {\\bf pkt\\_\\-key::dst\\_\\-port}}}$/;" b +dst_addr spp_ai.h /^ none, src_port, dst_port, src_addr, dst_addr, timestamp$/;" e enum:__anon2 +dst_addr spp_ai.h /^ uint32_t dst_addr;$/;" m struct:_AI_snort_alert access:public +dst_addr_node spp_ai.h /^ hierarchy_node *dst_addr_node;$/;" m struct:_AI_snort_alert access:public +dst_addr_root cluster.c /^PRIVATE hierarchy_node *dst_addr_root = NULL;$/;" v +dst_port spp_ai.h /^ none, src_port, dst_port, src_addr, dst_addr, timestamp$/;" e enum:__anon2 +dst_port spp_ai.h /^ uint16_t dst_port;$/;" m struct:_AI_snort_alert access:public +dst_port spp_ai.h /^ uint16_t dst_port;$/;" m struct:pkt_key access:public +dst_port_node spp_ai.h /^ hierarchy_node *dst_port_node;$/;" m struct:_AI_snort_alert access:public +dst_port_root cluster.c /^PRIVATE hierarchy_node *dst_port_root = NULL;$/;" v +dtor uthash/utarray.h /^ dtor_f *dtor;$/;" m struct:__anon5 access:public +dtor_f uthash/utarray.h /^typedef void (dtor_f)(void *elt);$/;" t +enum-members doc/html/spp__ai_8h.html /^

<\/a>$/;" a +ex_config spp_ai.c /^tSfPolicyUserContextId ex_config = NULL;$/;" v +ex_swap_config spp_ai.c /^tSfPolicyUserContextId ex_swap_config = NULL;$/;" v +expand_mult uthash/uthash.h /^ unsigned expand_mult;$/;" m struct:UT_hash_bucket access:public +false spp_ai.h /^typedef enum { false, true } BOOL;$/;" e enum:__anon1 +func-members doc/html/group__sfPolicyConfig.html /^

<\/a>$/;" a +func-members doc/html/sfPolicyUserData_8c.html /^

<\/a>$/;" a +func-members doc/html/sf__dynamic__preproc__lib_8c.html /^

<\/a>$/;" a +func-members doc/html/sf__preproc__info_8h.html /^

<\/a>$/;" a +func-members doc/html/spp__ai_8c.html /^

<\/a>$/;" a +func-members doc/html/spp__ai_8h.html /^

<\/a>$/;" a +func-members doc/html/stream_8c.html /^

<\/a>$/;" a +func_append libtool /^func_append ()$/;" f +func_arith libtool /^func_arith ()$/;" f +func_basename libtool /^func_basename ()$/;" f +func_check_version_match libtool /^func_check_version_match ()$/;" f +func_config libtool /^func_config ()$/;" f +func_dirname libtool /^func_dirname ()$/;" f +func_dirname_and_basename libtool /^func_dirname_and_basename ()$/;" f +func_echo libtool /^func_echo ()$/;" f +func_emit_cwrapperexe_src libtool /^func_emit_cwrapperexe_src ()$/;" f +func_emit_wrapper libtool /^func_emit_wrapper ()$/;" f +func_emit_wrapper_part1 libtool /^func_emit_wrapper_part1 ()$/;" f +func_emit_wrapper_part2 libtool /^func_emit_wrapper_part2 ()$/;" f +func_enable_tag libtool /^func_enable_tag ()$/;" f +func_error libtool /^func_error ()$/;" f +func_execute_cmds libtool /^func_execute_cmds ()$/;" f +func_extract_an_archive libtool /^func_extract_an_archive ()$/;" f +func_extract_archives libtool /^func_extract_archives ()$/;" f +func_fatal_configuration libtool /^func_fatal_configuration ()$/;" f +func_fatal_error libtool /^func_fatal_error ()$/;" f +func_fatal_help libtool /^func_fatal_help ()$/;" f +func_features libtool /^func_features ()$/;" f +func_generate_dlsyms libtool /^func_generate_dlsyms ()$/;" f +func_grep libtool /^func_grep ()$/;" f +func_help libtool /^func_help ()$/;" f +func_infer_tag libtool /^func_infer_tag ()$/;" f +func_lalib_p libtool /^func_lalib_p ()$/;" f +func_lalib_unsafe_p libtool /^func_lalib_unsafe_p ()$/;" f +func_len libtool /^func_len ()$/;" f +func_lo2o libtool /^func_lo2o ()$/;" f +func_ltwrapper_executable_p libtool /^func_ltwrapper_executable_p ()$/;" f +func_ltwrapper_p libtool /^func_ltwrapper_p ()$/;" f +func_ltwrapper_script_p libtool /^func_ltwrapper_script_p ()$/;" f +func_ltwrapper_scriptname libtool /^func_ltwrapper_scriptname ()$/;" f +func_missing_arg libtool /^func_missing_arg ()$/;" f +func_mkdir_p libtool /^func_mkdir_p ()$/;" f +func_mktempdir libtool /^func_mktempdir ()$/;" f +func_mode_compile libtool /^func_mode_compile ()$/;" f +func_mode_execute libtool /^func_mode_execute ()$/;" f +func_mode_finish libtool /^func_mode_finish ()$/;" f +func_mode_help libtool /^func_mode_help ()$/;" f +func_mode_install libtool /^func_mode_install ()$/;" f +func_mode_link libtool /^func_mode_link ()$/;" f +func_mode_uninstall libtool /^func_mode_uninstall ()$/;" f +func_opt_split libtool /^func_opt_split ()$/;" f +func_quote_for_eval libtool /^func_quote_for_eval ()$/;" f +func_quote_for_expand libtool /^func_quote_for_expand ()$/;" f +func_show_eval libtool /^func_show_eval ()$/;" f +func_show_eval_locale libtool /^func_show_eval_locale ()$/;" f +func_source libtool /^func_source ()$/;" f +func_stripname libtool /^func_stripname ()$/;" f +func_to_host_path libtool /^func_to_host_path ()$/;" f +func_to_host_pathlist libtool /^func_to_host_pathlist ()$/;" f +func_usage libtool /^func_usage ()$/;" f +func_verbose libtool /^func_verbose ()$/;" f +func_version libtool /^func_version ()$/;" f +func_warning libtool /^func_warning ()$/;" f +func_win32_libid libtool /^func_win32_libid ()$/;" f +func_write_libtool_object libtool /^func_write_libtool_object ()$/;" f +func_xform libtool /^func_xform ()$/;" f +get16bits uthash/uthash.h 428;" d +get16bits uthash/uthash.h 431;" d +get16bits uthash/uthash.h 435;" d +getXPos doc/html/search/search.js /^function getXPos(item)$/;" f +getYPos doc/html/search/search.js /^function getYPos(item)$/;" f +gid spp_ai.h /^ unsigned int gid;$/;" m struct:_AI_snort_alert access:public +grouped_alarms_count 
spp_ai.h /^ unsigned int grouped_alarms_count;$/;" m struct:_AI_snort_alert access:public +hash stream.c /^PRIVATE struct pkt_info *hash = NULL;$/;" v typeref:struct:pkt_info +hashCleanupInterval doc/latex/struct__AI__config.tex /^\\subsubsection[{hashCleanupInterval}]{\\setlength{\\rightskip}{0pt plus 5cm}unsigned long {\\bf \\_\\-AI\\_\\-config::hashCleanupInterval}}}$/;" b +hashCleanupInterval spp_ai.h /^ unsigned long hashCleanupInterval;$/;" m struct:__anon3 access:public +hashv uthash/uthash.h /^ unsigned hashv; \/* result of hash-fcn(key) *\/$/;" m struct:UT_hash_handle access:public +hh doc/latex/structpkt__info.tex /^\\subsubsection[{hh}]{\\setlength{\\rightskip}{0pt plus 5cm}UT\\_\\-hash\\_\\-handle {\\bf pkt\\_\\-info::hh}}}$/;" b +hh spp_ai.h /^ UT_hash_handle hh; \/* Make the struct 'hashable' *\/$/;" m struct:pkt_info access:public +hh_head uthash/uthash.h /^ struct UT_hash_handle *hh_head;$/;" m struct:UT_hash_bucket typeref:struct:UT_hash_bucket::UT_hash_handle access:public +hh_next uthash/uthash.h /^ struct UT_hash_handle *hh_next; \/* next hh in bucket order *\/$/;" m struct:UT_hash_handle typeref:struct:UT_hash_handle::UT_hash_handle access:public +hh_prev uthash/uthash.h /^ struct UT_hash_handle *hh_prev; \/* previous hh in bucket order *\/$/;" m struct:UT_hash_handle typeref:struct:UT_hash_handle::UT_hash_handle access:public +hho uthash/uthash.h /^ ptrdiff_t hho; \/* hash handle offset (byte pos of hash handle in element *\/$/;" m struct:UT_hash_table access:public +hierarchy_node spp_ai.h /^} hierarchy_node;$/;" t typeref:struct:_hierarchy_node +i uthash/utarray.h /^ unsigned i,n;\/* i: index of next available slot, n: num slots *\/$/;" m struct:__anon6 access:public +i uthash/utstring.h /^ size_t i; \/* index of first unused byte *\/$/;" m struct:__anon4 access:public +icd uthash/utarray.h /^ const UT_icd *icd; \/* initializer, copy and destructor functions *\/$/;" m struct:__anon6 access:public +id spp_ai.h /^ uint16_t id;$/;" m struct:_AI_snort_alert access:public +ideal_chain_maxlen uthash/uthash.h /^ unsigned ideal_chain_maxlen;$/;" m struct:UT_hash_table access:public +indexSectionNames.0 doc/html/search/search.js /^ 0: "all",$/;" p +indexSectionNames.1 doc/html/search/search.js /^ 1: "classes",$/;" p +indexSectionNames.2 doc/html/search/search.js /^ 2: "files",$/;" p +indexSectionNames.3 doc/html/search/search.js /^ 3: "functions",$/;" p +indexSectionNames.4 doc/html/search/search.js /^ 4: "variables",$/;" p +indexSectionNames.5 doc/html/search/search.js /^ 5: "typedefs",$/;" p +indexSectionNames.6 doc/html/search/search.js /^ 6: "enums",$/;" p +indexSectionNames.7 doc/html/search/search.js /^ 7: "enumvalues",$/;" p +indexSectionNames.8 doc/html/search/search.js /^ 8: "defines"$/;" p +indexSectionsWithContent.0 doc/html/search/search.js /^ 0: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010110111111011110101111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",$/;" p +indexSectionsWithContent.1 doc/html/search/search.js /^ 1: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",$/;" p +indexSectionsWithContent.2 doc/html/search/search.js /^ 2: 
"0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",$/;" p +indexSectionsWithContent.3 doc/html/search/search.js /^ 3: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010100100001001000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",$/;" p +indexSectionsWithContent.4 doc/html/search/search.js /^ 4: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000110010010010101110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",$/;" p +indexSectionsWithContent.5 doc/html/search/search.js /^ 5: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",$/;" p +indexSectionsWithContent.6 doc/html/search/search.js /^ 6: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",$/;" p +indexSectionsWithContent.7 doc/html/search/search.js /^ 7: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",$/;" p +indexSectionsWithContent.8 doc/html/search/search.js /^ 8: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010100100000100100100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"$/;" p +ineff_expands uthash/uthash.h /^ unsigned ineff_expands, noexpand;$/;" m struct:UT_hash_table access:public +init uthash/utarray.h /^ init_f *init;$/;" m struct:__anon5 access:public +init_f uthash/utarray.h /^typedef void (init_f)(void *elt);$/;" t +iplen spp_ai.h /^ uint16_t iplen;$/;" m struct:_AI_snort_alert access:public +ipproto spp_ai.h /^ uint8_t ipproto;$/;" m struct:_AI_snort_alert access:public +key doc/latex/structpkt__info.tex /^\\subsubsection[{key}]{\\setlength{\\rightskip}{0pt plus 5cm}struct {\\bf pkt\\_\\-key} {\\bf pkt\\_\\-info::key}}}$/;" b +key spp_ai.h /^ struct pkt_key key; \/* Key of the packet (src_ip, dst_port) *\/$/;" m struct:pkt_info typeref:struct:pkt_info::pkt_key access:public +key uthash/uthash.h /^ void *key; \/* ptr to enclosing struct's key *\/$/;" m struct:UT_hash_handle access:public +keylen uthash/uthash.h /^ unsigned keylen; \/* enclosing struct's key len *\/$/;" m struct:UT_hash_handle access:public +l00001 doc/html/sf__preproc__info_8h_source.html /^Go to the documentation of this file.<\/a>
<\/a>00001 #ifndef SF_PREPROC_INFO_H_<\/span>$/;"	a
+l00001	doc/html/spp__ai_8h_source.html	/^Go to the documentation of this file.<\/a>
<\/a>00001 \/*<\/span>$/;"	a
+l00002	doc/html/sf__preproc__info_8h_source.html	/^<\/a>00002 <\/span>#define SF_PREPROC_INFO_H_<\/span>$/;"	a
+l00002	doc/html/spp__ai_8h_source.html	/^<\/a>00002  * =====================================================================================<\/span>$/;"	a
+l00003	doc/html/sf__preproc__info_8h_source.html	/^<\/a>00003 <\/span>$/;"	a
+l00003	doc/html/spp__ai_8h_source.html	/^<\/a>00003  *<\/span>$/;"	a
+l00004	doc/html/sf__preproc__info_8h_source.html	/^<\/a>00004<\/a> #define MAJOR_VERSION   1<\/span>$/;"	a
+l00004	doc/html/spp__ai_8h_source.html	/^<\/a>00004  *       Filename:  spp_ai.h<\/span>$/;"	a
+l00005	doc/html/sf__preproc__info_8h_source.html	/^<\/a>00005<\/a> <\/span>#define MINOR_VERSION   0<\/span>$/;"	a
+l00005	doc/html/spp__ai_8h_source.html	/^<\/a>00005  *<\/span>$/;"	a
+l00006	doc/html/sf__preproc__info_8h_source.html	/^<\/a>00006<\/a> <\/span>#define BUILD_VERSION   1<\/span>$/;"	a
+l00006	doc/html/spp__ai_8h_source.html	/^<\/a>00006  *    Description:  Header file for the preprocessor<\/span>$/;"	a
+l00007	doc/html/sf__preproc__info_8h_source.html	/^<\/a>00007<\/a> <\/span>#define PREPROC_NAME    "SF_AI"<\/span>$/;"	a
+l00007	doc/html/spp__ai_8h_source.html	/^<\/a>00007  *<\/span>$/;"	a
+l00008	doc/html/sf__preproc__info_8h_source.html	/^<\/a>00008 <\/span>$/;"	a
+l00008	doc/html/spp__ai_8h_source.html	/^<\/a>00008  *        Version:  1.0<\/span>$/;"	a
+l00009	doc/html/sf__preproc__info_8h_source.html	/^<\/a>00009<\/a> #define DYNAMIC_PREPROC_SETUP   AI_setup<\/span>$/;"	a
+l00009	doc/html/spp__ai_8h_source.html	/^<\/a>00009  *        Created:  30\/07\/2010 15:47:12<\/span>$/;"	a
+l00010	doc/html/sf__preproc__info_8h_source.html	/^<\/a>00010 <\/span>extern<\/span> void<\/span> AI_setup<\/a>();$/;"	a
+l00010	doc/html/spp__ai_8h_source.html	/^<\/a>00010  *       Revision:  none<\/span>$/;"	a
+l00011	doc/html/sf__preproc__info_8h_source.html	/^<\/a>00011 $/;"	a
+l00011	doc/html/spp__ai_8h_source.html	/^<\/a>00011  *       Compiler:  gcc<\/span>$/;"	a
+l00012	doc/html/sf__preproc__info_8h_source.html	/^<\/a>00012 #endif <\/span>\/* SF_PREPROC_INFO_H_ *\/<\/span>$/;"	a
+l00012	doc/html/spp__ai_8h_source.html	/^<\/a>00012  *<\/span>$/;"	a
+l00013	doc/html/sf__preproc__info_8h_source.html	/^<\/a>00013 $/;"	a
+l00013	doc/html/spp__ai_8h_source.html	/^<\/a>00013  *         Author:  BlackLight (http:\/\/0x00.ath.cx), <blacklight@autistici.org><\/span>$/;"	a
+l00014	doc/html/spp__ai_8h_source.html	/^<\/a>00014  *        Licence:  GNU GPL v.3<\/span>$/;"	a
+l00015	doc/html/spp__ai_8h_source.html	/^<\/a>00015  *        Company:  DO WHAT YOU WANT CAUSE A PIRATE IS FREE, YOU ARE A PIRATE!<\/span>$/;"	a
+l00016	doc/html/spp__ai_8h_source.html	/^<\/a>00016  *<\/span>$/;"	a
+l00017	doc/html/spp__ai_8h_source.html	/^<\/a>00017  * =====================================================================================<\/span>$/;"	a
+l00018	doc/html/spp__ai_8h_source.html	/^<\/a>00018  *\/<\/span>$/;"	a
+l00019	doc/html/spp__ai_8h_source.html	/^<\/a>00019 $/;"	a
+l00020	doc/html/spp__ai_8h_source.html	/^<\/a>00020 #ifndef _SPP_AI_H<\/span>$/;"	a
+l00021	doc/html/spp__ai_8h_source.html	/^<\/a>00021 <\/span>#define _SPP_AI_H<\/span>$/;"	a
+l00022	doc/html/spp__ai_8h_source.html	/^<\/a>00022 <\/span>$/;"	a
+l00023	doc/html/spp__ai_8h_source.html	/^<\/a>00023 #include "sf_snort_packet.h"<\/span>$/;"	a
+l00024	doc/html/spp__ai_8h_source.html	/^<\/a>00024 $/;"	a
+l00025	doc/html/spp__ai_8h_source.html	/^<\/a>00025<\/a> typedef<\/span> unsigned<\/span> int<\/span> uint32_t;$/;"	a
+l00026	doc/html/spp__ai_8h_source.html	/^<\/a>00026<\/a> typedef<\/span> unsigned<\/span> short<\/span> uint16_t;$/;"	a
+l00027	doc/html/spp__ai_8h_source.html	/^<\/a>00027 $/;"	a
+l00028	doc/html/spp__ai_8h_source.html	/^<\/a>00028<\/a> typedef<\/span> enum<\/span> { false<\/span>, true<\/span> } BOOL;$/;"	a
+l00029	doc/html/spp__ai_8h_source.html	/^<\/a>00029 $/;"	a
+l00030	doc/html/spp__ai_8h_source.html	/^<\/a>00030<\/a> typedef<\/span> struct <\/span>_AI_config<\/a>$/;"	a
+l00031	doc/html/spp__ai_8h_source.html	/^<\/a>00031 {$/;"	a
+l00032	doc/html/spp__ai_8h_source.html	/^<\/a>00032<\/a>         uint16_t<\/a> portToCheck<\/a>;$/;"	a
+l00033	doc/html/spp__ai_8h_source.html	/^<\/a>00033<\/a>         unsigned<\/span> long<\/span> hashCleanupInterval<\/a>;$/;"	a
+l00034	doc/html/spp__ai_8h_source.html	/^<\/a>00034<\/a>         unsigned<\/span> long<\/span> streamExpireInterval<\/a>;$/;"	a
+l00035	doc/html/spp__ai_8h_source.html	/^<\/a>00035 $/;"	a
+l00036	doc/html/spp__ai_8h_source.html	/^<\/a>00036 } AI_config<\/a>;$/;"	a
+l00037	doc/html/spp__ai_8h_source.html	/^<\/a>00037 $/;"	a
+l00038	doc/html/spp__ai_8h_source.html	/^<\/a>00038 void<\/span>  AI_pkt_enqueue<\/a> ( SFSnortPacket* );$/;"	a
+l00039	doc/html/spp__ai_8h_source.html	/^<\/a>00039 void<\/span>* AI_hashcleanup_thread<\/a> ( void<\/span>* );$/;"	a
+l00040	doc/html/spp__ai_8h_source.html	/^<\/a>00040 $/;"	a
+l00041	doc/html/spp__ai_8h_source.html	/^<\/a>00041 #endif  <\/span>\/* _SPP_AI_H *\/<\/span>$/;"	a
+l00042	doc/html/spp__ai_8h_source.html	/^<\/a>00042 $/;"	a
+label	spp_ai.h	/^	char                    label[256];$/;"	m	struct:_hierarchy_node	access:public
+latex_count	doc/latex/Makefile	/^	latex_count=5 ; \\$/;"	m
+letter_P	doc/html/classes.html	/^<\/a>
  P  <\/div><\/td><\/tr><\/table>$/;" a +letter__ doc/html/classes.html /^<\/td>
pkt_info<\/a>   <\/td>pkt_key<\/a>   <\/td><\/a>
  _  <\/div><\/td><\/tr><\/table>$/;" a +log2_num_buckets uthash/uthash.h /^ unsigned num_buckets, log2_num_buckets;$/;" m struct:UT_hash_table access:public +max_val spp_ai.h /^ int max_val;$/;" m struct:_hierarchy_node access:public +min_val spp_ai.h /^ int min_val;$/;" m struct:_hierarchy_node access:public +n uthash/utarray.h /^ unsigned i,n;\/* i: index of next available slot, n: num slots *\/$/;" m struct:__anon6 access:public +n uthash/utstring.h /^ size_t n; \/* allocd size *\/$/;" m struct:__anon4 access:public +nchildren spp_ai.h /^ int nchildren;$/;" m struct:_hierarchy_node access:public +nested-classes doc/html/spp__ai_8h.html /^

<\/a>$/;" a +nested-classes doc/html/stream_8c.html /^

<\/a>$/;" a +next doc/latex/structpkt__info.tex /^\\subsubsection[{next}]{\\setlength{\\rightskip}{0pt plus 5cm}struct {\\bf pkt\\_\\-info}$\\ast$ {\\bf pkt\\_\\-info::next}}}$/;" b +next spp_ai.h /^ struct _AI_snort_alert *next;$/;" m struct:_AI_snort_alert typeref:struct:_AI_snort_alert::_AI_snort_alert access:public +next spp_ai.h /^ struct pkt_info* next; \/* Pointer to the next packet in the stream *\/$/;" m struct:pkt_info typeref:struct:pkt_info::pkt_info access:public +next uthash/uthash.h /^ void *next; \/* next element in app order *\/$/;" m struct:UT_hash_handle access:public +noexpand uthash/uthash.h /^ unsigned ineff_expands, noexpand;$/;" m struct:UT_hash_table access:public +none spp_ai.h /^ none, src_port, dst_port, src_addr, dst_addr, timestamp$/;" e enum:__anon2 +nonideal_items uthash/uthash.h /^ unsigned nonideal_items;$/;" m struct:UT_hash_table access:public +num_buckets uthash/uthash.h /^ unsigned num_buckets, log2_num_buckets;$/;" m struct:UT_hash_table access:public +num_items uthash/uthash.h /^ unsigned num_items;$/;" m struct:UT_hash_table access:public +observed spp_ai.h /^ BOOL observed; \/* Flag set if the packet is observed, i.e. associated to a security alert *\/$/;" m struct:pkt_info access:public +oom uthash/utarray.h 42;" d +oom uthash/utstring.h 41;" d +parent spp_ai.h /^ struct _hierarchy_node *parent;$/;" m struct:_hierarchy_node typeref:struct:_hierarchy_node::_hierarchy_node access:public +parserPolicyId sfPolicyUserData.c /^tSfPolicyId parserPolicyId = 0;$/;" v +pkt doc/latex/structpkt__info.tex /^\\subsubsection[{pkt}]{\\setlength{\\rightskip}{0pt plus 5cm}SFSnortPacket$\\ast$ {\\bf pkt\\_\\-info::pkt}}}$/;" b +pkt spp_ai.h /^ SFSnortPacket* pkt; \/* Reference to SFSnortPacket containing packet's information *\/$/;" m struct:pkt_info access:public +pkt _ info Struct Reference doc/latex/structpkt__info.tex /^\\section{pkt\\_\\-info Struct Reference}$/;" s +pkt _ key Struct Reference doc/latex/structpkt__key.tex /^\\section{pkt\\_\\-key Struct Reference}$/;" s +pkt_info spp_ai.h /^struct pkt_info$/;" s +pkt_info::hh spp_ai.h /^ UT_hash_handle hh; \/* Make the struct 'hashable' *\/$/;" m struct:pkt_info access:public +pkt_info::key spp_ai.h /^ struct pkt_key key; \/* Key of the packet (src_ip, dst_port) *\/$/;" m struct:pkt_info typeref:struct:pkt_info::pkt_key access:public +pkt_info::next spp_ai.h /^ struct pkt_info* next; \/* Pointer to the next packet in the stream *\/$/;" m struct:pkt_info typeref:struct:pkt_info::pkt_info access:public +pkt_info::observed spp_ai.h /^ BOOL observed; \/* Flag set if the packet is observed, i.e. 
associated to a security alert *\/$/;" m struct:pkt_info access:public +pkt_info::pkt spp_ai.h /^ SFSnortPacket* pkt; \/* Reference to SFSnortPacket containing packet's information *\/$/;" m struct:pkt_info access:public +pkt_info::timestamp spp_ai.h /^ time_t timestamp; \/* Timestamp *\/$/;" m struct:pkt_info access:public +pkt_key spp_ai.h /^struct pkt_key$/;" s +pkt_key::dst_port spp_ai.h /^ uint16_t dst_port;$/;" m struct:pkt_key access:public +pkt_key::src_ip spp_ai.h /^ uint32_t src_ip;$/;" m struct:pkt_key access:public +portToCheck doc/latex/struct__AI__config.tex /^\\subsubsection[{portToCheck}]{\\setlength{\\rightskip}{0pt plus 5cm}{\\bf uint16\\_\\-t} {\\bf \\_\\-AI\\_\\-config::portToCheck}}}$/;" b +preg_match regex.c /^preg_match ( const char* expr, char* str, char*** matches, int *nmatches )$/;" f signature:( const char* expr, char* str, char*** matches, int *nmatches ) +preg_match spp_ai.h /^int preg_match ( const char*, char*, char***, int* );$/;" p signature:( const char*, char*, char***, int* ) +prev uthash/uthash.h /^ void *prev; \/* prev element in app order *\/$/;" m struct:UT_hash_handle access:public +priority spp_ai.h /^ unsigned short priority;$/;" m struct:_AI_snort_alert access:public +pub-attribs doc/html/struct__AI__config.html /^

<\/a>$/;" a +pub-attribs doc/html/structpkt__info.html /^

<\/a>$/;" a +pub-attribs doc/html/structpkt__key.html /^

<\/a>$/;" a +rev spp_ai.h /^ unsigned int rev;$/;" m struct:_AI_snort_alert access:public +runtimePolicyId sfPolicyUserData.c /^tSfPolicyId runtimePolicyId = 0;$/;" v +sequence spp_ai.h /^ uint32_t sequence;$/;" m struct:_AI_snort_alert access:public +sf _ dynamic _ preproc _ lib c File Reference doc/latex/sf__dynamic__preproc__lib_8c.tex /^\\section{sf\\_\\-dynamic\\_\\-preproc\\_\\-lib.c File Reference}$/;" s +sf _ preproc _ info h File Reference doc/latex/sf__preproc__info_8h.tex /^\\section{sf\\_\\-preproc\\_\\-info.h File Reference}$/;" s +sfPolicyConfigCreate doc/latex/group__sfPolicyConfig.tex /^\\subsubsection[{sfPolicyConfigCreate}]{\\setlength{\\rightskip}{0pt plus 5cm}tSfPolicyUserContextId sfPolicyConfigCreate ($/;" b +sfPolicyConfigCreate sfPolicyUserData.c /^tSfPolicyUserContextId sfPolicyConfigCreate(void)$/;" f signature:(void) +sfPolicyConfigDelete doc/latex/group__sfPolicyConfig.tex /^\\subsubsection[{sfPolicyConfigDelete}]{\\setlength{\\rightskip}{0pt plus 5cm}void sfPolicyConfigDelete ($/;" b +sfPolicyConfigDelete sfPolicyUserData.c /^void sfPolicyConfigDelete($/;" f signature:( tSfPolicyUserContextId pContext ) +sfPolicyUserData c File Reference doc/latex/sfPolicyUserData_8c.tex /^\\section{sfPolicyUserData.c File Reference}$/;" s +sfPolicyUserDataClear doc/latex/group__sfPolicyConfig.tex /^\\subsubsection[{sfPolicyUserDataClear}]{\\setlength{\\rightskip}{0pt plus 5cm}void$\\ast$ sfPolicyUserDataClear ($/;" b +sfPolicyUserDataClear sfPolicyUserData.c /^void * sfPolicyUserDataClear ($/;" f signature:( tSfPolicyUserContextId pContext, tSfPolicyId policyId ) +sfPolicyUserDataIterate doc/latex/group__sfPolicyConfig.tex /^\\subsubsection[{sfPolicyUserDataIterate}]{\\setlength{\\rightskip}{0pt plus 5cm}int sfPolicyUserDataIterate ($/;" b +sfPolicyUserDataIterate sfPolicyUserData.c /^int sfPolicyUserDataIterate ($/;" f signature:( tSfPolicyUserContextId pContext, int (*callback)(tSfPolicyUserContextId pContext, tSfPolicyId policyId, void* config) ) +sfPolicyUserDataSet doc/latex/group__sfPolicyConfig.tex /^\\subsubsection[{sfPolicyUserDataSet}]{\\setlength{\\rightskip}{0pt plus 5cm}int sfPolicyUserDataSet ($/;" b +sfPolicyUserDataSet sfPolicyUserData.c /^int sfPolicyUserDataSet ($/;" f signature:( tSfPolicyUserContextId pContext, tSfPolicyId policyId, void *config ) +sid spp_ai.h /^ unsigned int sid;$/;" m struct:_AI_snort_alert access:public +signature uthash/uthash.h /^ uint32_t signature; \/* used only to find hash tables in external analysis *\/$/;" m struct:UT_hash_table access:public +spp _ ai c File Reference doc/latex/spp__ai_8c.tex /^\\section{spp\\_\\-ai.c File Reference}$/;" s +spp _ ai h File Reference doc/latex/spp__ai_8h.tex /^\\section{spp\\_\\-ai.h File Reference}$/;" s +src _ ip doc/latex/structpkt__key.tex /^\\subsubsection[{src\\_\\-ip}]{\\setlength{\\rightskip}{0pt plus 5cm}{\\bf uint32\\_\\-t} {\\bf pkt\\_\\-key::src\\_\\-ip}}}$/;" b +src_addr spp_ai.h /^ none, src_port, dst_port, src_addr, dst_addr, timestamp$/;" e enum:__anon2 +src_addr spp_ai.h /^ uint32_t src_addr;$/;" m struct:_AI_snort_alert access:public +src_addr_node spp_ai.h /^ hierarchy_node *src_addr_node;$/;" m struct:_AI_snort_alert access:public +src_addr_root cluster.c /^PRIVATE hierarchy_node *src_addr_root = NULL;$/;" v +src_ip spp_ai.h /^ uint32_t src_ip;$/;" m struct:pkt_key access:public +src_port spp_ai.h /^ none, src_port, dst_port, src_addr, dst_addr, timestamp$/;" e enum:__anon2 +src_port spp_ai.h /^ uint16_t src_port;$/;" m struct:_AI_snort_alert access:public +src_port_node 
spp_ai.h /^ hierarchy_node *src_port_node;$/;" m struct:_AI_snort_alert access:public +src_port_root cluster.c /^PRIVATE hierarchy_node *src_port_root = NULL;$/;" v +start_time stream.c /^PRIVATE time_t start_time = 0;$/;" v +stream spp_ai.h /^ struct pkt_info *stream;$/;" m struct:_AI_snort_alert typeref:struct:_AI_snort_alert::pkt_info access:public +stream c File Reference doc/latex/stream_8c.tex /^\\section{stream.c File Reference}$/;" s +streamExpireInterval doc/latex/struct__AI__config.tex /^\\subsubsection[{streamExpireInterval}]{\\setlength{\\rightskip}{0pt plus 5cm}unsigned long {\\bf \\_\\-AI\\_\\-config::streamExpireInterval}}}$/;" b +streamExpireInterval spp_ai.h /^ unsigned long streamExpireInterval;$/;" m struct:__anon3 access:public +sz uthash/utarray.h /^ size_t sz;$/;" m struct:__anon5 access:public +tail uthash/uthash.h /^ struct UT_hash_handle *tail; \/* tail hh in app order, for fast append *\/$/;" m struct:UT_hash_table typeref:struct:UT_hash_table::UT_hash_handle access:public +tbl uthash/uthash.h /^ struct UT_hash_table *tbl;$/;" m struct:UT_hash_handle typeref:struct:UT_hash_handle::UT_hash_table access:public +tcp_flags spp_ai.h /^ uint8_t tcp_flags;$/;" m struct:_AI_snort_alert access:public +tcplen spp_ai.h /^ uint16_t tcplen;$/;" m struct:_AI_snort_alert access:public +timestamp doc/latex/structpkt__info.tex /^\\subsubsection[{timestamp}]{\\setlength{\\rightskip}{0pt plus 5cm}time\\_\\-t {\\bf pkt\\_\\-info::timestamp}}}$/;" b +timestamp spp_ai.h /^ none, src_port, dst_port, src_addr, dst_addr, timestamp$/;" e enum:__anon2 +timestamp spp_ai.h /^ time_t timestamp; \/* Timestamp *\/$/;" m struct:pkt_info access:public +timestamp spp_ai.h /^ time_t timestamp;$/;" m struct:_AI_snort_alert access:public +tos spp_ai.h /^ uint8_t tos;$/;" m struct:_AI_snort_alert access:public +true spp_ai.h /^typedef enum { false, true } BOOL;$/;" e enum:__anon1 +ttl spp_ai.h /^ uint8_t ttl;$/;" m struct:_AI_snort_alert access:public +type spp_ai.h /^ cluster_type type;$/;" m struct:_hierarchy_node access:public +typedef-members doc/html/spp__ai_8h.html /^

<\/a>$/;" a +uint16_t spp_ai.h /^typedef unsigned short uint16_t;$/;" t +uint32_t spp_ai.h /^typedef unsigned int uint32_t;$/;" t +uint32_t uthash/uthash.h /^typedef unsigned int uint32_t;$/;" t +uint8_t spp_ai.h /^typedef unsigned char uint8_t;$/;" t +usage doc/html/installdox /^sub usage {$/;" s +utarray_back uthash/utarray.h 208;" d +utarray_clear uthash/utarray.h 190;" d +utarray_concat uthash/utarray.h 172;" d +utarray_done uthash/utarray.h 65;" d +utarray_eltidx uthash/utarray.h 209;" d +utarray_eltptr uthash/utarray.h 115;" d +utarray_erase uthash/utarray.h 176;" d +utarray_extend_back uthash/utarray.h 106;" d +utarray_free uthash/utarray.h 83;" d +utarray_front uthash/utarray.h 206;" d +utarray_init uthash/utarray.h 60;" d +utarray_insert uthash/utarray.h 118;" d +utarray_inserta uthash/utarray.h 130;" d +utarray_len uthash/utarray.h 113;" d +utarray_new uthash/utarray.h 78;" d +utarray_next uthash/utarray.h 207;" d +utarray_pop_back uthash/utarray.h 101;" d +utarray_push_back uthash/utarray.h 95;" d +utarray_reserve uthash/utarray.h 88;" d +utarray_resize uthash/utarray.h 151;" d +utarray_sort uthash/utarray.h 202;" d +utarray_str_cpy uthash/utarray.h /^static void utarray_str_cpy(void *dst, const void *src) {$/;" f signature:(void *dst, const void *src) +utarray_str_dtor uthash/utarray.h /^static void utarray_str_dtor(void *elt) {$/;" f signature:(void *elt) +uthash_expand_fyi uthash/uthash.h 72;" d +uthash_fatal uthash/uthash.h 67;" d +uthash_free uthash/uthash.h 69;" d +uthash_malloc uthash/uthash.h 68;" d +uthash_noexpand_fyi uthash/uthash.h 71;" d +utstring_bincpy uthash/utstring.h 88;" d +utstring_body uthash/utstring.h 106;" d +utstring_clear uthash/utstring.h 83;" d +utstring_concat uthash/utstring.h 96;" d +utstring_done uthash/utstring.h 64;" d +utstring_free uthash/utstring.h 70;" d +utstring_init uthash/utstring.h 58;" d +utstring_len uthash/utstring.h 104;" d +utstring_new uthash/utstring.h 76;" d +utstring_printf uthash/utstring.h /^_UNUSED_ static void utstring_printf(UT_string *s, const char *fmt, ...) {$/;" f signature:(UT_string *s, const char *fmt, ...) +utstring_printf_va uthash/utstring.h /^_UNUSED_ static void utstring_printf_va(UT_string *s, const char *fmt, va_list ap) {$/;" f signature:(UT_string *s, const char *fmt, va_list ap) +utstring_reserve uthash/utstring.h 49;" d +var-members doc/html/sfPolicyUserData_8c.html /^

<\/a>$/;" a +var-members doc/html/sf__dynamic__preproc__lib_8c.html /^

<\/a>$/;" a +var-members doc/html/spp__ai_8c.html /^

<\/a>$/;" a +var-members doc/html/stream_8c.html /^

<\/a>$/;" a +window spp_ai.h /^ uint16_t window;$/;" m struct:_AI_snort_alert access:public diff --git a/uthash/utarray.h b/uthash/utarray.h new file mode 100644 index 0000000..e3d4074 --- /dev/null +++ b/uthash/utarray.h @@ -0,0 +1,224 @@ +/* +Copyright (c) 2008-2010, Troy D. Hanson http://uthash.sourceforge.net +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +/* a dynamic array implementation using macros + * see http://uthash.sourceforge.net/utarray + */ +#ifndef UTARRAY_H +#define UTARRAY_H + +#define UTARRAY_VERSION 1.9.1 + +#ifdef __GNUC__ +#define _UNUSED_ __attribute__ ((__unused__)) +#else +#define _UNUSED_ +#endif + +#include /* size_t */ +#include /* memset, etc */ +#include /* exit */ + +#define oom() exit(-1) + +typedef void (ctor_f)(void *dst, const void *src); +typedef void (dtor_f)(void *elt); +typedef void (init_f)(void *elt); +typedef struct { + size_t sz; + init_f *init; + ctor_f *copy; + dtor_f *dtor; +} UT_icd; + +typedef struct { + unsigned i,n;/* i: index of next available slot, n: num slots */ + const UT_icd *icd; /* initializer, copy and destructor functions */ + char *d; /* n slots of size icd->sz*/ +} UT_array; + +#define utarray_init(a,_icd) do { \ + memset(a,0,sizeof(UT_array)); \ + (a)->icd=_icd; \ +} while(0) + +#define utarray_done(a) do { \ + if ((a)->n) { \ + if ((a)->icd->dtor) { \ + size_t _ut_i; \ + for(_ut_i=0; _ut_i < (a)->i; _ut_i++) { \ + (a)->icd->dtor(utarray_eltptr(a,_ut_i)); \ + } \ + } \ + free((a)->d); \ + } \ + (a)->n=0; \ +} while(0) + +#define utarray_new(a,_icd) do { \ + a=(UT_array*)malloc(sizeof(UT_array)); \ + utarray_init(a,_icd); \ +} while(0) + +#define utarray_free(a) do { \ + utarray_done(a); \ + free(a); \ +} while(0) + +#define utarray_reserve(a,by) do { \ + if (((a)->i+by) > ((a)->n)) { \ + while(((a)->i+by) > ((a)->n)) { (a)->n = ((a)->n ? 
(2*(a)->n) : 8); } \ + if ( ((a)->d=(char*)realloc((a)->d, (a)->n*(a)->icd->sz)) == NULL) oom(); \ + } \ +} while(0) + +#define utarray_push_back(a,p) do { \ + utarray_reserve(a,1); \ + if ((a)->icd->copy) { (a)->icd->copy( _utarray_eltptr(a,(a)->i++), p); } \ + else { memcpy(_utarray_eltptr(a,(a)->i++), p, (a)->icd->sz); }; \ +} while(0) + +#define utarray_pop_back(a) do { \ + if ((a)->icd->dtor) { (a)->icd->dtor( _utarray_eltptr(a,--((a)->i))); } \ + else { (a)->i--; } \ +} while(0) + +#define utarray_extend_back(a) do { \ + utarray_reserve(a,1); \ + if ((a)->icd->init) { (a)->icd->init(_utarray_eltptr(a,(a)->i)); } \ + else { memset(_utarray_eltptr(a,(a)->i),0,(a)->icd->sz); } \ + (a)->i++; \ +} while(0) + +#define utarray_len(a) ((a)->i) + +#define utarray_eltptr(a,j) (((j) < (a)->i) ? _utarray_eltptr(a,j) : NULL) +#define _utarray_eltptr(a,j) ((char*)((a)->d + ((a)->icd->sz*(j) ))) + +#define utarray_insert(a,p,j) do { \ + utarray_reserve(a,1); \ + if (j > (a)->i) break; \ + if ((j) < (a)->i) { \ + memmove( _utarray_eltptr(a,(j)+1), _utarray_eltptr(a,j), \ + ((a)->i - (j))*((a)->icd->sz)); \ + } \ + if ((a)->icd->copy) { (a)->icd->copy( _utarray_eltptr(a,j), p); } \ + else { memcpy(_utarray_eltptr(a,j), p, (a)->icd->sz); }; \ + (a)->i++; \ +} while(0) + +#define utarray_inserta(a,w,j) do { \ + if (utarray_len(w) == 0) break; \ + if (j > (a)->i) break; \ + utarray_reserve(a,utarray_len(w)); \ + if ((j) < (a)->i) { \ + memmove(_utarray_eltptr(a,(j)+utarray_len(w)), \ + _utarray_eltptr(a,j), \ + ((a)->i - (j))*((a)->icd->sz)); \ + } \ + if (a->icd->copy) { \ + size_t _ut_i; \ + for(_ut_i=0;_ut_i<(w)->i;_ut_i++) { \ + (a)->icd->copy(_utarray_eltptr(a,j+_ut_i), _utarray_eltptr(w,_ut_i)); \ + } \ + } else { \ + memcpy(_utarray_eltptr(a,j), _utarray_eltptr(w,0), \ + utarray_len(w)*((a)->icd->sz)); \ + } \ + (a)->i += utarray_len(w); \ +} while(0) + +#define utarray_resize(dst,num) do { \ + size_t _ut_i; \ + if (dst->i > (size_t)(num)) { \ + if ((dst)->icd->dtor) { \ + for(_ut_i=num; _ut_i < dst->i; _ut_i++) { \ + (dst)->icd->dtor(utarray_eltptr(dst,_ut_i)); \ + } \ + } \ + } else if (dst->i < (size_t)(num)) { \ + utarray_reserve(dst,num-dst->i); \ + if ((dst)->icd->init) { \ + for(_ut_i=dst->i; _ut_i < num; _ut_i++) { \ + (dst)->icd->init(utarray_eltptr(dst,_ut_i)); \ + } \ + } else { \ + memset(_utarray_eltptr(dst,dst->i),0,(dst)->icd->sz*(num-dst->i)); \ + } \ + } \ + dst->i = num; \ +} while(0) + +#define utarray_concat(dst,src) do { \ + utarray_inserta(dst,src,utarray_len(dst)); \ +} while(0) + +#define utarray_erase(a,pos,len) do { \ + if ((a)->icd->dtor) { \ + size_t _ut_i; \ + for(_ut_i=0; _ut_i < len; _ut_i++) { \ + (a)->icd->dtor(utarray_eltptr(a,pos+_ut_i)); \ + } \ + } \ + if ((a)->i > (pos+len)) { \ + memmove( _utarray_eltptr(a,pos), _utarray_eltptr(a,pos+len), \ + ((a->i)-(pos+len))*((a)->icd->sz)); \ + } \ + (a)->i -= (len); \ +} while(0) + +#define utarray_clear(a) do { \ + if ((a)->i > 0) { \ + if ((a)->icd->dtor) { \ + size_t _ut_i; \ + for(_ut_i=0; _ut_i < (a)->i; _ut_i++) { \ + (a)->icd->dtor(utarray_eltptr(a,_ut_i)); \ + } \ + } \ + (a)->i = 0; \ + } \ +} while(0) + +#define utarray_sort(a,cmp) do { \ + qsort((a)->d, (a)->i, (a)->icd->sz, cmp); \ +} while(0) + +#define utarray_front(a) (((a)->i) ? (_utarray_eltptr(a,0)) : NULL) +#define utarray_next(a,e) (((e)==NULL) ? utarray_front(a) : ((((a)->i) > (utarray_eltidx(a,e)+1)) ? _utarray_eltptr(a,utarray_eltidx(a,e)+1) : NULL)) +#define utarray_back(a) (((a)->i) ? 
(_utarray_eltptr(a,(a)->i-1)) : NULL) +#define utarray_eltidx(a,e) (((char*)(e) >= (char*)((a)->d)) ? (((char*)(e) - (char*)((a)->d))/(a)->icd->sz) : -1) + +/* last we pre-define a few icd for common utarrays of ints and strings */ +static void utarray_str_cpy(void *dst, const void *src) { + char **_src = (char**)src, **_dst = (char**)dst; + *_dst = (*_src == NULL) ? NULL : strdup(*_src); +} +static void utarray_str_dtor(void *elt) { + char **eltc = (char**)elt; + if (*eltc) free(*eltc); +} +static const UT_icd ut_str_icd _UNUSED_ = {sizeof(char*),NULL,utarray_str_cpy,utarray_str_dtor}; +static const UT_icd ut_int_icd _UNUSED_ = {sizeof(int),NULL,NULL,NULL}; + + +#endif /* UTARRAY_H */ diff --git a/uthash/uthash.h b/uthash/uthash.h new file mode 100644 index 0000000..ddd50a7 --- /dev/null +++ b/uthash/uthash.h @@ -0,0 +1,960 @@ +/* +Copyright (c) 2003-2010, Troy D. Hanson http://uthash.sourceforge.net +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef UTHASH_H +#define UTHASH_H + +#include /* memcmp,strlen */ +#include /* ptrdiff_t */ + +/* These macros use decltype or the earlier __typeof GNU extension. + As decltype is only available in newer compilers (VS2010 or gcc 4.3+ + when compiling c++ source) this code uses whatever method is needed + or, for VS2008 where neither is available, uses casting workarounds. 
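   A minimal sketch of what that means in practice (placeholder names "head"
   and "elt", not taken from this header): on gcc and friends a statement such
   as DECLTYPE_ASSIGN(head, elt) boils down to

       head = (__typeof(head))(elt);

   while the NO_DECLTYPE fallback for VS2008 copies the pointer through a
   char** alias instead of casting:

       char **_da_dst = (char**)(&head); *_da_dst = (char*)(elt);

   The real macros, with both branches, follow just below.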
*/ +#ifdef _MSC_VER /* MS compiler */ +#if _MSC_VER >= 1600 && __cplusplus /* VS2010 or newer in C++ mode */ +#define DECLTYPE(x) (decltype(x)) +#else /* VS2008 or older (or VS2010 in C mode) */ +#define NO_DECLTYPE +#define DECLTYPE(x) +#endif +#else /* GNU, Sun and other compilers */ +#define DECLTYPE(x) (__typeof(x)) +#endif + +#ifdef NO_DECLTYPE +#define DECLTYPE_ASSIGN(dst,src) \ +do { \ + char **_da_dst = (char**)(&(dst)); \ + *_da_dst = (char*)(src); \ +} while(0) +#else +#define DECLTYPE_ASSIGN(dst,src) \ +do { \ + (dst) = DECLTYPE(dst)(src); \ +} while(0) +#endif + +/* a number of the hash function use uint32_t which isn't defined on win32 */ +#ifdef _MSC_VER +typedef unsigned int uint32_t; +#else +#include /* uint32_t */ +#endif + +#define UTHASH_VERSION 1.9.1 + +#define uthash_fatal(msg) exit(-1) /* fatal error (out of memory,etc) */ +#define uthash_malloc(sz) malloc(sz) /* malloc fcn */ +#define uthash_free(ptr) free(ptr) /* free fcn */ + +#define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */ +#define uthash_expand_fyi(tbl) /* can be defined to log expands */ + +/* initial number of buckets */ +#define HASH_INITIAL_NUM_BUCKETS 32 /* initial number of buckets */ +#define HASH_INITIAL_NUM_BUCKETS_LOG2 5 /* lg2 of initial number of buckets */ +#define HASH_BKT_CAPACITY_THRESH 10 /* expand when bucket count reaches */ + +/* calculate the element whose hash handle address is hhe */ +#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho))) + +#define HASH_FIND(hh,head,keyptr,keylen,out) \ +do { \ + unsigned _hf_bkt,_hf_hashv; \ + out=NULL; \ + if (head) { \ + HASH_FCN(keyptr,keylen, (head)->hh.tbl->num_buckets, _hf_hashv, _hf_bkt); \ + if (HASH_BLOOM_TEST((head)->hh.tbl, _hf_hashv)) { \ + HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], \ + keyptr,keylen,out); \ + } \ + } \ +} while (0) + +#ifdef HASH_BLOOM +#define HASH_BLOOM_BITLEN (1ULL << HASH_BLOOM) +#define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8) + ((HASH_BLOOM_BITLEN%8) ? 
1:0) +#define HASH_BLOOM_MAKE(tbl) \ +do { \ + (tbl)->bloom_nbits = HASH_BLOOM; \ + (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \ + if (!((tbl)->bloom_bv)) { uthash_fatal( "out of memory"); } \ + memset((tbl)->bloom_bv, 0, HASH_BLOOM_BYTELEN); \ + (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \ +} while (0); + +#define HASH_BLOOM_FREE(tbl) \ +do { \ + uthash_free((tbl)->bloom_bv); \ +} while (0); + +#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8] |= (1U << ((idx)%8))) +#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8] & (1U << ((idx)%8))) + +#define HASH_BLOOM_ADD(tbl,hashv) \ + HASH_BLOOM_BITSET((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1))) + +#define HASH_BLOOM_TEST(tbl,hashv) \ + HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1))) + +#else +#define HASH_BLOOM_MAKE(tbl) +#define HASH_BLOOM_FREE(tbl) +#define HASH_BLOOM_ADD(tbl,hashv) +#define HASH_BLOOM_TEST(tbl,hashv) (1) +#endif + +#define HASH_MAKE_TABLE(hh,head) \ +do { \ + (head)->hh.tbl = (UT_hash_table*)uthash_malloc( \ + sizeof(UT_hash_table)); \ + if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); } \ + memset((head)->hh.tbl, 0, sizeof(UT_hash_table)); \ + (head)->hh.tbl->tail = &((head)->hh); \ + (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \ + (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \ + (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \ + (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \ + HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \ + if (! (head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); } \ + memset((head)->hh.tbl->buckets, 0, \ + HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \ + HASH_BLOOM_MAKE((head)->hh.tbl); \ + (head)->hh.tbl->signature = HASH_SIGNATURE; \ +} while(0) + +#define HASH_ADD(hh,head,fieldname,keylen_in,add) \ + HASH_ADD_KEYPTR(hh,head,&add->fieldname,keylen_in,add) + +#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \ +do { \ + unsigned _ha_bkt; \ + (add)->hh.next = NULL; \ + (add)->hh.key = (char*)keyptr; \ + (add)->hh.keylen = keylen_in; \ + if (!(head)) { \ + head = (add); \ + (head)->hh.prev = NULL; \ + HASH_MAKE_TABLE(hh,head); \ + } else { \ + (head)->hh.tbl->tail->next = (add); \ + (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \ + (head)->hh.tbl->tail = &((add)->hh); \ + } \ + (head)->hh.tbl->num_items++; \ + (add)->hh.tbl = (head)->hh.tbl; \ + HASH_FCN(keyptr,keylen_in, (head)->hh.tbl->num_buckets, \ + (add)->hh.hashv, _ha_bkt); \ + HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt],&(add)->hh); \ + HASH_BLOOM_ADD((head)->hh.tbl,(add)->hh.hashv); \ + HASH_EMIT_KEY(hh,head,keyptr,keylen_in); \ + HASH_FSCK(hh,head); \ +} while(0) + +#define HASH_TO_BKT( hashv, num_bkts, bkt ) \ +do { \ + bkt = ((hashv) & ((num_bkts) - 1)); \ +} while(0) + +/* delete "delptr" from the hash table. + * "the usual" patch-up process for the app-order doubly-linked-list. + * The use of _hd_hh_del below deserves special explanation. + * These used to be expressed using (delptr) but that led to a bug + * if someone used the same symbol for the head and deletee, like + * HASH_DELETE(hh,users,users); + * We want that to work, but by changing the head (users) below + * we were forfeiting our ability to further refer to the deletee (users) + * in the patch-up process. 
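 * A minimal sketch of the calling side (assuming a hypothetical element type
 * "struct user" that embeds a UT_hash_handle named hh, with "users" as the
 * head pointer), using the HASH_DEL convenience form defined below and saving
 * each element's app-order successor before deleting it; note the very first
 * pass through this loop is exactly the head-equals-deletee case described
 * here:
 *
 *   struct user *cur = users, *nxt;
 *   while (cur) {
 *     nxt = (struct user*)cur->hh.next;
 *     HASH_DEL(users, cur);
 *     free(cur);
 *     cur = nxt;
 *   }
 *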
Solution: use scratch space to + * copy the deletee pointer, then the latter references are via that + * scratch pointer rather than through the repointed (users) symbol. + */ +#define HASH_DELETE(hh,head,delptr) \ +do { \ + unsigned _hd_bkt; \ + struct UT_hash_handle *_hd_hh_del; \ + if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) { \ + uthash_free((head)->hh.tbl->buckets ); \ + HASH_BLOOM_FREE((head)->hh.tbl); \ + uthash_free((head)->hh.tbl); \ + head = NULL; \ + } else { \ + _hd_hh_del = &((delptr)->hh); \ + if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) { \ + (head)->hh.tbl->tail = \ + (UT_hash_handle*)((char*)((delptr)->hh.prev) + \ + (head)->hh.tbl->hho); \ + } \ + if ((delptr)->hh.prev) { \ + ((UT_hash_handle*)((char*)((delptr)->hh.prev) + \ + (head)->hh.tbl->hho))->next = (delptr)->hh.next; \ + } else { \ + DECLTYPE_ASSIGN(head,(delptr)->hh.next); \ + } \ + if (_hd_hh_del->next) { \ + ((UT_hash_handle*)((char*)_hd_hh_del->next + \ + (head)->hh.tbl->hho))->prev = \ + _hd_hh_del->prev; \ + } \ + HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \ + HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \ + (head)->hh.tbl->num_items--; \ + } \ + HASH_FSCK(hh,head); \ +} while (0) + + +/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */ +#define HASH_FIND_STR(head,findstr,out) \ + HASH_FIND(hh,head,findstr,strlen(findstr),out) +#define HASH_ADD_STR(head,strfield,add) \ + HASH_ADD(hh,head,strfield,strlen(add->strfield),add) +#define HASH_FIND_INT(head,findint,out) \ + HASH_FIND(hh,head,findint,sizeof(int),out) +#define HASH_ADD_INT(head,intfield,add) \ + HASH_ADD(hh,head,intfield,sizeof(int),add) +#define HASH_FIND_PTR(head,findptr,out) \ + HASH_FIND(hh,head,findptr,sizeof(void *),out) +#define HASH_ADD_PTR(head,ptrfield,add) \ + HASH_ADD(hh,head,ptrfield,sizeof(void *),add) +#define HASH_DEL(head,delptr) \ + HASH_DELETE(hh,head,delptr) + +/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined. + * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined. + */ +#ifdef HASH_DEBUG +#define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0) +#define HASH_FSCK(hh,head) \ +do { \ + unsigned _bkt_i; \ + unsigned _count, _bkt_count; \ + char *_prev; \ + struct UT_hash_handle *_thh; \ + if (head) { \ + _count = 0; \ + for( _bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) { \ + _bkt_count = 0; \ + _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \ + _prev = NULL; \ + while (_thh) { \ + if (_prev != (char*)(_thh->hh_prev)) { \ + HASH_OOPS("invalid hh_prev %p, actual %p\n", \ + _thh->hh_prev, _prev ); \ + } \ + _bkt_count++; \ + _prev = (char*)(_thh); \ + _thh = _thh->hh_next; \ + } \ + _count += _bkt_count; \ + if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \ + HASH_OOPS("invalid bucket count %d, actual %d\n", \ + (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \ + } \ + } \ + if (_count != (head)->hh.tbl->num_items) { \ + HASH_OOPS("invalid hh item count %d, actual %d\n", \ + (head)->hh.tbl->num_items, _count ); \ + } \ + /* traverse hh in app order; check next/prev integrity, count */ \ + _count = 0; \ + _prev = NULL; \ + _thh = &(head)->hh; \ + while (_thh) { \ + _count++; \ + if (_prev !=(char*)(_thh->prev)) { \ + HASH_OOPS("invalid prev %p, actual %p\n", \ + _thh->prev, _prev ); \ + } \ + _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \ + _thh = ( _thh->next ? 
(UT_hash_handle*)((char*)(_thh->next) + \ + (head)->hh.tbl->hho) : NULL ); \ + } \ + if (_count != (head)->hh.tbl->num_items) { \ + HASH_OOPS("invalid app item count %d, actual %d\n", \ + (head)->hh.tbl->num_items, _count ); \ + } \ + } \ +} while (0) +#else +#define HASH_FSCK(hh,head) +#endif + +/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to + * the descriptor to which this macro is defined for tuning the hash function. + * The app can #include to get the prototype for write(2). */ +#ifdef HASH_EMIT_KEYS +#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \ +do { \ + unsigned _klen = fieldlen; \ + write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \ + write(HASH_EMIT_KEYS, keyptr, fieldlen); \ +} while (0) +#else +#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) +#endif + +/* default to Jenkin's hash unless overridden e.g. DHASH_FUNCTION=HASH_SAX */ +#ifdef HASH_FUNCTION +#define HASH_FCN HASH_FUNCTION +#else +#define HASH_FCN HASH_JEN +#endif + +/* The Bernstein hash function, used in Perl prior to v5.6 */ +#define HASH_BER(key,keylen,num_bkts,hashv,bkt) \ +do { \ + unsigned _hb_keylen=keylen; \ + char *_hb_key=(char*)key; \ + (hashv) = 0; \ + while (_hb_keylen--) { (hashv) = ((hashv) * 33) + *_hb_key++; } \ + bkt = (hashv) & (num_bkts-1); \ +} while (0) + + +/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at + * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */ +#define HASH_SAX(key,keylen,num_bkts,hashv,bkt) \ +do { \ + unsigned _sx_i; \ + char *_hs_key=(char*)key; \ + hashv = 0; \ + for(_sx_i=0; _sx_i < keylen; _sx_i++) \ + hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \ + bkt = hashv & (num_bkts-1); \ +} while (0) + +#define HASH_FNV(key,keylen,num_bkts,hashv,bkt) \ +do { \ + unsigned _fn_i; \ + char *_hf_key=(char*)key; \ + hashv = 2166136261UL; \ + for(_fn_i=0; _fn_i < keylen; _fn_i++) \ + hashv = (hashv * 16777619) ^ _hf_key[_fn_i]; \ + bkt = hashv & (num_bkts-1); \ +} while(0); + +#define HASH_OAT(key,keylen,num_bkts,hashv,bkt) \ +do { \ + unsigned _ho_i; \ + char *_ho_key=(char*)key; \ + hashv = 0; \ + for(_ho_i=0; _ho_i < keylen; _ho_i++) { \ + hashv += _ho_key[_ho_i]; \ + hashv += (hashv << 10); \ + hashv ^= (hashv >> 6); \ + } \ + hashv += (hashv << 3); \ + hashv ^= (hashv >> 11); \ + hashv += (hashv << 15); \ + bkt = hashv & (num_bkts-1); \ +} while(0) + +#define HASH_JEN_MIX(a,b,c) \ +do { \ + a -= b; a -= c; a ^= ( c >> 13 ); \ + b -= c; b -= a; b ^= ( a << 8 ); \ + c -= a; c -= b; c ^= ( b >> 13 ); \ + a -= b; a -= c; a ^= ( c >> 12 ); \ + b -= c; b -= a; b ^= ( a << 16 ); \ + c -= a; c -= b; c ^= ( b >> 5 ); \ + a -= b; a -= c; a ^= ( c >> 3 ); \ + b -= c; b -= a; b ^= ( a << 10 ); \ + c -= a; c -= b; c ^= ( b >> 15 ); \ +} while (0) + +#define HASH_JEN(key,keylen,num_bkts,hashv,bkt) \ +do { \ + unsigned _hj_i,_hj_j,_hj_k; \ + char *_hj_key=(char*)key; \ + hashv = 0xfeedbeef; \ + _hj_i = _hj_j = 0x9e3779b9; \ + _hj_k = keylen; \ + while (_hj_k >= 12) { \ + _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \ + + ( (unsigned)_hj_key[2] << 16 ) \ + + ( (unsigned)_hj_key[3] << 24 ) ); \ + _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \ + + ( (unsigned)_hj_key[6] << 16 ) \ + + ( (unsigned)_hj_key[7] << 24 ) ); \ + hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \ + + ( (unsigned)_hj_key[10] << 16 ) \ + + ( (unsigned)_hj_key[11] << 24 ) ); \ + \ + HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ + \ + _hj_key += 12; \ + _hj_k -= 12; \ + } \ + hashv += keylen; \ + switch ( _hj_k ) { \ + case 11: hashv += ( 
(unsigned)_hj_key[10] << 24 ); \ + case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); \ + case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); \ + case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); \ + case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); \ + case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); \ + case 5: _hj_j += _hj_key[4]; \ + case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); \ + case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); \ + case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); \ + case 1: _hj_i += _hj_key[0]; \ + } \ + HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ + bkt = hashv & (num_bkts-1); \ +} while(0) + +/* The Paul Hsieh hash function */ +#undef get16bits +#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \ + || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__) +#define get16bits(d) (*((const uint16_t *) (d))) +#endif + +#if !defined (get16bits) +#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \ + +(uint32_t)(((const uint8_t *)(d))[0]) ) +#endif +#define HASH_SFH(key,keylen,num_bkts,hashv,bkt) \ +do { \ + char *_sfh_key=(char*)key; \ + uint32_t _sfh_tmp, _sfh_len = keylen; \ + \ + int _sfh_rem = _sfh_len & 3; \ + _sfh_len >>= 2; \ + hashv = 0xcafebabe; \ + \ + /* Main loop */ \ + for (;_sfh_len > 0; _sfh_len--) { \ + hashv += get16bits (_sfh_key); \ + _sfh_tmp = (get16bits (_sfh_key+2) << 11) ^ hashv; \ + hashv = (hashv << 16) ^ _sfh_tmp; \ + _sfh_key += 2*sizeof (uint16_t); \ + hashv += hashv >> 11; \ + } \ + \ + /* Handle end cases */ \ + switch (_sfh_rem) { \ + case 3: hashv += get16bits (_sfh_key); \ + hashv ^= hashv << 16; \ + hashv ^= _sfh_key[sizeof (uint16_t)] << 18; \ + hashv += hashv >> 11; \ + break; \ + case 2: hashv += get16bits (_sfh_key); \ + hashv ^= hashv << 11; \ + hashv += hashv >> 17; \ + break; \ + case 1: hashv += *_sfh_key; \ + hashv ^= hashv << 10; \ + hashv += hashv >> 1; \ + } \ + \ + /* Force "avalanching" of final 127 bits */ \ + hashv ^= hashv << 3; \ + hashv += hashv >> 5; \ + hashv ^= hashv << 4; \ + hashv += hashv >> 17; \ + hashv ^= hashv << 25; \ + hashv += hashv >> 6; \ + bkt = hashv & (num_bkts-1); \ +} while(0); + +#ifdef HASH_USING_NO_STRICT_ALIASING +/* The MurmurHash exploits some CPU's (e.g. x86) tolerance for unaligned reads. + * For other types of CPU's (e.g. Sparc) an unaligned read causes a bus error. + * So MurmurHash comes in two versions, the faster unaligned one and the slower + * aligned one. We only use the faster one on CPU's where we know it's safe. 
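 * A minimal build-line sketch (the command below is an assumption for
 * illustration, not taken from this header): because HASH_MUR only exists
 * inside this HASH_USING_NO_STRICT_ALIASING block and HASH_FUNCTION overrides
 * the default hash selected above, opting into MurmurHash looks roughly like
 *
 *   gcc -DHASH_USING_NO_STRICT_ALIASING -DHASH_FUNCTION=HASH_MUR \
 *       -fno-strict-aliasing -c spp_ai.c
 *
 * where spp_ai.c stands for whichever translation unit includes uthash.h and
 * -fno-strict-aliasing is the usual gcc companion to the macro's name.
 *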
+ * + * Note the preprocessor built-in defines can be emitted using: + * + * gcc -m64 -dM -E - < /dev/null (on gcc) + * cc -## a.c (where a.c is a simple test file) (Sun Studio) + */ +#if (defined(__i386__) || defined(__x86_64__)) +#define HASH_MUR HASH_MUR_UNALIGNED +#else +#define HASH_MUR HASH_MUR_ALIGNED +#endif + +/* Appleby's MurmurHash fast version for unaligned-tolerant archs like i386 */ +#define HASH_MUR_UNALIGNED(key,keylen,num_bkts,hashv,bkt) \ +do { \ + const unsigned int _mur_m = 0x5bd1e995; \ + const int _mur_r = 24; \ + hashv = 0xcafebabe ^ keylen; \ + char *_mur_key = (char *)key; \ + uint32_t _mur_tmp, _mur_len = keylen; \ + \ + for (;_mur_len >= 4; _mur_len-=4) { \ + _mur_tmp = *(uint32_t *)_mur_key; \ + _mur_tmp *= _mur_m; \ + _mur_tmp ^= _mur_tmp >> _mur_r; \ + _mur_tmp *= _mur_m; \ + hashv *= _mur_m; \ + hashv ^= _mur_tmp; \ + _mur_key += 4; \ + } \ + \ + switch(_mur_len) \ + { \ + case 3: hashv ^= _mur_key[2] << 16; \ + case 2: hashv ^= _mur_key[1] << 8; \ + case 1: hashv ^= _mur_key[0]; \ + hashv *= _mur_m; \ + }; \ + \ + hashv ^= hashv >> 13; \ + hashv *= _mur_m; \ + hashv ^= hashv >> 15; \ + \ + bkt = hashv & (num_bkts-1); \ +} while(0) + +/* Appleby's MurmurHash version for alignment-sensitive archs like Sparc */ +#define HASH_MUR_ALIGNED(key,keylen,num_bkts,hashv,bkt) \ +do { \ + const unsigned int _mur_m = 0x5bd1e995; \ + const int _mur_r = 24; \ + hashv = 0xcafebabe ^ keylen; \ + char *_mur_key = (char *)key; \ + uint32_t _mur_len = keylen; \ + int _mur_align = (int)_mur_key & 3; \ + \ + if (_mur_align && (_mur_len >= 4)) { \ + unsigned _mur_t = 0, _mur_d = 0; \ + switch(_mur_align) { \ + case 1: _mur_t |= _mur_key[2] << 16; \ + case 2: _mur_t |= _mur_key[1] << 8; \ + case 3: _mur_t |= _mur_key[0]; \ + } \ + _mur_t <<= (8 * _mur_align); \ + _mur_key += 4-_mur_align; \ + _mur_len -= 4-_mur_align; \ + int _mur_sl = 8 * (4-_mur_align); \ + int _mur_sr = 8 * _mur_align; \ + \ + for (;_mur_len >= 4; _mur_len-=4) { \ + _mur_d = *(unsigned *)_mur_key; \ + _mur_t = (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \ + unsigned _mur_k = _mur_t; \ + _mur_k *= _mur_m; \ + _mur_k ^= _mur_k >> _mur_r; \ + _mur_k *= _mur_m; \ + hashv *= _mur_m; \ + hashv ^= _mur_k; \ + _mur_t = _mur_d; \ + _mur_key += 4; \ + } \ + _mur_d = 0; \ + if(_mur_len >= _mur_align) { \ + switch(_mur_align) { \ + case 3: _mur_d |= _mur_key[2] << 16; \ + case 2: _mur_d |= _mur_key[1] << 8; \ + case 1: _mur_d |= _mur_key[0]; \ + } \ + unsigned _mur_k = (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \ + _mur_k *= _mur_m; \ + _mur_k ^= _mur_k >> _mur_r; \ + _mur_k *= _mur_m; \ + hashv *= _mur_m; \ + hashv ^= _mur_k; \ + _mur_k += _mur_align; \ + _mur_len -= _mur_align; \ + \ + switch(_mur_len) \ + { \ + case 3: hashv ^= _mur_key[2] << 16; \ + case 2: hashv ^= _mur_key[1] << 8; \ + case 1: hashv ^= _mur_key[0]; \ + hashv *= _mur_m; \ + } \ + } else { \ + switch(_mur_len) \ + { \ + case 3: _mur_d ^= _mur_key[2] << 16; \ + case 2: _mur_d ^= _mur_key[1] << 8; \ + case 1: _mur_d ^= _mur_key[0]; \ + case 0: hashv ^= (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \ + hashv *= _mur_m; \ + } \ + } \ + \ + hashv ^= hashv >> 13; \ + hashv *= _mur_m; \ + hashv ^= hashv >> 15; \ + } else { \ + for (;_mur_len >= 4; _mur_len-=4) { \ + unsigned _mur_k = *(unsigned*)_mur_key; \ + _mur_k *= _mur_m; \ + _mur_k ^= _mur_k >> _mur_r; \ + _mur_k *= _mur_m; \ + hashv *= _mur_m; \ + hashv ^= _mur_k; \ + _mur_key += 4; \ + } \ + switch(_mur_len) \ + { \ + case 3: hashv ^= _mur_key[2] << 16; \ + case 2: hashv ^= _mur_key[1] << 8; \ + case 1: 
hashv ^= _mur_key[0]; \ + hashv *= _mur_m; \ + } \ + \ + hashv ^= hashv >> 13; \ + hashv *= _mur_m; \ + hashv ^= hashv >> 15; \ + } \ + bkt = hashv & (num_bkts-1); \ +} while(0) +#endif /* HASH_USING_NO_STRICT_ALIASING */ + +/* key comparison function; return 0 if keys equal */ +#define HASH_KEYCMP(a,b,len) memcmp(a,b,len) + +/* iterate over items in a known bucket to find desired item */ +#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,out) \ +do { \ + if (head.hh_head) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,head.hh_head)); \ + else out=NULL; \ + while (out) { \ + if (out->hh.keylen == keylen_in) { \ + if ((HASH_KEYCMP(out->hh.key,keyptr,keylen_in)) == 0) break; \ + } \ + if (out->hh.hh_next) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,out->hh.hh_next)); \ + else out = NULL; \ + } \ +} while(0) + +/* add an item to a bucket */ +#define HASH_ADD_TO_BKT(head,addhh) \ +do { \ + head.count++; \ + (addhh)->hh_next = head.hh_head; \ + (addhh)->hh_prev = NULL; \ + if (head.hh_head) { (head).hh_head->hh_prev = (addhh); } \ + (head).hh_head=addhh; \ + if (head.count >= ((head.expand_mult+1) * HASH_BKT_CAPACITY_THRESH) \ + && (addhh)->tbl->noexpand != 1) { \ + HASH_EXPAND_BUCKETS((addhh)->tbl); \ + } \ +} while(0) + +/* remove an item from a given bucket */ +#define HASH_DEL_IN_BKT(hh,head,hh_del) \ + (head).count--; \ + if ((head).hh_head == hh_del) { \ + (head).hh_head = hh_del->hh_next; \ + } \ + if (hh_del->hh_prev) { \ + hh_del->hh_prev->hh_next = hh_del->hh_next; \ + } \ + if (hh_del->hh_next) { \ + hh_del->hh_next->hh_prev = hh_del->hh_prev; \ + } + +/* Bucket expansion has the effect of doubling the number of buckets + * and redistributing the items into the new buckets. Ideally the + * items will distribute more or less evenly into the new buckets + * (the extent to which this is true is a measure of the quality of + * the hash function as it applies to the key domain). + * + * With the items distributed into more buckets, the chain length + * (item count) in each bucket is reduced. Thus by expanding buckets + * the hash keeps a bound on the chain length. This bounded chain + * length is the essence of how a hash provides constant time lookup. + * + * The calculation of tbl->ideal_chain_maxlen below deserves some + * explanation. First, keep in mind that we're calculating the ideal + * maximum chain length based on the *new* (doubled) bucket count. + * In fractions this is just n/b (n=number of items,b=new num buckets). + * Since the ideal chain length is an integer, we want to calculate + * ceil(n/b). We don't depend on floating point arithmetic in this + * hash, so to calculate ceil(n/b) with integers we could write + * + * ceil(n/b) = (n/b) + ((n%b)?1:0) + * + * and in fact a previous version of this hash did just that. + * But now we have improved things a bit by recognizing that b is + * always a power of two. We keep its base 2 log handy (call it lb), + * so now we can write this with a bit shift and logical AND: + * + * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 
1:0) + * + */ +#define HASH_EXPAND_BUCKETS(tbl) \ +do { \ + unsigned _he_bkt; \ + unsigned _he_bkt_i; \ + struct UT_hash_handle *_he_thh, *_he_hh_nxt; \ + UT_hash_bucket *_he_new_buckets, *_he_newbkt; \ + _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \ + 2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \ + if (!_he_new_buckets) { uthash_fatal( "out of memory"); } \ + memset(_he_new_buckets, 0, \ + 2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \ + tbl->ideal_chain_maxlen = \ + (tbl->num_items >> (tbl->log2_num_buckets+1)) + \ + ((tbl->num_items & ((tbl->num_buckets*2)-1)) ? 1 : 0); \ + tbl->nonideal_items = 0; \ + for(_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) \ + { \ + _he_thh = tbl->buckets[ _he_bkt_i ].hh_head; \ + while (_he_thh) { \ + _he_hh_nxt = _he_thh->hh_next; \ + HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2, _he_bkt); \ + _he_newbkt = &(_he_new_buckets[ _he_bkt ]); \ + if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \ + tbl->nonideal_items++; \ + _he_newbkt->expand_mult = _he_newbkt->count / \ + tbl->ideal_chain_maxlen; \ + } \ + _he_thh->hh_prev = NULL; \ + _he_thh->hh_next = _he_newbkt->hh_head; \ + if (_he_newbkt->hh_head) _he_newbkt->hh_head->hh_prev = \ + _he_thh; \ + _he_newbkt->hh_head = _he_thh; \ + _he_thh = _he_hh_nxt; \ + } \ + } \ + tbl->num_buckets *= 2; \ + tbl->log2_num_buckets++; \ + uthash_free( tbl->buckets ); \ + tbl->buckets = _he_new_buckets; \ + tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ? \ + (tbl->ineff_expands+1) : 0; \ + if (tbl->ineff_expands > 1) { \ + tbl->noexpand=1; \ + uthash_noexpand_fyi(tbl); \ + } \ + uthash_expand_fyi(tbl); \ +} while(0) + + +/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */ +/* Note that HASH_SORT assumes the hash handle name to be hh. + * HASH_SRT was added to allow the hash handle name to be passed in. */ +#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn) +#define HASH_SRT(hh,head,cmpfcn) \ +do { \ + unsigned _hs_i; \ + unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \ + struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \ + if (head) { \ + _hs_insize = 1; \ + _hs_looping = 1; \ + _hs_list = &((head)->hh); \ + while (_hs_looping) { \ + _hs_p = _hs_list; \ + _hs_list = NULL; \ + _hs_tail = NULL; \ + _hs_nmerges = 0; \ + while (_hs_p) { \ + _hs_nmerges++; \ + _hs_q = _hs_p; \ + _hs_psize = 0; \ + for ( _hs_i = 0; _hs_i < _hs_insize; _hs_i++ ) { \ + _hs_psize++; \ + _hs_q = (UT_hash_handle*)((_hs_q->next) ? \ + ((void*)((char*)(_hs_q->next) + \ + (head)->hh.tbl->hho)) : NULL); \ + if (! (_hs_q) ) break; \ + } \ + _hs_qsize = _hs_insize; \ + while ((_hs_psize > 0) || ((_hs_qsize > 0) && _hs_q )) { \ + if (_hs_psize == 0) { \ + _hs_e = _hs_q; \ + _hs_q = (UT_hash_handle*)((_hs_q->next) ? \ + ((void*)((char*)(_hs_q->next) + \ + (head)->hh.tbl->hho)) : NULL); \ + _hs_qsize--; \ + } else if ( (_hs_qsize == 0) || !(_hs_q) ) { \ + _hs_e = _hs_p; \ + _hs_p = (UT_hash_handle*)((_hs_p->next) ? \ + ((void*)((char*)(_hs_p->next) + \ + (head)->hh.tbl->hho)) : NULL); \ + _hs_psize--; \ + } else if (( \ + cmpfcn(DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \ + DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q))) \ + ) <= 0) { \ + _hs_e = _hs_p; \ + _hs_p = (UT_hash_handle*)((_hs_p->next) ? \ + ((void*)((char*)(_hs_p->next) + \ + (head)->hh.tbl->hho)) : NULL); \ + _hs_psize--; \ + } else { \ + _hs_e = _hs_q; \ + _hs_q = (UT_hash_handle*)((_hs_q->next) ? 
\ + ((void*)((char*)(_hs_q->next) + \ + (head)->hh.tbl->hho)) : NULL); \ + _hs_qsize--; \ + } \ + if ( _hs_tail ) { \ + _hs_tail->next = ((_hs_e) ? \ + ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL); \ + } else { \ + _hs_list = _hs_e; \ + } \ + _hs_e->prev = ((_hs_tail) ? \ + ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL); \ + _hs_tail = _hs_e; \ + } \ + _hs_p = _hs_q; \ + } \ + _hs_tail->next = NULL; \ + if ( _hs_nmerges <= 1 ) { \ + _hs_looping=0; \ + (head)->hh.tbl->tail = _hs_tail; \ + DECLTYPE_ASSIGN(head,ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \ + } \ + _hs_insize *= 2; \ + } \ + HASH_FSCK(hh,head); \ + } \ +} while (0) + +/* This function selects items from one hash into another hash. + * The end result is that the selected items have dual presence + * in both hashes. There is no copy of the items made; rather + * they are added into the new hash through a secondary hash + * hash handle that must be present in the structure. */ +#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \ +do { \ + unsigned _src_bkt, _dst_bkt; \ + void *_last_elt=NULL, *_elt; \ + UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \ + ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \ + if (src) { \ + for(_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \ + for(_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \ + _src_hh; \ + _src_hh = _src_hh->hh_next) { \ + _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \ + if (cond(_elt)) { \ + _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \ + _dst_hh->key = _src_hh->key; \ + _dst_hh->keylen = _src_hh->keylen; \ + _dst_hh->hashv = _src_hh->hashv; \ + _dst_hh->prev = _last_elt; \ + _dst_hh->next = NULL; \ + if (_last_elt_hh) { _last_elt_hh->next = _elt; } \ + if (!dst) { \ + DECLTYPE_ASSIGN(dst,_elt); \ + HASH_MAKE_TABLE(hh_dst,dst); \ + } else { \ + _dst_hh->tbl = (dst)->hh_dst.tbl; \ + } \ + HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \ + HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt],_dst_hh); \ + (dst)->hh_dst.tbl->num_items++; \ + _last_elt = _elt; \ + _last_elt_hh = _dst_hh; \ + } \ + } \ + } \ + } \ + HASH_FSCK(hh_dst,dst); \ +} while (0) + +#define HASH_CLEAR(hh,head) \ +do { \ + if (head) { \ + uthash_free((head)->hh.tbl->buckets ); \ + uthash_free((head)->hh.tbl); \ + (head)=NULL; \ + } \ +} while(0) + +/* obtain a count of items in the hash */ +#define HASH_COUNT(head) HASH_CNT(hh,head) +#define HASH_CNT(hh,head) (head?(head->hh.tbl->num_items):0) + +typedef struct UT_hash_bucket { + struct UT_hash_handle *hh_head; + unsigned count; + + /* expand_mult is normally set to 0. In this situation, the max chain length + * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If + * the bucket's chain exceeds this length, bucket expansion is triggered). + * However, setting expand_mult to a non-zero value delays bucket expansion + * (that would be triggered by additions to this particular bucket) + * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH. + * (The multiplier is simply expand_mult+1). The whole idea of this + * multiplier is to reduce bucket expansions, since they are expensive, in + * situations where we know that a particular bucket tends to be overused. + * It is better to let its chain length grow to a longer yet-still-bounded + * value, than to do an O(n) bucket expansion too often. 
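 * As a worked example (the numbers come from the defaults in this header,
 * they are not an extra tunable): with HASH_BKT_CAPACITY_THRESH at its
 * default of 10, a bucket whose expand_mult has grown to 2 only triggers an
 * expansion once its chain holds (2+1)*10 = 30 items, per the threshold test
 * in HASH_ADD_TO_BKT.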
+ */ + unsigned expand_mult; + +} UT_hash_bucket; + +/* random signature used only to find hash tables in external analysis */ +#define HASH_SIGNATURE 0xa0111fe1 +#define HASH_BLOOM_SIGNATURE 0xb12220f2 + +typedef struct UT_hash_table { + UT_hash_bucket *buckets; + unsigned num_buckets, log2_num_buckets; + unsigned num_items; + struct UT_hash_handle *tail; /* tail hh in app order, for fast append */ + ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element */ + + /* in an ideal situation (all buckets used equally), no bucket would have + * more than ceil(#items/#buckets) items. that's the ideal chain length. */ + unsigned ideal_chain_maxlen; + + /* nonideal_items is the number of items in the hash whose chain position + * exceeds the ideal chain maxlen. these items pay the penalty for an uneven + * hash distribution; reaching them in a chain traversal takes >ideal steps */ + unsigned nonideal_items; + + /* ineffective expands occur when a bucket doubling was performed, but + * afterward, more than half the items in the hash had nonideal chain + * positions. If this happens on two consecutive expansions we inhibit any + * further expansion, as it's not helping; this happens when the hash + * function isn't a good fit for the key domain. When expansion is inhibited + * the hash will still work, albeit no longer in constant time. */ + unsigned ineff_expands, noexpand; + + uint32_t signature; /* used only to find hash tables in external analysis */ +#ifdef HASH_BLOOM + uint32_t bloom_sig; /* used only to test bloom exists in external analysis */ + uint8_t *bloom_bv; + char bloom_nbits; +#endif + +} UT_hash_table; + +typedef struct UT_hash_handle { + struct UT_hash_table *tbl; + void *prev; /* prev element in app order */ + void *next; /* next element in app order */ + struct UT_hash_handle *hh_prev; /* previous hh in bucket order */ + struct UT_hash_handle *hh_next; /* next hh in bucket order */ + void *key; /* ptr to enclosing struct's key */ + unsigned keylen; /* enclosing struct's key len */ + unsigned hashv; /* result of hash-fcn(key) */ +} UT_hash_handle; + +#endif /* UTHASH_H */ diff --git a/uthash/utlist.h b/uthash/utlist.h new file mode 100644 index 0000000..35fc9db --- /dev/null +++ b/uthash/utlist.h @@ -0,0 +1,490 @@ +/* +Copyright (c) 2007-2010, Troy D. Hanson http://uthash.sourceforge.net +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef UTLIST_H +#define UTLIST_H + +#define UTLIST_VERSION 1.9.1 + +/* + * This file contains macros to manipulate singly and doubly-linked lists. + * + * 1. LL_ macros: singly-linked lists. + * 2. DL_ macros: doubly-linked lists. + * 3. CDL_ macros: circular doubly-linked lists. + * + * To use singly-linked lists, your structure must have a "next" pointer. + * To use doubly-linked lists, your structure must "prev" and "next" pointers. + * Either way, the pointer to the head of the list must be initialized to NULL. + * + * ----------------.EXAMPLE ------------------------- + * struct item { + * int id; + * struct item *prev, *next; + * } + * + * struct item *list = NULL: + * + * int main() { + * struct item *item; + * ... allocate and populate item ... + * DL_APPEND(list, item); + * } + * -------------------------------------------------- + * + * For doubly-linked lists, the append and delete macros are O(1) + * For singly-linked lists, append and delete are O(n) but prepend is O(1) + * The sort macro is O(n log(n)) for all types of single/double/circular lists. + */ + +/* These macros use decltype or the earlier __typeof GNU extension. + As decltype is only available in newer compilers (VS2010 or gcc 4.3+ + when compiling c++ code), this code uses whatever method is needed + or, for VS2008 where neither is available, uses casting workarounds. */ +#ifdef _MSC_VER /* MS compiler */ +#if _MSC_VER >= 1600 && __cplusplus /* VS2010 and newer in C++ mode */ +#define LDECLTYPE(x) decltype(x) +#else /* VS2008 or older (or VS2010 in C mode) */ +#define NO_DECLTYPE +#define LDECLTYPE(x) char* +#endif +#else /* GNU, Sun and other compilers */ +#define LDECLTYPE(x) __typeof(x) +#endif + +/* for VS2008 we use some workarounds to get around the lack of decltype, + * namely, we always reassign our tmp variable to the list head if we need + * to dereference its prev/next pointers, and save/restore the real head.*/ +#ifdef NO_DECLTYPE +#define _SV(elt,list) _tmp = (char*)(list); {char **_alias = (char**)&(list); *_alias = (elt); } +#define _NEXT(elt,list) ((char*)((list)->next)) +#define _NEXTASGN(elt,list,to) { char **_alias = (char**)&((list)->next); *_alias=(char*)(to); } +#define _PREV(elt,list) ((char*)((list)->prev)) +#define _PREVASGN(elt,list,to) { char **_alias = (char**)&((list)->prev); *_alias=(char*)(to); } +#define _RS(list) { char **_alias = (char**)&(list); *_alias=_tmp; } +#define _CASTASGN(a,b) { char **_alias = (char**)&(a); *_alias=(char*)(b); } +#else +#define _SV(elt,list) +#define _NEXT(elt,list) ((elt)->next) +#define _NEXTASGN(elt,list,to) ((elt)->next)=(to) +#define _PREV(elt,list) ((elt)->prev) +#define _PREVASGN(elt,list,to) ((elt)->prev)=(to) +#define _RS(list) +#define _CASTASGN(a,b) (a)=(b) +#endif + +/****************************************************************************** + * The sort macro is an adaptation of Simon Tatham's O(n log(n)) mergesort * + * Unwieldy variable names used here to avoid shadowing passed-in variables. 
* + *****************************************************************************/ +#define LL_SORT(list, cmp) \ +do { \ + LDECLTYPE(list) _ls_p; \ + LDECLTYPE(list) _ls_q; \ + LDECLTYPE(list) _ls_e; \ + LDECLTYPE(list) _ls_tail; \ + LDECLTYPE(list) _ls_oldhead; \ + LDECLTYPE(list) _tmp; \ + int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping; \ + if (list) { \ + _ls_insize = 1; \ + _ls_looping = 1; \ + while (_ls_looping) { \ + _CASTASGN(_ls_p,list); \ + _CASTASGN(_ls_oldhead,list); \ + list = NULL; \ + _ls_tail = NULL; \ + _ls_nmerges = 0; \ + while (_ls_p) { \ + _ls_nmerges++; \ + _ls_q = _ls_p; \ + _ls_psize = 0; \ + for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) { \ + _ls_psize++; \ + _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list); _RS(list); \ + if (!_ls_q) break; \ + } \ + _ls_qsize = _ls_insize; \ + while (_ls_psize > 0 || (_ls_qsize > 0 && _ls_q)) { \ + if (_ls_psize == 0) { \ + _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list); _RS(list); _ls_qsize--; \ + } else if (_ls_qsize == 0 || !_ls_q) { \ + _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = _NEXT(_ls_p,list); _RS(list); _ls_psize--; \ + } else if (cmp(_ls_p,_ls_q) <= 0) { \ + _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = _NEXT(_ls_p,list); _RS(list); _ls_psize--; \ + } else { \ + _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list); _RS(list); _ls_qsize--; \ + } \ + if (_ls_tail) { \ + _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,_ls_e); _RS(list); \ + } else { \ + _CASTASGN(list,_ls_e); \ + } \ + _ls_tail = _ls_e; \ + } \ + _ls_p = _ls_q; \ + } \ + _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,NULL); _RS(list); \ + if (_ls_nmerges <= 1) { \ + _ls_looping=0; \ + } \ + _ls_insize *= 2; \ + } \ + } else _tmp=NULL; /* quiet gcc unused variable warning */ \ +} while (0) + +#define DL_SORT(list, cmp) \ +do { \ + LDECLTYPE(list) _ls_p; \ + LDECLTYPE(list) _ls_q; \ + LDECLTYPE(list) _ls_e; \ + LDECLTYPE(list) _ls_tail; \ + LDECLTYPE(list) _ls_oldhead; \ + LDECLTYPE(list) _tmp; \ + int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping; \ + if (list) { \ + _ls_insize = 1; \ + _ls_looping = 1; \ + while (_ls_looping) { \ + _CASTASGN(_ls_p,list); \ + _CASTASGN(_ls_oldhead,list); \ + list = NULL; \ + _ls_tail = NULL; \ + _ls_nmerges = 0; \ + while (_ls_p) { \ + _ls_nmerges++; \ + _ls_q = _ls_p; \ + _ls_psize = 0; \ + for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) { \ + _ls_psize++; \ + _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list); _RS(list); \ + if (!_ls_q) break; \ + } \ + _ls_qsize = _ls_insize; \ + while (_ls_psize > 0 || (_ls_qsize > 0 && _ls_q)) { \ + if (_ls_psize == 0) { \ + _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list); _RS(list); _ls_qsize--; \ + } else if (_ls_qsize == 0 || !_ls_q) { \ + _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = _NEXT(_ls_p,list); _RS(list); _ls_psize--; \ + } else if (cmp(_ls_p,_ls_q) <= 0) { \ + _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = _NEXT(_ls_p,list); _RS(list); _ls_psize--; \ + } else { \ + _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list); _RS(list); _ls_qsize--; \ + } \ + if (_ls_tail) { \ + _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,_ls_e); _RS(list); \ + } else { \ + _CASTASGN(list,_ls_e); \ + } \ + _SV(_ls_e,list); _PREVASGN(_ls_e,list,_ls_tail); _RS(list); \ + _ls_tail = _ls_e; \ + } \ + _ls_p = _ls_q; \ + } \ + _CASTASGN(list->prev, _ls_tail); \ + _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,NULL); _RS(list); \ + if (_ls_nmerges <= 1) { \ + _ls_looping=0; \ + } \ + _ls_insize *= 2; \ + } \ + } else _tmp=NULL; /* quiet gcc unused variable warning */ \ +} while 
(0) + +#define CDL_SORT(list, cmp) \ +do { \ + LDECLTYPE(list) _ls_p; \ + LDECLTYPE(list) _ls_q; \ + LDECLTYPE(list) _ls_e; \ + LDECLTYPE(list) _ls_tail; \ + LDECLTYPE(list) _ls_oldhead; \ + LDECLTYPE(list) _tmp; \ + LDECLTYPE(list) _tmp2; \ + int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping; \ + if (list) { \ + _ls_insize = 1; \ + _ls_looping = 1; \ + while (_ls_looping) { \ + _CASTASGN(_ls_p,list); \ + _CASTASGN(_ls_oldhead,list); \ + list = NULL; \ + _ls_tail = NULL; \ + _ls_nmerges = 0; \ + while (_ls_p) { \ + _ls_nmerges++; \ + _ls_q = _ls_p; \ + _ls_psize = 0; \ + for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) { \ + _ls_psize++; \ + _SV(_ls_q,list); \ + if (_NEXT(_ls_q,list) == _ls_oldhead) { \ + _ls_q = NULL; \ + } else { \ + _ls_q = _NEXT(_ls_q,list); \ + } \ + _RS(list); \ + if (!_ls_q) break; \ + } \ + _ls_qsize = _ls_insize; \ + while (_ls_psize > 0 || (_ls_qsize > 0 && _ls_q)) { \ + if (_ls_psize == 0) { \ + _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list); _RS(list); _ls_qsize--; \ + if (_ls_q == _ls_oldhead) { _ls_q = NULL; } \ + } else if (_ls_qsize == 0 || !_ls_q) { \ + _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = _NEXT(_ls_p,list); _RS(list); _ls_psize--; \ + if (_ls_p == _ls_oldhead) { _ls_p = NULL; } \ + } else if (cmp(_ls_p,_ls_q) <= 0) { \ + _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = _NEXT(_ls_p,list); _RS(list); _ls_psize--; \ + if (_ls_p == _ls_oldhead) { _ls_p = NULL; } \ + } else { \ + _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list); _RS(list); _ls_qsize--; \ + if (_ls_q == _ls_oldhead) { _ls_q = NULL; } \ + } \ + if (_ls_tail) { \ + _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,_ls_e); _RS(list); \ + } else { \ + _CASTASGN(list,_ls_e); \ + } \ + _SV(_ls_e,list); _PREVASGN(_ls_e,list,_ls_tail); _RS(list); \ + _ls_tail = _ls_e; \ + } \ + _ls_p = _ls_q; \ + } \ + _CASTASGN(list->prev,_ls_tail); \ + _CASTASGN(_tmp2,list); \ + _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,_tmp2); _RS(list); \ + if (_ls_nmerges <= 1) { \ + _ls_looping=0; \ + } \ + _ls_insize *= 2; \ + } \ + } else _tmp=NULL; /* quiet gcc unused variable warning */ \ +} while (0) + +/****************************************************************************** + * singly linked list macros (non-circular) * + *****************************************************************************/ +#define LL_PREPEND(head,add) \ +do { \ + (add)->next = head; \ + head = add; \ +} while (0) + +#define LL_APPEND(head,add) \ +do { \ + LDECLTYPE(head) _tmp; \ + (add)->next=NULL; \ + if (head) { \ + _tmp = head; \ + while (_tmp->next) { _tmp = _tmp->next; } \ + _tmp->next=(add); \ + } else { \ + (head)=(add); \ + } \ +} while (0) + +#define LL_DELETE(head,del) \ +do { \ + LDECLTYPE(head) _tmp; \ + if ((head) == (del)) { \ + (head)=(head)->next; \ + } else { \ + _tmp = head; \ + while (_tmp->next && (_tmp->next != (del))) { \ + _tmp = _tmp->next; \ + } \ + if (_tmp->next) { \ + _tmp->next = ((del)->next); \ + } \ + } \ +} while (0) + +/* Here are VS2008 replacements for LL_APPEND and LL_DELETE */ +#define LL_APPEND_VS2008(head,add) \ +do { \ + if (head) { \ + (add)->next = head; /* use add->next as a temp variable */ \ + while ((add)->next->next) { (add)->next = (add)->next->next; } \ + (add)->next->next=(add); \ + } else { \ + (head)=(add); \ + } \ + (add)->next=NULL; \ +} while (0) + +#define LL_DELETE_VS2008(head,del) \ +do { \ + if ((head) == (del)) { \ + (head)=(head)->next; \ + } else { \ + char *_tmp = (char*)(head); \ + while (head->next && (head->next != (del))) { \ + head = head->next; 
\ + } \ + if (head->next) { \ + head->next = ((del)->next); \ + } \ + { \ + char **_head_alias = (char**)&(head); \ + *_head_alias = _tmp; \ + } \ + } \ +} while (0) +#ifdef NO_DECLTYPE +#undef LL_APPEND +#define LL_APPEND LL_APPEND_VS2008 +#undef LL_DELETE +#define LL_DELETE LL_DELETE_VS2008 +#endif +/* end VS2008 replacements */ + +#define LL_FOREACH(head,el) \ + for(el=head;el;el=el->next) + +#define LL_FOREACH_SAFE(head,el,tmp) \ + for((el)=(head);(el) && (tmp = (el)->next, 1); (el) = tmp) + +#define LL_SEARCH_SCALAR(head,out,field,val) \ +do { \ + LL_FOREACH(head,out) { \ + if ((out)->field == (val)) break; \ + } \ +} while(0) + +#define LL_SEARCH(head,out,elt,cmp) \ +do { \ + LL_FOREACH(head,out) { \ + if ((cmp(out,elt))==0) break; \ + } \ +} while(0) + +/****************************************************************************** + * doubly linked list macros (non-circular) * + *****************************************************************************/ +#define DL_PREPEND(head,add) \ +do { \ + (add)->next = head; \ + if (head) { \ + (add)->prev = (head)->prev; \ + (head)->prev = (add); \ + } else { \ + (add)->prev = (add); \ + } \ + (head) = (add); \ +} while (0) + +#define DL_APPEND(head,add) \ +do { \ + if (head) { \ + (add)->prev = (head)->prev; \ + (head)->prev->next = (add); \ + (head)->prev = (add); \ + (add)->next = NULL; \ + } else { \ + (head)=(add); \ + (head)->prev = (head); \ + (head)->next = NULL; \ + } \ +} while (0); + +#define DL_DELETE(head,del) \ +do { \ + if ((del)->prev == (del)) { \ + (head)=NULL; \ + } else if ((del)==(head)) { \ + (del)->next->prev = (del)->prev; \ + (head) = (del)->next; \ + } else { \ + (del)->prev->next = (del)->next; \ + if ((del)->next) { \ + (del)->next->prev = (del)->prev; \ + } else { \ + (head)->prev = (del)->prev; \ + } \ + } \ +} while (0); + + +#define DL_FOREACH(head,el) \ + for(el=head;el;el=el->next) + +/* this version is safe for deleting the elements during iteration */ +#define DL_FOREACH_SAFE(head,el,tmp) \ + for((el)=(head);(el) && (tmp = (el)->next, 1); (el) = tmp) + +/* these are identical to their singly-linked list counterparts */ +#define DL_SEARCH_SCALAR LL_SEARCH_SCALAR +#define DL_SEARCH LL_SEARCH + +/****************************************************************************** + * circular doubly linked list macros * + *****************************************************************************/ +#define CDL_PREPEND(head,add) \ +do { \ + if (head) { \ + (add)->prev = (head)->prev; \ + (add)->next = (head); \ + (head)->prev = (add); \ + (add)->prev->next = (add); \ + } else { \ + (add)->prev = (add); \ + (add)->next = (add); \ + } \ +(head)=(add); \ +} while (0) + +#define CDL_DELETE(head,del) \ +do { \ + if ( ((head)==(del)) && ((head)->next == (head))) { \ + (head) = 0L; \ + } else { \ + (del)->next->prev = (del)->prev; \ + (del)->prev->next = (del)->next; \ + if ((del) == (head)) (head)=(del)->next; \ + } \ +} while (0); + +#define CDL_FOREACH(head,el) \ + for(el=head;el;el=(el->next==head ? 0L : el->next)) + +#define CDL_FOREACH_SAFE(head,el,tmp1,tmp2) \ + for((el)=(head), ((tmp1)=(head)?((head)->prev):NULL); \ + (el) && ((tmp2)=(el)->next, 1); \ + ((el) = (((el)==(tmp1)) ? 
0L : (tmp2))))
+
+#define CDL_SEARCH_SCALAR(head,out,field,val) \
+do { \
+    CDL_FOREACH(head,out) { \
+      if ((out)->field == (val)) break; \
+    } \
+} while(0)
+
+#define CDL_SEARCH(head,out,elt,cmp) \
+do { \
+    CDL_FOREACH(head,out) { \
+      if ((cmp(out,elt))==0) break; \
+    } \
+} while(0)
+
+#endif /* UTLIST_H */
+
diff --git a/uthash/utstring.h b/uthash/utstring.h
new file mode 100644
index 0000000..277874f
--- /dev/null
+++ b/uthash/utstring.h
@@ -0,0 +1,137 @@
+/*
+Copyright (c) 2008-2010, Troy D. Hanson http://uthash.sourceforge.net
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/* a dynamic string implementation using macros
+ * see http://uthash.sourceforge.net/utstring
+ */
+#ifndef UTSTRING_H
+#define UTSTRING_H
+
+#define UTSTRING_VERSION 1.9.1
+
+#ifdef __GNUC__
+#define _UNUSED_ __attribute__ ((__unused__))
+#else
+#define _UNUSED_
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#define oom() exit(-1)
+
+typedef struct {
+    char *d;
+    size_t n; /* allocd size */
+    size_t i; /* index of first unused byte */
+} UT_string;
+
+#define utstring_reserve(s,amt) \
+do { \
+  if (((s)->n - (s)->i) < (size_t)(amt)) { \
+     (s)->d = (char*)realloc((s)->d, (s)->n + amt); \
+     if ((s)->d == NULL) oom(); \
+     (s)->n += amt; \
+  } \
+} while(0)
+
+#define utstring_init(s) \
+do { \
+  (s)->n = 0; (s)->i = 0; (s)->d = NULL; \
+  utstring_reserve(s,100); \
+} while(0)
+
+#define utstring_done(s) \
+do { \
+  if ((s)->d != NULL) free((s)->d); \
+  (s)->n = 0; \
+} while(0)
+
+#define utstring_free(s) \
+do { \
+  utstring_done(s); \
+  free(s); \
+} while(0)
+
+#define utstring_new(s) \
+do { \
+   s = (UT_string*)calloc(sizeof(UT_string),1); \
+   if (!s) oom(); \
+   utstring_init(s); \
+} while(0)
+
+#define utstring_clear(s) \
+do { \
+  (s)->i = 0; \
+} while(0)
+
+#define utstring_bincpy(s,b,l) \
+do { \
+    utstring_reserve(s,(l)+1); \
+    if (l) memcpy(&(s)->d[(s)->i], b, l); \
+    s->i += l; \
+    s->d[s->i]='\0'; \
+} while(0)
+
+#define utstring_concat(dst,src) \
+do { \
+    utstring_reserve(dst,(src->i)+1); \
+    if (src->i) memcpy(&(dst)->d[(dst)->i], src->d, src->i); \
+    dst->i += src->i; \
+    dst->d[dst->i]='\0'; \
+} while(0)
+
+#define utstring_len(s) ((unsigned)((s)->i))
+
+#define utstring_body(s) ((s)->d)
+
+_UNUSED_ static void utstring_printf_va(UT_string *s, const char *fmt, va_list ap) {
+   int n;
+   va_list cp;
+   while (1) {
+#ifdef _WIN32
+      cp = ap;
+#else
+      va_copy(cp, ap);
+#endif
+      n = vsnprintf (&s->d[s->i], s->n-s->i, fmt, cp);
+      va_end(cp);
+
+      if ((n > -1) && (n < (int)(s->n-s->i))) {
+        s->i += n;
+        return;
+      }
+
+      /* Else try again with more space. */
+      if (n > -1) utstring_reserve(s,n+1); /* exact */
+      else utstring_reserve(s,(s->n)*2);   /* 2x */
+   }
+}
+_UNUSED_ static void utstring_printf(UT_string *s, const char *fmt, ...) {
+   va_list ap;
+   va_start(ap,fmt);
+   utstring_printf_va(s,fmt,ap);
+   va_end(ap);
+}
+
+#endif /* UTSTRING_H */
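
The three vendored headers above close out the commit. To show how they are meant to compose, here is a minimal, hypothetical usage sketch, not part of the committed sources: a structure that carries a UT_hash_handle plus prev/next pointers can sit in a uthash table and a utlist doubly-linked list at the same time, and a UT_string can format a one-line summary of both. The struct event type, its fields, the by_id comparator and the include paths below are invented for illustration; only the HASH_*, DL_* and utstring_* macros come from the headers added by this commit.

/* Hypothetical usage sketch -- not part of the commit. All identifiers below
 * (struct event, by_id, ...) and the include paths are invented for illustration. */
#include <stdio.h>
#include <stdlib.h>
#include "uthash/uthash.h"
#include "uthash/utlist.h"
#include "uthash/utstring.h"

/* One allocation, two containers: "hh" hooks the element into a hash table
 * keyed by "id", while "prev"/"next" hook it into a doubly-linked list. */
struct event {
    int id;                    /* hash key */
    struct event *prev, *next; /* required by the DL_ macros */
    UT_hash_handle hh;         /* required by the HASH_ macros */
};

static int by_id(struct event *a, struct event *b) { return a->id - b->id; }

int main(void) {
    struct event *hash = NULL, *list = NULL, *e, *found, *tmp;
    UT_string *msg;
    int i, key = 2;

    for (i = 3; i > 0; i--) {
        e = (struct event*)calloc(1, sizeof(*e));
        e->id = i;
        HASH_ADD_INT(hash, id, e);   /* index the element by its int field "id" */
        DL_APPEND(list, e);          /* and record insertion order in the list */
    }

    DL_SORT(list, by_id);             /* O(n log n) mergesort from utlist.h */
    HASH_FIND_INT(hash, &key, found); /* constant-time lookup of id == 2 */

    utstring_new(msg);
    utstring_printf(msg, "%u events hashed, lookup of id %d %s",
                    HASH_COUNT(hash), key, found ? "succeeded" : "failed");
    puts(utstring_body(msg));
    utstring_free(msg);

    /* Teardown: detach from both containers before freeing the element. */
    DL_FOREACH_SAFE(list, e, tmp) {
        HASH_DEL(hash, e);
        DL_DELETE(list, e);
        free(e);
    }
    return 0;
}

No element is ever copied here: the hash and the list both point at the same allocation, which is why the teardown loop removes each element from both containers before freeing it.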