From 7715562cb273955cb85b9d57bf52b41747d95e2c Mon Sep 17 00:00:00 2001 From: Tom Petr Date: Wed, 19 Jan 2011 22:53:32 -0500 Subject: [PATCH] make orbited work --- lib/stomp | 1 + muxlist/mix/views.py | 9 +- muxlist/settings/base.py | 2 + muxlist/settings/tpetr.py | 2 + src/stomp.py-3.0.3/CHANGELOG | 13 + src/stomp.py-3.0.3/LICENSE | 202 +++ src/stomp.py-3.0.3/PKG-INFO | 10 + src/stomp.py-3.0.3/README | 36 + src/stomp.py-3.0.3/config.dox | 1510 +++++++++++++++++ src/stomp.py-3.0.3/setup.py | 47 + src/stomp.py-3.0.3/stomp.log.conf | 27 + src/stomp.py-3.0.3/stomp/__init__.py | 33 + src/stomp.py-3.0.3/stomp/backward.py | 63 + src/stomp.py-3.0.3/stomp/bridge/README-oracle | 31 + src/stomp.py-3.0.3/stomp/bridge/__init__.py | 0 src/stomp.py-3.0.3/stomp/bridge/bridge.py | 102 ++ src/stomp.py-3.0.3/stomp/bridge/oracleaq.py | 373 ++++ src/stomp.py-3.0.3/stomp/cli.py | 433 +++++ src/stomp.py-3.0.3/stomp/connect.py | 686 ++++++++ src/stomp.py-3.0.3/stomp/exception.py | 21 + src/stomp.py-3.0.3/stomp/listener.py | 133 ++ src/stomp.py-3.0.3/stomp/test/__init__.py | 5 + src/stomp.py-3.0.3/stomp/test/basictest.py | 32 + src/stomp.py-3.0.3/stomp/test/rabbitmqtest.py | 33 + src/stomp.py-3.0.3/stomp/test/ssltest.py | 33 + src/stomp.py-3.0.3/stomp/test/testlistener.py | 19 + .../stomp/test/threadingtest.py | 124 ++ src/stomp.py-3.0.3/stomp/test/transtest.py | 60 + src/stomp.py-3.0.3/stomp/utils.py | 138 ++ 29 files changed, 4171 insertions(+), 7 deletions(-) create mode 120000 lib/stomp create mode 100644 src/stomp.py-3.0.3/CHANGELOG create mode 100644 src/stomp.py-3.0.3/LICENSE create mode 100644 src/stomp.py-3.0.3/PKG-INFO create mode 100755 src/stomp.py-3.0.3/README create mode 100755 src/stomp.py-3.0.3/config.dox create mode 100755 src/stomp.py-3.0.3/setup.py create mode 100644 src/stomp.py-3.0.3/stomp.log.conf create mode 100755 src/stomp.py-3.0.3/stomp/__init__.py create mode 100755 src/stomp.py-3.0.3/stomp/backward.py create mode 100755 src/stomp.py-3.0.3/stomp/bridge/README-oracle create mode 100755 src/stomp.py-3.0.3/stomp/bridge/__init__.py create mode 100755 src/stomp.py-3.0.3/stomp/bridge/bridge.py create mode 100755 src/stomp.py-3.0.3/stomp/bridge/oracleaq.py create mode 100755 src/stomp.py-3.0.3/stomp/cli.py create mode 100755 src/stomp.py-3.0.3/stomp/connect.py create mode 100755 src/stomp.py-3.0.3/stomp/exception.py create mode 100755 src/stomp.py-3.0.3/stomp/listener.py create mode 100755 src/stomp.py-3.0.3/stomp/test/__init__.py create mode 100755 src/stomp.py-3.0.3/stomp/test/basictest.py create mode 100755 src/stomp.py-3.0.3/stomp/test/rabbitmqtest.py create mode 100755 src/stomp.py-3.0.3/stomp/test/ssltest.py create mode 100644 src/stomp.py-3.0.3/stomp/test/testlistener.py create mode 100755 src/stomp.py-3.0.3/stomp/test/threadingtest.py create mode 100755 src/stomp.py-3.0.3/stomp/test/transtest.py create mode 100755 src/stomp.py-3.0.3/stomp/utils.py diff --git a/lib/stomp b/lib/stomp new file mode 120000 index 0000000..5a53436 --- /dev/null +++ b/lib/stomp @@ -0,0 +1 @@ +../src/stomp.py-3.0.3/stomp \ No newline at end of file diff --git a/muxlist/mix/views.py b/muxlist/mix/views.py index 2e38360..0c92435 100644 --- a/muxlist/mix/views.py +++ b/muxlist/mix/views.py @@ -3,24 +3,20 @@ from muxlist.mix.models import Group from muxlist.music.forms import UploadForm from django.contrib.auth.decorators import login_required -from django.contrib.sites.models import Site from muxlist.music.models import Track +from settings import HOSTNAME import stomp import json import time -import desir def 
index(request, group_name): group = get_object_or_404(Group, name=group_name) - site = Site.objects.get_current() if not (group.is_public or (request.user.is_authenticated() and request.user in group.collaborators.all())): return HttpResponseRedirect('/account/login?next=%s' % request.path) entries = group.entries.all() form = UploadForm() - r = desir.Redis() - history = [json.loads(str) for str in r.lrange('mix_%s' % group_name, 0, -1)] - return render_to_response('mix/group.html', {'group': group, 'entries': entries, 'form': form, 'user': request.user, 'tracks': request.user.get_profile().uploaded_tracks.all(), 'history': history, 'site': site}) + return render_to_response('mix/group.html', {'group': group, 'entries': entries, 'form': form, 'user': request.user, 'tracks': request.user.get_profile().uploaded_tracks.all(), 'hostname': HOSTNAME}) def add_message(request, group_name): conn = stomp.Connection() @@ -39,6 +35,5 @@ def add_song(request, group_name): track = Track.objects.get(id=request.REQUEST['id']) msg = json.dumps({'type': 'song', 'user': request.user.username, 'artist': track.artist.name, 'title': track.title, 'url': track.get_location().url}) conn.send(msg, destination='/mix/%s' % group_name) - r = desir.Redis() r.lpush('mix_%s' % group_name, json.dumps([track.id, str(track.__unicode__())])) return HttpResponse('ok') diff --git a/muxlist/settings/base.py b/muxlist/settings/base.py index 6310c40..d7c3d62 100644 --- a/muxlist/settings/base.py +++ b/muxlist/settings/base.py @@ -88,3 +88,5 @@ AUTH_PROFILE_MODULE = 'account.UserProfile' LOGIN_URL = '/account/login' + +HOSTNAME = 'localhost' diff --git a/muxlist/settings/tpetr.py b/muxlist/settings/tpetr.py index da8a5ab..1800c37 100644 --- a/muxlist/settings/tpetr.py +++ b/muxlist/settings/tpetr.py @@ -10,3 +10,5 @@ } MEDIA_URL = 'http://muxli.st/media/' + +HOSTNAME = 'muxli.st' diff --git a/src/stomp.py-3.0.3/CHANGELOG b/src/stomp.py-3.0.3/CHANGELOG new file mode 100644 index 0000000..3ab8e6e --- /dev/null +++ b/src/stomp.py-3.0.3/CHANGELOG @@ -0,0 +1,13 @@ +stomp.py (3.0.2) beta; urgency=low + + * Fix for localhost connection problem (issue #17) + + -- Jason R Briggs Wed, 02 Jun 2010 21:48:00 +0000 + + +stomp.py (3.0.1) beta; urgency=low + + * Fixes for Oracle AQ bridge for Python3 + * Change to debian style changelog + + -- Jason R Briggs Wed, 07 Apr 2010 23:15:00 +0000 diff --git a/src/stomp.py-3.0.3/LICENSE b/src/stomp.py-3.0.3/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/src/stomp.py-3.0.3/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
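
For reference, the publishing side wired up in muxlist/mix/views.py above reduces to the stomp.py 3.0 connection API. A minimal sketch, assuming a STOMP broker on the library's default localhost:61613; the host/port, the '/mix/demo' destination, and the payload fields here are illustrative, not taken from the patch (only stomp.Connection() and send(msg, destination=...) are shown in the views.py hunk):

    import json
    import stomp

    # Connect to the STOMP broker that Orbited relays to the browser.
    conn = stomp.Connection()   # stomp.py default is [('localhost', 61613)]
    conn.start()
    conn.connect()

    # Publish a JSON message the same way the add_message/add_song views do.
    msg = json.dumps({'type': 'msg', 'user': 'demo', 'text': 'hello'})
    conn.send(msg, destination='/mix/demo')

    conn.disconnect()
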
diff --git a/src/stomp.py-3.0.3/PKG-INFO b/src/stomp.py-3.0.3/PKG-INFO new file mode 100644 index 0000000..38f8628 --- /dev/null +++ b/src/stomp.py-3.0.3/PKG-INFO @@ -0,0 +1,10 @@ +Metadata-Version: 1.0 +Name: stomp.py +Version: 3.0.3 +Summary: Stomp +Home-page: http://code.google.com/p/stomppy +Author: Jason R Briggs +Author-email: jasonrbriggs@gmail.com +License: Apache +Description: UNKNOWN +Platform: any diff --git a/src/stomp.py-3.0.3/README b/src/stomp.py-3.0.3/README new file mode 100755 index 0000000..9836c5c --- /dev/null +++ b/src/stomp.py-3.0.3/README @@ -0,0 +1,36 @@ +README +====== + +This distribution contains the "stomp.py" client library for +connecting Python to a message broker via the STOMP protocol. + +This file has two sections: + +- Quick Start +- What's In This Release + + +Quick Start +----------- +Please read the License (LICENSE) before using this library. + +Please visit the Stomp Project page at: + + http://code.google.com/p/stomppy + +That's where you'll find info on using the library, wiki docs, +downloads and the bug tracker. + + +What's In This Release +---------------------- +This release contains the following: + +README This file +LICENSE Software license +docs/html/index.html Automatically generated API documentation (doxygen) +stomp/ The stomp.py client library code +stomp/test/ Test code for the library +stomp/bridge/ Bridges for message brokers which don't support STOMP +stomp/bridge/README-oracle Info on the Oracle AQ bridge + diff --git a/src/stomp.py-3.0.3/config.dox b/src/stomp.py-3.0.3/config.dox new file mode 100755 index 0000000..84cc214 --- /dev/null +++ b/src/stomp.py-3.0.3/config.dox @@ -0,0 +1,1510 @@ +# Doxyfile 1.5.8 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project +# +# All text after a hash (#) is considered a comment and will be ignored +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (" ") + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# http://www.gnu.org/software/libiconv for the list of possible encodings. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded +# by quotes) that should identify the project. + +PROJECT_NAME = stomp.py + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or +# if some version control system is used. + +PROJECT_NUMBER = 3.0 + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. 
+ +OUTPUT_DIRECTORY = docs + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. +# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# The default language is English, other supported languages are: +# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, +# Croatian, Czech, Danish, Dutch, Farsi, Finnish, French, German, Greek, +# Hungarian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, Polish, +# Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, Slovene, +# Spanish, Swedish, and Ukrainian. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. +# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = YES + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. 
+# If left blank the directory from which doxygen is run is used as the +# path to strip. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful is your file systems +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like regular Qt-style comments +# (thus requiring an explicit @brief command for a brief description.) + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then Doxygen will +# interpret the first line (until the first dot) of a Qt-style +# comment as the brief description. If set to NO, the comments +# will behave just like regular Qt-style comments (thus requiring +# an explicit \brief command for a brief description.) + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for +# Java. For instance, namespaces will be presented as packages, qualified +# scopes will look different, etc. 
+ +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources only. Doxygen will then generate output that is more tailored for +# Fortran. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for +# VHDL. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it parses. +# With this tag you can assign which parser to use for a given extension. +# Doxygen has a built-in mapping, but you can override or extend it using this tag. +# The format is ext=language, where ext is a file extension, and language is one of +# the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP, +# Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat +# .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran), +# use: inc=Fortran f=C + +EXTENSION_MAPPING = + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. +# func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. +# Doxygen will parse them like normal C++ but will assume all classes use public +# instead of private inheritance when no explicit protection keyword is present. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate getter +# and setter methods for a property. Setting this option to YES (the default) +# will make doxygen to replace the get and set methods by a property in the +# documentation. This will only work if the methods are indeed getting or +# setting a simple type. If this is not the case, or you want to show the +# methods anyway, you should set this option to NO. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum +# is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. 
This can typically +# be useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. + +TYPEDEF_HIDES_STRUCT = NO + +# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to +# determine which symbols to keep in memory and which to flush to disk. +# When the cache is full, less often used symbols will be written to disk. +# For small to medium size projects (<1000 input files) the default value is +# probably good enough. For larger projects a too small cache size can cause +# doxygen to be busy swapping symbols to and from disk most of the time +# causing a significant performance penality. +# If the system has enough physical memory increasing the cache will improve the +# performance by keeping more symbols in memory. Note that the value works on +# a logarithmic scale so increasing the size by one will rougly double the +# memory usage. The cache size is given by this formula: +# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, +# corresponding to a cache size of 2^16 = 65536 symbols + +SYMBOL_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base +# name of the file that contains the anonymous namespace. By default +# anonymous namespace are hidden. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. 
+ +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = NO + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the +# hierarchy of group names into alphabetical order. If set to NO (the default) +# the group names will appear in their defined order. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. 
+ +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if sectionname ... \endif. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or define consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and defines in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. + +SHOW_USED_FILES = YES + +# If the sources in your project are distributed over multiple directories +# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy +# in the documentation. The default is NO. + +SHOW_DIRECTORIES = NO + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. +# This will remove the Files entry from the Quick Index and from the +# Folder Tree View (if specified). The default is YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the +# Namespaces page. +# This will remove the Namespaces entry from the Quick Index +# and from the Folder Tree View (if specified). The default is YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command , where is the value of +# the FILE_VERSION_FILTER tag, and is the name of an input file +# provided by doxygen. Whatever the program writes to standard output +# is used as the file version. See the manual for examples. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by +# doxygen. The layout file controls the global structure of the generated output files +# in an output format independent way. The create the layout file that represents +# doxygen's defaults, run doxygen with the -l option. You can optionally specify a +# file name after the option, if omitted DoxygenLayout.xml will be used as the name +# of the layout file. + +LAYOUT_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. 
+ +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. + +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be abled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = stomp + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is +# also the default input encoding. Doxygen uses libiconv (or the iconv built +# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for +# the list of possible encodings. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx +# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 + +FILE_PATTERNS = *.py + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used select whether or not files or +# directories that are symbolic links (a Unix filesystem feature) are excluded +# from the input. 
+ +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain image that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command , where +# is the value of the INPUT_FILTER tag, and is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. +# If FILTER_PATTERNS is specified, this tag will be +# ignored. + +INPUT_FILTER = "python2.5 /opt/local/Library/Frameworks/Python.framework/Versions/2.5/bin/doxypy.py" + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. +# Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. +# The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER +# is applied to all files. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = YES + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. 
+ +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C and C++ comments will always remain visible. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES +# then for each documented function all documented entities +# called/used by that function will be listed. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) +# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from +# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will +# link to the source code. +# Otherwise they will link to the documentation. + +REFERENCES_LINK_SOURCE = YES + +# If the USE_HTAGS tag is set to YES then the references to source code +# will point to the HTML generated by the htags(1) tool instead of doxygen +# built-in source browser. The htags tool is part of GNU's global source +# tagging system (see http://www.gnu.org/software/global/global.html). You +# will need version 4.8.6 or higher. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. + +ALPHABETICAL_INDEX = NO + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. 
+ +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If the tag is left blank doxygen +# will generate a default style sheet. Note that doxygen will try to copy +# the style sheet file to the HTML output directory, so don't put your own +# stylesheet in the HTML output directory as well, or it will be erased! + +HTML_STYLESHEET = + +# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, +# files or namespaces will be aligned in HTML using tables. If set to +# NO a bullet list will be used. + +HTML_ALIGN_MEMBERS = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. For this to work a browser that supports +# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox +# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). + +HTML_DYNAMIC_SECTIONS = NO + +# If the GENERATE_DOCSET tag is set to YES, additional index files +# will be generated that can be used as input for Apple's Xcode 3 +# integrated development environment, introduced with OSX 10.5 (Leopard). +# To create a documentation set, doxygen will generate a Makefile in the +# HTML output directory. Running make will produce the docset in that +# directory and running "make install" will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find +# it at startup. +# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information. + +GENERATE_DOCSET = NO + +# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the +# feed. A documentation feed provides an umbrella under which multiple +# documentation sets from a single provider (such as a company or product suite) +# can be grouped. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that +# should uniquely identify the documentation set bundle. This should be a +# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen +# will append .docset to the name. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. + +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). 
+ +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING +# is used to encode HtmlHelp index (hhk), content (hhc) and project file +# content. + +CHM_INDEX_ENCODING = + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER +# are set, an additional index file will be generated that can be used as input for +# Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated +# HTML documentation. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can +# be used to specify the file name of the resulting .qch file. +# The path specified is relative to the HTML output folder. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#namespace + +QHP_NAMESPACE = + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#virtual-folders + +QHP_VIRTUAL_FOLDER = doc + +# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add. +# For more information please see +# http://doc.trolltech.com/qthelpproject.html#custom-filters + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the custom filter to add.For more information please see +# Qt Help Project / Custom Filters. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's +# filter section matches. +# Qt Help Project / Filter Attributes. + +QHP_SECT_FILTER_ATTRS = + +# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can +# be used to specify the location of Qt's qhelpgenerator. +# If non-empty doxygen will try to run qhelpgenerator on the generated +# .qhp file. + +QHG_LOCATION = + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index at +# top of each HTML page. The value NO (the default) enables the index and +# the value YES disables it. + +DISABLE_INDEX = NO + +# This tag can be used to set the number of enum values (range [1..20]) +# that doxygen will group on one line in the generated HTML documentation. + +ENUM_VALUES_PER_LINE = 4 + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. +# If the tag value is set to FRAME, a side panel will be generated +# containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, +# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are +# probably better off using the HTML help feature. 
Other possible values +# for this tag are: HIERARCHIES, which will generate the Groups, Directories, +# and Class Hierarchy pages using a tree view instead of an ordered list; +# ALL, which combines the behavior of FRAME and HIERARCHIES; and NONE, which +# disables this behavior completely. For backwards compatibility with previous +# releases of Doxygen, the values YES and NO are equivalent to FRAME and NONE +# respectively. + +GENERATE_TREEVIEW = NONE + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 250 + +# Use this tag to change the font size of Latex formulas included +# as images in the HTML documentation. The default is 10. Note that +# when you change the font size after a successful doxygen run you need +# to manually remove any form_*.png images from the HTML output directory +# to force them to be regenerated. + +FORMULA_FONTSIZE = 10 + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. + +GENERATE_LATEX = YES + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, a4wide, letter, legal and +# executive. If left blank a4wide will be used. + +PAPER_TYPE = a4wide + +# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = YES + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = YES + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. 
This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = NO + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. + +LATEX_HIDE_INDICES = NO + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load stylesheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. + +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = NO + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. + +XML_OUTPUT = xml + +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_SCHEMA = + +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_DTD = + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = YES + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. +# This is useful +# if you want to understand what is going on. +# On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. 
+ +MACRO_EXPANSION = NO + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. + +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# in the INCLUDE_PATH (see below) will be search if a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. + +PREDEFINED = + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all function-like macros that are alone +# on a line, have an all uppercase name, and do not end with a semicolon. Such +# function macros are typically used for boiler-plate code, and will confuse +# the parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. +# Optionally an initial location of the external documentation +# can be added for each tagfile. The format of a tag file without +# this location is as follows: +# +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths or +# URLs. If a location is present for each tag, the installdox tool +# does not have to be run to correct the links. +# Note that each tag file must have a unique name +# (where the name does NOT include the path) +# If a tag file is not located in the directory in which doxygen +# is run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. 
If set to NO, only the current project's groups will +# be listed. + +EXTERNAL_GROUPS = YES + +# The PERL_PATH should be the absolute path and name of the perl script +# interpreter (i.e. the result of `which perl'). + +PERL_PATH = /usr/bin/perl + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base +# or super classes. Setting the tag to NO turns the diagrams off. Note that +# this option is superseded by the HAVE_DOT option below. This is only a +# fallback. It is recommended to install and use dot, since it yields more +# powerful graphs. + +CLASS_DIAGRAMS = YES + +# You can define message sequence charts within doxygen comments using the \msc +# command. Doxygen will then run the mscgen tool (see +# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the +# documentation. The MSCGEN_PATH tag allows you to specify the directory where +# the mscgen tool resides. If left empty the tool is assumed to be found in the +# default search path. + +MSCGEN_PATH = + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# have no effect if this option is set to NO (the default) + +HAVE_DOT = NO + +# By default doxygen will write a font called FreeSans.ttf to the output +# directory and reference it in all dot files that doxygen generates. This +# font does not include all possible unicode characters however, so when you need +# these (or just want a differently looking font) you can specify the font name +# using DOT_FONTNAME. You need need to make sure dot is able to find the font, +# which can be done by putting it in a standard location or by setting the +# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory +# containing the font. + +DOT_FONTNAME = FreeSans + +# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. +# The default size is 10pt. + +DOT_FONTSIZE = 10 + +# By default doxygen will tell dot to use the output directory to look for the +# FreeSans.ttf font (which doxygen will put there itself). If you specify a +# different font using DOT_FONTNAME you can set the path where dot +# can find it using this tag. + +DOT_FONTPATH = + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force the +# the CLASS_DIAGRAMS tag to NO. + +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and +# class references variables) of the class with other documented classes. 
+ +COLLABORATION_GRAPH = YES + +# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for groups, showing the direct groups dependencies + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. + +UML_LOOK = NO + +# If set to YES, the inheritance and collaboration graphs will show the +# relations between templates and their instances. + +TEMPLATE_RELATIONS = NO + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with +# other documented files. + +INCLUDE_GRAPH = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or +# indirectly include this file. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH and HAVE_DOT options are set to YES then +# doxygen will generate a call dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable call graphs +# for selected functions only using the \callgraph command. + +CALL_GRAPH = NO + +# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then +# doxygen will generate a caller dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable caller +# graphs for selected functions only using the \callergraph command. + +CALLER_GRAPH = NO + +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# will graphical hierarchy of all classes instead of a textual one. + +GRAPHICAL_HIERARCHY = YES + +# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES +# then doxygen will show the dependencies a directory has on other directories +# in a graphical way. The dependency relations are determined by the #include +# relations between the files in the directories. + +DIRECTORY_GRAPH = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. Possible values are png, jpg, or gif +# If left blank png will be used. + +DOT_IMAGE_FORMAT = png + +# The tag DOT_PATH can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. + +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS = + +# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of +# nodes that will be shown in the graph. If the number of nodes in a graph +# becomes larger than this value, doxygen will truncate the graph, which is +# visualized by representing a node as a red box. Note that doxygen if the +# number of direct children of the root node in a graph is already larger than +# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note +# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. 
+ +DOT_GRAPH_MAX_NODES = 50 + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the +# graphs generated by dot. A depth value of 3 means that only nodes reachable +# from the root by following a path via at most 3 edges will be shown. Nodes +# that lay further from the root node will be omitted. Note that setting this +# option to 1 or 2 may greatly reduce the computation time needed for large +# code bases. Also note that the size of a graph can be further restricted by +# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, because dot on Windows does not +# seem to support this out of the box. Warning: Depending on the platform used, +# enabling this option may lead to badly anti-aliased labels on the edges of +# a graph (i.e. they become hard to read). + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default. + +DOT_MULTI_TARGETS = NO + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. + +DOT_CLEANUP = YES + +#--------------------------------------------------------------------------- +# Options related to the search engine +#--------------------------------------------------------------------------- + +# The SEARCHENGINE tag specifies whether or not a search engine should be +# used. If set to NO the values of all tags below this one will be ignored. 
+ +SEARCHENGINE = NO diff --git a/src/stomp.py-3.0.3/setup.py b/src/stomp.py-3.0.3/setup.py new file mode 100755 index 0000000..34769f4 --- /dev/null +++ b/src/stomp.py-3.0.3/setup.py @@ -0,0 +1,47 @@ +import os +from distutils.core import setup, Command + +import stomp + +class TestCommand(Command): + user_options = [ ('test=', 't', 'specific test to run') ] + + def initialize_options(self): + self.test = 'basictest' + + def finalize_options(self): + pass + + def run(self): + exec('import stomp.test.%s' % self.test) + +class DoxygenCommand(Command): + user_options = [ ] + + def initialize_options(self): + pass + + def finalize_options(self): + pass + + def run(self): + os.system('doxygen config.dox') + +def version(): + s = [] + for num in stomp.__version__: + s.append(str(num)) + return '.'.join(s) + +setup( + name = 'stomp.py', + version = version(), + description = 'Stomp ', + license = 'Apache', + url = 'http://code.google.com/p/stomppy', + author = 'Jason R Briggs', + author_email = 'jasonrbriggs@gmail.com', + platforms = ['any'], + packages = ['stomp'], + cmdclass = { 'test' : TestCommand, 'docs' : DoxygenCommand } +) diff --git a/src/stomp.py-3.0.3/stomp.log.conf b/src/stomp.py-3.0.3/stomp.log.conf new file mode 100644 index 0000000..f41a25d --- /dev/null +++ b/src/stomp.py-3.0.3/stomp.log.conf @@ -0,0 +1,27 @@ +[loggers] +keys=root,stomp.py + +[handlers] +keys=consoleHandler + +[formatters] +keys=simpleFormatter + +[logger_root] +level=ERROR +handlers=consoleHandler + +[logger_stomp.py] +level=ERROR +handlers=consoleHandler +qualname=stomp + +[handler_consoleHandler] +class=StreamHandler +level=DEBUG +formatter=simpleFormatter +args=(sys.stdout,) + +[formatter_simpleFormatter] +format=%(asctime)s - %(name)s - %(levelname)s - %(message)s +datefmt= \ No newline at end of file diff --git a/src/stomp.py-3.0.3/stomp/__init__.py b/src/stomp.py-3.0.3/stomp/__init__.py new file mode 100755 index 0000000..8840edd --- /dev/null +++ b/src/stomp.py-3.0.3/stomp/__init__.py @@ -0,0 +1,33 @@ +""" +This provides basic connectivity to a message broker supporting the 'stomp' protocol. +At the moment ACK, SEND, SUBSCRIBE, UNSUBSCRIBE, BEGIN, ABORT, COMMIT, CONNECT and DISCONNECT operations +are supported. + +See the project page for more information. 
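A minimal sketch of the round trip described above, assuming a STOMP broker is already reachable on localhost:61613 and using an illustrative queue name (this is the plain client path; the operations shown are the SUBSCRIBE, SEND, ACK-auto and DISCONNECT frames listed in the docstring):

    import time
    import stomp

    class Printer(stomp.ConnectionListener):
        def on_message(self, headers, body):
            # called for each MESSAGE frame delivered on a subscription
            print('received: %s' % body)

    conn = stomp.Connection([('localhost', 61613)])
    conn.set_listener('', Printer())
    conn.start()                 # start the receiver thread
    conn.connect(wait=True)      # send CONNECT and wait for CONNECTED
    conn.subscribe(destination='/queue/test', ack='auto')
    conn.send(destination='/queue/test', message='hello world')
    time.sleep(2)                # crude wait for the MESSAGE frame to come back
    conn.disconnect()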
+ +Meta-Data +--------- +Author: Jason R Briggs +License: http://www.apache.org/licenses/LICENSE-2.0 +Start Date: 2005/12/01 +Last Revision Date: $Date: 2011/01/12 22:45 $ +Project Page: http://www.briggs.net.nz/log/projects/stomp.py + +Notes/Attribution +----------------- + * patch from Andreas Schobel + * patches from Julian Scheid of Rising Sun Pictures (http://open.rsp.com.au) + * patch from Fernando + * patches from Eugene Strulyov +""" + +import os +import sys +sys.path.insert(0, os.path.split(__file__)[0]) + +import connect, listener, exception + +__version__ = __version__ = (3, 0, 3) +Connection = connect.Connection +ConnectionListener = listener.ConnectionListener +StatsListener = listener.StatsListener diff --git a/src/stomp.py-3.0.3/stomp/backward.py b/src/stomp.py-3.0.3/stomp/backward.py new file mode 100755 index 0000000..bff03f6 --- /dev/null +++ b/src/stomp.py-3.0.3/stomp/backward.py @@ -0,0 +1,63 @@ +import sys + +# +# Functions for backwards compatibility +# + +def get_func_argcount(func): + """ + Return the argument count for a function + """ + if sys.hexversion > 0x03000000: + return func.__code__.co_argcount + else: + return func.func_code.co_argcount + +def input_prompt(prompt): + """ + Get user input + """ + if sys.hexversion > 0x03000000: + return input(prompt) + else: + return raw_input(prompt) + +def join(chars): + if sys.hexversion > 0x03000000: + return bytes('', 'UTF-8').join(chars).decode('UTF-8') + else: + return ''.join(chars) + +def socksend(conn, msg): + if sys.hexversion > 0x03000000: + conn.send(msg.encode()) + else: + conn.send(msg) + + +def getheader(headers, key): + if sys.hexversion > 0x03000000: + return headers[key] + else: + return headers.getheader(key) + + +class uuid(object): + def uuid4(*args): + """ + uuid courtesy of Carl Free Jr: + (http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/213761) + """ + t = int(time.time() * 1000) + r = int(random.random() * 100000000000000000) + + try: + a = socket.gethostbyname( socket.gethostname() ) + except: + # if we can't get a network address, just imagine one + a = random.random() * 100000000000000000 + data = str(t) + ' ' + str(r) + ' ' + str(a) + ' ' + str(args) + md5 = hashlib.md5() + md5.update(data.encode()) + data = md5.hexdigest() + return data \ No newline at end of file diff --git a/src/stomp.py-3.0.3/stomp/bridge/README-oracle b/src/stomp.py-3.0.3/stomp/bridge/README-oracle new file mode 100755 index 0000000..93eda4b --- /dev/null +++ b/src/stomp.py-3.0.3/stomp/bridge/README-oracle @@ -0,0 +1,31 @@ +Stomp Bridge for Oracle AQ +========================== + +This provides a STOMP bridging mechanism to Oracle AQ (Advanced Messaging). + +Before using, you'll need to grant various privileges to your user: + +GRANT RESOURCE TO ; +GRANT CONNECT TO ; +GRANT EXECUTE ANY PROCEDURE TO ; +GRANT aq_administrator_role TO ; +GRANT aq_user_role TO ; +GRANT EXECUTE ON dbms_aqadm TO ; +GRANT EXECUTE ON dbms_aq TO ; +GRANT EXECUTE ON dbms_aqin TO ; + +You will also need to create a sequence: + +CREATE SEQUENCE stomp_client_id_seq +/ + +Startup the Oracle Stomp Server, by running the following command (from the root directory of this project): + +bridge/oracleaq.py -D localhost -B 1521 -I xe -U test -W test -N localhost -T 8888 + +Run: + +bridge/oracleaq.py --help + +to see the make up of the command line arguments. 
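Once the bridge is running it accepts ordinary STOMP connections on port 61613 (the default; override with -P), so any stomp.py client can exercise it. A minimal sketch of a transactional send through the bridge, with an illustrative queue name:

    import stomp

    conn = stomp.Connection([('localhost', 61613)])
    conn.start()
    conn.connect(wait=True)

    # frames sent with a transaction header are held by the bridge until COMMIT
    txid = conn.begin()
    conn.send(destination='/queue/orders', message='first', transaction=txid)
    conn.send(destination='/queue/orders', message='second', transaction=txid)
    conn.commit(transaction=txid)
    conn.disconnect()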
+ diff --git a/src/stomp.py-3.0.3/stomp/bridge/__init__.py b/src/stomp.py-3.0.3/stomp/bridge/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/src/stomp.py-3.0.3/stomp/bridge/bridge.py b/src/stomp.py-3.0.3/stomp/bridge/bridge.py new file mode 100755 index 0000000..07a939b --- /dev/null +++ b/src/stomp.py-3.0.3/stomp/bridge/bridge.py @@ -0,0 +1,102 @@ +import os +import socket +import sys +import threading + +sys.path.append(os.getcwd()) + +from stomp import utils, backward + +try: + import uuid +except ImportError: + from backward import uuid + + +class StompServer(threading.Thread): + def __init__(self, listen_host_and_port, connection_class): + threading.Thread.__init__(self) + self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) + self.socket.bind(listen_host_and_port) + self.socket.listen(1) + print('Listening for STOMP connections on %s:%s' % listen_host_and_port) + self.running = True + self.connections = [ ] + self.connection_class = connection_class + + def notify(self, queue, msg_id): + for conn in self.connections: + conn.notify(queue, msg_id) + + def add_connection(self, conn): + self.connections.append(conn) + + def remove_connection(self, conn): + pos = self.connections.index(conn) + if pos >= 0: + del self.connections[pos] + + def shutdown(self): + pass + + def run(self): + try: + while self.running: + conn, addr = self.socket.accept() + conn = self.connection_class(self, conn, addr) + self.add_connection(conn) + conn.start() + finally: + for conn in self.connections: + conn.shutdown() + self.shutdown() + + +class StompConnection(threading.Thread): + def __init__(self, server, conn, addr): + threading.Thread.__init__(self) + self.server = server + self.conn = conn + self.addr = addr + self.running = True + self.id = str(uuid.uuid4()) + + def send_error(self, msg): + self.send('ERROR\nmessage: %s\n\n' % msg) + + def send(self, msg): + if not msg.endswith('\x00'): + msg = msg + '\x00' + backward.socksend(self.conn, msg) + + def run(self): + try: + data = [] + while self.running: + c = self.conn.recv(1) + if c == '' or len(c) == 0: + break + data.append(c) + if ord(c) == 0: + frame = backward.join(data) + print(frame) + (frame_type, headers, body) = utils.parse_frame(frame) + method = 'handle_%s' % frame_type + print('Method = %s' % method) + if hasattr(self, method): + getattr(self, method)(headers, body) + else: + self.send_error('invalid command %s' % frame_type) + data = [] + except Exception: + _, e, tb = sys.exc_info() + print(e) + import traceback + traceback.print_tb(tb) + self.server.remove_connection(self) + self.shutdown() + + def shutdown(self): + self.conn.close() + self.running = False diff --git a/src/stomp.py-3.0.3/stomp/bridge/oracleaq.py b/src/stomp.py-3.0.3/stomp/bridge/oracleaq.py new file mode 100755 index 0000000..ceaadc1 --- /dev/null +++ b/src/stomp.py-3.0.3/stomp/bridge/oracleaq.py @@ -0,0 +1,373 @@ +#! 
/usr/bin/env python + +import cx_Oracle +from optparse import OptionParser +import re +import sys +try: + from SocketServer import ThreadingMixIn, ThreadingTCPServer, BaseRequestHandler +except ImportError: + from socketserver import ThreadingMixIn, ThreadingTCPServer, BaseRequestHandler +try: + from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer +except ImportError: + from http.server import BaseHTTPRequestHandler, HTTPServer +import threading + +from bridge import StompServer, StompConnection + +from stomp import utils, backward + +global QUEUE_TABLE +QUEUE_TABLE = 'STOMP_MSG_QUEUE' + +SETUP_SQLS = ['''CREATE OR REPLACE PROCEDURE stomp_enq(queue_name in varchar2, msg in varchar2, props in varchar2) AS + enqueue_options dbms_aq.enqueue_options_t; + message_properties dbms_aq.message_properties_t; + message_handle RAW(16); +BEGIN + message_properties.user_property := sys.anyData.convertVarchar2(props); + dbms_aq.enqueue(queue_name => queue_name, enqueue_options => enqueue_options, message_properties => message_properties, payload => utl_raw.cast_to_raw(msg), msgid => message_handle); +END;''', +'''CREATE OR REPLACE PROCEDURE stomp_sub(qn in varchar2, subscriber_name in varchar2, notification_address in varchar2) AS +BEGIN +dbms_aqadm.add_subscriber(queue_name => qn, subscriber => sys.aq$_agent(subscriber_name, null, null)); +dbms_aq.register(sys.aq$_reg_info_list(sys.aq$_reg_info(qn || ':' || subscriber_name, DBMS_AQ.NAMESPACE_AQ, notification_address, HEXTORAW('FF')) ), 1); +END;''', +'''CREATE OR REPLACE PROCEDURE stomp_unsub(qn in varchar2, subscriber_name in varchar2, notification_address in varchar2) AS +subscriber_count int; +BEGIN +dbms_aq.unregister(sys.aq$_reg_info_list(sys.aq$_reg_info(qn || ':' || subscriber_name, dbms_aq.namespace_aq, notification_address, HEXTORAW('FF')) ), 1); +dbms_aqadm.remove_subscriber(queue_name => qn, subscriber => sys.aq$_agent(subscriber_name, null, null)); +select count(*) into subscriber_count from user_queue_subscribers where queue_name = qn; +IF subscriber_count = 0 THEN + dbms_aqadm.stop_queue(qn); + dbms_aqadm.drop_queue(qn); +END IF; +END;''', +'''CREATE OR REPLACE FUNCTION getvarchar2(anydata_p in sys.anydata) return varchar2 is + x number; + thevarchar2 varchar2(4000); +BEGIN + IF anydata_p IS NULL THEN + return ''; + ELSE + x := anydata_p.getvarchar2(thevarchar2); + return thevarchar2; + END IF; +END;'''] + +DEST_RE = re.compile(r'"[^"]*"."([^"]*)"') +MSGID_RE = re.compile(r'([^<]*)') + +class NotificationHandler(BaseHTTPRequestHandler): + ''' + Handler for message notifications from Oracle + ''' + def do_POST(self): + try: + length = backward.getheader(self.headers, 'Content-Length') + s = self.rfile.read(int(length)) + s = s.decode('UTF-8') + queue = DEST_RE.search(s).group(1) + msg_id = MSGID_RE.search(s).group(1).lstrip().rstrip() + self.send_response(200) + self.end_headers() + self.wfile.write("OK".encode()) + if msg_id not in self.server.msg_ids: + self.server.notify(queue, msg_id) + self.server.msg_ids.append(msg_id) + if len(self.server.msg_ids) > 100: + del self.server.msg_ids[0] + except Exception: + _, e, tb = sys.exc_info() + import traceback + traceback.print_tb(tb) + +class NotificationListener(ThreadingMixIn, HTTPServer, threading.Thread): + def __init__(self, notify, host_and_port): + HTTPServer.__init__(self, host_and_port, NotificationHandler) + threading.Thread.__init__(self) + self.setDaemon(True) + self.notify = notify + self.msg_ids = [ ] + print('Listening for Oracle Notifications on %s:%s' % host_and_port) 
+ + def serve_forever(self): + self.stop_serving = False + while not self.stop_serving: + self.handle_request() + + def run(self): + self.serve_forever() + + def stop(self): + self.stop_serving = True + +class OracleStompConnection(StompConnection): + def __init__(self, server, conn, addr): + StompConnection.__init__(self, server, conn, addr) + self.dbconn = cx_Oracle.connect('%s/%s@//%s:%s/%s' % (server.username, server.passwd, server.oracle_host_and_port[0], server.oracle_host_and_port[1], server.db)) + print("Connected to Oracle") + self.client_id = self.__get_client_id() + print("Client Id %s" % self.client_id) + self.queues = {} + self.transactions = {} + self.semaphore = threading.BoundedSemaphore(1) + + def __get_client_id(self): + cursor = self.dbconn.cursor() + cursor.execute('SELECT stomp_client_id_seq.nextval FROM dual') + row = cursor.fetchone() + return 's%s' % row[0] + + def __is_created(self, cursor, destination): + if destination in self.queues: + return True + else: + cursor.execute('SELECT COUNT(*) FROM user_queues WHERE name = UPPER(:queue)', queue = destination) + row = cursor.fetchone() + return row[0] > 0 + + def __create(self, cursor, destination): + cursor.callproc('DBMS_AQADM.CREATE_QUEUE', [], { 'queue_name' : destination, 'queue_table' : QUEUE_TABLE }) + cursor.callproc('DBMS_AQADM.START_QUEUE', [], { 'queue_name' : destination }) + + def __sanitise(self, headers): + if 'destination' in headers: + dest = headers['destination'].replace('/', '_') + if dest.startswith('_'): + dest = dest[1:] + headers['destination'] = dest.upper() + + def __get_notification_address(self): + return 'http://%s:%s' % (self.server.notification_host_and_port[0], self.server.notification_host_and_port[1]) + + def __commit_or_rollback(self, headers, commit = True): + self.__sanitise(headers) + if 'transaction' not in headers: + self.send_error('Transaction identifier is required') + return + transaction_id = headers['transaction'] + if transaction_id not in self.transactions: + self.send_error('Transaction %s does not exist' % transaction_id) + return + else: + if commit: + for (method, headers, body) in self.transactions[transaction_id]: + getattr(self, method)(headers, body) + del self.transactions[transaction_id] + + def __save(self, command, headers, body): + transaction_id = headers['transaction'] + if transaction_id not in self.transactions: + self.send_error('No such transaction %s' % transaction_id) + else: + del headers['transaction'] + self.transactions[transaction_id].append((command, headers, body)) + + def notify(self, queue, msg_id): + if queue in self.queues.keys(): + self.semaphore.acquire() + cursor = self.dbconn.cursor() + try: + cursor.execute('SELECT user_data, getvarchar2(user_prop) AS user_props FROM %s WHERE msgid = :msgid' % QUEUE_TABLE, msgid = msg_id) + row = cursor.fetchone() + headers = utils.parse_headers(row[1].split('\n')) + headers['destination'] = self.queues[queue] + headers['message-id'] = msg_id + hdr = [ ] + for key, val in headers.items(): + hdr.append('%s:%s' % (key, val)) + msg = row[0].read().decode('UTF-8') + self.send('MESSAGE\n%s\n\n%s' % ('\n'.join(hdr), msg)) + except Exception: + _, e, tb = sys.exc_info() + import traceback + traceback.print_tb(tb) + print(e) + finally: + cursor.close() + self.semaphore.release() + + def handle_ACK(self, headers, body): + self.__sanitise(headers) + if 'transaction' in headers: + self.__save('handle_ACK', headers, body) + else: + # FIXME + self.send_error('Not currently supported') + + + def handle_BEGIN(self, 
headers, body): + if 'transaction' not in headers: + self.send_error('Transaction identifier is required') + return + transaction_id = headers['transaction'] + if transaction_id in self.transactions: + self.send_error('Transaction %s already started' % transaction_id) + return + else: + self.transactions[transaction_id] = [ ] + + def handle_COMMIT(self, headers, body): + self.__commit_or_rollback(headers) + + def handle_ABORT(self, headers, body): + self.__commit_or_rollback(headers, False) + + def handle_CONNECT(self, headers, body): + self.send('CONNECTED\nsession: %s\n\n' % self.id) + + def handle_DISCONNECT(self, headers, body): + self.shutdown() + + def handle_SUBSCRIBE(self, headers, body): + self.semaphore.acquire() + cursor = self.dbconn.cursor() + try: + orig_qn = headers['destination'] + self.__sanitise(headers) + if not self.__is_created(cursor, headers['destination']): + self.__create(cursor, headers['destination']) + try: + cursor.callproc('stomp_sub', [headers['destination'], self.client_id, self.__get_notification_address()]) + self.queues[headers['destination']] = orig_qn + except Exception: + _, e, _ = sys.exc_info() + print(e) + finally: + cursor.close() + self.semaphore.release() + + def handle_UNSUBSCRIBE(self, headers, body): + self.semaphore.acquire() + cursor = self.dbconn.cursor() + try: + self.__sanitise(headers) + try: + cursor.callproc('stomp_unsub', [headers['destination'], self.client_id, self.__get_notification_address()]) + except: + pass + if headers['destination'] in self.queues.keys(): + del self.queues[headers['destination']] + finally: + cursor.close() + self.semaphore.release() + + def handle_SEND(self, headers, body): + self.__sanitise(headers) + if 'transaction' in headers: + self.__save('handle_SEND', headers, body) + else: + self.semaphore.acquire() + cursor = self.dbconn.cursor() + try: + if not self.__is_created(cursor, headers['destination']): + self.__create(cursor, headers['destination']) + hdr = [ ] + for key, val in headers.items(): + hdr.append('%s:%s\n' % (key, val)) + cursor.callproc('stomp_enq', [headers['destination'], body.rstrip(), ''.join(hdr)]) + self.dbconn.commit() + except Exception: + _, e, tb = sys.exc_info() + import traceback + traceback.print_tb(tb) + print(e) + finally: + cursor.close() + self.semaphore.release() + + def shutdown(self): + self.running = False + self.semaphore.acquire() + for queue in list(self.queues.keys()): + self.handle_UNSUBSCRIBE({'destination' : queue}, '') + self.dbconn.close() + self.semaphore.release() + StompConnection.shutdown(self) + + +class OracleStompServer(StompServer): + def __init__(self, listen_host_and_port, oracle_host_and_port, username, passwd, db, notification_host_and_port): + StompServer.__init__(self, listen_host_and_port, OracleStompConnection) + self.oracle_host_and_port = oracle_host_and_port + self.username = username + self.passwd = passwd + self.db = db + self.notification_host_and_port = notification_host_and_port + + # + # setup + # + dbconn = cx_Oracle.connect('%s/%s@//%s:%s/%s' % (username, passwd, oracle_host_and_port[0], oracle_host_and_port[1], db)) + cursor = dbconn.cursor() + for sql in SETUP_SQLS: + cursor.execute(sql) + cursor.execute('SELECT COUNT(*) FROM user_queue_tables WHERE queue_table = :queue', queue = QUEUE_TABLE) + row = cursor.fetchone() + if row[0] == 0: + cursor.callproc('DBMS_AQADM.CREATE_QUEUE_TABLE', [], {'queue_table' : QUEUE_TABLE, 'queue_payload_type' : 'raw', 'multiple_consumers' : True}) + cursor.close() + dbconn.close() + + # + # Oracle 
notification listener + # + self.listener = NotificationListener(self.notify, self.notification_host_and_port) + self.listener.start() + + def shutdown(self): + print('OracleStompServer shutdown') + + +def main(): + parser = OptionParser() + + parser.add_option('-P', '--port', type = int, dest = 'port', default = 61613, + help = 'Port to listen for STOMP connections. Defaults to 61613, if not specified.') + parser.add_option('-D', '--dbhost', type = 'string', dest = 'db_host', default = None, + help = 'Oracle hostname to connect to') + parser.add_option('-B', '--dbport', type = 'int', dest = 'db_port', default = None, + help = 'Oracle port to connect to') + parser.add_option('-I', '--dbinst', type = 'string', dest = 'db_inst', default = None, + help = 'Oracle database instance (for example "xe")') + parser.add_option('-U', '--user', type = 'string', dest = 'db_user', default = None, + help = 'Username for the database connection') + parser.add_option('-W', '--passwd', type = 'string', dest = 'db_passwd', default = None, + help = 'Password for the database connection') + parser.add_option('-N', '--nhost', type = 'string', dest = 'notification_host', + help = 'IP address (i.e. this machine) which is listening for Oracle AQ notifications.') + parser.add_option('-T', '--nport', type = 'int', dest = 'notification_port', + help = 'Port which is listening for Oracle AQ notifications.') + + (options, args) = parser.parse_args() + + if not options.db_host: + parser.error("Database hostname (-D) is required") + + if not options.db_port: + parser.error("Database port (-B) is required") + + if not options.db_inst: + parser.error("Database instance (-I) is required") + + if not options.db_user: + parser.error("Database user (-U) is required") + + if not options.db_passwd: + parser.error("Database password (-W) is required") + + if not options.notification_host: + parser.error("Notification host or IP (-N) is required") + + if not options.notification_port: + parser.error("Notification port (-T) is required") + + server = OracleStompServer(('', options.port), (options.db_host, options.db_port), options.db_user, options.db_passwd, options.db_inst, (options.notification_host, options.notification_port)) + server.start() + + +if __name__ == '__main__': + main() diff --git a/src/stomp.py-3.0.3/stomp/cli.py b/src/stomp.py-3.0.3/stomp/cli.py new file mode 100755 index 0000000..e42b3ca --- /dev/null +++ b/src/stomp.py-3.0.3/stomp/cli.py @@ -0,0 +1,433 @@ +import base64 +import os +import sys +import time + +from optparse import OptionParser + +from connect import Connection +from listener import ConnectionListener, StatsListener +from exception import NotConnectedException +from backward import input_prompt + +def sysout(msg, end='\n'): + sys.stdout.write(str(msg) + end) + +def get_commands(): + """ + Return a list of commands available on a \link StompCLI \endlink (the command line interface + to stomp.py) + """ + commands = [ ] + for f in dir(StompCLI): + if f.startswith('_') or f.startswith('on_') or f == 'c': + continue + else: + commands.append(f) + return commands + + +class StompCLI(ConnectionListener): + """ + A command line interface to the stomp.py client. See \link stomp::internal::connect::Connection \endlink + for more information on establishing a connection to a stomp server. 
+ """ + def __init__(self, host='localhost', port=61613, user='', passcode=''): + self.conn = Connection([(host, port)], user, passcode) + self.conn.set_listener('', self) + self.conn.start() + self.__commands = get_commands() + self.transaction_id = None + + def __print_async(self, frame_type, headers, body): + """ + Utility function to print a message and setup the command prompt + for the next input + """ + sysout("\r \r", end='') + sysout(frame_type) + for header_key in headers.keys(): + sysout('%s: %s' % (header_key, headers[header_key])) + sysout('') + sysout(body) + sysout('> ', end='') + sys.stdout.flush() + + def on_connecting(self, host_and_port): + """ + \see ConnectionListener::on_connecting + """ + self.conn.connect(wait=True) + + def on_disconnected(self): + """ + \see ConnectionListener::on_disconnected + """ + sysout("lost connection") + + def on_message(self, headers, body): + """ + \see ConnectionListener::on_message + + Special case: if the header 'filename' is present, the content is written out + as a file + """ + if 'filename' in headers: + content = base64.b64decode(body.encode()) + if os.path.exists(headers['filename']): + fname = '%s.%s' % (headers['filename'], int(time.time())) + else: + fname = headers['filename'] + f = open(fname, 'wb') + f.write(content) + f.close() + self.__print_async("MESSAGE", headers, "Saved file: %s" % fname) + else: + self.__print_async("MESSAGE", headers, body) + + def on_error(self, headers, body): + """ + \see ConnectionListener::on_error + """ + self.__print_async("ERROR", headers, body) + + def on_receipt(self, headers, body): + """ + \see ConnectionListener::on_receipt + """ + self.__print_async("RECEIPT", headers, body) + + def on_connected(self, headers, body): + """ + \see ConnectionListener::on_connected + """ + self.__print_async("CONNECTED", headers, body) + + def ack(self, args): + """ + Usage: + ack + + Required Parameters: + message-id - the id of the message being acknowledged + + Description: + The command 'ack' is used to acknowledge consumption of a message from a subscription using client + acknowledgment. When a client has issued a 'subscribe' with the ack flag set to client, any messages + received from that destination will not be considered to have been consumed (by the server) until + the message has been acknowledged. + """ + if len(args) < 2: + sysout("Expecting: ack ") + elif not self.transaction_id: + self.conn.ack(headers = { 'message-id' : args[1] }) + else: + self.conn.ack(headers = { 'message-id' : args[1] }, transaction=self.transaction_id) + + def abort(self, args): + """ + Usage: + abort + + Description: + Roll back a transaction in progress. + """ + if not self.transaction_id: + sysout("Not currently in a transaction") + else: + self.conn.abort(transaction = self.transaction_id) + self.transaction_id = None + + def begin(self, args): + """ + Usage: + begin + + Description: + Start a transaction. Transactions in this case apply to sending and acknowledging - + any messages sent or acknowledged during a transaction will be handled atomically based on the + transaction. + """ + if self.transaction_id: + sysout("Currently in a transaction (%s)" % self.transaction_id) + else: + self.transaction_id = self.conn.begin() + sysout('Transaction id: %s' % self.transaction_id) + + def commit(self, args): + """ + Usage: + commit + + Description: + Commit a transaction in progress. 
+ """ + if not self.transaction_id: + sysout("Not currently in a transaction") + else: + sysout('Committing %s' % self.transaction_id) + self.conn.commit(transaction=self.transaction_id) + self.transaction_id = None + + def disconnect(self, args): + """ + Usage: + disconnect + + Description: + Gracefully disconnect from the server. + """ + try: + self.conn.disconnect() + except NotConnectedException: + pass # ignore if no longer connected + + def send(self, args): + """ + Usage: + send + + Required Parameters: + destination - where to send the message + message - the content to send + + Description: + Sends a message to a destination in the messaging system. + """ + if len(args) < 3: + sysout('Expecting: send ') + elif not self.transaction_id: + self.conn.send(destination=args[1], message=' '.join(args[2:])) + else: + self.conn.send(destination=args[1], message=' '.join(args[2:]), transaction=self.transaction_id) + + def sendreply(self, args): + """ + Usage: + sendreply + + Required Parameters: + destination - where to send the message + correlation-id - the correlating identifier to send with the response + message - the content to send + + Description: + Sends a reply message to a destination in the messaging system. + """ + if len(args) < 4: + sysout('expecting: sendreply ') + else: + self.conn.send(destination=args[1], message="%s\n" % ' '.join(args[3:]), headers={'correlation-id': args[2]}) + + def sendfile(self, args): + """ + Usage: + sendfile + + Required Parameters: + destination - where to send the message + filename - the file to send + + Description: + Sends a file to a destination in the messaging system. + """ + if len(args) < 3: + sysout('Expecting: sendfile ') + elif not os.path.exists(args[2]): + sysout('File %s does not exist' % args[2]) + else: + s = open(args[2], mode='rb').read() + msg = base64.b64encode(s).decode() + if not self.transaction_id: + self.conn.send(destination=args[1], message=msg, filename=args[2]) + else: + self.conn.send(destination=args[1], message=msg, filename=args[2], transaction=self.transaction_id) + + def subscribe(self, args): + """ + Usage: + subscribe [ack] + + Required Parameters: + destination - the name to subscribe to + + Optional Parameters: + ack - how to handle acknowledgements for a message; either automatically (auto) or manually (client) + + Description: + Register to listen to a given destination. Like send, the subscribe command requires a destination + header indicating which destination to subscribe to. The ack parameter is optional, and defaults to + auto. + """ + if len(args) < 2: + sysout('Expecting: subscribe [ack]') + elif len(args) > 2: + sysout('Subscribing to "%s" with acknowledge set to "%s"' % (args[1], args[2])) + self.conn.subscribe(destination=args[1], ack=args[2]) + else: + sysout('Subscribing to "%s" with auto acknowledge' % args[1]) + self.conn.subscribe(destination=args[1], ack='auto') + + def unsubscribe(self, args): + """ + Usage: + unsubscribe + + Required Parameters: + destination - the name to unsubscribe from + + Description: + Remove an existing subscription - so that the client no longer receive messages from that destination. + """ + if len(args) < 2: + sysout('Expecting: unsubscribe ') + else: + sysout('Unsubscribing from "%s"' % args[1]) + self.conn.unsubscribe(destination=args[1]) + + def stats(self, args): + """ + Usage: + stats [on|off] + + Description: + Record statistics on messages sent, received, errors, etc. If no argument (on|off) is specified, + dump the current statistics. 
+ """ + if len(args) < 2: + stats = self.conn.get_listener('stats') + if stats: + sysout(stats) + else: + sysout('No stats available') + elif args[1] == 'on': + self.conn.set_listener('stats', StatsListener()) + elif args[1] == 'off': + self.conn.remove_listener('stats') + else: + sysout('Expecting: stats [on|off]') + + def run(self, args): + """ + Usage: + run + + Description: + Execute commands in a specified file + """ + if len(args) == 1: + sysout("Expecting: run ") + elif not os.path.exists(args[1]): + sysout("File %s was not found" % args[1]) + else: + filecommands = open(args[1]).read().split('\n') + for x in range(len(filecommands)): + split = filecommands[x].split() + if len(split) < 1: + continue + elif split[0] in self.__commands: + getattr(self, split[0])(split) + else: + sysout('Unrecognized command "%s" at line %s' % (split[0], x)) + break + + def help(self, args): + """ + Usage: + help [command] + + Description: + Display info on a specified command, or a list of available commands + """ + if len(args) == 1: + sysout('Usage: help , where command is one of the following:') + sysout(' ') + for f in self.__commands: + sysout('%s ' % f, end='') + sysout('') + return + elif not hasattr(self, args[1]): + sysout('There is no command "%s"' % args[1]) + return + + func = getattr(self, args[1]) + if hasattr(func, '__doc__') and getattr(func, '__doc__') is not None: + sysout(func.__doc__) + else: + sysout('There is no help for command "%s"' % args[1]) + man = help + + def version(self, args): + sysout('Stomp.py Version %s.%s' % internal.__version__) + ver = version + + def quit(self, args): + pass + exit = quit + + +def main(): + commands = get_commands() + + parser = OptionParser() + + parser.add_option('-H', '--host', type = 'string', dest = 'host', default = 'localhost', + help = 'Hostname or IP to connect to. Defaults to localhost if not specified.') + parser.add_option('-P', '--port', type = int, dest = 'port', default = 61613, + help = 'Port providing stomp protocol connections. 
Defaults to 61613 if not specified.') + parser.add_option('-U', '--user', type = 'string', dest = 'user', default = None, + help = 'Username for the connection') + parser.add_option('-W', '--password', type = 'string', dest = 'password', default = None, + help = 'Password for the connection') + parser.add_option('-F', '--file', type = 'string', dest = 'filename', + help = 'File containing commands to be executed, instead of prompting from the command prompt.') + + (options, args) = parser.parse_args() + + st = StompCLI(options.host, options.port, options.user, options.password) + try: + try: + if not options.filename: + # If the readline module is available, make command input easier + try: + import readline + def stomp_completer(text, state): + for command in commands[state:]: + if command.startswith(text): + return "%s " % command + return None + + readline.parse_and_bind("tab: complete") + readline.set_completer(stomp_completer) + readline.set_completer_delims("") + except ImportError: + pass # ignore unavailable readline module + + while True: + line = input_prompt("\r> ") + if not line or line.lstrip().rstrip() == '': + continue + line = line.lstrip().rstrip() + if line.startswith('quit') or line.startswith('exit') or line.startswith('disconnect'): + break + split = line.split() + command = split[0] + if command in commands: + getattr(st, command)(split) + else: + sysout('Unrecognized command') + else: + st.run(['run', options.filename]) + except RuntimeError: + pass + finally: + st.disconnect(None) + + + +# +# command line testing +# +if __name__ == '__main__': + main() + diff --git a/src/stomp.py-3.0.3/stomp/connect.py b/src/stomp.py-3.0.3/stomp/connect.py new file mode 100755 index 0000000..707374a --- /dev/null +++ b/src/stomp.py-3.0.3/stomp/connect.py @@ -0,0 +1,686 @@ +import math +import random +import re +import socket +import sys +import threading +import time +import types +import xml.dom.minidom + +try: + from cStringIO import StringIO +except ImportError: + from io import StringIO + +try: + import ssl + from ssl import SSLError +except ImportError: # python version < 2.6 without the backported ssl module + ssl = None + class SSLError: + pass + +import exception +import listener +import utils +import backward + +try: + import uuid +except ImportError: + from backward import uuid + +import logging +import logging.config +try: + logging.config.fileConfig('stomp.log.conf') +except: + pass +log = logging.getLogger('stomp.py') +if not log: + log = utils.DevNullLogger() + + +class Connection(object): + """ + Represents a STOMP client connection. + """ + + # ========= PRIVATE MEMBERS ========= + + # List of all host names (unqualified, fully-qualified, and IP + # addresses) that refer to the local host (both loopback interface + # and external interfaces). This is used for determining + # preferred targets. 
+ __localhost_names = [ "localhost", "127.0.0.1" ] + + try: + __localhost_names.append(socket.gethostbyname(socket.gethostname())) + except: + pass + + try: + __localhost_names.append(socket.gethostname()) + except: + pass + + try: + __localhost_names.append(socket.getfqdn(socket.gethostname())) + except: + pass + + # + # Used to parse the STOMP "content-length" header lines, + # + __content_length_re = re.compile('^content-length[:]\\s*(?P<value>[0-9]+)', re.MULTILINE) + + + def __init__(self, + host_and_ports = [ ('localhost', 61613) ], + user = None, + passcode = None, + prefer_localhost = True, + try_loopback_connect = True, + reconnect_sleep_initial = 0.1, + reconnect_sleep_increase = 0.5, + reconnect_sleep_jitter = 0.1, + reconnect_sleep_max = 60.0, + reconnect_attempts_max = 3, + use_ssl = False, + ssl_key_file = None, + ssl_cert_file = None, + ssl_ca_certs = None, + ssl_cert_validator = None): + """ + Initialize and start this connection. + + \param host_and_ports + a list of (host, port) tuples. + + \param prefer_localhost + if True and the local host is mentioned in the (host, + port) tuples, try to connect to this first + + \param try_loopback_connect + if True and the local host is found in the host + tuples, try connecting to it using loopback interface + (127.0.0.1) + + \param reconnect_sleep_initial + initial delay in seconds to wait before reattempting + to establish a connection if connection to any of the + hosts fails. + + \param reconnect_sleep_increase + factor by which the sleep delay is increased after + each connection attempt. For example, 0.5 means + to wait 50% longer than before the previous attempt, + 1.0 means wait twice as long, and 0.0 means keep + the delay constant. + + \param reconnect_sleep_max + maximum delay between connection attempts, regardless + of the reconnect_sleep_increase. + + \param reconnect_sleep_jitter + random additional time to wait (as a percentage of + the time determined using the previous parameters) + between connection attempts in order to avoid + stampeding. For example, a value of 0.1 means to wait + an extra 0%-10% (randomly determined) of the delay + calculated using the previous three parameters. + + \param reconnect_attempts_max + maximum attempts to reconnect + + \param use_ssl + connect using SSL to the socket. This wraps the + socket in a SSL connection. The constructor will + raise an exception if you ask for SSL, but it can't + find the SSL module. + + \param ssl_cert_file + the path to a X509 certificate + + \param ssl_key_file + the path to a X509 key file + + \param ssl_ca_certs + the path to a file containing CA certificates + to validate the server against. If this is not set, + server side certificate validation is not done. 
+ + \param ssl_cert_validator + function which performs extra validation on the client + certificate, for example checking the returned + certificate has a commonName attribute equal to the + hostname (to avoid man in the middle attacks) + + The signature is: + (OK, err_msg) = validation_function(cert, hostname) + + where OK is a boolean, and cert is a certificate structure + as returned by ssl.SSLSocket.getpeercert() + """ + + sorted_host_and_ports = [] + sorted_host_and_ports.extend(host_and_ports) + + # + # If localhost is preferred, make sure all (host, port) tuples that refer to the local host come first in the list + # + if prefer_localhost: + sorted_host_and_ports.sort(key = self.is_localhost) + + # + # If the user wishes to attempt connecting to local ports using the loopback interface, for each (host, port) tuple + # referring to a local host, add an entry with the host name replaced by 127.0.0.1 if it doesn't exist already + # + loopback_host_and_ports = [] + if try_loopback_connect: + for host_and_port in sorted_host_and_ports: + if self.is_localhost(host_and_port) == 1: + port = host_and_port[1] + if (not ("127.0.0.1", port) in sorted_host_and_ports + and not ("localhost", port) in sorted_host_and_ports): + loopback_host_and_ports.append(("127.0.0.1", port)) + + # + # Assemble the final, possibly sorted list of (host, port) tuples + # + self.__host_and_ports = [] + self.__host_and_ports.extend(loopback_host_and_ports) + self.__host_and_ports.extend(sorted_host_and_ports) + + self.__recvbuf = '' + + self.__listeners = {} + + self.__reconnect_sleep_initial = reconnect_sleep_initial + self.__reconnect_sleep_increase = reconnect_sleep_increase + self.__reconnect_sleep_jitter = reconnect_sleep_jitter + self.__reconnect_sleep_max = reconnect_sleep_max + self.__reconnect_attempts_max = reconnect_attempts_max + + self.__connect_headers = {} + if user is not None and passcode is not None: + self.__connect_headers['login'] = user + self.__connect_headers['passcode'] = passcode + + self.__socket = None + self.__socket_semaphore = threading.BoundedSemaphore(1) + self.__current_host_and_port = None + + self.__receiver_thread_exit_condition = threading.Condition() + self.__receiver_thread_exited = False + + self.blocking = None + + if use_ssl and not ssl: + raise Exception("SSL connection requested, but SSL library not found.") + self.__ssl = use_ssl + self.__ssl_cert_file = ssl_cert_file + self.__ssl_key_file = ssl_key_file + self.__ssl_ca_certs = ssl_ca_certs + self.__ssl_cert_validator = ssl_cert_validator + + def is_localhost(self, host_and_port): + """ + Return true if the specified host+port is a member of the 'localhost' list of hosts + """ + (host, port) = host_and_port + if host in Connection.__localhost_names: + return 1 + else: + return 2 + + # + # Manage the connection + # + + def start(self): + """ + Start the connection. This should be called after all + listeners have been registered. If this method is not called, + no frames will be received by the connection. + """ + self.__running = True + self.__attempt_connection() + thread = threading.Thread(None, self.__receiver_loop) + thread.start() + + def stop(self): + """ + Stop the connection. This is equivalent to calling + disconnect() but will do a clean shutdown by waiting for the + receiver thread to exit. 
+ """ + self.disconnect() + + self.__receiver_thread_exit_condition.acquire() + if not self.__receiver_thread_exited: + self.__receiver_thread_exit_condition.wait() + self.__receiver_thread_exit_condition.release() + + def get_host_and_port(self): + """ + Return a (host, port) tuple indicating which STOMP host and + port is currently connected, or None if there is currently no + connection. + """ + return self.__current_host_and_port + + def is_connected(self): + """ + Return true if the socket managed by this connection is connected + """ + try: + return self.__socket is not None and self.__socket.getsockname()[1] != 0 + except socket.error: + return False + + # + # Manage objects listening to incoming frames + # + + def set_listener(self, name, listener): + """ + Set a named listener on this connection + + \see listener::ConnectionListener + + \param name the name of the listener + \param listener the listener object + """ + self.__listeners[name] = listener + + def remove_listener(self, name): + """ + Remove a listener according to the specified name + + \param name the name of the listener to remove + """ + del self.__listeners[name] + + def get_listener(self, name): + """ + Return a named listener + + \param name the listener to return + """ + if name in self.__listeners: + return self.__listeners[name] + else: + return None + + # + # STOMP transmissions + # + + def subscribe(self, headers={}, **keyword_headers): + """ + Send a SUBSCRIBE frame to subscribe to a queue + """ + self.__send_frame_helper('SUBSCRIBE', '', utils.merge_headers([headers, keyword_headers]), [ 'destination' ]) + + def unsubscribe(self, headers={}, **keyword_headers): + """ + Send an UNSUBSCRIBE frame to unsubscribe from a queue + """ + self.__send_frame_helper('UNSUBSCRIBE', '', utils.merge_headers([headers, keyword_headers]), [ ('destination', 'id') ]) + + def send(self, message='', headers={}, **keyword_headers): + """ + Send a message (SEND) frame + """ + if '\x00' in message: + content_length_headers = {'content-length': len(message)} + else: + content_length_headers = {} + self.__send_frame_helper('SEND', message, utils.merge_headers([headers, + keyword_headers, + content_length_headers]), [ 'destination' ]) + self.__notify('send', headers, message) + + def ack(self, headers={}, **keyword_headers): + """ + Send an ACK frame, to acknowledge receipt of a message + """ + self.__send_frame_helper('ACK', '', utils.merge_headers([headers, keyword_headers]), [ 'message-id' ]) + + def begin(self, headers={}, **keyword_headers): + """ + Send a BEGIN frame to start a transaction + """ + use_headers = utils.merge_headers([headers, keyword_headers]) + if not 'transaction' in use_headers.keys(): + use_headers['transaction'] = str(uuid.uuid4()) + self.__send_frame_helper('BEGIN', '', use_headers, [ 'transaction' ]) + return use_headers['transaction'] + + def abort(self, headers={}, **keyword_headers): + """ + Send an ABORT frame to rollback a transaction + """ + self.__send_frame_helper('ABORT', '', utils.merge_headers([headers, keyword_headers]), [ 'transaction' ]) + + def commit(self, headers={}, **keyword_headers): + """ + Send a COMMIT frame to commit a transaction (send pending messages) + """ + self.__send_frame_helper('COMMIT', '', utils.merge_headers([headers, keyword_headers]), [ 'transaction' ]) + + def connect(self, headers={}, **keyword_headers): + """ + Send a CONNECT frame to start a connection + """ + if 'wait' in keyword_headers and keyword_headers['wait']: + while not self.is_connected(): time.sleep(0.1) 
+ del keyword_headers['wait'] + self.__send_frame_helper('CONNECT', '', utils.merge_headers([self.__connect_headers, headers, keyword_headers]), [ ]) + + def disconnect(self, headers={}, **keyword_headers): + """ + Send a DISCONNECT frame to finish a connection + """ + self.__send_frame_helper('DISCONNECT', '', utils.merge_headers([self.__connect_headers, headers, keyword_headers]), [ ]) + self.__running = False + if self.__socket is not None: + if self.__ssl: + # + # Even though we don't want to use the socket, unwrap is the only API method which does a proper SSL shutdown + # + try: + self.__socket = self.__socket.unwrap() + except Exception: + # + # unwrap seems flaky on Win with the backported ssl mod, so catch any exception and log it + # + _, e, _ = sys.exc_info() + log.warn(e) + elif hasattr(socket, 'SHUT_RDWR'): + self.__socket.shutdown(socket.SHUT_RDWR) + # + # split this into a separate check, because sometimes the socket is nulled between shutdown and this call + # + if self.__socket is not None: + self.__socket.close() + self.__current_host_and_port = None + + def __convert_dict(self, payload): + """ + Encode a python dictionary as a <map>...</map> structure. + """ + xmlStr = "<map>\n" + for key in payload: + xmlStr += "<entry>\n" + xmlStr += "<string>%s</string>" % key + xmlStr += "<string>%s</string>" % payload[key] + xmlStr += "</entry>\n" + xmlStr += "</map>" + return xmlStr + + def __send_frame_helper(self, command, payload, headers, required_header_keys): + """ + Helper function for sending a frame after verifying that a + given set of headers are present. + + \param command the command to send + + \param payload the frame's payload + + \param headers a dictionary containing the frame's headers + + \param required_header_keys a sequence enumerating all + required header keys. If an element in this sequence is itself + a tuple, that tuple is taken as a list of alternatives, one of + which must be present. + + \throws KeyError if one of the required header keys is + not present in the header map. + """ + for required_header_key in required_header_keys: + if type(required_header_key) == tuple: + found_alternative = False + for alternative in required_header_key: + if alternative in headers.keys(): + found_alternative = True + if not found_alternative: + raise KeyError("Command %s requires one of the following headers: %s" % (command, str(required_header_key))) + elif not required_header_key in headers.keys(): + raise KeyError("Command %s requires header %r" % (command, required_header_key)) + self.__send_frame(command, headers, payload) + + def __send_frame(self, command, headers={}, payload=''): + """ + Send a STOMP frame. 
+ + \param command the frame command + + \param headers a map of headers (key-val pairs) + + \param payload the message payload + """ + if type(payload) == dict: + headers["transformation"] = "jms-map-xml" + payload = self.__convert_dict(payload) + + if self.__socket is not None: + try: + frame = [ command + '\n' ] + for key, val in headers.items(): + frame.append('%s:%s\n' % (key, val)) + frame.append('\n') + if payload: + frame.append(payload) + frame.append('\x00') + frame = ''.join(frame) + self.__socket_semaphore.acquire() + try: + self.__socket.sendall(frame.encode()) + finally: + self.__socket_semaphore.release() + except Exception: + _, e, _ = sys.exc_info() + print(e) + log.debug("Sent frame: type=%s, headers=%r, body=%r" % (command, headers, payload)) + else: + raise exception.NotConnectedException() + + def __notify(self, frame_type, headers=None, body=None): + """ + Utility function for notifying listeners of incoming and outgoing messages + + \param frame_type the type of message + + \param headers the map of headers associated with the message + + \param body the content of the message + """ + for listener in self.__listeners.values(): + if not hasattr(listener, 'on_%s' % frame_type): + log.debug('listener %s has no method on_%s' % (listener, frame_type)) + continue + + if frame_type == 'connecting': + listener.on_connecting(self.__current_host_and_port) + continue + + notify_func = getattr(listener, 'on_%s' % frame_type) + params = backward.get_func_argcount(notify_func) + if params >= 3: + notify_func(headers, body) + elif params == 2: + notify_func(headers) + else: + notify_func() + + def __receiver_loop(self): + """ + Main loop listening for incoming data. + """ + try: + try: + threading.currentThread().setName("StompReceiver") + while self.__running: + log.debug('starting receiver loop') + + if self.__socket is None: + break + + try: + try: + self.__notify('connecting') + + while self.__running: + frames = self.__read() + + for frame in frames: + (frame_type, headers, body) = utils.parse_frame(frame) + log.debug("Received frame: result=%r, headers=%r, body=%r" % (frame_type, headers, body)) + frame_type = frame_type.lower() + if frame_type in [ 'connected', 'message', 'receipt', 'error' ]: + self.__notify(frame_type, headers, body) + else: + log.warning('Unknown response frame type: "%s" (frame length was %d)' % (frame_type, len(frame))) + finally: + try: + self.__socket.close() + except: + pass # ignore errors when attempting to close socket + self.__socket = None + self.__current_host_and_port = None + except exception.ConnectionClosedException: + if self.__running: + log.error("Lost connection") + self.__notify('disconnected') + # + # Clear out any half-received messages after losing connection + # + self.__recvbuf = '' + continue + else: + break + except: + log.exception("An unhandled exception was encountered in the stomp receiver loop") + + finally: + self.__receiver_thread_exit_condition.acquire() + self.__receiver_thread_exited = True + self.__receiver_thread_exit_condition.notifyAll() + self.__receiver_thread_exit_condition.release() + + def __read(self): + """ + Read the next frame(s) from the socket. 
+ """ + fastbuf = StringIO() + while self.__running: + try: + c = self.__socket.recv(1024) + c = c.decode() + except Exception: + _, e, _ = sys.exc_info() + c = '' + if len(c) == 0: + raise exception.ConnectionClosedException + fastbuf.write(c) + if '\x00' in c: + break + self.__recvbuf += fastbuf.getvalue() + fastbuf.close() + result = [] + + if len(self.__recvbuf) > 0 and self.__running: + while True: + pos = self.__recvbuf.find('\x00') + + if pos >= 0: + frame = self.__recvbuf[0:pos] + preamble_end = frame.find('\n\n') + if preamble_end >= 0: + content_length_match = Connection.__content_length_re.search(frame[0:preamble_end]) + if content_length_match: + content_length = int(content_length_match.group('value')) + content_offset = preamble_end + 2 + frame_size = content_offset + content_length + if frame_size > len(frame): + # + # Frame contains NUL bytes, need to read more + # + if frame_size < len(self.__recvbuf): + pos = frame_size + frame = self.__recvbuf[0:pos] + else: + # + # Haven't read enough data yet, exit loop and wait for more to arrive + # + break + result.append(frame) + self.__recvbuf = self.__recvbuf[pos+1:] + else: + break + return result + + def __attempt_connection(self): + """ + Try connecting to the (host, port) tuples specified at construction time. + """ + sleep_exp = 1 + connect_count = 0 + while self.__running and self.__socket is None and connect_count < self.__reconnect_attempts_max: + for host_and_port in self.__host_and_ports: + try: + log.debug("Attempting connection to host %s, port %s" % host_and_port) + self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if self.__ssl: # wrap socket + if self.__ssl_ca_certs: + cert_validation = ssl.CERT_REQUIRED + else: + cert_validation = ssl.CERT_NONE + self.__socket = ssl.wrap_socket(self.__socket, keyfile = self.__ssl_key_file, + certfile = self.__ssl_cert_file, cert_reqs = cert_validation, + ca_certs=self.__ssl_ca_certs, ssl_version = ssl.PROTOCOL_SSLv3) + self.__socket.settimeout(None) + if self.blocking is not None: + self.__socket.setblocking(self.blocking) + self.__socket.connect(host_and_port) + + # + # Validate server cert + # + if self.__ssl and self.__ssl_cert_validator: + cert = self.__socket.getpeercert() + (ok, errmsg) = apply(self.__ssl_cert_validator, (cert, host_and_port[0])) + if not ok: + raise SSLError("Server certificate validation failed: %s" % errmsg) + + self.__current_host_and_port = host_and_port + log.info("Established connection to host %s, port %s" % host_and_port) + break + except socket.error: + self.__socket = None + if isinstance(sys.exc_info()[1], tuple): + exc = sys.exc_info()[1][1] + else: + exc = sys.exc_info()[1] + connect_count += 1 + print(exc) + log.warning("Could not connect to host %s, port %s: %s" % (host_and_port[0], host_and_port[1], exc)) + + if self.__socket is None: + sleep_duration = (min(self.__reconnect_sleep_max, + ((self.__reconnect_sleep_initial / (1.0 + self.__reconnect_sleep_increase)) + * math.pow(1.0 + self.__reconnect_sleep_increase, sleep_exp))) + * (1.0 + random.random() * self.__reconnect_sleep_jitter)) + sleep_end = time.time() + sleep_duration + log.debug("Sleeping for %.1f seconds before attempting reconnect" % sleep_duration) + while self.__running and time.time() < sleep_end: + time.sleep(0.2) + + if sleep_duration < self.__reconnect_sleep_max: + sleep_exp += 1 + + if not self.__socket: + raise exception.ReconnectFailedException \ No newline at end of file diff --git a/src/stomp.py-3.0.3/stomp/exception.py 
b/src/stomp.py-3.0.3/stomp/exception.py new file mode 100755 index 0000000..d934252 --- /dev/null +++ b/src/stomp.py-3.0.3/stomp/exception.py @@ -0,0 +1,21 @@ +class ConnectionClosedException(Exception): + """ + Raised in the receiver thread when the connection has been closed + by the server. + """ + pass + + +class NotConnectedException(Exception): + """ + Raised by Connection.__send_frame when there is currently no server + connection. + """ + pass + + +class ReconnectFailedException(Exception): + """ + Raised by Connection.__attempt_connection when reconnection attempts + have exceeded Connection.__reconnect_attempts_max. + """ \ No newline at end of file diff --git a/src/stomp.py-3.0.3/stomp/listener.py b/src/stomp.py-3.0.3/stomp/listener.py new file mode 100755 index 0000000..b4b86de --- /dev/null +++ b/src/stomp.py-3.0.3/stomp/listener.py @@ -0,0 +1,133 @@ +class ConnectionListener(object): + """ + This class should be used as a base class for objects registered + using Connection.set_listener(). + """ + def on_connecting(self, host_and_port): + """ + Called by the STOMP connection once a TCP/IP connection to the + STOMP server has been established or re-established. Note that + at this point, no connection has been established on the STOMP + protocol level. For this, you need to invoke the "connect" + method on the connection. + + \param host_and_port a tuple containing the host name and port + number to which the connection has been established. + """ + pass + + def on_connected(self, headers, body): + """ + Called by the STOMP connection when a CONNECTED frame is + received, that is after a connection has been established or + re-established. + + \param headers a dictionary containing all headers sent by the + server as key/value pairs. + + \param body the frame's payload. This is usually empty for + CONNECTED frames. + """ + pass + + def on_disconnected(self): + """ + Called by the STOMP connection when a TCP/IP connection to the + STOMP server has been lost. No messages should be sent via + the connection until it has been reestablished. + """ + pass + + def on_message(self, headers, body): + """ + Called by the STOMP connection when a MESSAGE frame is + received. + + \param headers a dictionary containing all headers sent by the + server as key/value pairs. + + \param body the frame's payload - the message body. + """ + pass + + def on_receipt(self, headers, body): + """ + Called by the STOMP connection when a RECEIPT frame is + received, sent by the server if requested by the client using + the 'receipt' header. + + \param headers a dictionary containing all headers sent by the + server as key/value pairs. + + \param body the frame's payload. This is usually empty for + RECEIPT frames. + """ + pass + + def on_error(self, headers, body): + """ + Called by the STOMP connection when an ERROR frame is + received. + + \param headers a dictionary containing all headers sent by the + server as key/value pairs. + + \param body the frame's payload - usually a detailed error + description. + """ + pass + + def on_send(self, headers, body): + """ + Called by the STOMP connection when it is in the process of sending a message + + \param headers a dictionary containing the headers that will be sent with this message + + \param body the message payload + """ + pass + + +class StatsListener(ConnectionListener): + """ + A connection listener for recording statistics on messages sent and received. 
+ """ + def __init__(self): + self.errors = 0 + self.connections = 0 + self.messages_recd = 0 + self.messages_sent = 0 + + def on_error(self, headers, message): + """ + \see ConnectionListener::on_error + """ + self.errors += 1 + + def on_connecting(self, host_and_port): + """ + \see ConnectionListener::on_connecting + """ + self.connections += 1 + + def on_message(self, headers, message): + """ + \see ConnectionListener::on_message + """ + self.messages_recd += 1 + + def on_send(self, headers, message): + """ + \see ConnectionListener::on_send + """ + self.messages_sent += 1 + + def __str__(self): + """ + Return a string containing the current statistics (messages sent and received, + errors, etc) + """ + return '''Connections: %s +Messages sent: %s +Messages received: %s +Errors: %s''' % (self.connections, self.messages_sent, self.messages_recd, self.errors) diff --git a/src/stomp.py-3.0.3/stomp/test/__init__.py b/src/stomp.py-3.0.3/stomp/test/__init__.py new file mode 100755 index 0000000..203f897 --- /dev/null +++ b/src/stomp.py-3.0.3/stomp/test/__init__.py @@ -0,0 +1,5 @@ +import os +import sys +sys.path.insert(0, os.path.split(__file__)[0]) + +__all__ = [ 'basictest', 'ssltest', 'transtest', 'rabbitmqtest', 'threadingtest' ] \ No newline at end of file diff --git a/src/stomp.py-3.0.3/stomp/test/basictest.py b/src/stomp.py-3.0.3/stomp/test/basictest.py new file mode 100755 index 0000000..447216d --- /dev/null +++ b/src/stomp.py-3.0.3/stomp/test/basictest.py @@ -0,0 +1,32 @@ +import time +import unittest + +import stomp + +import testlistener + + +class TestBasicSend(unittest.TestCase): + + def setUp(self): + pass + + def testbasic(self): + conn = stomp.Connection([('127.0.0.2', 61613), ('localhost', 61613)]) + listener = testlistener.TestListener() + conn.set_listener('', listener) + conn.start() + conn.connect(wait=True) + conn.subscribe(destination='/queue/test', ack='auto') + + conn.send('this is a test', destination='/queue/test') + + time.sleep(3) + conn.disconnect() + + self.assert_(listener.connections == 1, 'should have received 1 connection acknowledgement') + self.assert_(listener.messages == 1, 'should have received 1 message') + self.assert_(listener.errors == 0, 'should not have received any errors') + +suite = unittest.TestLoader().loadTestsFromTestCase(TestBasicSend) +unittest.TextTestRunner(verbosity=2).run(suite) diff --git a/src/stomp.py-3.0.3/stomp/test/rabbitmqtest.py b/src/stomp.py-3.0.3/stomp/test/rabbitmqtest.py new file mode 100755 index 0000000..e48c843 --- /dev/null +++ b/src/stomp.py-3.0.3/stomp/test/rabbitmqtest.py @@ -0,0 +1,33 @@ +import time +import unittest + +import stomp + +from . 
import testlistener + + +class TestRabbitMQSend(unittest.TestCase): + + def setUp(self): + pass + + def testbasic(self): + conn = stomp.Connection([('0.0.0.0', 61613), ('127.0.0.1', 61613)], 'guest', 'guest') + listener = testlistener.TestListener() + conn.set_listener('', listener) + conn.start() + conn.connect(wait=True) + conn.subscribe(destination='/queue/test', ack='auto') + + conn.send('this is a test', destination='/queue/test') + + time.sleep(2) + conn.disconnect() + + self.assert_(listener.connections == 1, 'should have received 1 connection acknowledgement') + self.assert_(listener.messages == 1, 'should have received 1 message') + self.assert_(listener.errors == 0, 'should not have received any errors') + + +suite = unittest.TestLoader().loadTestsFromTestCase(TestRabbitMQSend) +unittest.TextTestRunner(verbosity=2).run(suite) diff --git a/src/stomp.py-3.0.3/stomp/test/ssltest.py b/src/stomp.py-3.0.3/stomp/test/ssltest.py new file mode 100755 index 0000000..72a82d3 --- /dev/null +++ b/src/stomp.py-3.0.3/stomp/test/ssltest.py @@ -0,0 +1,33 @@ +import time +import unittest + +import stomp + +import testlistener + + +class TestSSLSend(unittest.TestCase): + + def setUp(self): + pass + + def testsslbasic(self): + conn = stomp.Connection([('127.0.0.1', 61612), ('localhost', 61612)], use_ssl = True) + listener = testlistener.TestListener() + conn.set_listener('', listener) + conn.start() + conn.connect(wait=True) + conn.subscribe(destination='/queue/test', ack='auto') + + conn.send('this is a test', destination='/queue/test') + + time.sleep(3) + conn.disconnect() + + self.assert_(listener.connections == 1, 'should have received 1 connection acknowledgement') + self.assert_(listener.messages == 1, 'should have received 1 message') + self.assert_(listener.errors == 0, 'should not have received any errors') + + +suite = unittest.TestLoader().loadTestsFromTestCase(TestSSLSend) +unittest.TextTestRunner(verbosity=2).run(suite) diff --git a/src/stomp.py-3.0.3/stomp/test/testlistener.py b/src/stomp.py-3.0.3/stomp/test/testlistener.py new file mode 100644 index 0000000..a64cac0 --- /dev/null +++ b/src/stomp.py-3.0.3/stomp/test/testlistener.py @@ -0,0 +1,19 @@ +from stomp import ConnectionListener + +class TestListener(ConnectionListener): + def __init__(self): + self.errors = 0 + self.connections = 0 + self.messages = 0 + + def on_error(self, headers, message): + print('received an error %s' % message) + self.errors = self.errors + 1 + + def on_connecting(self, host_and_port): + print('connecting %s %s' % host_and_port) + self.connections = self.connections + 1 + + def on_message(self, headers, message): + print('received a message %s' % message) + self.messages = self.messages + 1 diff --git a/src/stomp.py-3.0.3/stomp/test/threadingtest.py b/src/stomp.py-3.0.3/stomp/test/threadingtest.py new file mode 100755 index 0000000..008013c --- /dev/null +++ b/src/stomp.py-3.0.3/stomp/test/threadingtest.py @@ -0,0 +1,124 @@ +try: + from queue import Queue, Empty, Full +except ImportError: + from Queue import Queue, Empty, Full +import threading +import sys +import time +import unittest + +import stomp + +import testlistener + +class MQ(object): + def __init__(self): + self.connection = stomp.Connection([('localhost', 61613)]) + self.connection.set_listener('', None) + self.connection.start() + self.connection.connect(wait=True) + + def send(self, topic, msg, persistent='true', retry=False): + self.connection.send(destination="/topic/%s" % topic, message=msg, + persistent=persistent) +mq = MQ() + + +class 
TestThreading(unittest.TestCase): + + def setUp(self): + """Test that mq sends don't wedge their threads. + + Starts a number of sender threads, and runs for a set amount of + time. Each thread sends messages as fast as it can, and after each + send, pops from a Queue. Meanwhile, the Queue is filled with one + marker per second. If the Queue fills, the test fails, as that + indicates that all threads are no longer emptying the queue, and thus + must be wedged in their send() calls. + + """ + self.Q = Queue(10) + self.Cmd = Queue() + self.Error = Queue() + self.clients = 20 + self.threads = [] + self.runfor = 20 + for i in range(0, self.clients): + t = threading.Thread(name="client %s" % i, + target=self.make_sender(i)) + t.setDaemon(1) + self.threads.append(t) + + def tearDown(self): + for t in self.threads: + if not t.isAlive: + print("thread", t, "died") + self.Cmd.put('stop') + for t in self.threads: + t.join() + print() + print() + errs = [] + while 1: + try: + errs.append(self.Error.get(block=False)) + except Empty: + break + print("Dead threads:", len(errs), "of", self.clients) + etype = {} + for ec, ev, tb in errs: + if ec in etype: + etype[ec] = etype[ec] + 1 + else: + etype[ec] = 1 + for k in sorted(etype.keys()): + print("%s: %s" % (k, etype[k])) + mq.connection.disconnect() + + def make_sender(self, i): + Q = self.Q + Cmd = self.Cmd + Error = self.Error + def send(i=i, Q=Q, Cmd=Cmd, Error=Error): + counter = 0 + print("%s starting" % i) + try: + while 1: + # print "%s sending %s" % (i, counter) + try: + mq.send('testclientwedge', + 'Message %s:%s' % (i, counter)) + except: + Error.put(sys.exc_info()) + # thread will die + raise + else: + # print "%s sent %s" % (i, counter) + try: + Q.get(block=False) + except Empty: + pass + try: + if Cmd.get(block=False): + break + except Empty: + pass + counter +=1 + finally: + print("final", i, counter) + return send + + def test_threads_dont_wedge(self): + for t in self.threads: + t.start() + start = time.time() + while time.time() - start < self.runfor: + try: + self.Q.put(1, False) + time.sleep(1.0) + except Full: + assert False, "Failed: 'request' queue filled up" + print("passed") + +suite = unittest.TestLoader().loadTestsFromTestCase(TestThreading) +unittest.TextTestRunner(verbosity=2).run(suite) diff --git a/src/stomp.py-3.0.3/stomp/test/transtest.py b/src/stomp.py-3.0.3/stomp/test/transtest.py new file mode 100755 index 0000000..fb4ee1d --- /dev/null +++ b/src/stomp.py-3.0.3/stomp/test/transtest.py @@ -0,0 +1,60 @@ +import time +import unittest + +import stomp + +import testlistener + + +class TestTrans(unittest.TestCase): + + def setUp(self): + conn = stomp.Connection([('127.0.0.2', 61613), ('localhost', 61613)]) + listener = testlistener.TestListener() + conn.set_listener('', listener) + conn.start() + conn.connect(wait=True) + self.conn = conn + self.listener = listener + + def tearDown(self): + self.conn.disconnect() + + def testcommit(self): + self.conn.subscribe(destination='/queue/test', ack='auto') + trans_id = self.conn.begin() + self.conn.send('this is a test1', destination='/queue/test', transaction=trans_id) + self.conn.send('this is a test2', destination='/queue/test', transaction=trans_id) + self.conn.send('this is a test3', destination='/queue/test', transaction=trans_id) + + time.sleep(3) + + self.assert_(self.listener.connections == 1, 'should have received 1 connection acknowledgement') + self.assert_(self.listener.messages == 0, 'should not have received any messages') + + self.conn.commit(transaction = trans_id) + 
time.sleep(3) + + self.assert_(self.listener.messages == 3, 'should have received 3 messages') + self.assert_(self.listener.errors == 0, 'should not have received any errors') + + def testabort(self): + self.conn.subscribe(destination='/queue/test', ack='auto') + trans_id = self.conn.begin() + self.conn.send('this is a test1', destination='/queue/test', transaction=trans_id) + self.conn.send('this is a test2', destination='/queue/test', transaction=trans_id) + self.conn.send('this is a test3', destination='/queue/test', transaction=trans_id) + + time.sleep(3) + + self.assert_(self.listener.connections == 1, 'should have received 1 connection acknowledgement') + self.assert_(self.listener.messages == 0, 'should not have received any messages') + + self.conn.abort(transaction = trans_id) + time.sleep(3) + + self.assert_(self.listener.messages == 0, 'should not have received any messages') + self.assert_(self.listener.errors == 0, 'should not have received any errors') + +suite = unittest.TestLoader().loadTestsFromTestCase(TestTrans) +unittest.TextTestRunner(verbosity=2).run(suite) diff --git a/src/stomp.py-3.0.3/stomp/utils.py b/src/stomp.py-3.0.3/stomp/utils.py new file mode 100755 index 0000000..904db75 --- /dev/null +++ b/src/stomp.py-3.0.3/stomp/utils.py @@ -0,0 +1,138 @@ +import random +import re +import sys +import time +import xml.dom.minidom + +try: + import hashlib +except ImportError: + import md5 as hashlib + +# +# Used to parse STOMP header lines in the format "key:value", +# +HEADER_LINE_RE = re.compile('(?P<key>[^:]+)[:](?P<value>.*)') + + +class DevNullLogger(object): + """ + Dummy logging class for environments without the logging module + """ + def log(self, msg): + """ + Log a message (print to console) + """ + print(msg) + + def devnull(self, msg): + """ + Dump a message (i.e. send to /dev/null) + """ + pass + + debug = devnull + info = devnull + warning = log + error = log + critical = log + exception = log + + def isEnabledFor(self, lvl): + """ + Always return False + """ + return False + + +def parse_headers(lines, offset=0): + headers = {} + for header_line in lines[offset:]: + header_match = HEADER_LINE_RE.match(header_line) + if header_match: + headers[header_match.group('key')] = header_match.group('value') + return headers + +def parse_frame(frame): + """ + Parse a STOMP frame into a (frame_type, headers, body) tuple, + where frame_type is the frame type as a string (e.g. MESSAGE), + headers is a map containing all header key/value pairs, and + body is a string containing the frame's payload. + """ + preamble_end = frame.find('\n\n') + preamble = frame[0:preamble_end] + preamble_lines = preamble.split('\n') + body = frame[preamble_end+2:] + + # Skip any leading newlines + first_line = 0 + while first_line < len(preamble_lines) and len(preamble_lines[first_line]) == 0: + first_line += 1 + + # Extract frame type + frame_type = preamble_lines[first_line] + + # Put headers into a key/value map + headers = parse_headers(preamble_lines, first_line + 1) + + if 'transformation' in headers: + body = transform(body, headers['transformation']) + + return (frame_type, headers, body) + +def transform(body, trans_type): + """ + Perform body transformation. Currently, the only supported transformation is + 'jms-map-xml', which converts a map into a python dictionary. This can be extended + to support other transformation types. 
+ + The body has the following format: + <map> + <entry> + <string>name</string> + <string>Dejan</string> + </entry> + <entry> + <string>city</string> + <string>Belgrade</string> + </entry> + </map> + + (see http://docs.codehaus.org/display/STOMP/Stomp+v1.1+Ideas) + + \param body the content of a message + + \param trans_type the type of transformation + """ + if trans_type != 'jms-map-xml': + return body + + try: + entries = {} + doc = xml.dom.minidom.parseString(body) + rootElem = doc.documentElement + for entryElem in rootElem.getElementsByTagName("entry"): + pair = [] + for node in entryElem.childNodes: + if not isinstance(node, xml.dom.minidom.Element): continue + pair.append(node.firstChild.nodeValue) + assert len(pair) == 2 + entries[pair[0]] = pair[1] + return entries + except Exception: + _, e, _ = sys.exc_info() + # + # unable to parse message. return original + # + return body + +def merge_headers(header_map_list): + """ + Helper function for combining multiple header maps into one. + """ + headers = {} + for header_map in header_map_list: + for header_key in header_map.keys(): + headers[header_key] = header_map[header_key] + return headers \ No newline at end of file
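For reference, a minimal usage sketch of the vendored client (a sketch only, not part of the diff above). It assumes a STOMP broker is listening on localhost:61613; the queue name '/queue/example' and the listener name 'stats' are illustrative, while the calls themselves (Connection, set_listener, StatsListener, begin/send/commit, disconnect) are the ones added by this patch.

    import time
    import stomp
    from stomp.listener import StatsListener

    # Connect to a local broker (the default host/port used throughout the patch).
    conn = stomp.Connection([('localhost', 61613)])
    stats = StatsListener()
    conn.set_listener('stats', stats)
    conn.start()                     # connect the socket and start the receiver thread
    conn.connect(wait=True)          # send the CONNECT frame once the socket is up

    conn.subscribe(destination='/queue/example', ack='auto')

    # Send two messages atomically inside a transaction.
    tx = conn.begin()                # begin() generates and returns a transaction id
    conn.send('first message', destination='/queue/example', transaction=tx)
    conn.send('second message', destination='/queue/example', transaction=tx)
    conn.commit(transaction=tx)

    time.sleep(2)                    # give the receiver thread time to deliver
    print(stats)                     # StatsListener reports connections/messages/errors
    conn.disconnect()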