diff --git a/pelican/plugins/webassets/vendor/__init__.py b/pelican/plugins/webassets/vendor/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/pelican/plugins/webassets/vendor/webassets/__init__.py b/pelican/plugins/webassets/vendor/webassets/__init__.py new file mode 100644 index 0000000..0433191 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/__init__.py @@ -0,0 +1,6 @@ +__version__ = "3.0.0a1" + + +# Make a couple frequently used things available right here. +from .bundle import Bundle +from .env import Environment diff --git a/pelican/plugins/webassets/vendor/webassets/bundle.py b/pelican/plugins/webassets/vendor/webassets/bundle.py new file mode 100644 index 0000000..bbb23d8 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/bundle.py @@ -0,0 +1,927 @@ +from contextlib import contextmanager +import os +from os import path +from webassets import six +from webassets.six.moves import map +from webassets.six.moves import zip + +from .filter import get_filter +from .merge import (FileHunk, UrlHunk, FilterTool, merge, merge_filters, + select_filters, MoreThanOneFilterError, NoFilters) +from .updater import SKIP_CACHE +from .exceptions import BundleError, BuildError +from .utils import cmp_debug_levels, hash_func +from .env import ConfigurationContext, DictConfigStorage, BaseEnvironment +from .utils import is_url, calculate_sri_on_file + + +__all__ = ('Bundle', 'get_all_bundle_files',) + + +def has_placeholder(s): + return '%(version)s' in s + + +class ContextWrapper(object): + """Implements a hierarchy-aware configuration context. + + Since each bundle can provide settings that augment the values of + the parent bundle, and ultimately the environment, as the bundle + hierarchy is processed, this class is used to provide an interface + that searches through the hierarchy of settings. It's what you get + when you are given a ``ctx`` value. + """ + + def __init__(self, parent, overwrites=None): + self._parent, self._overwrites = parent, overwrites + + def __getitem__(self, key): + try: + if self._overwrites is None: + raise KeyError() + return self._overwrites.config[key] + except KeyError: + return self._parent.config.get(key) + + def __getattr__(self, item): + try: + return self.getattr(self._overwrites, item) + except (KeyError, AttributeError, EnvironmentError): + return self.getattr(self._parent, item) + + def getattr(self, object, item): + # Helper because Bundles are special in that the config attributes + # are in bundle.config (bundle.config.url vs env.url or ctx.url). + if isinstance(object, Bundle): + return getattr(object.config, item) + else: + return getattr(object, item) + + def get(self, key, default=None): + try: + return self.__getitem__(key) + except KeyError: + return default + + @property + def environment(self): + """Find the root environment context.""" + if isinstance(self._parent, BaseEnvironment): + return self._parent + return self._parent.environment + + +def wrap(parent, overwrites): + """Return a context object where the values from ``overwrites`` + augment the ``parent`` configuration. See :class:`ContextWrapper`. + """ + return ContextWrapper(parent, overwrites) + + +class BundleConfig(DictConfigStorage, ConfigurationContext): + """A configuration dict that also supports Environment-like attribute + access, i.e. ``config['resolver']`` and ``config.resolver``. 
+    """
+    def __init__(self, bundle):
+        DictConfigStorage.__init__(self, bundle)
+        ConfigurationContext.__init__(self, self)
+
+
+class Bundle(object):
+    """A bundle is the unit webassets uses to organize groups of media files,
+    which filters to apply and where to store them.
+
+    Bundles can be nested arbitrarily.
+
+    A note on the connection between a bundle and an "environment" instance:
+    The bundle requires an environment that it belongs to. Without an
+    environment, it lacks information about how to behave, and cannot know
+    where relative paths are actually based. However, I don't want to make the
+    ``Bundle.__init__`` syntax more complicated than it already is by requiring
+    an Environment object to be passed. This would be a particular nuisance
+    when nested bundles are used. Further, nested bundles are never explicitly
+    connected to an Environment, and what's more, the same child bundle can be
+    used in multiple parent bundles.
+
+    This is the reason why basically every method of the Bundle class takes an
+    ``env`` parameter - so a parent bundle can provide the environment for
+    child bundles that do not know it.
+    """
+
+    def __init__(self, *contents, **options):
+        self._env = options.pop('env', None)
+        self.contents = contents
+        self.output = options.pop('output', None)
+        self.filters = options.pop('filters', None)
+        self.depends = options.pop('depends', [])
+        self.version = options.pop('version', [])
+        self.remove_duplicates = options.pop('remove_duplicates', True)
+        self.extra = options.pop('extra', {})
+        self.merge = options.pop('merge', True)
+
+        self._config = BundleConfig(self)
+        self._config.update(options.pop('config', {}))
+        if 'debug' in options:
+            debug = options.pop('debug')
+            if debug is not None:
+                self._config['debug'] = debug
+
+        if options:
+            raise TypeError("got unexpected keyword argument '%s'" %
+                            list(options.keys())[0])
+
+    def __repr__(self):
+        return "<%s output=%s, filters=%s, contents=%s>" % (
+            self.__class__.__name__,
+            self.output,
+            self.filters,
+            self.contents,
+        )
+
+    @property
+    def config(self):
+        # This is a property so that users are not tempted to assign
+        # a custom dictionary which won't uphold our caseless semantics.
+        return self._config
+
+    def _get_debug(self):
+        return self.config.get('debug', None)
+    def _set_debug(self, value):
+        self.config['debug'] = value
+    debug = property(_get_debug, _set_debug)
+
+    def _get_filters(self):
+        return self._filters
+    def _set_filters(self, value):
+        """Filters may be specified in a variety of different ways, including
+        by giving their name; we need to make sure we resolve everything to an
+        actual filter instance.
+        """
+        if value is None:
+            self._filters = ()
+            return
+
+        if isinstance(value, six.string_types):
+            # 333: Simplify w/o condition?
+            if six.PY3:
+                filters = map(str.strip, value.split(','))
+            else:
+                filters = map(unicode.strip, unicode(value).split(','))
+        elif isinstance(value, (list, tuple)):
+            filters = value
+        else:
+            filters = [value]
+        self._filters = [get_filter(f) for f in filters]
+    filters = property(_get_filters, _set_filters)
+
+    def _get_contents(self):
+        return self._contents
+    def _set_contents(self, value):
+        self._contents = value
+        self._resolved_contents = None
+    contents = property(_get_contents, _set_contents)
+
+    def _get_extra(self):
+        if not self._extra and not has_files(self):
+            # If this bundle has no extra values of its own, and only
+            # wraps child bundles, use the extra values of those.
+ result = {} + for bundle in self.contents: + if bundle.extra is not None: + result.update(bundle.extra) + return result + else: + return self._extra + def _set_extra(self, value): + self._extra = value + extra = property(_get_extra, _set_extra, doc="""A custom user dict of + extra values attached to this bundle. Those will be available in + template tags, and can be used to attach things like a CSS + 'media' value.""") + + def resolve_contents(self, ctx=None, force=False): + """Return an actual list of source files. + + What the user specifies as the bundle contents cannot be + processed directly. There may be glob patterns of course. We + may need to search the load path. It's common for third party + extensions to provide support for referencing assets spread + across multiple directories. + + This passes everything through :class:`Environment.resolver`, + through which this process can be customized. + + At this point, we also validate source paths to complain about + missing files early. + + The return value is a list of 2-tuples ``(original_item, + abspath)``. In the case of urls and nested bundles both tuple + values are the same. + + Set ``force`` to ignore any cache, and always re-resolve + glob patterns. + """ + if not ctx: + ctx = wrap(self.env, self) + + # TODO: We cache the values, which in theory is problematic, since + # due to changes in the env object, the result of the globbing may + # change. Not to mention that a different env object may be passed + # in. We should find a fix for this. + if getattr(self, '_resolved_contents', None) is None or force: + resolved = [] + for item in self.contents: + try: + result = ctx.resolver.resolve_source(ctx, item) + except IOError as e: + raise BundleError(e) + if not isinstance(result, list): + result = [result] + + # Exclude the output file. + # TODO: This will not work for nested bundle contents. If it + # doesn't work properly anyway, should be do it in the first + # place? If there are multiple versions, it will fail as well. + # TODO: There is also the question whether we can/should + # exclude glob duplicates. + if self.output: + try: + result.remove(self.resolve_output(ctx)) + except (ValueError, BundleError): + pass + + resolved.extend(map(lambda r: (item, r), result)) + + # Exclude duplicate files from the bundle. + # This will only keep the first occurrence of a file in the bundle. + if self.remove_duplicates: + resolved = self._filter_duplicates(resolved) + + self._resolved_contents = resolved + + return self._resolved_contents + + @staticmethod + def _filter_duplicates(resolved): + # Keep track of the resolved filenames that have been seen, and only + # add it the first time it is encountered. + seen_files = set() + result = [] + for item, r in resolved: + if r not in seen_files: + seen_files.add(r) + result.append((item, r)) + return result + + def _get_depends(self): + return self._depends + def _set_depends(self, value): + self._depends = [value] if isinstance(value, six.string_types) else value + self._resolved_depends = None + depends = property(_get_depends, _set_depends, doc= + """Allows you to define an additional set of files (glob syntax + is supported), which are considered when determining whether a + rebuild is required. + """) + + def resolve_depends(self, ctx): + # TODO: Caching is as problematic here as it is in resolve_contents(). 
+ if not self.depends: + return [] + if getattr(self, '_resolved_depends', None) is None: + resolved = [] + for item in self.depends: + try: + result = ctx.resolver.resolve_source(ctx, item) + except IOError as e: + raise BundleError(e) + if not isinstance(result, list): + result = [result] + resolved.extend(result) + self._resolved_depends = resolved + return self._resolved_depends + + def get_version(self, ctx=None, refresh=False): + """Return the current version of the Bundle. + + If the version is not cached in memory, it will first look in the + manifest, then ask the versioner. + + ``refresh`` causes a value in memory to be ignored, and the version + to be looked up anew. + """ + if not ctx: + ctx = wrap(self.env, self) + if not self.version or refresh: + version = None + # First, try a manifest. This should be the fastest way. + if ctx.manifest: + version = ctx.manifest.query(self, ctx) + # Often the versioner is able to help. + if not version: + from .version import VersionIndeterminableError + if ctx.versions: + try: + version = ctx.versions.determine_version(self, ctx) + assert version + except VersionIndeterminableError as e: + reason = e + else: + reason = '"versions" option not set' + if not version: + raise BundleError(( + 'Cannot find version of %s. There is no manifest ' + 'which knows the version, and it cannot be ' + 'determined dynamically, because: %s') % (self, reason)) + self.version = version + return self.version + + def resolve_output(self, ctx=None, version=None): + """Return the full, absolute output path. + + If a %(version)s placeholder is used, it is replaced. + """ + if not ctx: + ctx = wrap(self.env, self) + output = ctx.resolver.resolve_output_to_path(ctx, self.output, self) + if has_placeholder(output): + output = output % {'version': version or self.get_version(ctx)} + return output + + def id(self): + """This is used to determine when a bundle definition has changed so + that a rebuild is required. + + The hash therefore should be built upon data that actually affect the + final build result. + """ + return hash_func((tuple(self.contents), + self.output, + tuple(self.filters), + bool(self.debug))) + # Note how self.depends is not included here. It could be, but we + # really want this hash to only change for stuff that affects the + # actual output bytes. Note that modifying depends will be effective + # after the first rebuild in any case. + + @property + def is_container(self): + """Return true if this is a container bundle, that is, a bundle that + acts only as a container for a number of sub-bundles. + + It must not contain any files of its own, and must have an empty + ``output`` attribute. + """ + return not has_files(self) and not self.output + + @contextmanager + def bind(self, env): + old_env = self._env + self._env = env + try: + yield + finally: + self._env = old_env + + def _get_env(self): + if self._env is None: + raise BundleError('Bundle is not connected to an environment') + return self._env + def _set_env(self, env): + self._env = env + env = property(_get_env, _set_env) + + def _merge_and_apply(self, ctx, output, force, parent_debug=None, + parent_filters=None, extra_filters=None, + disable_cache=None): + """Internal recursive build method. + + ``parent_debug`` is the debug setting used by the parent bundle. This + is not necessarily ``bundle.debug``, but rather what the calling method + in the recursion tree is actually using. + + ``parent_filters`` are what the parent passes along, for us to be + applied as input filters. 
Like ``parent_debug``, it is a collection of
+        the filters of all parents in the hierarchy.
+
+        ``extra_filters`` may exist if the parent is a container bundle passing
+        filters along to its children; these are applied as input and output
+        filters (since there is no parent who could do the latter), and they
+        are not passed further down the hierarchy (but instead they become part
+        of ``parent_filters``).
+
+        ``disable_cache`` is necessary because in some cases, when an external
+        bundle dependency has changed, we must not rely on the cache, since the
+        cache key is not taking into account changes in those dependencies
+        (for now).
+        """
+
+        parent_filters = parent_filters or []
+        extra_filters = extra_filters or []
+        # Determine the debug level to use. It determines if and which filters
+        # should be applied.
+        #
+        # The debug level is inherited (if the parent bundle is merging, a
+        # child bundle clearly cannot act in full debug=True mode). Bundles
+        # may define a custom ``debug`` attribute, but child bundles may only
+        # ever lower it, not increase it.
+        #
+        # If no parent_debug is given (top level), use the Environment value.
+        parent_debug = parent_debug if parent_debug is not None else ctx.debug
+        # Consider bundle's debug attribute and other things.
+        current_debug_level = _effective_debug_level(
+            ctx, self, extra_filters, default=parent_debug)
+        # Special case: If we end up with ``True``, assume ``False`` instead.
+        # The alternative would be for the build() method to refuse to work at
+        # this point, which seems unnecessarily inconvenient (instead, how it
+        # works is that urls() simply doesn't call build() when debugging).
+        # Note: This can only happen if the Environment sets debug=True and
+        # nothing else overrides it.
+        if current_debug_level is True:
+            current_debug_level = False
+
+        # Put together a list of filters that we would want to run here.
+        # These will be the bundle's filters, and any extra filters given
+        # to us if the parent is a container bundle. Note we do not yet
+        # include input/open filters pushed down by a parent build iteration.
+        filters = merge_filters(self.filters, extra_filters)
+
+        # Initialize the filters. This happens before we choose which of
+        # them should actually run, so that Filter.setup() can influence
+        # this choice.
+        for filter in filters:
+            filter.set_context(ctx)
+            # Since we call this now every single time before the filter
+            # is used, we might pass the bundle instance it is going
+            # to be used with. For backwards-compatibility reasons, this
+            # is problematic. However, by inspecting the supported arguments,
+            # we can deal with it. We probably then want to deprecate
+            # the old syntax before 1.0 (TODO).
+            filter.setup()
+
+        # Given the debug level, determine which of the filters want to run
+        selected_filters = select_filters(filters, current_debug_level)
+
+        # We construct two lists of filters. The ones we want to use in this
+        # iteration, and the ones we want to pass down to child bundles.
+        # Why? Say we are in merge mode. Assume an "input()" filter which does
+        # not run in merge mode, and a child bundle that switches to
+        # debug=False. The child bundle then DOES want to run those input
+        # filters, so we do need to pass them.
+        filters_to_run = merge_filters(
+            selected_filters, select_filters(parent_filters, current_debug_level))
+        filters_to_pass_down = merge_filters(filters, parent_filters)
+
+        # Prepare contents
+        resolved_contents = self.resolve_contents(ctx, force=True)
+
+        # Unless we have been told by our caller to use or not use the cache
+        # for this, try to decide for ourselves. The issue here is that when a
+        # bundle has dependencies, like a sass file with includes otherwise not
+        # listed in the bundle sources, a change in such an external include
+        # would not influence the cache key, thus the use of the cache causing
+        # such a change to be ignored. For now, we simply do not use the cache
+        # for any bundle with dependencies. Another option would be to read
+        # the contents of all files declared via "depends", and use them as a
+        # cache key modifier. For now I am worried about the performance impact.
+        #
+        # Note: This decision only affects the current bundle instance. Even if
+        # dependencies cause us to ignore the cache for this bundle instance,
+        # child bundles may still use it!
+        actually_skip_cache_here = disable_cache or bool(self.resolve_depends(ctx))
+
+        filtertool = FilterTool(
+            ctx.cache, no_cache_read=actually_skip_cache_here,
+            kwargs={'output': output[0],
+                    'output_path': output[1]})
+
+        # Apply input()/open() filters to all the contents.
+        hunks = []
+        for item, cnt in resolved_contents:
+            if isinstance(cnt, Bundle):
+                # Recursively process nested bundles.
+                hunk = cnt._merge_and_apply(
+                    wrap(ctx, cnt), output, force, current_debug_level,
+                    filters_to_pass_down, disable_cache=disable_cache)
+                if hunk is not None:
+                    hunks.append((hunk, {}))
+
+            else:
+                # Give a filter the chance to open its file.
+                try:
+                    hunk = filtertool.apply_func(
+                        filters_to_run, 'open', [cnt],
+                        # Also pass along the original relative path, as
+                        # specified by the user, before resolving.
+                        kwargs={'source': item},
+                        # We still need to open the file ourselves too and use
+                        # its content as part of the cache key, otherwise this
+                        # filter application would only be cached by filename,
+                        # and changes in the source not detected. The other
+                        # option is to not use the cache at all here. Both have
+                        # different performance implications, but I'm guessing
+                        # that reading and hashing some files unnecessarily
+                        # very often is better than running filters
+                        # unnecessarily occasionally.
+                        cache_key=[FileHunk(cnt)] if not is_url(cnt) else [])
+                except MoreThanOneFilterError as e:
+                    raise BuildError(e)
+                except NoFilters:
+                    # Open the file ourselves.
+                    if is_url(cnt):
+                        hunk = UrlHunk(cnt, env=ctx)
+                    else:
+                        hunk = FileHunk(cnt)
+
+                # With the hunk, remember both the original relative
+                # path, as specified by the user, and the one that has
+                # been resolved to a filesystem location. We'll pass
+                # them along to various filter steps.
+                item_data = {'source': item, 'source_path': cnt}
+
+                # Run input filters, unless open() told us not to.
+                hunk = filtertool.apply(hunk, filters_to_run, 'input',
+                                        kwargs=item_data)
+                hunks.append((hunk, item_data))
+
+        # If this bundle is empty (if it has nested bundles, they did
+        # not yield any hunks either), return None to indicate so.
+        if len(hunks) == 0:
+            return None
+
+        # Merge the individual files together. There is an optional hook for
+        # a filter here, by implementing a concat() method.
+ try: + try: + final = filtertool.apply_func(filters_to_run, 'concat', [hunks]) + except MoreThanOneFilterError as e: + raise BuildError(e) + except NoFilters: + final = merge([h for h, _ in hunks]) + except IOError as e: + # IOErrors can be raised here if hunks are loaded for the + # first time. TODO: IOErrors can also be raised when + # a file is read during the filter-apply phase, but we don't + # convert it to a BuildError there... + raise BuildError(e) + + # Apply output filters. + # TODO: So far, all the situations where bundle dependencies are + # used/useful, are based on input filters having those dependencies. Is + # it even required to consider them here with respect to the cache? We + # might be able to run this operation with the cache on (the FilterTool + # being possibly configured with cache reads off). + return filtertool.apply(final, selected_filters, 'output') + + def _build(self, ctx, extra_filters=None, force=None, output=None, + disable_cache=None): + """Internal bundle build function. + + This actually tries to build this very bundle instance, as opposed to + the public-facing ``build()``, which first deals with the possibility + that we are a container bundle, i.e. having no files of our own. + + First checks whether an update for this bundle is required, via the + configured ``updater`` (which is almost always the timestamp-based one). + Unless ``force`` is given, in which case the bundle will always be + built, without considering timestamps. + + A ``FileHunk`` will be returned, or in a certain case, with no updater + defined and force=False, the return value may be ``False``. + + TODO: Support locking. When called from inside a template tag, this + should lock, so that multiple requests don't all start to build. When + called from the command line, there is no need to lock. + """ + extra_filters = extra_filters or [] + + if not self.output: + raise BuildError('No output target found for %s' % self) + + # Determine if we really need to build, or if the output file + # already exists and nothing has changed. + if force: + update_needed = True + elif not has_placeholder(self.output) and \ + not path.exists(self.resolve_output(ctx, self.output)): + update_needed = True + else: + update_needed = ctx.updater.needs_rebuild(self, ctx) \ + if ctx.updater else True + if update_needed==SKIP_CACHE: + disable_cache = True + + if not update_needed: + # We can simply return the existing output file + return FileHunk(self.resolve_output(ctx, self.output)) + + hunk = self._merge_and_apply( + ctx, [self.output, self.resolve_output(ctx, version='?')], + force, disable_cache=disable_cache, extra_filters=extra_filters) + if hunk is None: + raise BuildError('Nothing to build for %s, is empty' % self) + + if output: + # If we are given a stream, just write to it. + output.write(hunk.data()) + else: + if has_placeholder(self.output) and not ctx.versions: + raise BuildError(( + 'You have not set the "versions" option, but %s ' + 'uses a version placeholder in the output target' + % self)) + + version = None + if ctx.versions: + version = ctx.versions.determine_version(self, ctx, hunk) + + output_filename = self.resolve_output(ctx, version=version) + + # If it doesn't exist yet, create the target directory. 
+ output_dir = path.dirname(output_filename) + if not path.exists(output_dir): + os.makedirs(output_dir) + + hunk.save(output_filename) + self.version = version + + if ctx.manifest: + ctx.manifest.remember(self, ctx, version) + if ctx.versions and version: + # Hook for the versioner (for example set the timestamp of + # the file) to the actual version. + ctx.versions.set_version(self, ctx, output_filename, version) + + # The updater may need to know this bundle exists and how it + # has been last built, in order to detect changes in the + # bundle definition, like new source files. + if ctx.updater: + ctx.updater.build_done(self, ctx) + + return hunk + + def build(self, force=None, output=None, disable_cache=None): + """Build this bundle, meaning create the file given by the ``output`` + attribute, applying the configured filters etc. + + If the bundle is a container bundle, then multiple files will be built. + + Unless ``force`` is given, the configured ``updater`` will be used to + check whether a build is even necessary. + + If ``output`` is a file object, the result will be written to it rather + than to the filesystem. + + The return value is a list of ``FileHunk`` objects, one for each bundle + that was built. + """ + ctx = wrap(self.env, self) + hunks = [] + for bundle, extra_filters, new_ctx in self.iterbuild(ctx): + hunks.append(bundle._build( + new_ctx, extra_filters, force=force, output=output, + disable_cache=disable_cache)) + return hunks + + def iterbuild(self, ctx): + """Iterate over the bundles which actually need to be built. + + This will often only entail ``self``, though for container bundles + (and container bundle hierarchies), a list of all the non-container + leafs will be yielded. + + Essentially, what this does is "skip" bundles which do not need to be + built on their own (container bundles), and gives the caller the child + bundles instead. + + The return values are 3-tuples of (bundle, filter_list, new_ctx), with + the second item being a list of filters that the parent "container + bundles" this method is processing are passing down to the children. + """ + if self.is_container: + for bundle, _ in self.resolve_contents(ctx): + if bundle.is_container: + for child, child_filters, new_ctx in \ + bundle.iterbuild(wrap(ctx, bundle)): + yield ( + child, + merge_filters(child_filters, self.filters), + new_ctx) + else: + yield bundle, self.filters, wrap(ctx, bundle) + else: + yield self, [], ctx + + def _make_output_url(self, ctx): + """Return the output url, modified for expire header handling. + """ + + # Only query the version if we need to for performance + version = None + if has_placeholder(self.output) or ctx.url_expire != False: + # If auto-build is enabled, we must not use a cached version + # value, or we might serve old versions. + version = self.get_version(ctx, refresh=ctx.auto_build) + + url = self.output + if has_placeholder(url): + url = url % {'version': version} + url = ctx.resolver.resolve_output_to_url(ctx, url) + + if ctx.url_expire or ( + ctx.url_expire is None and not has_placeholder(self.output)): + url = "%s?%s" % (url, version) + return url + + def _urls(self, ctx, extra_filters, *args, **kwargs): + """Return a list of urls for this bundle, and all subbundles, + and, when it becomes necessary, start a build process. 
+        """
+        # Check if we should calculate SRI
+        calculate_sri = kwargs.pop('calculate_sri', False)
+
+        # Look at the debug value to see if this bundle should return the
+        # source urls (in debug mode), or a single url of the bundle in built
+        # form. Once a bundle needs to be built, all of its child bundles
+        # are built as well of course, so at this point we leave the urls()
+        # recursion and start a build() recursion.
+        debug = _effective_debug_level(ctx, self, extra_filters)
+        if debug == 'merge':
+            supposed_to_merge = True
+        elif debug is True:
+            supposed_to_merge = False
+        elif debug is False:
+            supposed_to_merge = True
+        else:
+            raise BundleError('Invalid debug value: %s' % debug)
+
+        # We will output a single url for this bundle unless a) the
+        # configuration tells us to output the source urls
+        # ("supposed_to_merge"), or b) this bundle isn't actually configured to
+        # be built, that is, has no filters and no output target.
+        if supposed_to_merge and (self.filters or self.output):
+            # With ``auto_build``, build the bundle to make sure the output is
+            # up to date; otherwise, we just assume the file already exists.
+            # (not wasting any IO ops)
+            if ctx.auto_build:
+                self._build(ctx, extra_filters=extra_filters, force=False,
+                            *args, **kwargs)
+            if calculate_sri:
+                return [{'uri': self._make_output_url(ctx),
+                         'sri': calculate_sri_on_file(ctx.resolver.resolve_output_to_path(ctx, self.output, self))}]
+            else:
+                return [self._make_output_url(ctx)]
+        else:
+            # We either have no files (nothing to build), or we are
+            # in debug mode: Instead of building the bundle, we
+            # source all of its contents.
+            urls = []
+            for org, cnt in self.resolve_contents(ctx):
+                if isinstance(cnt, Bundle):
+                    urls.extend(org._urls(
+                        wrap(ctx, cnt),
+                        merge_filters(extra_filters, self.filters),
+                        *args,
+                        calculate_sri=calculate_sri,
+                        **kwargs))
+                elif is_url(cnt):
+                    # Can't calculate SRI for a non-file
+                    if calculate_sri:
+                        urls.append({'uri': cnt, 'sri': None})
+                    else:
+                        urls.append(cnt)
+                else:
+                    sri = None
+                    try:
+                        url = ctx.resolver.resolve_source_to_url(ctx, cnt, org)
+                        if calculate_sri:
+                            sri = calculate_sri_on_file(ctx.resolver.resolve_output_to_path(ctx, cnt, org))
+                    except ValueError:
+                        # A ValueError is raised if we cannot generate a url
+                        # for a path outside the media directory. If that
+                        # happens, we copy the file into the media directory.
+                        external = pull_external(ctx, cnt)
+                        url = ctx.resolver.resolve_source_to_url(ctx, external, org)
+                        if calculate_sri:
+                            sri = calculate_sri_on_file(ctx.resolver.resolve_output_to_path(ctx, external, org))
+
+                    if calculate_sri:
+                        urls.append({'uri': url, 'sri': sri})
+                    else:
+                        urls.append(url)
+            return urls
+
+    def urls(self, *args, **kwargs):
+        """Return a list of urls for this bundle.
+
+        Depending on the environment and given options, this may be a single
+        url (likely the case in production mode), or many urls (when we source
+        the original media files in DEBUG mode).
+
+        Insofar as necessary, this will automatically create or update the
+        files behind these urls.
+
+        :param calculate_sri: Set to true to calculate a sub-resource integrity
+        string for the URLs. This changes the returned format.
+
+        :return: List of URIs if calculate_sri is False. If calculate_sri is
+        true: list of {'uri': '', 'sri': ''}.
+ """ + ctx = wrap(self.env, self) + urls = [] + for bundle, extra_filters, new_ctx in self.iterbuild(ctx): + urls.extend(bundle._urls(new_ctx, extra_filters, *args, **kwargs)) + return urls + + +def pull_external(ctx, filename): + """Helper which will pull ``filename`` into + :attr:`Environment.directory`, for the purposes of being able to + generate a url for it. + """ + + # Generate the target filename. Use a hash to keep it unique and short, + # but attach the base filename for readability. + # The bit-shifting rids us of ugly leading - characters. + hashed_filename = hash_func(filename) + rel_path = path.join('webassets-external', + "%s_%s" % (hashed_filename, path.basename(filename))) + full_path = path.join(ctx.directory, rel_path) + + # Copy the file if necessary + if path.isfile(full_path): + gs = lambda p: os.stat(p).st_mtime + if gs(full_path) > gs(filename): + return full_path + directory = path.dirname(full_path) + if not path.exists(directory): + os.makedirs(directory) + FileHunk(filename).save(full_path) + return full_path + + +def get_all_bundle_files(bundle, ctx=None): + """Return a flattened list of all source files of the given bundle, all + its dependencies, recursively for all nested bundles. + + Making this a helper function rather than a part of the official + Bundle feels right. + """ + if not ctx: + ctx = wrap(bundle.env, bundle) + if not isinstance(ctx, ContextWrapper): + ctx = ContextWrapper(ctx) + files = [] + for _, c in bundle.resolve_contents(ctx): + if isinstance(c, Bundle): + files.extend(get_all_bundle_files(c, wrap(ctx, c))) + elif not is_url(c): + files.append(c) + files.extend(bundle.resolve_depends(ctx)) + return files + + +def _effective_debug_level(ctx, bundle, extra_filters=None, default=None): + """This is a helper used both in the urls() and the build() recursions. + + It returns the debug level that this bundle, in a tree structure + of bundles, should use. It looks at any bundle-specific ``debug`` + attribute, considers an automatic upgrade to "merge" due to filters that + are present, and will finally use the value in the ``default`` argument, + which in turn defaults to ``env.debug``. + + It also ensures our rule that in a bundle hierarchy, the debug level may + only ever be lowered. Nested bundle may lower the level from ``True`` to + ``"merge"`` to ``False``, but never in the other direction. Which makes + sense: If a bundle is already being merged, we cannot start exposing the + source urls a child bundle, not if the correct order should be maintained. + + And while in theory it would seem possible to switch between full-out + production (debug=False) and ``"merge"``, the complexity there, in + particular with view as to how certain filter types like input() and + open() need to be applied to child bundles, is just not worth it. + """ + if default is None: + default = ctx.environment.debug + + if bundle.config.get('debug') is not None: + level = bundle.config.debug + else: + # If bundle doesn't force a level, then the presence of filters which + # declare they should always run puts the bundle automatically in + # merge mode. + filters = merge_filters(bundle.filters, extra_filters) + level = 'merge' if select_filters(filters, True) else None + + if level is not None: + # The new level must be lower than the older one. We do not thrown an + # error if this is NOT the case, but silently ignore it. This is so + # that a debug=True can be used to overwrite auto_debug_upgrade. + # Otherwise debug=True would always fail. 
+ if cmp_debug_levels(default, level) > 0: + return level + return default + + +has_files = lambda bundle: \ + any([c for c in bundle.contents if not isinstance(c, Bundle)]) diff --git a/pelican/plugins/webassets/vendor/webassets/cache.py b/pelican/plugins/webassets/vendor/webassets/cache.py new file mode 100644 index 0000000..60fabb5 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/cache.py @@ -0,0 +1,239 @@ +"""Caches are used for multiple things: + + - To speed up asset building. Filter operations every step + of the way can be cached, so that individual parts of a + build that haven't changed can be reused. + + - Bundle definitions are cached when a bundle is built so we + can determine whether they have changed and whether a rebuild + is required. + +This data is not all stored in the same cache necessarily. The +classes in this module provide the "environment.cache" object, but +also serve in other places. +""" + +import os +from os import path +import errno +import tempfile +import warnings +from webassets import six +from webassets.merge import BaseHunk +from webassets.filter import Filter, freezedicts +from webassets.utils import md5_constructor, pickle +import types + + +__all__ = ('FilesystemCache', 'MemoryCache', 'get_cache',) + + +def make_hashable(data): + """Ensures ``data`` can be hashed(). + + Mostly needs to support dict. The other special types we use + as hash keys (Hunks, Filters) already have a proper hash() method. + + See also ``make_md5``. + + Note that we do not actually hash the data for the memory cache. + """ + return freezedicts(data) + + +def make_md5(*data): + """Make a md5 hash based on``data``. + + Specifically, this knows about ``Hunk`` objects, and makes sure + the actual content is hashed. + + This is very conservative, and raises an exception if there are + data types that it does not explicitly support. This is because + we had in the past some debugging headaches with the cache not + working for this very reason. + + MD5 is faster than sha, and we don't care so much about collisions. + We care enough however not to use hash(). + """ + def walk(obj): + if isinstance(obj, (tuple, list, frozenset)): + for item in obj: + for d in walk(item): yield d + elif isinstance(obj, (dict)): + for k in sorted(obj.keys()): + for d in walk(k): yield d + for d in walk(obj[k]): yield d + elif isinstance(obj, BaseHunk): + yield obj.data().encode('utf-8') + elif isinstance(obj, int): + yield str(obj).encode('utf-8') + elif isinstance(obj, six.text_type): + yield obj.encode('utf-8') + elif isinstance(obj, six.binary_type): + yield obj + elif hasattr(obj, "id"): + for i in walk(obj.id()): + yield i + elif obj is None: + yield "None".encode('utf-8') + elif isinstance(obj, types.FunctionType): + yield str(hash(obj)).encode('utf-8') + else: + raise ValueError('Cannot MD5 type %s' % type(obj)) + md5 = md5_constructor() + for d in walk(data): + md5.update(d) + return md5.hexdigest() + + +def safe_unpickle(string): + """Unpickle the string, or return ``None`` if that fails.""" + try: + return pickle.loads(string) + except: + return None + + +class BaseCache(object): + """Abstract base class. + + The cache key must be something that is supported by the Python hash() + function. The cache value may be a string, or anything that can be pickled. 
+ + Since the cache is used for multiple purposes, all webassets-internal code + should always tag its keys with an id, like so: + + key = ("tag", actual_key) + + One cache instance can only be used safely with a single Environment. + """ + + def get(self, key): + """Should return the cache contents, or False. + """ + raise NotImplementedError() + + def set(self, key, value): + raise NotImplementedError() + + +class MemoryCache(BaseCache): + """Caches stuff in the process memory. + + WARNING: Do NOT use this in a production environment, where you + are likely going to have multiple processes serving the same app! + + Note that the keys are used as-is, not passed through hash() (which is + a difference: http://stackoverflow.com/a/9022664/15677). However, the + reason we don't is because the original value is nicer to debug. + """ + + def __init__(self, capacity): + self.capacity = capacity + self.keys = [] + self.cache = {} + + def __eq__(self, other): + """Return equality with the config values that instantiate + this instance. + """ + return False == other or \ + None == other or \ + id(self) == id(other) + + def get(self, key): + key = make_md5(make_hashable(key)) + return self.cache.get(key, None) + + def set(self, key, value): + key = make_md5(make_hashable(key)) + self.cache[key] = value + try: + self.keys.remove(key) + except ValueError: + pass + self.keys.append(key) + + # limit cache to the given capacity + to_delete = self.keys[0:max(0, len(self.keys)-self.capacity)] + self.keys = self.keys[len(to_delete):] + for item in to_delete: + del self.cache[item] + + +class FilesystemCache(BaseCache): + """Uses a temporary directory on the disk. + """ + + V = 2 # We have changed the cache format once + + def __init__(self, directory, new_file_mode=None): + self.directory = directory + self.new_file_mode = new_file_mode + + def __eq__(self, other): + """Return equality with the config values + that instantiate this instance. + """ + return True == other or \ + self.directory == other or \ + id(self) == id(other) + + def get(self, key): + filename = path.join(self.directory, '%s' % make_md5(self.V, key)) + try: + f = open(filename, 'rb') + except IOError as e: + if e.errno != errno.ENOENT: + raise + return None + try: + result = f.read() + finally: + f.close() + + unpickled = safe_unpickle(result) + if unpickled is None: + warnings.warn('Ignoring corrupted cache file %s' % filename) + return unpickled + + def set(self, key, data): + md5 = '%s' % make_md5(self.V, key) + filename = path.join(self.directory, md5) + fd, temp_filename = tempfile.mkstemp(prefix='.' + md5, + dir=self.directory) + try: + with os.fdopen(fd, 'wb') as f: + pickle.dump(data, f) + f.flush() + # If a non default mode is specified, then chmod the file to + # it before renaming it into place + if self.new_file_mode is not None: + os.chmod(temp_filename, self.new_file_mode) + if os.path.isfile(filename): + os.unlink(filename) + os.rename(temp_filename, filename) + except: + os.unlink(temp_filename) + raise + + +def get_cache(option, ctx): + """Return a cache instance based on ``option``. 
+    """
+    if not option:
+        return None
+
+    if isinstance(option, BaseCache):
+        return option
+    elif isinstance(option, type) and issubclass(option, BaseCache):
+        return option()
+
+    if option is True:
+        directory = path.join(ctx.directory, '.webassets-cache')
+        # Auto-create the default directory
+        if not path.exists(directory):
+            os.makedirs(directory)
+    else:
+        directory = option
+    return FilesystemCache(directory, ctx.cache_file_mode)
diff --git a/pelican/plugins/webassets/vendor/webassets/env.py b/pelican/plugins/webassets/vendor/webassets/env.py
new file mode 100644
index 0000000..90b8cd0
--- /dev/null
+++ b/pelican/plugins/webassets/vendor/webassets/env.py
@@ -0,0 +1,847 @@
+import os
+from os import path
+from itertools import chain
+from webassets import six
+from webassets.six.moves import map
+from webassets.utils import is_url
+
+try:
+    import glob2 as glob
+    from glob import has_magic
+except ImportError:
+    import glob
+    from glob import has_magic
+
+from .cache import get_cache
+from .version import get_versioner, get_manifest
+from .updater import get_updater
+from .utils import urlparse
+
+
+__all__ = ('Environment', 'RegisterError')
+
+
+class RegisterError(Exception):
+    pass
+
+
+class ConfigStorage(object):
+    """This is the backend which :class:`Environment` uses to store
+    its configuration values.
+
+    Environment-subclasses like the one used by ``django-assets`` will
+    often want to use a custom ``ConfigStorage`` as well, building upon
+    whatever configuration the framework is using.
+
+    The goal in designing this class therefore is to make it easy for
+    subclasses to change the place the data is stored: Only
+    :meth:`__getitem__`, :meth:`__setitem__`, :meth:`__delitem__` and
+    :meth:`__contains__` need to be implemented.
+
+    One rule: The default storage is case-insensitive, and custom
+    environments should maintain those semantics.
+
+    This is also the reason we don't inherit from ``dict``: it would
+    require us to re-implement a whole bunch of methods, like pop() etc.
+    """
+
+    def __init__(self, env):
+        self.env = env
+
+    def get(self, key, default=None):
+        try:
+            return self.__getitem__(key)
+        except KeyError:
+            return default
+
+    def update(self, d):
+        for key in d:
+            self.__setitem__(key, d[key])
+
+    def setdefault(self, key, value):
+        if key not in self:
+            self.__setitem__(key, value)
+            return value
+        return self.__getitem__(key)
+
+    def __contains__(self, key):
+        raise NotImplementedError()
+
+    def __getitem__(self, key):
+        raise NotImplementedError()
+
+    def __setitem__(self, key, value):
+        raise NotImplementedError()
+
+    def __delitem__(self, key):
+        raise NotImplementedError()
+
+    def _get_deprecated(self, key):
+        """For deprecated keys, fake the values as well as we can.
+        Subclasses need to call this in __getitem__."""
+        pass
+
+    def _set_deprecated(self, key, value):
+        """Same for __setitem__."""
+        pass
+
+
+def url_prefix_join(prefix, fragment):
+    """Join url prefix with fragment."""
+    # Ensures urljoin will not cut the last part.
+    prefix += prefix[-1:] != '/' and '/' or ''
+    return urlparse.urljoin(prefix, fragment)
+
+
+class Resolver(object):
+    """Responsible for resolving user-specified :class:`Bundle`
+    contents to actual files, as well as to urls.
+
+    In this base version, this is essentially responsible for searching
+    the load path for the queried file.
+
+    A custom implementation of this class is tremendously useful when
+    integrating with frameworks, which usually have some system to
+    spread static files across applications or modules.
+ + The class is designed for maximum extensibility. + """ + + def glob(self, basedir, expr): + """Evaluates a glob expression. + Yields a sorted list of absolute filenames. + """ + def glob_generator(basedir, expr): + expr = path.join(basedir, expr) + for filename in glob.iglob(expr): + if path.isdir(filename): + continue + yield path.normpath(filename) + + # The order of files returned by the glob implementation is undefined, + # so sort alphabetically to maintain a deterministic ordering + return sorted(glob_generator(basedir, expr)) + + def consider_single_directory(self, directory, item): + """Searches for ``item`` within ``directory``. Is able to + resolve glob instructions. + + Subclasses can call this when they have narrowed done the + location of a bundle item to a single directory. + """ + expr = path.join(directory, item) + if has_magic(expr): + # Note: No error if glob returns an empty list + return self.glob(directory, item) + else: + if path.exists(expr): + return expr + raise IOError("'%s' does not exist" % expr) + + def search_env_directory(self, ctx, item): + """This is called by :meth:`search_for_source` when no + :attr:`Environment.load_path` is set. + """ + return self.consider_single_directory(ctx.directory, item) + + def search_load_path(self, ctx, item): + """This is called by :meth:`search_for_source` when a + :attr:`Environment.load_path` is set. + + If you want to change how the load path is processed, + overwrite this method. + """ + if has_magic(item): + # We glob all paths. + result = [] + for path in ctx.load_path: + result.extend(self.glob(path, item)) + return result + else: + # Single file, stop when we find the first match, or error + # out otherwise. We still use glob() because then the load_path + # itself can contain globs. Neat! + for path in ctx.load_path: + result = self.glob(path, item) + if result: + return result + raise IOError("'%s' not found in load path: %s" % ( + item, ctx.load_path)) + + def search_for_source(self, ctx, item): + """Called by :meth:`resolve_source` after determining that + ``item`` is a relative filesystem path. + + You should always overwrite this method, and let + :meth:`resolve_source` deal with absolute paths, urls and + other types of items that a bundle may contain. + """ + if ctx.load_path: + return self.search_load_path(ctx, item) + else: + return self.search_env_directory(ctx, item) + + def query_url_mapping(self, ctx, filepath): + """Searches the environment-wide url mapping (based on the + urls assigned to each directory in the load path). Returns + the correct url for ``filepath``. + + Subclasses should be sure that they really want to call this + method, instead of simply falling back to ``super()``. + """ + # Build a list of dir -> url mappings + mapping = list(ctx.url_mapping.items()) + try: + mapping.append((ctx.directory, ctx.url)) + except EnvironmentError: + # Rarely, directory/url may not be set. That's ok. + pass + + # Make sure paths are absolute, normalized, and sorted by length + mapping = list(map( + lambda p_u: (path.normpath(path.abspath(p_u[0])), p_u[1]), + mapping)) + mapping.sort(key=lambda i: len(i[0]), reverse=True) + + needle = path.normpath(filepath) + for candidate, url in mapping: + if needle.startswith(candidate): + # Found it! + rel_path = needle[len(candidate) + 1:] + # If there are any subdirs in rel_path, ensure + # they use HTML-style path separators, in case + # the local OS (Windows!) 
has a different scheme
+                rel_path = rel_path.replace(os.sep, "/")
+                return url_prefix_join(url, rel_path)
+        raise ValueError('Cannot determine url for %s' % filepath)
+
+    def resolve_source(self, ctx, item):
+        """Given ``item`` from a Bundle's contents, this has to
+        return the final value to use, usually an absolute
+        filesystem path.
+
+        .. note::
+            It is also allowed to return urls and bundle instances
+            (or generally anything else the calling :class:`Bundle`
+            instance may be able to handle). Indeed this is the
+            reason why the name of this method does not imply a
+            return type.
+
+        The incoming item is usually a relative path, but may also be
+        an absolute path, or a url. These you will commonly want to
+        return unmodified.
+
+        This method is also allowed to resolve ``item`` to multiple
+        values, in which case a list should be returned. This is
+        commonly used if ``item`` includes glob instructions
+        (wildcards).
+
+        .. note::
+            Instead of this, subclasses should consider implementing
+            :meth:`search_for_source` instead.
+        """
+
+        # Pass through some things unscathed
+        if not isinstance(item, six.string_types):
+            # Don't stand in the way of custom values.
+            return item
+        if is_url(item) or path.isabs(item):
+            return item
+
+        return self.search_for_source(ctx, item)
+
+    def resolve_output_to_path(self, ctx, target, bundle):
+        """Given ``target``, this has to return the absolute
+        filesystem path to which the output file of ``bundle``
+        should be written.
+
+        ``target`` may be a relative or absolute path, and is
+        usually taken from the :attr:`Bundle.output` property.
+
+        If a version-placeholder is used (``%(version)s``), it is
+        still unresolved at this point.
+        """
+        return path.join(ctx.directory, target)
+
+    def resolve_source_to_url(self, ctx, filepath, item):
+        """Given the absolute filesystem path in ``filepath``, as
+        well as the original value from :attr:`Bundle.contents` which
+        resolved to this path, this must return the absolute url
+        through which the file is to be referenced.
+
+        Depending on the use case, either the ``filepath`` or the
+        ``item`` argument will be more helpful in generating the url.
+
+        This method should raise a ``ValueError`` if the url cannot
+        be determined.
+        """
+        return self.query_url_mapping(ctx, filepath)
+
+    def resolve_output_to_url(self, ctx, target):
+        """Given ``target``, this has to return the url through
+        which the output file can be referenced.
+
+        ``target`` may be a relative or absolute path, and is
+        usually taken from the :attr:`Bundle.output` property.
+
+        This is different from :meth:`resolve_source_to_url` in
+        that you are not passed along the result of
+        :meth:`resolve_output_to_path`. This is because in many
+        use cases, the filesystem is not available at the point
+        where the output url is needed (the media server may be on
+        a different machine).
+        """
+        if not path.isabs(target):
+            # If relative, output files are written to env.directory,
+            # thus we can simply base all values off of env.url.
+            return url_prefix_join(ctx.url, target)
+        else:
+            # If an absolute output path was specified, then search
+            # the url mappings.
+ return self.query_url_mapping(ctx, target) + + +class BundleRegistry(object): + + def __init__(self): + self._named_bundles = {} + self._anon_bundles = [] + + def __iter__(self): + return chain(six.itervalues(self._named_bundles), self._anon_bundles) + + def __getitem__(self, name): + return self._named_bundles[name] + + def __contains__(self, name): + return name in self._named_bundles + + def __len__(self): + return len(self._named_bundles) + len(self._anon_bundles) + + def __bool__(self): + return True + __nonzero__ = __bool__ # For Python 2 + + def register(self, name, *args, **kwargs): + """Register a :class:`Bundle` with the given ``name``. + + This can be called in multiple ways: + + - With a single :class:`Bundle` instance:: + + env.register('jquery', jquery_bundle) + + - With a dictionary, registering multiple bundles at once: + + bundles = {'js': js_bundle, 'css': css_bundle} + env.register(bundles) + + .. note:: + This is a convenient way to use a :doc:`loader `: + + env.register(YAMLLoader('assets.yaml').load_bundles()) + + - With many arguments, creating a new bundle on the fly:: + + env.register('all_js', jquery_bundle, 'common.js', + filters='rjsmin', output='packed.js') + """ + + from .bundle import Bundle + + # Register a dict + if isinstance(name, dict) and not args and not kwargs: + for name, bundle in name.items(): + self.register(name, bundle) + return + + if len(args) == 0: + raise TypeError('at least two arguments are required') + else: + if len(args) == 1 and not kwargs and isinstance(args[0], Bundle): + bundle = args[0] + else: + bundle = Bundle(*args, **kwargs) + + if not bundle.merge: + return self.decompose_bundle(name, bundle) + + if name in self._named_bundles: + if self._named_bundles[name] == bundle: + pass # ignore + else: + raise RegisterError('Another bundle is already registered ' + + 'as "%s": %s' % (name, self._named_bundles[name])) + else: + self._named_bundles[name] = bundle + bundle.env = self # take ownership + + return bundle + + def add(self, *bundles): + """Register a list of bundles with the environment, without + naming them. + + This isn't terribly useful in most cases. It exists primarily + because in some cases, like when loading bundles by searching + in templates for the use of an "assets" tag, no name is available. 
+ """ + for bundle in bundles: + self._anon_bundles.append(bundle) + bundle.env = self # take ownership + + def decompose_bundle(self, name, bundle): + from .bundle import Bundle + + if not bundle.output: + raise RegisterError('If `merge` is False, an output must be defined') + + for content in bundle.contents: + if isinstance(content, Bundle): + raise RegisterError('Nested bundles are not allowed when `merge` is False') + + bundle.env = self + bundles = [] + contents = bundle.resolve_contents() + for _, abspath in contents: + nb = self.register_decomposed(name, bundle, abspath) + bundles.append(nb) + + return bundles + + def register_decomposed(self, name, bundle, abspath): + from .bundle import Bundle + + relpath = path.relpath(abspath, self.directory) + basename = path.basename(relpath) + filename, _ = path.splitext(basename) + filepath, fileext = path.splitext(relpath) + new_name = path.join(name, basename) if name else basename + # The output might also contain `%(version)s` so I can't use + # the C-style method of string formatting + output = ( + bundle.output + .replace('%(name)s', filename) + .replace('%(path)s', filepath) + .replace('%(ext)s', fileext.strip('.')) + ) + new_bundle = Bundle( + relpath, + output=output, + filters=bundle.filters, + depends=bundle.depends, + remove_duplicates=bundle.remove_duplicates, + extra=bundle.extra, + ) + new_bundle._config = bundle._config + return self.register(new_name, new_bundle) + + +# Those are config keys used by the environment. Framework-wrappers may +# find this list useful if they desire to prefix those settings. For example, +# in Django, it would be ASSETS_DEBUG. Other config keys are encouraged to use +# their own namespacing, so they don't need to be prefixed. For example, a +# filter setting might be CSSMIN_BIN. +env_options = [ + 'directory', 'url', 'debug', 'cache', 'updater', 'auto_build', + 'url_expire', 'versions', 'manifest', 'load_path', 'url_mapping', + 'cache_file_mode' ] + + +class ConfigurationContext(object): + """Interface to the webassets configuration key-value store. + + This wraps the :class:`ConfigStorage`` interface and adds some + helpers. It allows attribute-access to the most important + settings, and transparently instantiates objects, such that + ``env.manifest`` gives you an object, even though the configuration + contains the string "json". + """ + + def __init__(self, storage): + self._storage = storage + + def append_path(self, path, url=None): + """Appends ``path`` to :attr:`load_path`, and adds a + corresponding entry to :attr:`url_mapping`. + """ + self.load_path.append(path) + if url: + self.url_mapping[path] = url + + def _set_debug(self, debug): + self._storage['debug'] = debug + def _get_debug(self): + return self._storage['debug'] + debug = property(_get_debug, _set_debug, doc= + """Enable/disable debug mode. Possible values are: + + ``False`` + Production mode. Bundles will be merged and filters applied. + ``True`` + Enable debug mode. Bundles will output their individual source + files. + *"merge"* + Merge the source files, but do not apply filters. + """) + + def _set_cache_file_mode(self, mode): + self._storage['cache_file_mode'] = mode + def _get_cache_file_mode(self): + return self._storage['cache_file_mode'] + cache_file_mode = property(_get_cache_file_mode, _set_cache_file_mode, doc= + """Controls the mode of files created in the cache. The default mode + is 0600. Follows standard unix mode. 
+    Possible values are any unix mode, e.g.:
+
+    ``0660``
+        Enable the group read+write bits
+
+    ``0666``
+        Enable world read+write bits
+
+    """)
+
+    def _set_cache(self, enable):
+        self._storage['cache'] = enable
+    def _get_cache(self):
+        cache = get_cache(self._storage['cache'], self)
+        if cache != self._storage['cache']:
+            self._storage['cache'] = cache
+        return cache
+    cache = property(_get_cache, _set_cache, doc=
+    """Controls the behavior of the cache. The cache will speed up rebuilding
+    of your bundles, by caching individual filter results. This can be
+    particularly useful while developing, if your bundles would otherwise take
+    a long time to rebuild.
+
+    Possible values are:
+
+    ``False``
+        Do not use the cache.
+
+    ``True`` (default)
+        Cache using default location, a ``.webassets-cache`` folder inside
+        :attr:`directory`.
+
+    *custom path*
+        Use the given directory as the cache directory.
+    """)
+
+    def _set_auto_build(self, value):
+        self._storage['auto_build'] = value
+    def _get_auto_build(self):
+        return self._storage['auto_build']
+    auto_build = property(_get_auto_build, _set_auto_build, doc=
+    """Controls whether bundles should be automatically built, and
+    rebuilt, when required (if set to ``True``), or whether they
+    must be built manually by the user, for example via a management
+    command.
+
+    This is a good setting to have enabled during debugging, and can
+    be very convenient for low-traffic sites in production as well.
+    However, there is a cost in checking whether the source files
+    have changed, so if you care about performance, or if your build
+    process takes very long, then you may want to disable this.
+
+    By default automatic building is enabled.
+    """)
+
+    def _set_manifest(self, manifest):
+        self._storage['manifest'] = manifest
+    def _get_manifest(self):
+        manifest = get_manifest(self._storage['manifest'], env=self)
+        if manifest != self._storage['manifest']:
+            self._storage['manifest'] = manifest
+        return manifest
+    manifest = property(_get_manifest, _set_manifest, doc=
+    """A manifest persists information about the versions bundles
+    are at.
+
+    The Manifest plays a role only if you insert the bundle version
+    in your output filenames, or append the version as a querystring
+    to the url (via the ``url_expire`` option). It serves two
+    purposes:
+
+        - Without a manifest, it may be impossible to determine the
+          version at runtime. In a deployed app, the media files may
+          be stored on a different server entirely, and be
+          inaccessible from the application code. The manifest,
+          if shipped with your application, is what still allows you
+          to construct the proper URLs.
+
+        - Even if it were possible to determine the version at
+          runtime without a manifest, it may be a costly process,
+          and using a manifest may give you better performance. If
+          you use a hash-based version for example, this hash would
+          need to be recalculated every time a new process is
+          started.
+
+    Valid values are:
+
+      ``"cache"`` (default)
+          The cache is used to remember version information. This
+          is useful to avoid recalculating the version hash.
+
+      ``"file:{path}"``
+          Stores version information in a file at {path}. If no
+          path is given, the manifest will be stored as
+          ``.webassets-manifest`` in ``Environment.directory``.
+
+      ``"json:{path}"``
+          Same as "file:{path}", but uses JSON to store the information.
+
+      ``False``, ``None``
+          No manifest is used.
+
+    Any custom manifest implementation.
+ """) + + def _set_versions(self, versions): + self._storage['versions'] = versions + def _get_versions(self): + versions = get_versioner(self._storage['versions']) + if versions != self._storage['versions']: + self._storage['versions'] = versions + return versions + versions = property(_get_versions, _set_versions, doc= + """Defines what should be used as a Bundle ``version``. + + A bundle's version is what is appended to URLs when the + ``url_expire`` option is enabled, and the version can be part + of a Bundle's output filename by use of the ``%(version)s`` + placeholder. + + Valid values are: + + ``timestamp`` + The version is determined by looking at the mtime of a + bundle's output file. + + ``hash`` (default) + The version is a hash over the output file's content. + + ``False``, ``None`` + Functionality that requires a version is disabled. This + includes the ``url_expire`` option, the ``auto_build`` + option, and support for the %(version)s placeholder. + + Any custom version implementation. + + """) + + def set_updater(self, updater): + self._storage['updater'] = updater + def get_updater(self): + updater = get_updater(self._storage['updater']) + if updater != self._storage['updater']: + self._storage['updater'] = updater + return updater + updater = property(get_updater, set_updater, doc= + """Controls how the ``auto_build`` option should determine + whether a bundle needs to be rebuilt. + + ``"timestamp"`` (default) + Rebuild bundles if the source file timestamp exceeds the existing + output file's timestamp. + + ``"always"`` + Always rebuild bundles (avoid in production environments). + + Any custom version implementation. + """) + + def _set_url_expire(self, url_expire): + self._storage['url_expire'] = url_expire + def _get_url_expire(self): + return self._storage['url_expire'] + url_expire = property(_get_url_expire, _set_url_expire, doc= + """If you send your assets to the client using a + *far future expires* header (to minimize the 304 responses + your server has to send), you need to make sure that assets + will be reloaded by the browser when they change. + + If this is set to ``True``, then the Bundle URLs generated by + webassets will have their version (see ``Environment.versions``) + appended as a querystring. + + An alternative approach would be to use the ``%(version)s`` + placeholder in the bundle output file. + + The default behavior (indicated by a ``None`` value) is to add + an expiry querystring if the bundle does not use a version + placeholder. + """) + + def _set_directory(self, directory): + self._storage['directory'] = directory + def _get_directory(self): + try: + return path.abspath(self._storage['directory']) + except KeyError: + raise EnvironmentError( + 'The environment has no "directory" configured') + directory = property(_get_directory, _set_directory, doc= + """The base directory to which all paths will be relative to, + unless :attr:`load_path` are given, in which case this will + only serve as the output directory. + + In the url space, it is mapped to :attr:`urls`. + """) + + def _set_url(self, url): + self._storage['url'] = url + def _get_url(self): + try: + return self._storage['url'] + except KeyError: + raise EnvironmentError( + 'The environment has no "url" configured') + url = property(_get_url, _set_url, doc= + """The url prefix used to construct urls for files in + :attr:`directory`. + + To define url spaces for other directories, see + :attr:`url_mapping`. 
+ """) + + def _set_load_path(self, load_path): + self._storage['load_path'] = load_path + def _get_load_path(self): + return self._storage['load_path'] + load_path = property(_get_load_path, _set_load_path, doc= + """An list of directories that will be searched for source files. + + If this is set, source files will only be looked for in these + directories, and :attr:`directory` is used as a location for + output files only. + + .. note: + You are free to add :attr:`directory` to your load path as + well. + + .. note: + Items on the load path are allowed to contain globs. + + To modify this list, you should use :meth:`append_path`, since + it makes it easy to add the corresponding url prefix to + :attr:`url_mapping`. + """) + + def _set_url_mapping(self, url_mapping): + self._storage['url_mapping'] = url_mapping + def _get_url_mapping(self): + return self._storage['url_mapping'] + url_mapping = property(_get_url_mapping, _set_url_mapping, doc= + """A dictionary of directory -> url prefix mappings that will + be considered when generating urls, in addition to the pair of + :attr:`directory` and :attr:`url`, which is always active. + + You should use :meth:`append_path` to add directories to the + load path along with their respective url spaces, instead of + modifying this setting directly. + """) + + def _set_resolver(self, resolver): + self._storage['resolver'] = resolver + def _get_resolver(self): + return self._storage['resolver'] + resolver = property(_get_resolver, _set_resolver) + + +class BaseEnvironment(BundleRegistry, ConfigurationContext): + """Abstract base class for :class:`Environment` with slightly more + generic assumptions, to ease subclassing. + """ + + config_storage_class = None + resolver_class = Resolver + + def __init__(self, **config): + BundleRegistry.__init__(self) + self._config = self.config_storage_class(self) + ConfigurationContext.__init__(self, self._config) + + # directory, url currently do not have default values + # + # some thought went into these defaults: + # - enable url_expire, because we want to encourage the right thing + # - default to hash versions, for the same reason: they're better + # - manifest=cache because hash versions are slow + self.config.setdefault('debug', False) + self.config.setdefault('cache', True) + self.config.setdefault('url_expire', None) + self.config.setdefault('auto_build', True) + self.config.setdefault('manifest', 'cache') + self.config.setdefault('versions', 'hash') + self.config.setdefault('updater', 'timestamp') + self.config.setdefault('load_path', []) + self.config.setdefault('url_mapping', {}) + self.config.setdefault('resolver', self.resolver_class()) + self.config.setdefault('cache_file_mode', None) + + self.config.update(config) + + @property + def config(self): + """Key-value configuration. Keys are case-insensitive. + """ + # This is a property so that user are not tempted to assign + # a custom dictionary which won't uphold our caseless semantics. + return self._config + + +class DictConfigStorage(ConfigStorage): + """Using a lower-case dict for configuration values. 
+ """ + def __init__(self, *a, **kw): + self._dict = {} + ConfigStorage.__init__(self, *a, **kw) + def __contains__(self, key): + return self._dict.__contains__(key.lower()) + def __getitem__(self, key): + key = key.lower() + value = self._get_deprecated(key) + if not value is None: + return value + return self._dict.__getitem__(key) + def __setitem__(self, key, value): + key = key.lower() + if not self._set_deprecated(key, value): + self._dict.__setitem__(key.lower(), value) + def __delitem__(self, key): + self._dict.__delitem__(key.lower()) + + +class Environment(BaseEnvironment): + """Owns a collection of bundles, and a set of configuration values which + will be used when processing these bundles. + """ + + config_storage_class = DictConfigStorage + + def __init__(self, directory=None, url=None, **more_config): + super(Environment, self).__init__(**more_config) + if directory is not None: + self.directory = directory + if url is not None: + self.url = url + + +def parse_debug_value(value): + """Resolve the given string value to a debug option. + + Can be used to deal with os environment variables, for example. + """ + if value is None: + return value + value = value.lower() + if value in ('true', '1'): + return True + elif value in ('false', '0'): + return False + elif value in ('merge',): + return 'merge' + else: + raise ValueError() diff --git a/pelican/plugins/webassets/vendor/webassets/exceptions.py b/pelican/plugins/webassets/vendor/webassets/exceptions.py new file mode 100644 index 0000000..a642c96 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/exceptions.py @@ -0,0 +1,32 @@ +__all__ = ('BundleError', 'BuildError', 'FilterError', + 'EnvironmentError', 'ImminentDeprecationWarning') + + +class EnvironmentError(Exception): + pass + + +class BundleError(Exception): + pass + + +class BuildError(BundleError): + pass + + +class FilterError(BuildError): + pass + + +class ImminentDeprecationWarning(Warning): + """Warning category for deprecated features, since the default + DeprecationWarning is silenced on Python 2.7+. + + With webassets mainly targeting developers working directly with + the library, it makes sense to force deprecation warnings on them. + There should be no end users who will be bothered with them. + + Plus, we tend to remove rather quickly, so it's important devs + get to see this. + """ + pass diff --git a/pelican/plugins/webassets/vendor/webassets/ext/__init__.py b/pelican/plugins/webassets/vendor/webassets/ext/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/pelican/plugins/webassets/vendor/webassets/ext/jinja2.py b/pelican/plugins/webassets/vendor/webassets/ext/jinja2.py new file mode 100644 index 0000000..defeb9e --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/ext/jinja2.py @@ -0,0 +1,255 @@ +from __future__ import absolute_import + +import warnings +import jinja2 +from jinja2.ext import Extension +from jinja2 import nodes +from webassets import Bundle +from webassets.loaders import GlobLoader, LoaderError +from webassets.exceptions import ImminentDeprecationWarning + + +__all__ = ('assets', 'Jinja2Loader',) + + +class AssetsExtension(Extension): + """ + As opposed to the Django tag, this tag is slightly more capable due + to the expressive powers inherited from Jinja. For example: + + {% assets "src1.js", "src2.js", get_src3(), + filter=("jsmin", "gzip"), output=get_output() %} + {% endassets %} + """ + + tags = set(['assets']) + + BundleClass = Bundle # Helpful for mocking during tests. 
+
+    def __init__(self, environment):
+        super(AssetsExtension, self).__init__(environment)
+
+        # Add the defaults to the environment
+        environment.extend(
+            assets_environment=None,
+        )
+
+    def parse(self, parser):
+        lineno = next(parser.stream).lineno
+
+        files = []
+        output = nodes.Const(None)
+        filters = nodes.Const(None)
+        dbg = nodes.Const(None)
+        depends = nodes.Const(None)
+
+        # Parse the arguments
+        first = True
+        while parser.stream.current.type != 'block_end':
+            if not first:
+                parser.stream.expect('comma')
+            first = False
+
+            # Lookahead to see if this is an assignment (an option)
+            if parser.stream.current.test('name') and parser.stream.look().test('assign'):
+                name = next(parser.stream).value
+                parser.stream.skip()
+                value = parser.parse_expression()
+                if name == 'filters':
+                    filters = value
+                elif name == 'filter':
+                    filters = value
+                    warnings.warn('The "filter" option of the {%% assets %%} '
+                                  'template tag has been renamed to '
+                                  '"filters" for consistency reasons '
+                                  '(line %s).' % lineno,
+                                  ImminentDeprecationWarning)
+                elif name == 'output':
+                    output = value
+                elif name == 'debug':
+                    dbg = value
+                elif name == 'depends':
+                    depends = value
+                else:
+                    parser.fail('Invalid keyword argument: %s' % name)
+            # Otherwise assume a source file is given, which may be any
+            # expression, except note that strings are handled separately above.
+            else:
+                expression = parser.parse_expression()
+                if isinstance(expression, (nodes.List, nodes.Tuple)):
+                    files.extend(expression.iter_child_nodes())
+                else:
+                    files.append(expression)
+
+        # Parse the contents of this tag
+        body = parser.parse_statements(['name:endassets'], drop_needle=True)
+
+        # We want to make some values available to the body of our tag.
+        # Specifically, the file url(s) (ASSET_URL), and any extra dict set in
+        # the bundle (EXTRA).
+        #
+        # A short interlude: I would have preferred to make the values of the
+        # extra dict available directly. Unfortunately, the way Jinja2 does
+        # things makes this problematic. I'll explain.
+        #
+        # Jinja2 generates Python code from its AST, which it then executes.
+        # So the way extensions implement making custom variables available to
+        # a block of code is by generating a ``CallBlock``, which essentially
+        # wraps our child nodes in a Python function. The arguments of this
+        # function are the values that are available to our tag contents.
+        #
+        # But we need to generate this ``CallBlock`` now, during parsing, and
+        # right now we don't know the actual ``Bundle.extra`` values yet. We
+        # only resolve the bundle during rendering!
+        #
+        # This would easily be solved if Jinja2 were to allow extensions to
+        # scope its context, which is a dict of values that templates can
+        # access, just like in Django (you might see on occasion
+        # ``context.resolve('foo')`` calls in Jinja2's generated code).
+        # However, it seems the context is essentially only for the initial
+        # set of data passed to render(). There are some statements by Armin
+        # that this might change at some point, but I've run into this problem
+        # before, and I'm not holding my breath.
+        #
+        # I **really** did try to get around this, including crazy things like
+        # inserting custom Python code by patching the tag child nodes::
+        #
+        #     rv = object.__new__(nodes.InternalName)
+        #     # l_EXTRA is the argument we defined for the CallBlock/Macro
+        #     # Letting Jinja define l_kwargs is also possible
+        #     nodes.Node.__init__(rv, '; context.vars.update(l_EXTRA)',
+        #                         lineno=lineno)
+        #     # Scope required to ensure our code on top
+        #     body = [rv, nodes.Scope(body)]
+        #
+        # This custom code would run at the top of the function in which the
+        # CallBlock node would wrap the code generated from our tag's child
+        # nodes. Note that it actually does work, but doesn't clear the values
+        # at the end of the scope.
+        #
+        # Even if it is possible to do this, it certainly isn't reasonable.
+        #
+        # There is of course another option altogether: Simply resolve the tag
+        # definition to a bundle right here and now, thus getting access to the
+        # extra dict, and make all values arguments to the CallBlock (limited
+        # to 255 arguments to a Python function!). And while that would work
+        # fine in 99% of cases, it wouldn't be correct. The compiled template
+        # could be cached and used with different bundles/environments, and
+        # this would require the bundle to resolve at parse time, and hardcode
+        # its extra values.
+        #
+        # Interlude end.
+        #
+        # Summary: We have to be satisfied with a single EXTRA variable.
+        args = [nodes.Name('ASSET_URL', 'param'),
+                nodes.Name('ASSET_SRI', 'param'),
+                nodes.Name('EXTRA', 'param')]
+
+        # Return a ``CallBlock``, which means Jinja2 will call a Python method
+        # of ours when the tag needs to be rendered. That method can then
+        # render the template body.
+        call = self.call_method(
+            # Note: Changing the args here requires updating ``Jinja2Loader``
+            '_render_assets', args=[filters, output, dbg, depends, nodes.List(files)])
+        call_block = nodes.CallBlock(call, args, [], body)
+        call_block.set_lineno(lineno)
+        return call_block
+
+    @classmethod
+    def resolve_contents(cls, contents, env):
+        """Resolve bundle names."""
+        result = []
+        for f in contents:
+            try:
+                result.append(env[f])
+            except KeyError:
+                result.append(f)
+        return result
+
+    def _render_assets(self, filter, output, dbg, depends, files, caller=None):
+        env = self.environment.assets_environment
+        if env is None:
+            raise RuntimeError('No assets environment configured in '
+                               'Jinja2 environment')
+
+        # Construct a bundle with the given options
+        bundle_kwargs = {
+            'output': output,
+            'filters': filter,
+            'debug': dbg,
+            'depends': depends
+        }
+        bundle = self.BundleClass(
+            *self.resolve_contents(files, env), **bundle_kwargs)
+
+        # Retrieve urls (this may or may not cause a build)
+        with bundle.bind(env):
+            urls = bundle.urls(calculate_sri=True)
+
+        # For each url, execute the content of this template tag (represented
+        # by the macro ``caller`` given to us by Jinja2).
+        result = u""
+        for entry in urls:
+            if isinstance(entry, dict):
+                result += caller(entry['uri'], entry.get('sri', None), bundle.extra)
+            else:
+                result += caller(entry, None, bundle.extra)
+        return result
+
+
+assets = AssetsExtension  # nicer import name
+
+
+class Jinja2Loader(GlobLoader):
+    """Parses all the Jinja2 templates in the given directories, trying
+    to find bundles in active use.
+
+    Tries each of the given Jinja2 environments to parse the template,
+    until one succeeds.
+ """ + + def __init__(self, assets_env, directories, jinja2_envs, charset='utf8', jinja_ext='*.html'): + self.asset_env = assets_env + self.directories = directories + self.jinja2_envs = jinja2_envs + self.charset = charset + self.jinja_ext = jinja_ext + + def load_bundles(self): + bundles = [] + for template_dir in self.directories: + for filename in self.glob_files((template_dir, self.jinja_ext)): + bundles.extend(self.with_file(filename, self._parse) or []) + return bundles + + def _parse(self, filename, contents): + for i, env in enumerate(self.jinja2_envs): + try: + t = env.parse(contents.decode(self.charset)) + except jinja2.exceptions.TemplateSyntaxError as e: + #print ('jinja parser (env %d) failed: %s'% (i, e)) + pass + else: + result = [] + def _recurse_node(node_to_search): + for node in node_to_search.iter_child_nodes(): + if isinstance(node, jinja2.nodes.Call): + if isinstance(node.node, jinja2.nodes.ExtensionAttribute)\ + and node.node.identifier == AssetsExtension.identifier: + filter, output, dbg, depends, files = node.args + bundle = Bundle( + *AssetsExtension.resolve_contents(files.as_const(), self.asset_env), + **{ + 'output': output.as_const(), + 'depends': depends.as_const(), + 'filters': filter.as_const()}) + result.append(bundle) + else: + _recurse_node(node) + for node in t.iter_child_nodes(): + _recurse_node(node) + return result + else: + raise LoaderError('Jinja parser failed on %s, tried %d environments' % ( + filename, len(self.jinja2_envs))) + return False diff --git a/pelican/plugins/webassets/vendor/webassets/filter/__init__.py b/pelican/plugins/webassets/vendor/webassets/filter/__init__.py new file mode 100644 index 0000000..146f1a4 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/__init__.py @@ -0,0 +1,737 @@ +"""Assets can be filtered through one or multiple filters, modifying their +contents (think minification, compression). +""" + +from __future__ import with_statement + +import os +import subprocess +import inspect +import shlex +import tempfile +import pkgutil +from webassets import six +from webassets.six.moves import map +from webassets.six.moves import zip +try: + frozenset +except NameError: + from sets import ImmutableSet as frozenset +from webassets.exceptions import FilterError +from webassets.importlib import import_module +from webassets.utils import hash_func + + +__all__ = ('Filter', 'CallableFilter', 'get_filter', 'register_filter', + 'ExternalTool', 'JavaTool') + + +def freezedicts(obj): + """Recursively iterate over ``obj``, supporting dicts, tuples + and lists, and freeze ``dicts`` such that ``obj`` can be used + with hash(). + """ + if isinstance(obj, (list, tuple)): + return type(obj)([freezedicts(sub) for sub in obj]) + if isinstance(obj, dict): + return frozenset(six.iteritems(obj)) + return obj + + +def smartsplit(string, sep): + """Split while allowing escaping. + + So far, this seems to do what I expect - split at the separator, + allow escaping via \\, and allow the backslash itself to be escaped. + + One problem is that it can raise a ValueError when given a backslash + without a character to escape. I'd really like a smart splitter + without manually scan the string. But maybe that is exactly what should + be done. 
+ """ + assert string is not None # or shlex will read from stdin + if not six.PY3: + # On 2.6, shlex fails miserably with unicode input + is_unicode = isinstance(string, unicode) + if is_unicode: + string = string.encode('utf8') + l = shlex.shlex(string, posix=True) + l.whitespace += ',' + l.whitespace_split = True + l.quotes = '' + if not six.PY3 and is_unicode: + return map(lambda s: s.decode('utf8'), list(l)) + else: + return list(l) + + +class option(tuple): + """Micro option system. I want this to remain small and simple, + which is why this class is lower-case. + + See ``parse_options()`` and ``Filter.options``. + """ + def __new__(cls, initarg, configvar=None, type=None): + # If only one argument given, it is the configvar + if configvar is None: + configvar = initarg + initarg = None + return tuple.__new__(cls, (initarg, configvar, type)) + + +def parse_options(options): + """Parses the filter ``options`` dict attribute. + The result is a dict of ``option`` tuples. + """ + # Normalize different ways to specify the dict items: + # attribute: option() + # attribute: ('__init__ arg', 'config variable') + # attribute: ('config variable,') + # attribute: 'config variable' + result = {} + for internal, external in options.items(): + if not isinstance(external, option): + if not isinstance(external, (list, tuple)): + external = (external,) + external = option(*external) + result[internal] = external + return result + + +class Filter(object): + """Base class for a filter. + + Subclasses should allow the creation of an instance without any + arguments, i.e. no required arguments for __init__(), so that the + filter can be specified by name only. In fact, the taking of + arguments will normally be the exception. + """ + + # Name by which this filter can be referred to. + name = None + + # Options the filter supports. The base class will ensure that + # these are both accepted by __init__ as kwargs, and may also be + # defined in the environment config, or the OS environment (i.e. + # a setup() implementation will be generated which uses + # get_config() calls). + # + # Can look like this: + # options = { + # 'binary': 'COMPASS_BINARY', + # 'plugins': option('COMPASS_PLUGINS', type=list), + # } + options = {} + + # The maximum debug level under which this filter should run. + # Most filters only run in production mode (debug=False), so this is the + # default value. However, a filter like ``cssrewrite`` needs to run in + # ``merge`` mode. Further, compiler-type filters (like less/sass) would + # say ``None``, indicating that they have to run **always**. + # There is an interesting and convenient twist here: If you use such a + # filter, the bundle will automatically be merged, even in debug mode. + # It couldn't work any other way of course, the output needs to be written + # somewhere. If you have other files that do not need compiling, and you + # don't want them pulled into the merge, you can use a nested bundle with + # it's own output target just for those files that need the compilation. + max_debug_level = False + + def __init__(self, **kwargs): + self.ctx = None + self._options = parse_options(self.__class__.options) + + # Resolve options given directly to the filter. This + # allows creating filter instances with options that + # deviate from the global default. + # TODO: can the metaclass generate a init signature? 
+ for attribute, (initarg, _, _) in self._options.items(): + arg = initarg if initarg is not None else attribute + if arg in kwargs: + setattr(self, attribute, kwargs.pop(arg)) + else: + setattr(self, attribute, None) + if kwargs: + raise TypeError('got an unexpected keyword argument: %s' % + list(kwargs.keys())[0]) + + def __eq__(self, other): + if isinstance(other, Filter): + return self.id() == other.id() + return NotImplemented + + def set_context(self, ctx): + """This is called before the filter is used.""" + self.ctx = ctx + + def get_config(self, setting=False, env=None, require=True, + what='dependency', type=None): + """Helper function that subclasses can use if they have + dependencies which they cannot automatically resolve, like + an external binary. + + Using this function will give the user the ability to resolve + these dependencies in a common way through either a Django + setting, or an environment variable. + + You may specify different names for ``setting`` and ``env``. + If only the former is given, the latter is considered to use + the same name. If either argument is ``False``, the respective + source is not used. + + By default, if the value is not found, an error is raised. If + ``required`` is ``False``, then ``None`` is returned instead. + + ``what`` is a string that is used in the exception message; + you can use it to give the user an idea what he is lacking, + i.e. 'xyz filter binary'. + + Specifying values via the OS environment is obviously limited. If + you are expecting a special type, you may set the ``type`` argument + and a value from the OS environment will be parsed into that type. + Currently only ``list`` is supported. + """ + assert type in (None, list), "%s not supported for type" % type + + if env is None: + env = setting + + assert setting or env + + value = None + if not setting is False: + value = self.ctx.get(setting, None) + + if value is None and not env is False: + value = os.environ.get(env) + if value is not None: + if not six.PY3: + # TODO: What charset should we use? What does Python 3 use? + value = value.decode('utf8') + if type == list: + value = smartsplit(value, ',') + + if value is None and require: + err_msg = '%s was not found. Define a ' % what + options = [] + if setting: + options.append('%s setting' % setting) + if env: + options.append('%s environment variable' % env) + err_msg += ' or '.join(options) + raise EnvironmentError(err_msg) + return value + + def unique(self): + """This function is used to determine if two filter instances + represent the same filter and can be merged. Only one of the + filters will be applied. + + If your filter takes options, you might want to override this + and return a hashable object containing all the data unique + to your current instance. This will allow your filter to be applied + multiple times with differing values for those options. + """ + return False + + def id(self): + """Unique identifier for the filter instance. + + Among other things, this is used as part of the caching key. + It should therefore not depend on instance data, but yield + the same result across multiple python invocations. + """ + # freezedicts() allows filters to return dict objects as part + # of unique(), which are not per-se supported by hash(). + return hash_func((self.name, freezedicts(self.unique()),)) + + def setup(self): + """Overwrite this to have the filter do initial setup work, + like determining whether required modules are available etc. 
+ + Since this will only be called when the user actually + attempts to use the filter, you can raise an error here if + dependencies are not matched. + + Note: In most cases, it should be enough to simply define + the ``options`` attribute. If you override this method and + want to use options as well, don't forget to call super(). + + Note: This may be called multiple times if one filter instance + is used with different asset environment instances. + """ + for attribute, (_, configvar, type) in self._options.items(): + if not configvar: + continue + if getattr(self, attribute) is None: + # No value specified for this filter instance , + # specifically attempt to load it from the environment. + setattr(self, attribute, + self.get_config(setting=configvar, require=False, + type=type)) + + def input(self, _in, out, **kw): + """Implement your actual filter here. + + This will be called for every source file. + """ + + def output(self, _in, out, **kw): + """Implement your actual filter here. + + This will be called for every output file. + """ + + def open(self, out, source_path, **kw): + """Implement your actual filter here. + + This is like input(), but only one filter may provide this. + Use this if your filter needs to read from the source file + directly, and would ignore any processing by earlier filters. + """ + + def concat(self, out, hunks, **kw): + """Implement your actual filter here. + + Will be called once between the input() and output() + steps, and should concat all the source files (given as hunks) + together, writing the result to the ``out`` stream. + + Only one such filter is allowed. + """ + + def get_additional_cache_keys(self, **kw): + """Additional cache keys dependent on keyword arguments. + + If your filter's output is dependent on some or all of the + keyword arguments, you can return these arguments here as a list. + This will make sure the caching behavior is correct. + + For example, the CSSRewrite filter depends not only on the + contents of the file it applies to, but also the output path + of the final file. If the CSSRewrite filter doesn't correctly + override this method, a certain output file with a certain base + directory might potentially get a CSSRewriten file from cache + that is meant for an output file in a different base directory. + """ + + return [] + + # We just declared those for demonstration purposes + del input + del output + del open + del concat + + +class CallableFilter(Filter): + """Helper class that create a simple filter wrapping around + callable. + """ + + def __init__(self, callable): + super(CallableFilter, self).__init__() + self.callable = callable + + def unique(self): + # XXX This means the cache will never work for those filters. + # This is actually a deeper problem: Originally unique() was + # used to remove duplicate filters. Now it is also for the cache + # key. The latter would benefit from ALL the filter's options being + # included. Possibly this might just be what we should do, at the + # expense of the "remove duplicates" functionality (because it + # is never really needed anyway). It's also illdefined when a filter + # should be a removable duplicate - most options probably SHOULD make + # a filter no longer being considered duplicate. + return self.callable + + def output(self, _in, out, **kw): + return self.callable(_in, out) + + +class ExternalToolMetaclass(type): + def __new__(cls, name, bases, attrs): + # First, determine the method defined for this very class. 
We + # need to pop the ``method`` attribute from ``attrs``, so that we + # create the class without the argument; allowing us then to look + # at a ``method`` attribute that parents may have defined. + # + # method defaults to 'output' if argv is set, to "implement + # no default method" without an argv. + if not 'method' in attrs and 'argv' in attrs: + chosen = 'output' + else: + chosen = attrs.pop('method', False) + + # Create the class first, since this helps us look at any + # method attributes defined in the parent hierarchy. + klass = type.__new__(cls, name, bases, attrs) + parent_method = getattr(klass, 'method', None) + + # Assign the method argument that we initially popped again. + klass.method = chosen + + try: + # Don't do anything for this class itself + ExternalTool + except NameError: + return klass + + # If the class already has a method attribute, this indicates + # that a parent class already dealt with it and enabled/disabled + # the methods, and we won't again. + if parent_method is not None: + return klass + + methods = ('output', 'input', 'open') + + if chosen is not None: + assert not chosen or chosen in methods, \ + '%s not a supported filter method' % chosen + # Disable those methods not chosen. + for m in methods: + if m != chosen: + # setdefault = Don't override actual methods the + # class has in fact provided itself. + if not m in klass.__dict__: + setattr(klass, m, None) + + return klass + + +class ExternalTool(six.with_metaclass(ExternalToolMetaclass, Filter)): + """Subclass that helps creating filters that need to run an external + program. + + You are encouraged to use this when possible, as it helps consistency. + + In the simplest possible case, subclasses only have to define one or more + of the following attributes, without needing to write any code: + + ``argv`` + The command line that will be passed to subprocess.Popen. New-style + format strings can be used to access all kinds of data: The arguments + to the filter method, as well as the filter instance via ``self``: + + argv = ['{self.binary}', '--input', '{source_path}', '--cwd', + '{self.env.directory}'] + + ``method`` + The filter method to implement. One of ``input``, ``output`` or + ``open``. + """ + + argv = [] + method = None + + def open(self, out, source_path, **kw): + self._evaluate([out, source_path], kw, out) + + def input(self, _in, out, **kw): + self._evaluate([_in, out], kw, out, _in) + + def output(self, _in, out, **kw): + self._evaluate([_in, out], kw, out, _in) + + def _evaluate(self, args, kwargs, out, data=None): + # For now, still support Python 2.5, but the format strings in argv + # are not supported (making the feature mostly useless). For this + # reason none of the builtin filters is using argv currently. + if hasattr(str, 'format'): + # Add 'self' to the keywords available in format strings + kwargs = kwargs.copy() + kwargs.update({'self': self}) + + # Resolve all the format strings in argv + def replace(arg): + try: + return arg.format(*args, **kwargs) + except KeyError as e: + # Treat "output" and "input" variables special, they + # are dealt with in :meth:`subprocess` instead. + if e.args[0] not in ('input', 'output'): + raise + return arg + argv = list(map(replace, self.argv)) + else: + argv = self.argv + self.subprocess(argv, out, data=data) + + @classmethod + def subprocess(cls, argv, out, data=None, cwd=None): + """Execute the commandline given by the list in ``argv``. + + If a byestring is given via ``data``, it is piped into data. 
+ + If ``cwd`` is not None, the process will be executed in that directory. + + ``argv`` may contain two placeholders: + + ``{input}`` + If given, ``data`` will be written to a temporary file instead + of data. The placeholder is then replaced with that file. + + ``{output}`` + Will be replaced by a temporary filename. The return value then + will be the content of this file, rather than stdout. + """ + + class tempfile_on_demand(object): + def __repr__(self): + if not hasattr(self, 'filename'): + fd, self.filename = tempfile.mkstemp() + os.close(fd) + return self.filename + + @property + def created(self): + return hasattr(self, 'filename') + + # Replace input and output placeholders + input_file = tempfile_on_demand() + output_file = tempfile_on_demand() + if hasattr(str, 'format'): # Support Python 2.5 without the feature + argv = list(map(lambda item: + item.format(input=input_file, output=output_file), argv)) + + try: + data = (data.read() if hasattr(data, 'read') else data) + if data is not None: + data = data.encode('utf-8') + + if input_file.created: + if data is None: + raise ValueError( + '{input} placeholder given, but no data passed') + with open(input_file.filename, 'wb') as f: + f.write(data) + # No longer pass to stdin + data = None + try: + proc = subprocess.Popen( + argv, + # we cannot use the in/out streams directly, as they might be + # StringIO objects (which are not supported by subprocess) + stdout=subprocess.PIPE, + stdin=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=cwd, + shell=os.name == 'nt') + except OSError: + raise FilterError('Program file not found: %s.' % argv[0]) + stdout, stderr = proc.communicate(data) + if proc.returncode: + raise FilterError( + '%s: subprocess returned a non-success result code: ' + '%s, stdout=%s, stderr=%s' % ( + cls.name or cls.__name__, + proc.returncode, + stdout.decode('utf-8').strip(), + stderr.decode('utf-8').strip())) + else: + if output_file.created: + with open(output_file.filename, 'rb') as f: + out.write(f.read().decode('utf-8')) + else: + if isinstance(stdout, bytes): + out.write(stdout.decode('utf-8')) + else: + out.write(stdout) + finally: + if output_file.created: + os.unlink(output_file.filename) + if input_file.created: + os.unlink(input_file.filename) + + @classmethod + def parse_binary(cls, string): + r""" + Parse a string for a binary (executable). Allow multiple arguments + to indicate the binary (as parsed by shlex). + + Return a list of arguments suitable for passing to subprocess + functions. + + >>> ExternalTool.parse_binary('/usr/bin/lessc') + ['/usr/bin/lessc'] + + >>> ExternalTool.parse_binary('node node_modules/bin/lessc') + ['node', 'node_modules/bin/lessc'] + + >>> ExternalTool.parse_binary('"binary with spaces"') + ['binary with spaces'] + + >>> ExternalTool.parse_binary(r'binary\ with\ spaces') + ['binary with spaces'] + + >>> ExternalTool.parse_binary('') + [] + """ + return shlex.split(string) + + +class JavaTool(ExternalTool): + """Helper class for filters which are implemented as Java ARchives (JARs). + + The subclass is expected to define a ``jar`` attribute in :meth:`setup`. + + If the ``argv`` definition is used, it is expected to contain only the + arguments to be passed to the Java tool. The path to the java binary and + the jar file are added by the base class. + """ + + method = None + + def setup(self): + super(JavaTool, self).setup() + + # We can reasonably expect that java is just on the path, so + # don't require it, but hope for the best. 
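+        # If JAVA_HOME is set, use its bin/java; otherwise fall back to
+        # whatever 'java' resolves to on the system path.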
+        path = self.get_config(env='JAVA_HOME', require=False)
+        if path is not None:
+            self.java_bin = os.path.join(path, 'bin/java')
+        else:
+            self.java_bin = 'java'
+
+    def subprocess(self, args, out, data=None):
+        ExternalTool.subprocess(
+            [self.java_bin, '-jar', self.jar] + args, out, data)
+
+
+_FILTERS = {}
+
+
+def register_filter(f):
+    """Add the given filter to the list of known filters.
+    """
+    if not issubclass(f, Filter):
+        raise ValueError("Must be a subclass of 'Filter'")
+    if not f.name:
+        raise ValueError('Must have a name')
+    _FILTERS[f.name] = f
+
+
+def get_filter(f, *args, **kwargs):
+    """Resolves ``f`` to a filter instance.
+
+    Different ways of specifying a filter are supported, for example by
+    giving the class, or a filter name.
+
+    *args and **kwargs are passed along to the filter when it's
+    instantiated.
+    """
+    if isinstance(f, Filter):
+        # Don't need to do anything.
+        assert not args and not kwargs
+        return f
+    elif isinstance(f, six.string_types):
+        if f in _FILTERS:
+            klass = _FILTERS[f]
+        else:
+            raise ValueError('No filter \'%s\'' % f)
+    elif inspect.isclass(f) and issubclass(f, Filter):
+        klass = f
+    elif callable(f):
+        assert not args and not kwargs
+        return CallableFilter(f)
+    else:
+        raise ValueError('Unable to resolve to a filter: %s' % f)
+
+    return klass(*args, **kwargs)
+
+CODE_FILES = ['.py', '.pyc', '.so']
+
+
+def is_module(name):
+    """Is this a recognized module type?
+
+    Does this name end in one of the recognized CODE_FILES extensions?
+
+    The file is assumed to exist, as unique_modules has found it using
+    an os.listdir() call.
+
+    Returns the name with the extension stripped (the module name), or
+    None if the name does not appear to be a module.
+    """
+    for ext in CODE_FILES:
+        if name.endswith(ext):
+            return name[:-len(ext)]
+
+
+def is_package(directory):
+    """Is the (fully qualified) directory a python package?
+    """
+    for ext in ['.py', '.pyc']:
+        if os.path.exists(os.path.join(directory, '__init__' + ext)):
+            return True
+
+
+def unique_modules(directory):
+    """Find all unique module names within a directory.
+
+    For each entry in the directory, check if it is a source
+    code file-type (using is_module(entry)), or a directory with
+    a source-code file-type at entry/__init__.py[c].
+
+    Filter the results to only produce a single entry for each
+    module name.
+
+    Filter the results to not include '_' prefixed names.
+
+    Yields each module name as it is encountered.
+    """
+    found = {}
+    for entry in sorted(os.listdir(directory)):
+        if entry.startswith('_'):
+            continue
+        module = is_module(entry)
+        if module:
+            if module not in found:
+                found[module] = entry
+                yield module
+        elif is_package(os.path.join(directory, entry)):
+            if entry not in found:
+                found[entry] = entry
+                yield entry
+
+
+def load_builtin_filters():
+    from os import path
+    import warnings
+
+    # load modules to work both with and without pyinstaller
+    # from: https://github.com/webcomics/dosage/blob/master/dosagelib/loader.py
+    # see: https://github.com/pyinstaller/pyinstaller/issues/1905
+
+    # load modules using iter_modules()
+    # (should find all filters in a normal build, but not under pyinstaller)
+    prefix = __name__ + '.'
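+    # With ``prefix`` set, iter_modules() below yields fully qualified
+    # names (e.g. 'webassets.filter.cssmin' in the original package
+    # layout) that import_module() can load directly.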
+ module_names = [m[1] for m in pkgutil.iter_modules(__path__, prefix)] + + # special handling for PyInstaller + importers = map(pkgutil.get_importer, __path__) + toc = set() + for i in importers: + if hasattr(i, 'toc'): + toc |= i.toc + for elm in toc: + if elm.startswith(prefix): + module_names.append(elm) + + for module_name in module_names: + #module_name = 'webassets.filter.%s' % name + try: + module = import_module(module_name) + except Exception as e: + warnings.warn('Error while loading builtin filter ' + 'module \'%s\': %s' % (module_name, e)) + else: + for attr_name in dir(module): + attr = getattr(module, attr_name) + if inspect.isclass(attr) and issubclass(attr, Filter): + if not attr.name: + # Skip if filter has no name; those are + # considered abstract base classes. + continue + register_filter(attr) +load_builtin_filters() diff --git a/pelican/plugins/webassets/vendor/webassets/filter/autoprefixer.py b/pelican/plugins/webassets/vendor/webassets/filter/autoprefixer.py new file mode 100644 index 0000000..a406c30 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/autoprefixer.py @@ -0,0 +1,85 @@ +from __future__ import with_statement + +from webassets.filter import ExternalTool +from webassets.utils import working_directory + + +class AutoprefixerFilter(ExternalTool): + """Prefixes vendor-prefixes using `autoprefixer + `, which uses the `Can I Use? + ` database to know which prefixes need to be + inserted. + + This depends on the `autoprefixer ` + command line tool being installed (use ``npm install autoprefixer``). + + *Supported configuration options*: + + AUTOPREFIXER_BIN + Path to the autoprefixer executable used to compile source files. By + default, the filter will attempt to run ``autoprefixer`` via the + system path. + + AUTOPREFIXER_BROWSERS + The browser expressions to use. This corresponds to the ``--browsers + `` flag, see the `--browsers documentation + `. By default, this flag + won't be passed, and autoprefixer's default will be used. + + Example:: + + AUTOPREFIXER_BROWSERS = ['> 1%', 'last 2 versions', 'firefox 24', 'opera 12.1'] + + AUTOPREFIXER_EXTRA_ARGS + Additional options may be passed to ``autoprefixer`` using this + setting, which expects a list of strings. 
+ + """ + name = 'autoprefixer' + options = { + 'autoprefixer': 'AUTOPREFIXER_BIN', + 'browsers': 'AUTOPREFIXER_BROWSERS', + 'extra_args': 'AUTOPREFIXER_EXTRA_ARGS', + } + + max_debug_level = None + + def input(self, in_, out, source_path, **kw): + # Set working directory to the source file so that includes are found + args = [self.autoprefixer or 'autoprefixer'] + if self.browsers: + if isinstance(self.browsers, (list, tuple)): + self.browsers = u','.join(self.browsers) + args.extend(['--browsers', self.browsers]) + if self.extra_args: + args.extend(self.extra_args) + with working_directory(filename=source_path): + self.subprocess(args, out, in_) + + +class Autoprefixer6Filter(AutoprefixerFilter): + name = 'autoprefixer6' + + options = { + 'autoprefixer': 'AUTOPREFIXER_BIN', + 'browsers': 'AUTOPREFIXER_BROWSERS', + 'extra_args': 'AUTOPREFIXER_EXTRA_ARGS', + } + + _postcss_autoprefixer = ['-u', 'autoprefixer'] + + max_debug_level = None + + def input(self, in_, out, source_path, **kw): + # Set working directory to the source file so that includes are found + args = [self.autoprefixer or 'postcss'] + args.extend(self._postcss_autoprefixer) + + if self.browsers: + if isinstance(self.browsers, (list, tuple)): + self.browsers = u','.join(self.browsers) + args.extend(['--autoprefixer.browsers', self.browsers]) + if self.extra_args: + args.extend(self.extra_args) + with working_directory(filename=source_path): + self.subprocess(args, out, in_) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/babel.py b/pelican/plugins/webassets/vendor/webassets/filter/babel.py new file mode 100755 index 0000000..9b36e13 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/babel.py @@ -0,0 +1,77 @@ +from webassets.filter import ExternalTool + + +class Babel(ExternalTool): + """Processes ES6+ code into ES5 friendly code using `Babel `_. + + Requires the babel executable to be available externally. + To install it, you might be able to do:: + + $ npm install --global babel-cli + + You probably also want some presets:: + + $ npm install --global babel-preset-es2015 + + Example python bundle: + + .. code-block:: python + + es2015 = get_filter('babel', presets='es2015') + bundle = Bundle('**/*.js', filters=es2015) + + Example YAML bundle: + + .. code-block:: yaml + + es5-bundle: + output: dist/es5.js + config: + BABEL_PRESETS: es2015 + filters: babel + contents: + - file1.js + - file2.js + + Supported configuration options: + + BABEL_BIN + The path to the babel binary. If not set the filter will try to run + ``babel`` as if it's in the system path. + + BABEL_PRESETS + Passed straight through to ``babel --presets`` to specify which babel + presets to use + + BABEL_EXTRA_ARGS + A list of manual arguments to be specified to the babel command + + BABEL_RUN_IN_DEBUG + May be set to False to make babel not run in debug + """ + name = 'babel' + max_debug_level = None + + options = { + 'binary': 'BABEL_BIN', + 'presets': 'BABEL_PRESETS', + 'extra_args': 'BABEL_EXTRA_ARGS', + 'run_in_debug': 'BABEL_RUN_IN_DEBUG', + } + + def setup(self): + super(Babel, self).setup() + if self.run_in_debug is False: + # Disable running in debug mode for this instance. 
+ self.max_debug_level = False + + def input(self, _in, out, **kw): + args = [self.binary or 'babel'] + if self.presets: + args += ['--presets', self.presets] + if self.extra_args: + args.extend(self.extra_args) + if 'source_path' in kw: + args.extend(['--filename', kw['source_path']]) + return self.subprocess(args, out, _in) + diff --git a/pelican/plugins/webassets/vendor/webassets/filter/cleancss.py b/pelican/plugins/webassets/vendor/webassets/filter/cleancss.py new file mode 100644 index 0000000..f55516c --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/cleancss.py @@ -0,0 +1,49 @@ +import os +from subprocess import PIPE, Popen + +from webassets.filter import ExternalTool + +__all__ = ('CleanCSS',) + + +class CleanCSS(ExternalTool): + """ + Minify css using `Clean-css `_. + + Clean-css is an external tool written for NodeJS; this filter assumes that + the ``cleancss`` executable is in the path. Otherwise, you may define + a ``CLEANCSS_BIN`` setting. + + Additional options may be passed to ``cleancss`` binary using the setting + ``CLEANCSS_EXTRA_ARGS``, which expects a list of strings. + """ + + name = 'cleancss' + options = { + 'binary': 'CLEANCSS_BIN', + 'extra_args': 'CLEANCSS_EXTRA_ARGS', + } + + @property + def cleancss_ver(self): + if not hasattr(self, '_cleancss_ver'): + args = [self.binary or 'cleancss'] + args += ['--version'] + # out = b"MAJOR.MINOR.REVISION" // b"3.4.19" or b"4.0.0" + out, err = Popen(args, stdout=PIPE).communicate() + self._cleancss_ver = int(out[:out.index(b'.')]) + return self._cleancss_ver + + def output(self, _in, out, **kw): + args = [self.binary or 'cleancss'] + if self.extra_args: + args.extend(self.extra_args) + self.subprocess(args, out, _in) + + def input(self, _in, out, **kw): + args = [self.binary or 'cleancss'] + if self.cleancss_ver < 4: + args += ['--root', os.path.dirname(kw['source_path'])] + if self.extra_args: + args.extend(self.extra_args) + self.subprocess(args, out, _in) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/clevercss.py b/pelican/plugins/webassets/vendor/webassets/filter/clevercss.py new file mode 100644 index 0000000..37ef78e --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/clevercss.py @@ -0,0 +1,24 @@ +from __future__ import absolute_import +from webassets.filter import Filter + + +__all__ = ('CleverCSS',) + + +class CleverCSS(Filter): + """Converts `CleverCSS `_ markup + to real CSS. + + If you want to combine it with other CSS filters, make sure this one + runs first. + """ + + name = 'clevercss' + max_debug_level = None + + def setup(self): + import clevercss + self.clevercss = clevercss + + def output(self, _in, out, **kw): + out.write(self.clevercss.convert(_in.read())) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/closure.py b/pelican/plugins/webassets/vendor/webassets/filter/closure.py new file mode 100644 index 0000000..76435e2 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/closure.py @@ -0,0 +1,75 @@ +"""Minify Javascript with `Google Closure Compiler +`_. + +Google Closure Compiler is an external tool written in Java, which needs +to be available. One way to get it is to install the +`closure `_ package:: + + pip install closure + +No configuration is necessary in this case. + +You can also define a ``CLOSURE_COMPRESSOR_PATH`` setting that +points to the ``.jar`` file. Otherwise, an environment variable by +the same name is tried. 
The filter will also look for a ``JAVA_HOME`` +environment variable to run the ``.jar`` file, or will otherwise +assume that ``java`` is on the system path. + +Supported configuration options: + +CLOSURE_COMPRESSOR_OPTIMIZATION + Corresponds to Google Closure's `compilation level parameter + `_. + +CLOSURE_EXTRA_ARGS + A list of further options to be passed to the Closure compiler. + There are a lot of them. + + For options which take values you want to use two items in the list:: + + ['--output_wrapper', 'foo: %output%'] +""" + +from __future__ import absolute_import +from webassets.filter import JavaTool + + +__all__ = ('ClosureJS',) + + +class ClosureJS(JavaTool): + + name = 'closure_js' + options = { + 'opt': 'CLOSURE_COMPRESSOR_OPTIMIZATION', + 'extra_args': 'CLOSURE_EXTRA_ARGS', + } + + def setup(self): + super().setup() + self.jar = self.get_jar() + + def get_jar(self): + try: + return self.get_config('CLOSURE_COMPRESSOR_PATH', + what='Google Closure Compiler') + except EnvironmentError: + try: + import closure + return closure.get_jar_filename() + except ImportError: + raise EnvironmentError( + "\nClosure Compiler jar can't be found." + "\nPlease either install the closure package:" + "\n\n pip install closure\n" + "\nor provide a CLOSURE_COMPRESSOR_PATH setting " + "or an environment variable with the full path to " + "the Closure compiler jar." + ) + + def output(self, _in, out, **kw): + args = ['--charset', 'UTF-8', + '--compilation_level', self.opt or 'WHITESPACE_ONLY'] + if self.extra_args: + args.extend(self.extra_args) + self.subprocess(args, out, _in) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/closure_stylesheets.py b/pelican/plugins/webassets/vendor/webassets/filter/closure_stylesheets.py new file mode 100644 index 0000000..1b8a4e1 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/closure_stylesheets.py @@ -0,0 +1,50 @@ +""" Compile and Minify CSS with `Google Closure Stylesheets +`_. + +Google Closure Templates is an external tool written in Java, which needs +to be obtained separately. + +You must define a ``CLOSURE_STYLESHEETS_PATH`` setting that +points to the ``.jar`` file. Otherwise, an environment variable by +the same name is tried. The filter will also look for a ``JAVA_HOME`` +environment variable to run the ``.jar`` file, or will otherwise +assume that ``java`` is on the system path. +""" + +from webassets.filter import JavaTool + + +__all__ = ['ClosureStylesheetsCompiler', 'ClosureStylesheetsMinifier'] + + +class ClosureStylesheetsBase(JavaTool): + + def setup(self): + super(ClosureStylesheetsBase, self).setup() + try: + self.jar = self.get_config('CLOSURE_STYLESHEETS_PATH', + what='Google Closure Stylesheets tool') + except EnvironmentError: + raise EnvironmentError( + "\nGoogle Closure Stylesheets jar can't be found." + "\nPlease provide a CLOSURE_STYLESHEETS_PATH setting " + "or an environment variable with the full path to " + "the Google Closure Stylesheets jar." 
+ ) + + def output(self, _in, out, **kw): + params = [] + if self.mode != 'minify': + params.append('--pretty-print') + self.subprocess( + params + ['{input}'], out, _in) + + +class ClosureStylesheetsCompiler(ClosureStylesheetsBase): + name = 'closure_stylesheets_compiler' + mode = 'compile' + + +class ClosureStylesheetsMinifier(ClosureStylesheetsBase): + name = 'closure_stylesheets_minifier' + mode = 'minify' diff --git a/pelican/plugins/webassets/vendor/webassets/filter/closure_templates.py b/pelican/plugins/webassets/vendor/webassets/filter/closure_templates.py new file mode 100644 index 0000000..b2a15fa --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/closure_templates.py @@ -0,0 +1,100 @@ +"""Client Side Templating with `Google Closure Templates +`_. + +Google Closure Templates is an external tool written in Java, which needs +to be available. One way to get it is to install the +`closure-soy `_ package:: + + pip install closure-soy + +No configuration is necessary in this case. + +You can also define a ``CLOSURE_TEMPLATES_PATH`` setting that +points to the ``.jar`` file. Otherwise, an environment variable by +the same name is tried. The filter will also look for a ``JAVA_HOME`` +environment variable to run the ``.jar`` file, or will otherwise +assume that ``java`` is on the system path. + +Supported configuration options: + +CLOSURE_EXTRA_ARGS + A list of further options to be passed to the Closure compiler. + There are a lot of them. + + For options which take values you want to use two items in the list:: + + ['--inputPrefix', 'prefix'] +""" + +import subprocess +import os +import tempfile + +from webassets.exceptions import FilterError +from webassets.filter.jst import JSTemplateFilter + + +__all__ = ('ClosureTemplateFilter',) + + +class ClosureTemplateFilter(JSTemplateFilter): + name = 'closure_tmpl' + options = { + 'extra_args': 'CLOSURE_EXTRA_ARGS', + } + + def process_templates(self, out, hunks, **kw): + templates = [info['source_path'] for _, info in hunks] + + temp = tempfile.NamedTemporaryFile(dir='.', delete=True) + args = ["--outputPathFormat", temp.name, '--srcs'] + args.extend(templates) + if self.extra_args: + args.extend(self.extra_args) + self.java_run(args) + out.write(open(temp.name).read()) + + def setup(self): + super(ClosureTemplateFilter, self).setup() + try: + self.jar = self.get_config('CLOSURE_TEMPLATES_PATH', + what='Google Closure Soy Templates Compiler') + except EnvironmentError: + try: + import closure_soy + self.jar = closure_soy.get_jar_filename() + except ImportError: + raise EnvironmentError( + "\nClosure Templates jar can't be found." + "\nPlease either install the closure package:" + "\n\n pip install closure-soy\n" + "\nor provide a CLOSURE_TEMPLATES_PATH setting " + "or an environment variable with the full path to " + "the Closure compiler jar." + ) + self.java_setup() + super(ClosureTemplateFilter, self).setup() + + def java_setup(self): + # We can reasonably expect that java is just on the path, so + # don't require it, but hope for the best. 
+ path = self.get_config(env='JAVA_HOME', require=False) + if path is not None: + self.java = os.path.join(path, 'bin/java') + else: + self.java = 'java' + + def java_run(self, args): + proc = subprocess.Popen( + [self.java, '-jar', self.jar] + args, + # we cannot use the in/out streams directly, as they might be + # StringIO objects (which are not supported by subprocess) + stdout=subprocess.PIPE, + stdin=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=(os.name == 'nt')) + stdout, stderr = proc.communicate() + if proc.returncode: + raise FilterError('%s: subprocess returned a ' + 'non-success result code: %s, stdout=%s, stderr=%s' % ( + self.name, proc.returncode, stdout, stderr)) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/coffeescript.py b/pelican/plugins/webassets/vendor/webassets/filter/coffeescript.py new file mode 100644 index 0000000..9ff075d --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/coffeescript.py @@ -0,0 +1,62 @@ +from __future__ import print_function +import os, subprocess + +from webassets.filter import Filter +from webassets.exceptions import FilterError, ImminentDeprecationWarning + + +__all__ = ('CoffeeScript',) + + +class CoffeeScript(Filter): + """Converts `CoffeeScript `_ + to real JavaScript. + + If you want to combine it with other JavaScript filters, make sure this + one runs first. + + Supported configuration options: + + COFFEE_NO_BARE + Set to ``True`` to compile with the top-level function + wrapper (suppresses the --bare option to ``coffee``, which + is used by default). + """ + + name = 'coffeescript' + max_debug_level = None + options = { + 'coffee_deprecated': (False, 'COFFEE_PATH'), + 'coffee_bin': ('binary', 'COFFEE_BIN'), + 'no_bare': 'COFFEE_NO_BARE', + } + + def output(self, _in, out, **kw): + binary = self.coffee_bin or self.coffee_deprecated or 'coffee' + if self.coffee_deprecated: + import warnings + warnings.warn( + 'The COFFEE_PATH option of the "coffeescript" ' + +'filter has been deprecated and will be removed.' + +'Use COFFEE_BIN instead.', ImminentDeprecationWarning) + + args = "-sp" + ("" if self.no_bare else 'b') + try: + proc = subprocess.Popen([binary, args], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=(os.name == 'nt')) + except OSError as e: + if e.errno == 2: + raise Exception("coffeescript not installed or in system path for webassets") + raise + stdout, stderr = proc.communicate(_in.read().encode('utf-8')) + if proc.returncode != 0: + raise FilterError(('coffeescript: subprocess had error: stderr=%s, '+ + 'stdout=%s, returncode=%s') % ( + stderr, stdout, proc.returncode)) + elif stderr: + print("coffeescript filter has warnings:", stderr) + out.write(stdout.decode('utf-8')) + diff --git a/pelican/plugins/webassets/vendor/webassets/filter/compass.py b/pelican/plugins/webassets/vendor/webassets/filter/compass.py new file mode 100644 index 0000000..256544f --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/compass.py @@ -0,0 +1,255 @@ +""" +Generally speaking, compass provides a command line util that is used + a) as a management script (like django-admin.py) doing for example + setup work, adding plugins to a project etc), and + b) can compile the sass source files into CSS. + +While generally project-based, starting with 0.10, compass supposedly +supports compiling individual files, which is what we are using for +implementing this filter. Supposedly, because there are numerous issues +that require working around. 
See the comments in the actual filter code
+for the full story on all the hoops we have to jump through.
+
+An alternative option would be to use Sass to compile. Compass essentially
+adds two things on top of sass: a bunch of CSS frameworks, ported to Sass
+and available for including, and various ruby helpers that these frameworks
+and custom Sass files can use. Apparently there is supposed to be a way
+to compile a compass project through sass, but so far, I haven't got it
+to work. The syntax is supposed to be one of:
+
+    $ sass -r compass `compass imports` FILE
+    $ sass --compass FILE
+
+See:
+    http://groups.google.com/group/compass-users/browse_thread/thread/a476dfcd2b47653e
+    http://groups.google.com/group/compass-users/browse_thread/thread/072bd8b51bec5f7c
+    http://groups.google.com/group/compass-users/browse_thread/thread/daf55acda03656d1
+"""
+
+import os
+from os import path
+import tempfile
+import shutil
+import subprocess
+from io import open
+from webassets import six
+
+from webassets.exceptions import FilterError
+from webassets.filter import Filter, option
+
+
+__all__ = ('Compass',)
+
+
+class CompassConfig(dict):
+    """A trivial dict wrapper that can generate a Compass config file."""
+
+    def to_string(self):
+        def string_rep(val):
+            """Determine the correct string rep for the config file."""
+            if isinstance(val, bool):
+                # True -> true and False -> false
+                return six.text_type(val).lower()
+            elif isinstance(val, six.string_types) and val.startswith(':'):
+                # ruby symbols, like :nested, used for "output_style"
+                return six.text_type(val)
+            elif isinstance(val, dict):
+                # ruby hashes, for "sass_options" for example
+                return u'{%s}' % ', '.join("'%s' => '%s'" % i for i in val.items())
+            elif isinstance(val, tuple):
+                val = list(val)
+            # works fine with strings and lists
+            return repr(val)
+        return u'\n'.join(['%s = %s' % (k, string_rep(v)) for k, v in self.items()])
+
+
+class Compass(Filter):
+    """Converts `Compass `_ .sass files to
+    CSS.
+
+    Requires at least version 0.10.
+
+    To compile a standard Compass project, you only need to compile your
+    main ``screen.sass``, ``print.sass`` and ``ie.sass`` files. All the
+    partials that you include will be handled by Compass.
+
+    If you want to combine the filter with other CSS filters, make
+    sure this one runs first.
+
+    Supported configuration options:
+
+    COMPASS_BIN
+        The path to the Compass binary. If not set, the filter will
+        try to run ``compass`` as if it's in the system path.
+
+    COMPASS_PLUGINS
+        Compass plugins to use. This is equivalent to the ``--require``
+        command line option of Compass, and expects a Python list of
+        Ruby libraries to load.
+
+    COMPASS_CONFIG
+        An optional dictionary of Compass `configuration options
+        `_.
+        The values are emitted as strings, and paths are relative to the
+        Environment's ``directory`` by default; include a ``project_path``
+        entry to override this.
+
+        The ``sourcemap`` option has a caveat. A file called _.css.map is
+        created by Compass in the tempdir (where _.scss is the original
+        asset), which is then moved into the output_path directory. Since
+        the tempdir is created one level down from the output path, the
+        relative links in the sourcemap should map correctly. This file,
+        however, will not be versioned, so this option should ideally only
+        be used locally for development, and not in production with a
+        caching service, as the _.css.map file will not be invalidated.
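+
+        A sketch of what this can look like (an illustrative example;
+        the keys follow Compass' configuration reference and are not
+        defaults of this filter)::
+
+            COMPASS_CONFIG = {
+                'http_path': '/media/',
+                'output_style': ':compressed',
+                'sass_options': {'quiet': 'true'},
+            }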
+    """
+
+    name = 'compass'
+    max_debug_level = None
+    options = {
+        'compass': ('binary', 'COMPASS_BIN'),
+        'plugins': option('COMPASS_PLUGINS', type=list),
+        'config': 'COMPASS_CONFIG',
+    }
+
+    def open(self, out, source_path, **kw):
+        """Compass currently doesn't take data from stdin, and doesn't allow
+        us to access the result from stdout either.
+
+        Also, there's a bunch of other issues we need to work around:
+
+         - compass doesn't support being given an explicit output file,
+           only a "--css-dir" output directory.
+
+           We have to "guess" the filename that will be created in that
+           directory.
+
+         - The output filename used is based on the input filename, simply
+           cutting off the length of the "sass_dir" (and changing the file
+           extension). That is, compass expects the input filename to
+           always be inside the "sass_dir" (which defaults to ./src), and
+           if this is not the case, the output filename will be gibberish
+           (missing characters in front). See:
+           https://github.com/chriseppstein/compass/issues/304
+
+           We fix this by setting the proper --sass-dir option.
+
+         - Compass insists on creating a .sass-cache folder in the
+           current working directory, and unlike the sass executable,
+           there doesn't seem to be a way to disable it.
+
+           The workaround is to set the working directory to our temp
+           directory, so that the cache folder will be deleted at the end.
+        """
+
+        # Create temp folder one dir below output_path so sources in
+        # sourcemap are correct. This will be in the project folder,
+        # and as such, while extremely unlikely, this could interfere
+        # with existing files and directories.
+        tempout_dir = path.normpath(
+            path.join(path.dirname(kw['output_path']), '../')
+        )
+        tempout = tempfile.mkdtemp(dir=tempout_dir)
+        # Temporarily move to "tempout", so .sass-cache will be created there
+        old_wd = os.getcwd()
+        os.chdir(tempout)
+        try:
+            # Make sure to use normpath() to not cause trouble with
+            # compass' simplistic path handling, where it just assumes
+            # source_path is within sassdir, and cuts off the length of
+            # sassdir from the input file.
+            sassdir = path.normpath(path.dirname(source_path))
+            source_path = path.normpath(source_path)
+
+            # Compass offers some helpers like image-url(), which need
+            # information about the urls under which media files will be
+            # available. This is hard for two reasons: First, the options in
+            # question aren't supported on the command line, so we need to
+            # write a temporary config file. Secondly, they assume defined
+            # and separate directories for "images", "stylesheets" etc.,
+            # something webassets knows nothing of: we don't support the
+            # user defining such directories. Because we traditionally had
+            # this filter point all type-specific directories to the root
+            # media directory, we will define the paths to match this. In
+            # other words, in Compass, both inline-image("img/test.png") and
+            # image-url("img/test.png") will find the same file, and assume
+            # it to be {env.directory}/img/test.png.
+            # However, this partly negates the purpose of a utility like
+            # image-url() in the first place - you shouldn't have to hard
+            # code the location of your images. So we allow direct
+            # modification of the configuration file via the COMPASS_CONFIG
+            # setting (see tickets #36 and #125).
+            #
+            # Note that there is also the --relative-assets option, which we
+            # can't use because it calculates an actual relative path between
+            # the image and the css output file, the latter being in a
+            # temporary directory in our case.
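+            #
+            # For reference, the generated .config.rb contains simple
+            # "key = value" lines, roughly like this (a sketch; actual
+            # paths depend on the environment):
+            #
+            #   project_path = '/path/to/project'
+            #   http_path = '/'
+            #   output_style = :expanded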
+ config = CompassConfig( + project_path=self.ctx.directory, + http_path=self.ctx.url, + http_images_dir='', + http_stylesheets_dir='', + http_fonts_dir='', + http_javascripts_dir='', + images_dir='', + output_style=':expanded', + ) + # Update with the custom config dictionary, if any. + if self.config: + config.update(self.config) + config_file = path.join(tempout, '.config.rb') + f = open(config_file, 'w') + try: + f.write(config.to_string()) + f.flush() + finally: + f.close() + + command = [self.compass or 'compass', 'compile'] + for plugin in self.plugins or []: + command.extend(('--require', plugin)) + command.extend(['--sass-dir', sassdir, + '--css-dir', tempout, + '--config', config_file, + '--quiet', + '--boring', + source_path]) + proc = subprocess.Popen(command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + # shell: necessary on windows to execute + # ruby files, but doesn't work on linux. + shell=(os.name == 'nt')) + stdout, stderr = proc.communicate() + + # compass seems to always write a utf8 header? to stderr, so + # make sure to not fail just because there's something there. + if proc.returncode != 0: + raise FilterError(('compass: subprocess had error: stderr=%s, '+ + 'stdout=%s, returncode=%s') % ( + stderr, stdout, proc.returncode)) + + guessed_outputfilename = path.splitext(path.basename(source_path))[0] + guessed_outputfilepath = path.join(tempout, guessed_outputfilename) + output_file = open("%s.css" % guessed_outputfilepath, encoding='utf-8') + if config.get('sourcemap'): + sourcemap_file = open("%s.css.map" % guessed_outputfilepath) + sourcemap_output_filepath = path.join( + path.dirname(kw['output_path']), + path.basename(sourcemap_file.name) + ) + if not path.exists(path.dirname(sourcemap_output_filepath)): + os.mkdir(path.dirname(sourcemap_output_filepath)) + sourcemap_output_file = open(sourcemap_output_filepath, 'w') + sourcemap_output_file.write(sourcemap_file.read()) + sourcemap_file.close() + try: + contents = output_file.read() + out.write(contents) + finally: + output_file.close() + finally: + # Restore previous working dir + os.chdir(old_wd) + # Clean up the temp dir + shutil.rmtree(tempout) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/cssmin.py b/pelican/plugins/webassets/vendor/webassets/filter/cssmin.py new file mode 100644 index 0000000..bf0c4a2 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/cssmin.py @@ -0,0 +1,26 @@ +from __future__ import absolute_import +from webassets.filter import Filter + + +__all__ = ('CSSMin',) + + +class CSSMin(Filter): + """Minifies CSS. + + Requires the ``cssmin`` package (http://github.com/zacharyvoase/cssmin), + which is a port of the YUI CSS compression algorithm. + """ + + name = 'cssmin' + + def setup(self): + try: + import cssmin + except ImportError: + raise EnvironmentError('The "cssmin" package is not installed.') + else: + self.cssmin = cssmin + + def output(self, _in, out, **kw): + out.write(self.cssmin.cssmin(_in.read())) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/cssprefixer.py b/pelican/plugins/webassets/vendor/webassets/filter/cssprefixer.py new file mode 100644 index 0000000..59cffbb --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/cssprefixer.py @@ -0,0 +1,25 @@ +from __future__ import absolute_import +from webassets.filter import Filter + + +__all__ = ('CSSPrefixer',) + + +class CSSPrefixer(Filter): + """Uses `CSSPrefixer `_ + to add vendor prefixes to CSS files. 
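+
+    A minimal usage sketch (assuming the ``cssprefixer`` package is
+    installed)::
+
+        Bundle('style.css', filters='cssprefixer', output='gen/style.css')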
+    """
+
+    name = 'cssprefixer'
+    max_debug_level = 'merge'
+
+    def setup(self):
+        import cssprefixer
+        self.cssprefixer = cssprefixer
+
+    def output(self, _in, out, **kw):
+        output = self.cssprefixer.process(_in.read(), False, False)
+        if not isinstance(output, str):
+            # cssprefixer likes to return unicode strings on Python 2;
+            # encode them as before. (The original check used `unicode`,
+            # which does not exist on Python 3.)
+            output = output.encode('utf8')
+        out.write(output)
diff --git a/pelican/plugins/webassets/vendor/webassets/filter/cssrewrite/__init__.py b/pelican/plugins/webassets/vendor/webassets/filter/cssrewrite/__init__.py
new file mode 100644
index 0000000..a3985e0
--- /dev/null
+++ b/pelican/plugins/webassets/vendor/webassets/filter/cssrewrite/__init__.py
@@ -0,0 +1,110 @@
+import os
+from os.path import join
+from webassets.utils import common_path_prefix
+from webassets.utils import urlparse
+from . import urlpath
+try:
+    from collections import OrderedDict
+except ImportError:
+    # Use an ordered dict when available, otherwise we simply don't
+    # support ordering - it's just a nice bonus.
+    OrderedDict = dict
+
+from .base import CSSUrlRewriter, addsep, path2url
+
+
+__all__ = ('CSSRewrite',)
+
+
+class CSSRewrite(CSSUrlRewriter):
+    """Source filter that rewrites relative urls in CSS files.
+
+    CSS allows you to specify urls relative to the location of the CSS file.
+    However, you may want to store your compressed assets in a different
+    place than the source files, or merge source files from different
+    locations. This would then break these relative CSS references, since
+    the base URL changed.
+
+    This filter transparently rewrites CSS ``url()`` instructions in the
+    source files to make them relative to the location of the output path.
+    It works as a *source filter*, i.e. it is applied individually to each
+    source file before they are merged.
+
+    No configuration is necessary.
+
+    The filter also supports a manual mode::
+
+        get_filter('cssrewrite', replace={'old_directory': '/custom/path/'})
+
+    This will rewrite all urls that point to files within ``old_directory``
+    to use ``/custom/path`` as a prefix instead.
+
+    You may plug in your own replace function::
+
+        get_filter('cssrewrite', replace=lambda url: re.sub(r'^/?images/', '/images/', url))
+        get_filter('cssrewrite', replace=lambda url: '/images/'+url[7:] if url.startswith('images/') else url)
+    """
+
+    # TODO: If we want to support inline assets, this needs to be
+    # updated to optionally convert URLs to absolute ones based on
+    # MEDIA_URL.
+
+    name = 'cssrewrite'
+    max_debug_level = 'merge'
+
+    def __init__(self, replace=False):
+        super(CSSRewrite, self).__init__()
+        self.replace = replace
+
+    def unique(self):
+        # Allow mixing the standard version of this filter, and replace mode.
+        return self.replace
+
+    def input(self, _in, out, **kw):
+        if self.replace not in (False, None) and not callable(self.replace):
+            # For replace mode, make sure we have all the directories to be
+            # rewritten in the form of a url, so we can later easily match
+            # them against the urls encountered in the CSS.
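+            # (E.g. with replace={'img': '/cdn/img/'}, the dict built
+            # below maps the url form of that directory, 'img/' relative
+            # to the environment directory, to the '/cdn/img/' prefix.)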
+            root = addsep(self.ctx.directory)
+            replace_dict = OrderedDict()
+            for repldir, sub in self.replace.items():
+                repldir = addsep(os.path.normpath(join(root, repldir)))
+                replurl = path2url(repldir[len(common_path_prefix([root, repldir])):])
+                replace_dict[replurl] = sub
+            self.replace_dict = replace_dict
+
+        return super(CSSRewrite, self).input(_in, out, **kw)
+
+    def replace_url(self, url):
+        # Replace mode: manually adjust the location of files
+        if callable(self.replace):
+            return self.replace(url)
+        elif self.replace is not False:
+            for to_replace, sub in self.replace_dict.items():
+                targeturl = urlparse.urljoin(self.source_url, url)
+                if targeturl.startswith(to_replace):
+                    url = "%s%s" % (sub, targeturl[len(to_replace):])
+                    # Only apply the first match
+                    break
+
+        # Default mode: auto correct relative urls
+        else:
+            # If the path is an absolute one, keep it
+            parsed = urlparse.urlparse(url)
+            if not parsed.scheme and not parsed.path.startswith('/'):
+                abs_source_url = urlparse.urljoin(self.source_url, url)
+
+                # relpath() will not detect this case
+                if urlparse.urlparse(abs_source_url).scheme:
+                    return abs_source_url
+
+                # rewritten url: relative path from new location (output)
+                # to location of referenced file (source + current url)
+                url = urlpath.relpath(self.output_url, abs_source_url)
+
+        return url
+
+    def get_additional_cache_keys(self, **kw):
+        if 'output_path' in kw:
+            return [os.path.dirname(kw['output_path'])]
+        return []
diff --git a/pelican/plugins/webassets/vendor/webassets/filter/cssrewrite/base.py b/pelican/plugins/webassets/vendor/webassets/filter/cssrewrite/base.py
new file mode 100644
index 0000000..db358a7
--- /dev/null
+++ b/pelican/plugins/webassets/vendor/webassets/filter/cssrewrite/base.py
@@ -0,0 +1,118 @@
+import os
+import re
+from os.path import join, normpath
+from webassets.filter import Filter
+from webassets.utils import common_path_prefix
+
+
+__all__ = ()
+
+
+def addsep(path):
+    """Add a trailing path separator."""
+    if path and path[-1] != os.path.sep:
+        return path + os.path.sep
+    return path
+
+
+def path2url(path):
+    """Simple helper for NT systems to replace slash syntax."""
+    if os.name == 'nt':
+        return path.replace('\\', '/')
+    return path
+
+
+class PatternRewriter(Filter):
+    """Base class for input filters which want to replace certain patterns.
+    """
+
+    # Define the patterns in the form of:
+    #   method to call -> pattern to call it for (as a compiled regex)
+    patterns = {}
+
+    def input(self, _in, out, **kw):
+        content = _in.read()
+        for func, pattern in self.patterns.items():
+            if not callable(func):
+                func = getattr(self, func)
+            # Should this pass along **kw? How many subclasses would need it?
+            # As is, subclasses needing access need to overwrite input() and
+            # set class attributes.
+            content = pattern.sub(func, content)
+        out.write(content)
+
+
+urltag_re = re.compile(r"""
+url\(
+  (\s*)                 # allow whitespace wrapping (and capture)
+  (                     # capture actual url
+    [^\)\\\r\n]*?           # don't allow newlines, closing paren, escape chars (1)
+    (?:\\.                  # process all escapes here instead
+        [^\)\\\r\n]*?           # proceed, with previous restrictions (1)
+    )*                      # repeat until end
+  )
+  (\s*)                 # whitespace again (and capture)
+\)
+
+# (1) non-greedy to let the last whitespace group capture something
+# TODO: would it be faster to handle whitespace within _rewrite()?
+""", re.VERBOSE)
+
+
+class CSSUrlRewriter(PatternRewriter):
+    """Base class for input filters which need to replace url() statements
+    in CSS stylesheets.
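+
+    Subclasses only need to implement ``replace_url()``. A hypothetical
+    sketch that pins every relative url to a fixed prefix::
+
+        class PrefixRewriter(CSSUrlRewriter):
+            def replace_url(self, url):
+                return '/static/' + url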
+ """ + + patterns = { + 'rewrite_url': urltag_re + } + + def input(self, _in, out, **kw): + source, source_path, output, output_path = \ + kw['source'], kw['source_path'], kw['output'], kw['output_path'] + + self.source_path = source_path + self.output_path = output_path + self.source_url = self.ctx.resolver.resolve_source_to_url( + self.ctx, source_path, source) + self.output_url = self.ctx.resolver.resolve_output_to_url( + self.ctx, output) + + return super(CSSUrlRewriter, self).input(_in, out, **kw) + + def rewrite_url(self, m): + # Get the regex matches; note how we maintain the exact + # whitespace around the actual url; we'll indeed only + # replace the url itself. + text_before = m.groups()[0] + url = m.groups()[1] + text_after = m.groups()[2] + + # Normalize the url: remove quotes + quotes_used = '' + if url[:1] in '"\'': + quotes_used = url[:1] + url = url[1:] + if url[-1:] in '"\'': + url = url[:-1] + + url = self.replace_url(url) or url + + result = 'url(%s%s%s%s%s)' % ( + text_before, quotes_used, url, quotes_used, text_after) + return result + + def replace_url(self, url): + """Implement this to return a replacement for each URL found.""" + raise NotImplementedError() + + +if __name__ == '__main__': + for text, expect in [ + (r' url(icon\)xyz) ', r'url(icon\)xyz)'), + (r' url(icon\\)xyz) ', r'url(icon\\)'), + (r' url(icon\\\)xyz) ', r'url(icon\\\)xyz)'), + ]: + m = urltag_re.search(text) + assert m.group() == expect diff --git a/pelican/plugins/webassets/vendor/webassets/filter/cssrewrite/urlpath.py b/pelican/plugins/webassets/vendor/webassets/filter/cssrewrite/urlpath.py new file mode 100644 index 0000000..f768da5 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/cssrewrite/urlpath.py @@ -0,0 +1,269 @@ +# urlpath.py + +# 0.1.0 +# 2005/08/20 + +# Functions that handle url paths. +# Part of Pythonutils +# http://www.voidspace.org.uk/python/pythonutils.html + +# Copyright Michael Foord, 2004 & 2005. +# Released subject to the BSD License +# Please see http://www.voidspace.org.uk/python/license.shtml + +# For information about bugfixes, updates and support, please join the +# Pythonutils mailing list. +# http://groups.google.com/group/pythonutils/ +# Comments, suggestions and bug reports welcome. +# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml +# E-mail fuzzyman@voidspace.org.uk + +from __future__ import print_function +import posixpath +import os +try: + from urllib.request import url2pathname, pathname2url +except ImportError: + from urllib import url2pathname, pathname2url + +__all__ = [ + 'nativejoin', + 'pathjoin', + 'relpathto', + 'tslash', + 'relpath' + ] + +def pathjoin(base, *paths): + """ + Join paths to a base, observing pardir. + + If base doesn't *end* with '/' we assume it's a file rather than a directory. + (so we get rid of it) + """ + # XXXX will posixpath.join do all this anyway? + if base and not base.endswith('/'): + # get rid of the filename + base = '/'.join(base.split('/')[:-1]) + base = tslash(base) + path = (base,) + paths + return posixpath.normpath(posixpath.join(*path)) + +def nativejoin(base, path): + """ + Joins two paths - returning a native file path. + + Given a base path and a relative location, (in posix format) + return a file path in a (relatively) OS native way. + """ + return url2pathname(pathjoin(base, path)) + +def relpathto(thisdir, origin, dest): + """ + Given two paths relative to a directory, work out a path from origin + to destination. + + Assumes UNIX/URL type relative paths. 
+ If origin doesn't *end* with '/' we assume it's a file rather than a + directory. + + If the same paths are passed in : + if the path ends with ('/') then we return '' + else we return the last part of the path (presumably a filename) + + If thisdir doesn't start with '/' then we add one + (this makes the top level of thisdir our root directory) + """ + orig_thisdir = thisdir + if not thisdir.startswith('/'): + thisdir = '/' + thisdir + orig_abs = posixpath.normpath(posixpath.join(thisdir, origin)) + dest_abs = posixpath.normpath(posixpath.join(thisdir, dest)) + if origin.endswith('/') and not orig_abs.endswith('/'): + orig_abs = orig_abs + '/' + if dest.endswith('/') and not dest_abs.endswith('/'): + dest_abs = dest_abs + '/' +# print orig_abs, dest_abs + # + # if the first item is a filename, we want to get rid of it + orig_list = orig_abs.split('/')[:-1] + dest_list = dest_abs.split('/') +# print orig_list, dest_list + + if orig_list[0] != dest_list[0]: + # can't get here from there + # XXXX raise exception? + return dest + # + # find the location where the two paths start to differ. + i = 0 + for start_seg, dest_seg in zip(orig_list, dest_list): + if start_seg != dest_seg: + break + i += 1 + # + # now i is the point where the two paths diverge; + # need a certain number of "os.pardir"s to work up + # from the origin to the point of divergence. + segments = ['..'] * (len(orig_list) - i) + # need to add the diverging part of dest_list. + segments += dest_list[i:] + if len(segments) == 0: + # if they happen to be identical paths + # identical directories + if dest.endswith('/'): + return '' + # just the filename - the last part of dest + return dest_list[-1] + else: + return '/'.join(segments) + +def relpath(origin, dest): + """Given two absolute paths, work out a path from origin to destination. + + Assumes UNIX/URL type relative paths. + If origin doesn't *end* with '/' we assume it's a file rather than + a directory. + + If the same paths are passed in : + if the path ends with ('/') then we return '' + else we return the last part of the path (presumably a filename) + + If origin or dest don't start with '/' then we add it. + + We are *assuming* relative paths on the same device + (i.e. same top level directory) + """ + if not origin.startswith('/'): + origin = '/' + origin + if not dest.startswith('/'): + dest = '/' + dest + # + # if the first item is a filename, we want to get rid of it + orig_list = origin.split('/')[:-1] + dest_list = dest.split('/') + # + # find the location where the two paths start to differ. + i = 0 + for start_seg, dest_seg in zip(orig_list, dest_list): + if start_seg != dest_seg: + break + i += 1 + + # now i is the point where the two paths diverge. + # need a certain number of "os.pardir"s to work up + # from the origin to the point of divergence. + segments = ['..'] * (len(orig_list) - i) + # need to add the diverging part of dest_list. + segments += dest_list[i:] + if len(segments) == 0: + # if they happen to be identical paths + # identical directories + if dest.endswith('/'): + return '' + # just the filename - the last part of dest + return dest_list[-1] + else: + return '/'.join(segments) + +def tslash(apath): + """Add a trailing slash to a path if it needs one. + + Doesn't use os.sep because you end up jiggered on windoze - when you + want separators for URLs. + """ + if (apath and + apath != '.' 
and + not apath.endswith('/') and + not apath.endswith('\\')): + return apath + '/' + else: + return apath + +############################################## + +def testJoin(): + thelist = [ + ('/', 'fish.html'), + ('/dir/dir/', '../file'), + ('dir/dir/', '../file'), + ('dir/dir/', '../../file'), + ('dir/dir/', '../../../file'), + ('/dir/dir/', '../notherdir/file'), + ('/dir/dir/', '../../notherdir/file'), + ('dir/dir/', '../../notherdir/file'), + ('dir/dir/', '../../../notherdir/file'), + ('', '../path'), + ] + for entry in thelist: + print(entry, ' :: ', pathjoin(*entry)) + print(entry, ' :: ', nativejoin(*entry)) + print('\n') + +def testRelpathto(): + thedir = '//toplevel/dirone/dirtwo/dirthree' + thelist = [ + ('file1.html', 'file2.html'), + ('file1.html', '../file2.html'), + ('../file1.html', '../file2.html'), + ('../file1.html', 'file2.html'), + ('../fish1/fish2/', '../../sub1/sub2/'), + ('../fish1/fish2/', 'sub1/sub2'), + ('../../../fish1/fish2/', 'sub1/sub2/'), + ('../../../fish1/fish2/', 'sub1/sub2/file1.html'), + ] + for orig, dest in thelist: + print('(%s, %s) : ' % (orig, dest), relpathto(thedir, orig, dest)) + +def testRelpathto2(): + thedir = 'section3/' + thelist = [ + ('../archive/strangeindex1.html', 'article2.html'), + ] + for orig, dest in thelist: + answer = relpathto(thedir, orig, dest) + print('(%s, %s) : ' % (orig, dest), answer) + +def testRelpath(): + thelist = [ + ('/hello/fish/', 'bungles'), + ] + for orig, dest in thelist: + answer = relpath(orig, dest) + print('(%s, %s) : ' % (orig, dest), answer) + + +if __name__ == '__main__': + testJoin() + testRelpathto() + testRelpath() +# testRelpathto2() + +""" +TODO +==== + +More comprehensive tests. + +CHANGELOG +2005/07/31 +Can now pass multiple args to ``pathjoin``. +Finalised as version 0.1.0 + +2005/06/18 +Changes by Nicola Larosa + Code cleanup + lines shortened + comments on line above code + empty comments in empty lines + +2005/05/28 +Added relpath to __all__ + + +TODO +Move into pythonutils +relpathto could call relpath (and so be shorter) +nativejoin could accept multiple paths +Could tslash be more elegant ? +""" diff --git a/pelican/plugins/webassets/vendor/webassets/filter/cssutils.py b/pelican/plugins/webassets/vendor/webassets/filter/cssutils.py new file mode 100644 index 0000000..2b8f4a1 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/cssutils.py @@ -0,0 +1,34 @@ +from __future__ import absolute_import +import logging +import logging.handlers + +from webassets.filter import Filter + + +__all__ = ('CSSUtils',) + + +class CSSUtils(Filter): + """Minifies CSS by removing whitespace, comments etc., using the Python + `cssutils `_ library. + + Note that since this works as a parser on the syntax level, so invalid + CSS input could potentially result in data loss. + """ + + name = 'cssutils' + + def setup(self): + import cssutils + self.cssutils = cssutils + + # cssutils is unaware of many new CSS3 properties, + # vendor-prefixes etc., and logs many non-fatal warnings + # about them. These diagnostic messages are rather + # useless, so disable everything that's non-fatal. 
+        cssutils.log.setLevel(logging.FATAL)
+
+    def output(self, _in, out, **kw):
+        sheet = self.cssutils.parseString(_in.read())
+        self.cssutils.ser.prefs.useMinified()
+        out.write(sheet.cssText.decode('utf-8'))
diff --git a/pelican/plugins/webassets/vendor/webassets/filter/datauri.py b/pelican/plugins/webassets/vendor/webassets/filter/datauri.py
new file mode 100644
index 0000000..339fed6
--- /dev/null
+++ b/pelican/plugins/webassets/vendor/webassets/filter/datauri.py
@@ -0,0 +1,71 @@
+from base64 import b64encode
+import mimetypes
+import os
+from webassets.utils import urlparse
+
+from webassets.filter.cssrewrite.base import CSSUrlRewriter
+
+
+__all__ = ('CSSDataUri',)
+
+
+class CSSDataUri(CSSUrlRewriter):
+    """Will replace CSS url() references to external files with internal
+    `data: URIs `_.
+
+    The external file is now included inside your CSS, which minimizes HTTP
+    requests.
+
+    .. note::
+
+        Data URIs have `clear disadvantages `_,
+        so put some thought into if and how you would like to use them. Have
+        a look at some `performance measurements `_.
+
+    The filter respects a ``DATAURI_MAX_SIZE`` option, which is the maximum
+    size (in bytes) of external files to include. The default limit is what
+    I think should be a reasonably conservative number, 2048 bytes.
+    """
+
+    name = 'datauri'
+    options = {
+        'max_size': 'DATAURI_MAX_SIZE',
+    }
+
+    def replace_url(self, url):
+        if url.startswith('data:'):
+            # Don't even bother sending data: urls through urlparse();
+            # who knows how well it'll deal with a lot of data.
+            return
+
+        # Ignore any urls which are not relative
+        parsed = urlparse.urlparse(url)
+        if parsed.scheme or parsed.netloc or parsed.path.startswith('/'):
+            return
+
+        # Since this runs BEFORE cssrewrite, we can thus assume that urls
+        # will be relative to the file location.
+        #
+        # Notes:
+        # - Django might need to override this filter for staticfiles if
+        #   it should be possible to resolve cross-references between
+        #   different directories.
+        # - For Flask-Assets blueprints, the logic might need to be:
+        #   1) Take source_path, convert into correct url via absurl().
+        #   2) Join with the URL to be replaced.
+        #   3) Convert url back to the filesystem path to which the url
+        #      would map (the hard part?).
+        #
+
+        filename = os.path.join(os.path.dirname(self.source_path), url)
+
+        try:
+            if os.stat(filename).st_size <= (self.max_size or 2048):
+                with open(filename, 'rb') as f:
+                    data = b64encode(f.read())
+                return 'data:%s;base64,%s' % (
+                    mimetypes.guess_type(filename)[0], data.decode())
+        except (OSError, IOError):
+            # Ignore the file not existing.
+            # TODO: When we have a logging system, this could produce a warning
+            return
diff --git a/pelican/plugins/webassets/vendor/webassets/filter/dust.py b/pelican/plugins/webassets/vendor/webassets/filter/dust.py
new file mode 100644
index 0000000..4623455
--- /dev/null
+++ b/pelican/plugins/webassets/vendor/webassets/filter/dust.py
@@ -0,0 +1,57 @@
+"""Compile DustJS templates to a single JavaScript file that, when
+loaded in the browser, registers automatically.
+
+"""
+
+from webassets.filter import ExternalTool
+
+
+__all__ = ('DustJS',)
+
+
+class DustJS(ExternalTool):
+    """`DustJS `_ templates compilation
+    filter.
+
+    Takes a directory full of ``.dust`` files and creates a single
+    JavaScript object that registers to the ``dust`` global when loaded
+    in the browser::
+
+        Bundle('js/templates/', filters='dustjs')
+
+    Note that in the above example, a directory is given as the bundle
+    contents, which is unusual, but required by this filter.
+
+    This uses the ``dusty`` compiler, which is a separate project from the
+    DustJS implementation. To install ``dusty`` together with LinkedIn's
+    version of ``dustjs`` (the original does not support NodeJS > 0.4)::
+
+        npm install dusty
+        rm -rf node_modules/dusty/node_modules/dust
+        git clone https://github.com/linkedin/dustjs node_modules/dust
+
+    .. note::
+
+        To generate the DustJS client-side Javascript, you can then do::
+
+            cd node_modules/dust
+            make dust
+            cp dist/dist-core...js your/static/assets/path
+
+    For compilation, set ``DUSTY_PATH`` to
+    ``.../node_modules/dusty/bin/dusty``. Optionally, set ``NODE_PATH``
+    to point at the ``node`` binary.
+    """
+
+    name = 'dustjs'
+    options = {'dusty_path': 'DUSTY_PATH',
+               'node_path': 'NODE_PATH'}
+    max_debug_level = None
+
+    def open(self, out, source_path, **kw):
+        args = []
+        if self.node_path:
+            args += [self.node_path]
+        args += [self.dusty_path or 'dusty']
+        # no need for --single, as we output to STDOUT
+        args += [source_path]
+
+        self.subprocess(args, out)
diff --git a/pelican/plugins/webassets/vendor/webassets/filter/handlebars.py b/pelican/plugins/webassets/vendor/webassets/filter/handlebars.py
new file mode 100644
index 0000000..173a92f
--- /dev/null
+++ b/pelican/plugins/webassets/vendor/webassets/filter/handlebars.py
@@ -0,0 +1,75 @@
+import subprocess
+import os
+from os import path
+
+from webassets.exceptions import FilterError
+from webassets.filter.jst import JSTemplateFilter
+
+
+__all__ = ('Handlebars',)
+
+
+class Handlebars(JSTemplateFilter):
+    """Compile `Handlebars `_ templates.
+
+    This filter assumes that the ``handlebars`` executable is in the path.
+    Otherwise, you may define a ``HANDLEBARS_BIN`` setting.
+
+    .. note::
+        Use this filter if you want to precompile Handlebars templates.
+        If compiling them in the browser is acceptable, you may use the
+        JST filter, which needs no external dependency.
+
+    .. warning::
+        Currently, this filter is not compatible with input filters. Any
+        filters that would run during the input-stage will simply be
+        ignored. Input filters tend to be other compiler-style filters,
+        so this is unlikely to be an issue.
+    """
+
+    # TODO: We should fix the warning above. Either we make this filter
+    # support input-processing (we'd have to work with the hunks given to
+    # us, rather than the original source files), or make webassets raise
+    # an error if the handlebars filter is combined with an input filter.
+    # I'm unsure about the best API design. We could support open()
+    # returning ``True`` to indicate "no input filters allowed"
+    # (surprisingly hard to implement), or use an attribute to declare
+    # as much.
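+    #
+    # Usage sketch (paths are illustrative):
+    #
+    #     Bundle('templates/*.handlebars', filters='handlebars',
+    #            output='gen/templates.js')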
+
+    name = 'handlebars'
+    options = {
+        'binary': 'HANDLEBARS_BIN',
+        'extra_args': 'HANDLEBARS_EXTRA_ARGS',
+        'root': 'HANDLEBARS_ROOT',
+    }
+    max_debug_level = None
+
+    def process_templates(self, out, hunks, **kw):
+        templates = [info['source_path'] for _, info in hunks]
+
+        if self.root is True:
+            root = self.get_config('directory')
+        elif self.root:
+            root = path.join(self.get_config('directory'), self.root)
+        else:
+            root = self._find_base_path(templates)
+
+        args = [self.binary or 'handlebars']
+        if root:
+            args.extend(['-r', root])
+        if self.extra_args:
+            args.extend(self.extra_args)
+        args.extend(templates)
+
+        proc = subprocess.Popen(
+            args, stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            shell=(os.name == 'nt'))
+        stdout, stderr = proc.communicate()
+
+        if proc.returncode != 0:
+            raise FilterError(('handlebars: subprocess had error: stderr=%s, '+
+                               'stdout=%s, returncode=%s') % (
+                                  stderr, stdout, proc.returncode))
+        out.write(stdout.decode('utf-8').strip() + ';')
diff --git a/pelican/plugins/webassets/vendor/webassets/filter/jade.py b/pelican/plugins/webassets/vendor/webassets/filter/jade.py
new file mode 100644
index 0000000..16bab7d
--- /dev/null
+++ b/pelican/plugins/webassets/vendor/webassets/filter/jade.py
@@ -0,0 +1,130 @@
+# coding=utf-8
+
+from __future__ import print_function
+import os, subprocess
+from webassets.filter import Filter, register_filter
+from webassets.exceptions import FilterError
+
+
+class Jade(Filter):
+    """Converts `Jade `_ templates to client-side
+    JavaScript functions.
+
+    Requires the Jade executable to be available externally. You can
+    install it using the `Node Package Manager `_::
+
+        $ npm install jade
+
+    Jade templates are compiled and stored in a window-level JavaScript
+    object under a key corresponding to the template file's basename. For
+    example, ``keyboardcat.jade`` compiles into::
+
+        window.templates['keyboardcat'] = function() { ... };
+
+    Supported configuration options:
+
+    JADE_BIN
+        The system path to the Jade binary. If not set, ``jade`` is
+        assumed to be in the system path.
+
+    JADE_RUNTIME
+        The system path to the Jade runtime, ``runtime.js``, which ships
+        with Jade. If you installed Jade locally it can be found in:
+
+            node_modules/jade/runtime.js
+
+        Globally, on Ubuntu it's typically located in:
+
+            /usr/lib/node_modules/jade/runtime.js
+
+        Or sometimes:
+
+            /usr/local/lib/node_modules/jade/runtime.js
+
+        If, for some reason, you can't find your Jade runtime you can
+        download it from the `Jade Repository `_, but do take care to
+        download the runtime version that matches your installed Jade.
+
+    JADE_NO_DEBUG
+        Omits debugging information to output shorter functions.
+
+    JADE_TEMPLATE_VAR
+        The window-level JavaScript object where the compiled Jade objects
+        will be stored.
This defaults to ``templates``, i.e.:
+
+            window['templates']
+    """
+
+    name = 'jade'
+    max_debug_level = None
+    options = {
+        'jade': 'JADE_BIN',
+        'jade_runtime': 'JADE_RUNTIME',
+        'jade_no_debug': 'JADE_NO_DEBUG',
+        'js_var': 'JADE_TEMPLATE_VAR'
+    }
+    argv = []
+
+
+    def setup(self):
+        """
+        Check options and apply defaults if necessary.
+        """
+        super(Jade, self).setup()
+
+        self.argv = [self.jade or 'jade']
+        self.argv.append('--client')
+
+        if self.jade_no_debug:
+            self.argv.append('--no-debug')
+
+        if not self.js_var:
+            self.js_var = 'templates'
+
+
+    def input(self, _in, out, **kwargs):
+        """
+        Compile individual Jade templates.
+        """
+        proc = subprocess.Popen(self.argv,
+                                stdin=subprocess.PIPE,
+                                stdout=subprocess.PIPE,
+                                stderr=subprocess.PIPE,
+                                shell=(os.name == 'nt'))
+        # The subprocess speaks bytes: encode the template on the way in,
+        # decode the compiled result on the way out.
+        stdout, stderr = proc.communicate(_in.read().encode('utf-8'))
+
+        if proc.returncode != 0:
+            raise FilterError(('jade: subprocess returned a non-success ' +
+                               'result code: %s, stdout=%s, stderr=%s')
+                              % (proc.returncode, stdout, stderr))
+        elif stderr:
+            print('jade filter has warnings:', stderr)
+
+        # Add a bit of JavaScript that will place our compiled Jade function
+        # into an object on the `window` object. Jade files are keyed by
+        # their basename.
+        key = os.path.splitext(os.path.basename(kwargs['source_path']))[0]
+        preamble = "window['%s']['%s'] = " % (self.js_var, key)
+
+        out.write('%s%s' % (preamble, stdout.decode('utf-8').strip()))
+
+
+    def output(self, _in, out, **kwargs):
+        """
+        Prepend the Jade runtime and initialize the template variable.
+        """
+        if self.jade_runtime:
+            with open(self.jade_runtime) as file:
+                runtime = ''.join(file.readlines())
+        else:
+            runtime = ''
+
+        # JavaScript code to initialize the window-level object that will
+        # hold our compiled Jade templates as functions
+        init = "if(!window['%s']) { window['%s'] = {}; }" % (self.js_var, self.js_var)
+
+        out.write('%s\n%s\n%s' % (runtime, init, _in.read()))
+
+
+register_filter(Jade)
diff --git a/pelican/plugins/webassets/vendor/webassets/filter/jinja2.py b/pelican/plugins/webassets/vendor/webassets/filter/jinja2.py
new file mode 100644
index 0000000..50a295e
--- /dev/null
+++ b/pelican/plugins/webassets/vendor/webassets/filter/jinja2.py
@@ -0,0 +1,42 @@
+from __future__ import absolute_import
+from webassets.filter import Filter
+
+
+__all__ = ('Jinja2',)
+
+
+class Jinja2(Filter):
+    """Process a file through the Jinja2 templating engine.
+
+    Requires the ``jinja2`` package (https://github.com/mitsuhiko/jinja2).
+
+    The Jinja2 context can be specified with the `JINJA2_CONTEXT`
+    configuration option or directly with `context={...}`. Example:
+
+    .. code-block:: python
+
+        Bundle('input.css', filters=Jinja2(context={'foo': 'bar'}))
+
+    Additionally, to enable template loading from your project, you can
+    provide a `JINJA2_ENV` setting or a `jinja2_env` argument to make use
+    of an already created environment.
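+
+    For instance (a sketch; ``my_env`` is assumed to be an existing,
+    configured ``jinja2.Environment``):
+
+    .. code-block:: python
+
+        Bundle('input.css', filters=Jinja2(jinja2_env=my_env))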
+ """ + + name = 'jinja2' + max_debug_level = None + options = { + 'context': 'JINJA2_CONTEXT', + 'jinja2_env': 'JINJA2_ENV' + } + + def setup(self): + try: + import jinja2 + except ImportError: + raise EnvironmentError('The "jinja2" package is not installed.') + else: + self.jinja2 = jinja2 + super(Jinja2, self).setup() + + def input(self, _in, out, **kw): + tpl_factory = self.jinja2_env.from_string if self.jinja2_env else self.jinja2.Template + out.write(tpl_factory(_in.read()).render(self.context or {})) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/jsmin.py b/pelican/plugins/webassets/vendor/webassets/filter/jsmin.py new file mode 100644 index 0000000..ff21d7f --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/jsmin.py @@ -0,0 +1,44 @@ +from __future__ import absolute_import +import warnings + +from webassets.filter import Filter + + +__all__ = ('JSMin',) + + +class JSMin(Filter): + """Minifies Javascript by removing whitespace, comments, etc. + + This filter uses a Python port of Douglas Crockford's `JSMin + `_, which needs + to be installed separately. + + There are actually multiple implementations available, for + example one by Baruch Even. Easiest to install via PyPI is + the one by Dave St. Germain:: + + $ pip install jsmin + + The filter is tested with this ``jsmin`` package from PyPI, + but will work with any module that exposes a + ``JavascriptMinify`` object with a ``minify`` method. + + If you want to avoid installing another dependency, use the + :class:`webassets.filter.rjsmin.RJSMin` filter instead. + """ + + name = 'jsmin' + + def setup(self): + import jsmin + self.jsmin = jsmin + + def output(self, _in, out, **kw): + if hasattr(self.jsmin, 'JavaScriptMinifier'): + # jsmin.py from v8 + minifier = self.jsmin.JavaScriptMinifier() + minified = minifier.JSMinify(_in.read()) + out.write(minified) + else: + self.jsmin.JavascriptMinify().minify(_in, out) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/jspacker/__init__.py b/pelican/plugins/webassets/vendor/webassets/filter/jspacker/__init__.py new file mode 100644 index 0000000..9fc56b8 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/jspacker/__init__.py @@ -0,0 +1,24 @@ +from .jspacker import JavaScriptPacker +from webassets.filter import Filter + + +__all__ = ('JSPacker',) + + +class JSPacker(Filter): + """Reduces the size of Javascript using an inline compression + algorithm, i.e. the script will be unpacked on the client side + by the browser. + + Based on Dean Edwards' `jspacker 2 `_, + as ported by Florian Schulze. + """ + # TODO: This could support options. 
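+    #
+    # Usage sketch (the output name is illustrative): the packed script
+    # decodes itself in the browser via the eval() bootstrap that
+    # JavaScriptPacker emits:
+    #
+    #     Bundle('js/app.js', filters='jspacker', output='gen/app.js')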
+ + name = 'jspacker' + + def output(self, _in, out, **kw): + out.write(JavaScriptPacker().pack(_in.read(), + compaction=False, + encoding=62, + fastDecode=True)) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/jspacker/jspacker.py b/pelican/plugins/webassets/vendor/webassets/filter/jspacker/jspacker.py new file mode 100644 index 0000000..e681531 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/jspacker/jspacker.py @@ -0,0 +1,577 @@ +from __future__ import print_function +## ParseMaster, version 1.0 (pre-release) (2005/05/12) x6 +## Copyright 2005, Dean Edwards +## Web: http://dean.edwards.name/ +## +## This software is licensed under the CC-GNU LGPL +## Web: http://creativecommons.org/licenses/LGPL/2.1/ +## +## Ported to Python by Florian Schulze + +import os, re +import sys +if sys.version < '3': + integer_types = (int, long,) +else: + integer_types = (int,) + +# a multi-pattern parser + +class Pattern: + def __init__(self, expression, replacement, length): + self.expression = expression + self.replacement = replacement + self.length = length + + def __str__(self): + return "(" + self.expression + ")" + +class Patterns(list): + def __str__(self): + return '|'.join([str(e) for e in self]) + +class ParseMaster: + # constants + EXPRESSION = 0 + REPLACEMENT = 1 + LENGTH = 2 + GROUPS = re.compile(r"""\(""", re.M)#g + SUB_REPLACE = re.compile(r"""\$\d""", re.M) + INDEXED = re.compile(r"""^\$\d+$""", re.M) + TRIM = re.compile(r"""(['"])\1\+(.*)\+\1\1$""", re.M) + ESCAPE = re.compile(r"""\\.""", re.M)#g + #QUOTE = re.compile(r"""'""", re.M) + DELETED = re.compile("""\x01[^\x01]*\x01""", re.M)#g + + def __init__(self): + # private + self._patterns = Patterns() # patterns stored by index + self._escaped = [] + self.ignoreCase = False + self.escapeChar = None + + def DELETE(self, match, offset): + return "\x01" + match.group(offset) + "\x01" + + def _repl(self, a, o, r, i): + while (i): + m = a.group(o+i-1) + if m is None: + s = "" + else: + s = m + r = r.replace("$" + str(i), s) + i = i - 1 + r = ParseMaster.TRIM.sub("$1", r) + return r + + # public + def add(self, expression="^$", replacement=None): + if replacement is None: + replacement = self.DELETE + # count the number of sub-expressions + # - add one because each pattern is itself a sub-expression + length = len(ParseMaster.GROUPS.findall(self._internalEscape(str(expression)))) + 1 + # does the pattern deal with sub-expressions? + if (isinstance(replacement, str) and ParseMaster.SUB_REPLACE.match(replacement)): + # a simple lookup? (e.g. "$2") + if (ParseMaster.INDEXED.match(replacement)): + # store the index (used for fast retrieval of matched strings) + replacement = int(replacement[1:]) - 1 + else: # a complicated lookup (e.g. 
"Hello $2 $1") + # build a function to do the lookup + i = length + r = replacement + replacement = lambda a,o: self._repl(a,o,r,i) + # pass the modified arguments + self._patterns.append(Pattern(expression, replacement, length)) + + # execute the global replacement + def execute(self, string): + if self.ignoreCase: + r = re.compile(str(self._patterns), re.I | re.M) + else: + r = re.compile(str(self._patterns), re.M) + string = self._escape(string, self.escapeChar) + string = r.sub(self._replacement, string) + string = self._unescape(string, self.escapeChar) + string = ParseMaster.DELETED.sub("", string) + return string + + # clear the patterns collections so that this object may be re-used + def reset(self): + self._patterns = Patterns() + + # this is the global replace function (it's quite complicated) + def _replacement(self, match): + i = 1 + # loop through the patterns + for pattern in self._patterns: + if match.group(i) is not None: + replacement = pattern.replacement + if callable(replacement): + return replacement(match, i) + elif isinstance(replacement, integer_types): + return match.group(replacement+i) + else: + return replacement + else: + i = i+pattern.length + + # encode escaped characters + def _escape(self, string, escapeChar=None): + def repl(match): + char = match.group(1) + self._escaped.append(char) + return escapeChar + if escapeChar is None: + return string + r = re.compile("\\"+escapeChar+"(.)", re.M) + result = r.sub(repl, string) + return result + + # decode escaped characters + def _unescape(self, string, escapeChar=None): + def repl(match): + try: + #result = eval("'"+escapeChar + self._escaped.pop(0)+"'") + result = escapeChar + self._escaped.pop(0) + return result + except IndexError: + return escapeChar + if escapeChar is None: + return string + r = re.compile("\\"+escapeChar, re.M) + result = r.sub(repl, string) + return result + + def _internalEscape(self, string): + return ParseMaster.ESCAPE.sub("", string) + + +## packer, version 2.0 (2005/04/20) +## Copyright 2004-2005, Dean Edwards +## License: http://creativecommons.org/licenses/LGPL/2.1/ + +## Ported to Python by Florian Schulze + +## http://dean.edwards.name/packer/ + +class JavaScriptPacker: + def __init__(self): + self._basicCompressionParseMaster = self.getCompressionParseMaster(False) + self._specialCompressionParseMaster = self.getCompressionParseMaster(True) + + def basicCompression(self, script): + return self._basicCompressionParseMaster.execute(script) + + def specialCompression(self, script): + return self._specialCompressionParseMaster.execute(script) + + def getCompressionParseMaster(self, specialChars): + IGNORE = "$1" + parser = ParseMaster() + parser.escapeChar = '\\' + # protect strings + parser.add(r"""'[^']*?'""", IGNORE) + parser.add(r'"[^"]*?"', IGNORE) + # remove comments + parser.add(r"""//[^\n\r]*?[\n\r]""") + parser.add(r"""/\*[^*]*?\*+([^/][^*]*?\*+)*?/""") + # protect regular expressions + parser.add(r"""\s+(\/[^\/\n\r\*][^\/\n\r]*\/g?i?)""", "$2") + parser.add(r"""[^\w\$\/'"*)\?:]\/[^\/\n\r\*][^\/\n\r]*\/g?i?""", IGNORE) + # remove: ;;; doSomething(); + if specialChars: + parser.add(""";;;[^\n\r]+[\n\r]""") + # remove redundant semi-colons + parser.add(r""";+\s*([};])""", "$2") + # remove white-space + parser.add(r"""(\b|\$)\s+(\b|\$)""", "$2 $3") + parser.add(r"""([+\-])\s+([+\-])""", "$2 $3") + parser.add(r"""\s+""", "") + return parser + + def getEncoder(self, ascii): + mapping = {} + base = ord('0') + mapping.update(dict([(i, chr(i+base)) for i in range(10)])) + base = 
ord('a') + mapping.update(dict([(i+10, chr(i+base)) for i in range(26)])) + base = ord('A') + mapping.update(dict([(i+36, chr(i+base)) for i in range(26)])) + base = 161 + mapping.update(dict([(i+62, chr(i+base)) for i in range(95)])) + + # zero encoding + # characters: 0123456789 + def encode10(charCode): + return str(charCode) + + # inherent base36 support + # characters: 0123456789abcdefghijklmnopqrstuvwxyz + def encode36(charCode): + l = [] + remainder = charCode + while 1: + result, remainder = divmod(remainder, 36) + l.append(mapping[remainder]) + if not result: + break + remainder = result + l.reverse() + return "".join(l) + + # hitch a ride on base36 and add the upper case alpha characters + # characters: 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ + def encode62(charCode): + l = [] + remainder = charCode + while 1: + result, remainder = divmod(remainder, 62) + l.append(mapping[remainder]) + if not result: + break + remainder = result + l.reverse() + return "".join(l) + + # use high-ascii values + def encode95(charCode): + l = [] + remainder = charCode + while 1: + result, remainder = divmod(remainder, 95) + l.append(mapping[remainder+62]) + if not result: + break + remainder = result + l.reverse() + return "".join(l) + + if ascii <= 10: + return encode10 + elif ascii <= 36: + return encode36 + elif ascii <= 62: + return encode62 + return encode95 + + def escape(self, script): + script = script.replace("\\","\\\\") + script = script.replace("'","\\'") + script = script.replace('\n','\\n') + #return re.sub(r"""([\\'](?!\n))""", "\\$1", script) + return script + + def escape95(self, script): + result = [] + for x in script: + if x>'\xa1': + x = "\\x%0x" % ord(x) + result.append(x) + return "".join(result) + + def encodeKeywords(self, script, encoding, fastDecode): + # escape high-ascii values already in the script (i.e. in strings) + if (encoding > 62): + script = self.escape95(script) + # create the parser + parser = ParseMaster() + encode = self.getEncoder(encoding) + # for high-ascii, don't encode single character low-ascii + if encoding > 62: + regexp = r"""\w\w+""" + else: + regexp = r"""\w+""" + # build the word list + keywords = self.analyze(script, regexp, encode) + encoded = keywords['encoded'] + # encode + def repl(match, offset): + return encoded.get(match.group(offset), "") + parser.add(regexp, repl) + # if encoded, wrap the script in a decoding function + script = parser.execute(script) + script = self.bootStrap(script, keywords, encoding, fastDecode) + return script + + def analyze(self, script, regexp, encode): + # analyse + # retrieve all words in the script + regexp = re.compile(regexp, re.M) + all = regexp.findall(script) + sorted = [] # list of words sorted by frequency + encoded = {} # dictionary of word->encoding + protected = {} # instances of "protected" words + if all: + unsorted = [] + _protected = {} + values = {} + count = {} + all.reverse() + for word in all: + word = "$"+word + if word not in count: + count[word] = 0 + j = len(unsorted) + unsorted.append(word) + # make a dictionary of all of the protected words in this script + # these are words that might be mistaken for encoding + values[j] = encode(j) + _protected["$"+values[j]] = j + count[word] = count[word] + 1 + # prepare to sort the word list, first we must protect + # words that are also used as codes. we assign them a code + # equivalent to the word itself. + # e.g. 
if "do" falls within our encoding range + # then we store keywords["do"] = "do"; + # this avoids problems when decoding + sorted = [None] * len(unsorted) + for word in unsorted: + if word in _protected and isinstance(_protected[word], int): + sorted[_protected[word]] = word[1:] + protected[_protected[word]] = True + count[word] = 0 + unsorted.sort(key=lambda a: count[a]) + j = 0 + for i in range(len(sorted)): + if sorted[i] is None: + sorted[i] = unsorted[j][1:] + j = j + 1 + encoded[sorted[i]] = values[i] + return {'sorted': sorted, 'encoded': encoded, 'protected': protected} + + def encodePrivate(self, charCode): + return "_"+str(charCode) + + def encodeSpecialChars(self, script): + parser = ParseMaster() + # replace: $name -> n, $$name -> $$na + def repl(match, offset): + #print offset, match.groups() + length = len(match.group(offset + 2)) + start = length - max(length - len(match.group(offset + 3)), 0) + return match.group(offset + 1)[start:start+length] + match.group(offset + 4) + parser.add(r"""((\$+)([a-zA-Z\$_]+))(\d*)""", repl) + # replace: _name -> _0, double-underscore (__name) is ignored + regexp = r"""\b_[A-Za-z\d]\w*""" + # build the word list + keywords = self.analyze(script, regexp, self.encodePrivate) + # quick ref + encoded = keywords['encoded'] + def repl(match, offset): + return encoded.get(match.group(offset), "") + parser.add(regexp, repl) + return parser.execute(script) + + # build the boot function used for loading and decoding + def bootStrap(self, packed, keywords, encoding, fastDecode): + ENCODE = re.compile(r"""\$encode\(\$count\)""") + # $packed: the packed script + #packed = self.escape(packed) + #packed = [packed[x*10000:(x+1)*10000] for x in range((len(packed)/10000)+1)] + #packed = "'" + "'+\n'".join(packed) + "'\n" + packed = "'" + self.escape(packed) + "'" + + # $count: number of words contained in the script + count = len(keywords['sorted']) + + # $ascii: base for encoding + ascii = min(count, encoding) or 1 + + # $keywords: list of words contained in the script + for i in keywords['protected']: + keywords['sorted'][i] = "" + # convert from a string to an array + keywords = "'" + "|".join(keywords['sorted']) + "'.split('|')" + + encoding_functions = { + 10: """ function($charCode) { + return $charCode; + }""", + 36: """ function($charCode) { + return $charCode.toString(36); + }""", + 62: """ function($charCode) { + return ($charCode < _encoding ? "" : arguments.callee(parseInt($charCode / _encoding))) + + (($charCode = $charCode % _encoding) > 35 ? String.fromCharCode($charCode + 29) : $charCode.toString(36)); + }""", + 95: """ function($charCode) { + return ($charCode < _encoding ? "" : arguments.callee($charCode / _encoding)) + + String.fromCharCode($charCode % _encoding + 161); + }""" + } + + # $encode: encoding function (used for decoding the script) + encode = encoding_functions[encoding] + encode = encode.replace('_encoding',"$ascii") + encode = encode.replace('arguments.callee', "$encode") + if ascii > 10: + inline = "$count.toString($ascii)" + else: + inline = "$count" + # $decode: code snippet to speed up decoding + if fastDecode: + # create the decoder + decode = r"""// does the browser support String.replace where the + // replacement value is a function? 
+ if (!''.replace(/^/, String)) { + // decode all the values we need + while ($count--) $decode[$encode($count)] = $keywords[$count] || $encode($count); + // global replacement function + $keywords = [function($encoded){return $decode[$encoded]}]; + // generic match + $encode = function(){return'\\w+'}; + // reset the loop counter - we are now doing a global replace + $count = 1; + }""" + if encoding > 62: + decode = decode.replace('\\\\w', "[\\xa1-\\xff]") + else: + # perform the encoding inline for lower ascii values + if ascii < 36: + decode = ENCODE.sub(inline, decode) + # special case: when $count==0 there ar no keywords. i want to keep + # the basic shape of the unpacking function so i'll frig the code... + if not count: + raise NotImplemented + #) $decode = $decode.replace(/(\$count)\s*=\s*1/, "$1=0"); + + + # boot function + unpack = r"""function($packed, $ascii, $count, $keywords, $encode, $decode) { + while ($count--) + if ($keywords[$count]) + $packed = $packed.replace(new RegExp("\\b" + $encode($count) + "\\b", "g"), $keywords[$count]); + return $packed; + }""" + if fastDecode: + # insert the decoder + #unpack = re.sub(r"""\{""", "{" + decode + ";", unpack) + unpack = unpack.replace('{', "{" + decode + ";", 1) + + if encoding > 62: # high-ascii + # get rid of the word-boundaries for regexp matches + unpack = re.sub(r"""'\\\\b'\s*\+|\+\s*'\\\\b'""", "", unpack) + if ascii > 36 or encoding > 62 or fastDecode: + # insert the encode function + #unpack = re.sub(r"""\{""", "{$encode=" + encode + ";", unpack) + unpack = unpack.replace('{', "{$encode=" + encode + ";", 1) + else: + # perform the encoding inline + unpack = ENCODE.sub(inline, unpack) + # pack the boot function too + unpack = self.pack(unpack, 0, False, True) + + # arguments + params = [packed, str(ascii), str(count), keywords] + if fastDecode: + # insert placeholders for the decoder + params.extend(['0', "{}"]) + + # the whole thing + return "eval(" + unpack + "(" + ",".join(params) + "))\n"; + + def pack(self, script, encoding=0, fastDecode=False, specialChars=False, compaction=True): + script = script+"\n" + self._encoding = encoding + self._fastDecode = fastDecode + if specialChars: + script = self.specialCompression(script) + script = self.encodeSpecialChars(script) + else: + if compaction: + script = self.basicCompression(script) + if encoding: + script = self.encodeKeywords(script, encoding, fastDecode) + return script + +def run(): + p = JavaScriptPacker() + script = open('test_plone.js').read() + result = p.pack(script, compaction=False, encoding=62, fastDecode=True) + open('output.js','w').write(result) + +def run1(): + + test_scripts = [] + + test_scripts.append(("""// ----------------------------------------------------------------------- +// public interface +// ----------------------------------------------------------------------- + +cssQuery.toString = function() { + return "function cssQuery() {\n [version " + version + "]\n}"; +};""", 0, False, False, """cssQuery.toString=function(){return"function cssQuery() {\n [version "+version+"]\n}"};""")) + + test_scripts.append(("""function test(_localvar) { + var $name = 'foo'; + var $$dummy = 2; + + return $name + $$dummy; +}""", 0, False, True, """function test(_0){var n='foo';var du=2;return n+du}""")) + + test_scripts.append(("""function _test($localvar) { + var $name = 1; + var _dummy = 2; + var __foo = 3; + + return $name + _dummy + $localvar + __foo; +}""", 0, False, True, """function _1(l){var n=1;var _0=2;var __foo=3;return n+_0+l+__foo}""")) + + 
test_scripts.append(("""function _test($localvar) { + var $name = 1; + var _dummy = 2; + var __foo = 3; + + return $name + _dummy + $localvar + __foo; +} + +function _bar(_ocalvar) { + var $name = 1; + var _dummy = 2; + var __foo = 3; + + return $name + _dummy + $localvar + __foo; +}""", 0, False, True, """function _3(l){var n=1;var _0=2;var __foo=3;return n+_0+l+__foo}function _2(_1){var n=1;var _0=2;var __foo=3;return n+_0+l+__foo}""")) + + test_scripts.append(("cssQuery1.js", 0, False, False, "cssQuery1-p1.js")) + test_scripts.append(("cssQuery.js", 0, False, False, "cssQuery-p1.js")) + test_scripts.append(("pack.js", 0, False, False, "pack-p1.js")) + test_scripts.append(("cssQuery.js", 0, False, True, "cssQuery-p2.js")) + # the following ones are different, because javascript might use an + # unstable sort algorithm while python uses an stable sort algorithm + test_scripts.append(("pack.js", 0, False, True, "pack-p2.js")) + test_scripts.append(("test.js", 0, False, True, """function _4(l){var n=1;var _0=2;var __foo=3;return n+_0+l+__foo}function _3(_1){var n=1;var _2=2;var __foo=3;return n+_2+l+__foo}""")) + test_scripts.append(("test.js", 10, False, False, """eval(function(p,a,c,k,e,d){while(c--){if(k[c]){p=p.replace(new RegExp("\\b"+e(c)+"\\b","g"),k[c])}}return p}('8 13($6){0 $4=1;0 7=2;0 5=3;9 $4+7+$6+5}8 11(12){0 $4=1;0 10=2;0 5=3;9 $4+10+$6+5}',10,14,'var||||name|__foo|localvar|_dummy|function|return|_2|_bar|_ocalvar|_test'.split('|'))) +""")) + test_scripts.append(("test.js", 62, False, False, """eval(function(p,a,c,k,e,d){while(c--){if(k[c]){p=p.replace(new RegExp("\\b"+e(c)+"\\b","g"),k[c])}}return p}('8 d($6){0 $4=1;0 7=2;0 5=3;9 $4+7+$6+5}8 b(c){0 $4=1;0 a=2;0 5=3;9 $4+a+$6+5}',14,14,'var||||name|__foo|localvar|_dummy|function|return|_2|_bar|_ocalvar|_test'.split('|'))) +""")) + test_scripts.append(("test.js", 95, False, False, "test-p4.js")) + test_scripts.append(("cssQuery.js", 0, False, True, "cssQuery-p3.js")) + test_scripts.append(("cssQuery.js", 62, False, True, "cssQuery-p4.js")) + + import difflib + p = JavaScriptPacker() + for script, encoding, fastDecode, specialChars, expected in test_scripts: + if os.path.exists(script): + _script = open(script).read() + else: + _script = script + if os.path.exists(expected): + _expected = open(expected).read() + else: + _expected = expected + print(script[:20], encoding, fastDecode, specialChars, expected[:20]) + print("="*40) + result = p.pack(_script, encoding, fastDecode, specialChars) + print(len(result), len(_script)) + if (result != _expected): + print("ERROR!!!!!!!!!!!!!!!!") + print(_expected) + print(result) + #print list(difflib.unified_diff(result, _expected)) + +if __name__=='__main__': + run() diff --git a/pelican/plugins/webassets/vendor/webassets/filter/jst.py b/pelican/plugins/webassets/vendor/webassets/filter/jst.py new file mode 100644 index 0000000..101fedc --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/jst.py @@ -0,0 +1,190 @@ +import os +import re +try: + import json +except ImportError: + import simplejson as json +from webassets.filter import Filter +from webassets.utils import common_path_prefix + + +__all__ = ('JST',) + + +class JSTemplateFilter(Filter): + """Common base class for the JST and Handlebars filters, and + possibly other Javascript templating systems in the future. 
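+
+    A minimal (hypothetical) subclass only needs to override
+    ``process_templates()``, e.g. one that emits each template as a
+    commented raw string::
+
+        class RawJST(JSTemplateFilter):
+            name = 'rawjst'
+
+            def process_templates(self, out, hunks, **kw):
+                for name, hunk in self.iter_templates_with_base(hunks):
+                    out.write('/* %s */\n%s\n' % (name, hunk.data()))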
+    """
+
+    def concat(self, out, hunks, **kwargs):
+        self.process_templates(out, hunks, **kwargs)
+
+    def process_templates(self, out, hunks, **kw):
+        raise NotImplementedError()
+
+    def iter_templates_with_base(self, hunks):
+        """Helper that, for a list of ``hunks`` as given to
+        ``concat()``, yields 2-tuples of (name, hunk), with name
+        being the name of the source file relative to the common
+        prefix of all source files.
+
+        In other words, each template gets the shortest possible
+        name to identify it.
+        """
+        base_path = self._find_base_path(
+            [info['source_path'] for _, info in hunks]) + os.path.sep
+        for hunk, info in hunks:
+            name = info['source_path']
+            name = name[len(base_path):]
+            name = os.path.splitext(name)[0]
+            yield name, hunk
+
+    def _find_base_path(self, paths):
+        """Hmmm. There should always be some common base path."""
+        if len(paths) == 1:
+            return os.path.dirname(paths[0])
+        return common_path_prefix(paths)
+
+
+class JST(JSTemplateFilter):
+    """This filter processes generic JavaScript templates. It will generate
+    JavaScript code that runs all files through a template compiler, and makes
+    the templates available as an object.
+
+    It was inspired by `Jammit`_.
+
+    For example, if you have a file named ``license.jst``:
+
+    .. code-block:: html
+
+        <div>
+            <p>Name: <%= name %></p>
+            <p>Hometown: <%= birthplace %></p>
+        </div>
+ + Then, after applying this filter, you could use the template in JavaScript: + + .. code-block:: javascript + + JST.license({name : "Moe", birthplace : "Brooklyn"}); + + The name of each template is derived from the filename. If your JST files + are spread over different directories, the path up to the common prefix + will be included. For example:: + + Bundle('templates/app1/license.jst', 'templates/app2/profile.jst', + filters='jst') + + will make the templates available as ``app1/license`` and ``app2/profile``. + + .. note:: + The filter is "generic" in the sense that it does not actually compile + the templates, but wraps them in a JavaScript function call, and can + thus be used with any template language. webassets also has filters + for specific JavaScript template languages like + :class:`~.filter.dust.DustJS` or + :class:`~.filter.handlebars.Handlebars`, and those filters precompile + the templates on the server, which means a performance boost on the + client-side. + + Unless configured otherwise, the filter will use the same micro-templating + language that `Jammit`_ uses, which is turn is the same one that is + available in `underscore.js`_. The JavaScript code necessary to compile + such templates will implicitly be included in the filter output. + + *Supported configuration options:* + + JST_COMPILER (template_function) + A string that is inserted into the generated JavaScript code in place + of the function to be called that should do the compiling. Unless you + specify a custom function here, the filter will include the JavaScript + code of it's own micro-templating language, which is the one used by + `underscore.js`_ and `Jammit`_. + + If you assign a custom function, it is your responsibility to ensure + that it is available in your final JavaScript. + + If this option is set to ``False``, then the template strings will be + output directly, which is to say, ``JST.foo`` will be a string holding + the raw source of the ``foo`` template. + + JST_NAMESPACE (namespace) + How the templates should be made available in JavaScript. Defaults to + ``window.JST``, which gives you a global ``JST`` object. + + JST_BARE (bare) + Whether everything generated by this filter should be wrapped inside + an anonymous function. Default to ``False``. + + .. note:: + + If you enable this option, the namespace must be a property + of the ``window`` object, or you won't be able to access the + templates. + + JST_DIR_SEPARATOR (separator) + The separator character to use for templates within directories. + Defaults to '/' + + .. _Jammit: + .. 
_underscore.js: http://documentcloud.github.com/underscore/#template + """ + name = 'jst' + options = { + # The JavaScript compiler function to use + 'template_function': 'JST_COMPILER', + # The JavaScript namespace to put templates in + 'namespace': 'JST_NAMESPACE', + # Wrap everything in a closure + 'bare': 'JST_BARE', + # The path separator to use with templates in different directories + 'separator': 'JST_DIR_SEPARATOR' + } + max_debug_level = None + + def setup(self): + super(JST, self).setup() + self.include_jst_script = (self.template_function == 'template') \ + or self.template_function is None + + def process_templates(self, out, hunks, **kwargs): + namespace = self.namespace or 'window.JST' + + if self.bare is False: + out.write("(function(){\n") + + out.write("%s = %s || {};\n" % (namespace, namespace)) + + if self.include_jst_script: + out.write("%s\n" % _jst_script) + + for name, hunk in self.iter_templates_with_base(hunks): + # Make it a valid Javascript string. + contents = json.dumps(hunk.data()) + + out.write("%s['%s'] = " % (namespace, self._get_jst_name(name))) + if self.template_function is False: + out.write("%s;\n" % (contents)) + else: + out.write("%s(%s);\n" % ( + self.template_function or 'template', contents)) + + if self.bare is False: + out.write("})();") + + def _get_jst_name(self, name): + """Return the name for the JST with any path separators normalised""" + return _path_separator_re.sub(self.separator or "/", name) + + +_path_separator_re = re.compile(r'[/\\]+') + +_jst_script = 'var template = function(str){var fn = new Function(\'obj\', \'var \ +__p=[],print=function(){__p.push.apply(__p,arguments);};\ +with(obj||{}){__p.push(\\\'\'+str.replace(/\\\\/g, \'\\\\\\\\\')\ +.replace(/\'/g, "\\\\\'").replace(/<%=([\\s\\S]+?)%>/g,\ +function(match,code){return "\',"+code.replace(/\\\\\'/g, "\'")+",\'";})\ +.replace(/<%([\\s\\S]+?)%>/g,function(match,code){return "\');"+code\ +.replace(/\\\\\'/g, "\'").replace(/[\\r\\n\\t]/g,\' \')+"__p.push(\'";})\ +.replace(/\\r/g,\'\\\\r\').replace(/\\n/g,\'\\\\n\')\ +.replace(/\\t/g,\'\\\\t\')+"\');}return __p.join(\'\');");return fn;};' diff --git a/pelican/plugins/webassets/vendor/webassets/filter/less.py b/pelican/plugins/webassets/vendor/webassets/filter/less.py new file mode 100644 index 0000000..c228e3e --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/less.py @@ -0,0 +1,145 @@ +from __future__ import with_statement + +import os + +from webassets.filter import ExternalTool +from webassets.utils import working_directory + + +class Less(ExternalTool): + """Converts `less `_ markup to real CSS. + + This depends on the NodeJS implementation of less, installable via npm. + To use the old Ruby-based version (implemented in the 1.x Ruby gem), see + :class:`~.less_ruby.Less`. + + *Supported configuration options*: + + LESS_BIN (binary) + Path to the less executable used to compile source files. By default, + the filter will attempt to run ``lessc`` via the system path. + + LESS_LINE_NUMBERS (line_numbers) + Outputs filename and line numbers. Can be either 'comments', which + will output the debug info within comments, 'mediaquery' that will + output the information within a fake media query which is compatible + with the SASSPath to the less executable used to compile source files. + + LESS_RUN_IN_DEBUG (run_in_debug) + By default, the filter will compile in debug mode. 
Since the less
+        compiler is written in Javascript and capable of running in the
+        browser, you can set this to ``False`` to have your original less
+        source files served (see below).
+
+    LESS_PATHS (paths)
+        Add include paths for the less command line.
+        It should be a list of paths relative to Environment.directory,
+        or absolute paths.
+        Order matters, as less will pick the first file found in path order.
+
+    LESS_AS_OUTPUT (boolean)
+        By default, this works as an "input filter", meaning ``less`` is
+        called for each source file in the bundle. This is because the
+        path of the source file is required so that @import directives
+        within the Less file can be correctly resolved.
+
+        However, it is possible to use this filter as an "output filter",
+        meaning the source files will first be concatenated, and then the
+        Less filter is applied in one go. This can provide a speedup for
+        bigger projects.
+
+    .. admonition:: Compiling less in the browser
+
+        less is an interesting case because it is written in Javascript and
+        capable of running in the browser. While for performance reasons you
+        should prebuild your stylesheets in production, during development
+        you may be interested in serving the original less files to the
+        client and having less compile them in the browser.
+
+        To do so, you first need to make sure the less filter is not applied
+        when :attr:`Environment.debug` is ``True``. You can do so via an
+        option::
+
+            env.config['less_run_in_debug'] = False
+
+        Second, in order for less to identify the less source files as
+        needing to be compiled, they have to be referenced with a
+        ``rel="stylesheet/less"`` attribute. One way to do this is to use the
+        :attr:`Bundle.extra` dictionary, which works well with the template
+        tags that webassets provides for some template languages::
+
+            less_bundle = Bundle(
+                '**/*.less',
+                filters='less',
+                extra={'rel': 'stylesheet/less' if env.debug else 'stylesheet'}
+            )
+
+        Then, for example in a Jinja2 template, you would write::
+
+            {% assets less_bundle %}
+                <link rel="{{ EXTRA.rel }}" type="text/css" href="{{ ASSET_URL }}">
+            {% endassets %}
+
+        With this, the ``<link>`` tag will sport the correct ``rel`` value both
+        in development and in production.
+
+        Finally, you need to include the less compiler::
+
+            if env.debug:
+                js_bundle.contents += 'http://lesscss.googlecode.com/files/less-1.3.0.min.js'
+    """
+
+    name = 'less'
+    options = {
+        'less': ('binary', 'LESS_BIN'),
+        'run_in_debug': 'LESS_RUN_IN_DEBUG',
+        'line_numbers': 'LESS_LINE_NUMBERS',
+        'extra_args': 'LESS_EXTRA_ARGS',
+        'paths': 'LESS_PATHS',
+        'as_output': 'LESS_AS_OUTPUT'
+    }
+    max_debug_level = None
+
+    def setup(self):
+        super(Less, self).setup()
+        if self.run_in_debug is False:
+            # Disable running in debug mode for this instance.
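+            # (a max_debug_level of False restricts this filter to
+            # production builds, i.e. it is skipped while debug is on)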
+ self.max_debug_level = False + + def resolve_source(self, path): + return self.ctx.resolver.resolve_source(self.ctx, path) + + def _apply_less(self, in_, out, source_path=None, **kw): + # Set working directory to the source file so that includes are found + args = self.parse_binary(self.less or 'lessc') + if self.line_numbers: + args.append('--line-numbers=%s' % self.line_numbers) + + if self.paths: + paths = [ + path if os.path.isabs(path) else self.resolve_source(path) + for path in self.paths + ] + args.append('--include-path={0}'.format(os.pathsep.join(paths))) + + if self.extra_args: + args.extend(self.extra_args) + + args.append('-') + + if source_path: + with working_directory(filename=source_path): + self.subprocess(args, out, in_) + else: + self.subprocess(args, out, in_) + + def input(self, _in, out, source_path, output_path, **kw): + if self.as_output: + out.write(_in.read()) + else: + self._apply_less(_in, out, source_path) + + def output(self, _in, out, **kwargs): + if not self.as_output: + out.write(_in.read()) + else: + self._apply_less(_in, out) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/less_ruby.py b/pelican/plugins/webassets/vendor/webassets/filter/less_ruby.py new file mode 100644 index 0000000..b85e8cc --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/less_ruby.py @@ -0,0 +1,84 @@ +import time +import os, subprocess +import tempfile + +from webassets.filter import Filter +from webassets.exceptions import FilterError + + +__all__ = ('Less',) + + +class Less(Filter): + """Converts `Less `_ markup to real CSS. + + This uses the old Ruby implementation available in the 1.x versions of the + less gem. All 2.x versions of the gem are wrappers around the newer + NodeJS/Javascript implementation, which you are generally encouraged to + use, and which is available in webassets via the :class:`~.filter.less.Less` + filter. + + This filter for the Ruby version is being kept around for + backwards-compatibility. + + *Supported configuration options*: + + LESS_RUBY_PATH (binary) + Path to the less executable used to compile source files. By default, + the filter will attempt to run ``lessc`` via the system path. + """ + + # XXX Deprecate this one. + """ + XXX: Depending on how less is actually used in practice, it might actually + be a valid use case to NOT have this be a source filter, so that one can + split the css files into various less files, referencing variables in other + files' - without using @include, instead having them merged together by + django-assets. This will currently not work because we compile each + file separately, and the compiler would fail at undefined variables. + """ + + name = 'less_ruby' + options = { + 'less': ('binary', 'LESS_RUBY_PATH') + } + max_debug_level = None + + def open(self, out, sourcePath, **kw): + """Less currently doesn't take data from stdin, and doesn't allow + us from stdout either. Neither does it return a proper non-0 error + code when an error occurs, or even write to stderr (stdout instead)! + + Hopefully this will improve in the future: + + http://groups.google.com/group/lesscss/browse_thread/thread/3aed033a44c51b4c/b713148afde87e81 + """ + # TODO: Use NamedTemporaryFile. + outtemp_name = os.path.join(tempfile.gettempdir(), + 'assets_temp_%d.css' % int(time.time())) + + proc = subprocess.Popen( + [self.less or 'lessc', sourcePath, outtemp_name], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + # shell: necessary on windows to execute + # ruby files, but doesn't work on linux. 
+ shell=(os.name == 'nt')) + stdout, stderr = proc.communicate() + + # less only writes to stdout, as noted in the method doc, but + # check everything anyway. + if stdout or stderr or proc.returncode != 0: + if os.path.exists(outtemp_name): + os.unlink(outtemp_name) + raise FilterError(('less: subprocess had error: stderr=%s, '+ + 'stdout=%s, returncode=%s') % ( + stderr, stdout, proc.returncode)) + + outtemp = open(outtemp_name) + try: + out.write(outtemp.read()) + finally: + outtemp.close() + + os.unlink(outtemp_name) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/libsass.py b/pelican/plugins/webassets/vendor/webassets/filter/libsass.py new file mode 100644 index 0000000..6867be3 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/libsass.py @@ -0,0 +1,104 @@ +# coding: utf-8 + +""" +This filter based on Jesús Jerez code [1]. + +[1] https://bitbucket.org/jhuss/webassets-libsass +""" + +from __future__ import print_function +from __future__ import absolute_import + +from webassets.filter import Filter + + +__all__ = ('LibSass',) + + +class LibSass(Filter): + """Converts `Sass `_ markup to real CSS. + + Requires the ``libsass`` package (https://pypi.python.org/pypi/libsass):: + + pip install libsass + + `libsass `_ is binding to C/C++ + implementation of a Sass compiler `Libsass + `_ + + *Configuration options:* + + LIBSASS_STYLE (style) + an optional coding style of the compiled result. choose one of: + `nested` (default), `expanded`, `compact`, `compressed` + + LIBSASS_INCLUDES (includes) + an optional list of paths to find @imported SASS/CSS source files + + LIBSASS_AS_OUTPUT + use this filter as an "output filter", meaning the source files + will first be concatenated, and then the Sass filter is applied. + + See libsass documentation for full documentation about these configuration + options: + + http://hongminhee.org/libsass-python/sass.html#sass.compile + + *Example:* + + Define a bundle for ``style.scss`` that contains ``@imports`` to files in + subfolders: + + .. 
code-block:: python + + Bundle('style.scss', filters='libsass', output='style.css', depends='**/*.scss') + + """ + name = 'libsass' + options = { + 'style': 'LIBSASS_STYLE', + 'includes': 'LIBSASS_INCLUDES', + 'as_output': 'LIBSASS_AS_OUTPUT', + } + max_debug_level = None + + def _apply_sass(self, _in, out): + args = dict( + output_style=self.style, + include_paths=(self.includes if self.includes else []) + ) + + if self.as_output: + args['string'] = _in.read() + else: + args['filename'] = _in + + out.write( + # http://hongminhee.org/libsass-python/sass.html#sass.compile + self.sass.compile(**args) + ) + + def setup(self): + super(LibSass, self).setup() + + try: + import sass + except ImportError: + raise EnvironmentError('The "libsass" package is not installed.') + else: + self.sass = sass + + if not self.style: + self.style = 'nested' + + def input(self, _in, out, source_path, **kwargs): + if self.as_output: + out.write(_in.read()) + else: + self._apply_sass(source_path, out) + + def output(self, _in, out, **kwargs): + if not self.as_output: + out.write(_in.read()) + else: + self._apply_sass(_in, out) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/node_sass.py b/pelican/plugins/webassets/vendor/webassets/filter/node_sass.py new file mode 100644 index 0000000..cbb38a0 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/node_sass.py @@ -0,0 +1,105 @@ +import os +import subprocess + +from webassets.exceptions import FilterError + +from .sass import Sass + + +__all__ = ('NodeSass', ) + + +class NodeSass(Sass): + """Converts `Scss `_ markup to real CSS. + + This uses node-sass which is a wrapper around libsass. + + This is an alternative to using the ``sass`` or ``scss`` filters, + which are based on the original, external tools. + + *Supported configuration options:* + + NODE_SASS_DEBUG_INFO (debug_info) + Include debug information in the output + + If unset, the default value will depend on your + :attr:`Environment.debug` setting. + + NODE_SASS_LOAD_PATHS (load_paths) + Additional load paths that node-sass should use. + + NODE_SASS_STYLE (style) + The style of the output CSS. Can be one of ``nested`` (default), + ``compact``, ``compressed``, or ``expanded``. + + NODE_SASS_CLI_ARGS (cli_args) + Additional cli arguments + """ + + name = 'node-sass' + options = { + 'binary': 'NODE_SASS_BIN', + 'debug_info': 'NODE_SASS_DEBUG_INFO', + 'use_scss': ('scss', 'NODE_SASS_USE_SCSS'), + 'as_output': 'NODE_SASS_AS_OUTPUT', + 'load_paths': 'NODE_SASS_LOAD_PATHS', + 'style': 'NODE_SASS_STYLE', + 'cli_args': 'NODE_SASS_CLI_ARGS', + } + max_debug_level = None + + def _apply_sass(self, _in, out, cd=None): + # Switch to source file directory if asked, so that this directory + # is by default on the load path. We could pass it via --include-paths, but then + # files in the (undefined) wd could shadow the correct files. + old_dir = os.getcwd() + if cd: + os.chdir(cd) + + try: + args = [self.binary or 'node-sass', + '--output-style', self.style or 'expanded'] + + if not self.use_scss: + args.append("--indented-syntax") + + if (self.ctx.environment.debug if self.debug_info is None else self.debug_info): + args.append('--debug-info') + for path in self.load_paths or []: + args.extend(['--include-path', path]) + + if (self.cli_args): + args.extend(self.cli_args) + + proc = subprocess.Popen(args, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + # shell: necessary on windows to execute + # ruby files, but doesn't work on linux. 
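+                                   # (node-sass itself is not ruby; on
+                                   # windows this resolves npm's .cmd wrapper)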
+ shell=(os.name == 'nt')) + stdout, stderr = proc.communicate(_in.read().encode('utf-8')) + + if proc.returncode != 0: + raise FilterError(('sass: subprocess had error: stderr=%s, '+ + 'stdout=%s, returncode=%s') % ( + stderr, stdout, proc.returncode)) + elif stderr: + print("node-sass filter has warnings:", stderr) + + out.write(stdout.decode('utf-8')) + finally: + if cd: + os.chdir(old_dir) + + +class NodeSCSS(NodeSass): + """Version of the ``node-sass`` filter that uses the SCSS syntax. + """ + + name = 'node-scss' + + def __init__(self, *a, **kw): + assert not 'scss' in kw + kw['scss'] = True + super(NodeSCSS, self).__init__(*a, **kw) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/postcss.py b/pelican/plugins/webassets/vendor/webassets/filter/postcss.py new file mode 100644 index 0000000..479402b --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/postcss.py @@ -0,0 +1,58 @@ +from __future__ import with_statement + +from webassets.filter import ExternalTool +from webassets.utils import working_directory + + +class PostCSS(ExternalTool): + """Processes CSS code using `PostCSS `_. + + Requires the ``postcss`` executable to be available externally. + To install it, you might be able to do:: + + $ npm install --global postcss + + You should also install the plugins you want to use:: + + $ npm install --global postcss-cssnext + + You can configure postcss in ``postcss.config.js``: + + .. code-block:: javascript + + module.exports = { + plugins: [ + require('postcss-cssnext')({ + // optional configuration for cssnext + }) + ], + }; + + *Supported configuration options*: + + POSTCSS_BIN + Path to the postcss executable used to compile source files. By + default, the filter will attempt to run ``postcss`` via the + system path. + + POSTCSS_EXTRA_ARGS + Additional command-line options to be passed to ``postcss`` using this + setting, which expects a list of strings. + + """ + name = 'postcss' + + options = { + 'binary': 'POSTCSS_BIN', + 'extra_args': 'POSTCSS_EXTRA_ARGS', + } + + max_debug_level = None + + def input(self, in_, out, source_path, **kw): + # Set working directory to the source file so that includes are found + args = [self.binary or 'postcss'] + if self.extra_args: + args.extend(self.extra_args) + with working_directory(filename=source_path): + self.subprocess(args, out, in_) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/pyscss.py b/pelican/plugins/webassets/vendor/webassets/filter/pyscss.py new file mode 100644 index 0000000..cfa6637 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/pyscss.py @@ -0,0 +1,156 @@ +import os + +from webassets.filter import Filter +from webassets.utils import working_directory + + +__all__ = ('PyScss',) + + +class PyScss(Filter): + """Converts `Scss `_ markup to real CSS. + + This uses `PyScss `_, a native + Python implementation of the Scss language. The PyScss module needs + to be installed. It's API has been changing; currently, version + 1.1.5 is known to be supported. + + This is an alternative to using the ``sass`` or ``scss`` filters, + which are based on the original, external tools. + + .. note:: + The Sass syntax is not supported by PyScss. You need to use + the ``sass`` filter based on the original Ruby implementation + instead. + + *Supported configuration options:* + + PYSCSS_DEBUG_INFO (debug_info) + Include debug information in the output for use with FireSass. + + If unset, the default value will depend on your + :attr:`Environment.debug` setting. 
+ + PYSCSS_LOAD_PATHS (load_paths) + Additional load paths that PyScss should use. + + .. warning:: + The filter currently does not automatically use + :attr:`Environment.load_path` for this. + + PYSCSS_STATIC_ROOT (static_root) + The directory PyScss should look in when searching for include + files that you have referenced. Will use + :attr:`Environment.directory` by default. + + PYSCSS_STATIC_URL (static_url) + The url PyScss should use when generating urls to files in + ``PYSCSS_STATIC_ROOT``. Will use :attr:`Environment.url` by + default. + + PYSCSS_ASSETS_ROOT (assets_root) + The directory PyScss should look in when searching for things + like images that you have referenced. Will use + ``PYSCSS_STATIC_ROOT`` by default. + + PYSCSS_ASSETS_URL (assets_url) + The url PyScss should use when generating urls to files in + ``PYSCSS_ASSETS_ROOT``. Will use ``PYSCSS_STATIC_URL`` by + default. + + PYSCSS_STYLE (style) + The style of the output CSS. Can be one of ``nested`` (default), + ``compact``, ``compressed``, or ``expanded``. + """ + + # TODO: PyScss now allows STATIC_ROOT to be a callable, though + # none of the other pertitent values are allowed to be, so this + # is probably not good enough for us. + + name = 'pyscss' + options = { + 'debug_info': 'PYSCSS_DEBUG_INFO', + 'load_paths': 'PYSCSS_LOAD_PATHS', + 'static_root': 'PYSCSS_STATIC_ROOT', + 'static_url': 'PYSCSS_STATIC_URL', + 'assets_root': 'PYSCSS_ASSETS_ROOT', + 'assets_url': 'PYSCSS_ASSETS_URL', + 'style': 'PYSCSS_STYLE', + } + max_debug_level = None + + def setup(self): + super(PyScss, self).setup() + + import scss + self.scss = scss + + if self.style: + try: + from packaging.version import Version + except ImportError: + from distutils.version import LooseVersion as Version + assert Version(scss.__version__) >= Version('1.2.0'), \ + 'PYSCSS_STYLE only supported in pyScss>=1.2.0' + + # Initialize various settings: + # Why are these module-level, not instance-level ?! + # TODO: It appears that in the current dev version, the + # settings can finally passed to a constructor. We'll need + # to support this. + + # Only the dev version appears to support a list + if self.load_paths: + scss.config.LOAD_PATHS = ','.join(self.load_paths) + + # These are needed for various helpers (working with images + # etc.). Similar to the compass filter, we require the user + # to specify such paths relative to the media directory. + try: + scss.config.STATIC_ROOT = self.static_root or self.ctx.directory + scss.config.STATIC_URL = self.static_url or self.ctx.url + except EnvironmentError: + raise EnvironmentError('Because Environment.url and/or ' + 'Environment.directory are not set, you need to ' + 'provide values for the PYSCSS_STATIC_URL and/or ' + 'PYSCSS_STATIC_ROOT settings.') + + # This directory PyScss will use when generating new files, + # like a spritemap. Maybe we should REQUIRE this to be set. + scss.config.ASSETS_ROOT = self.assets_root or scss.config.STATIC_ROOT + scss.config.ASSETS_URL = self.assets_url or scss.config.STATIC_URL + + def input(self, _in, out, **kw): + """Like the original sass filter, this also needs to work as + an input filter, so that relative @imports can be properly + resolved. + """ + + source_path = kw['source_path'] + + # Because PyScss always puts the current working dir at first + # place of the load path, this is what we need to use to make + # relative references work. 
+ with working_directory(os.path.dirname(source_path)): + + scss_opts = { + 'debug_info': ( + self.ctx.environment.debug if self.debug_info is None else self.debug_info), + } + if self.style: + scss_opts['style'] = self.style + else: + scss_opts['compress'] = False + + scss = self.scss.Scss( + scss_opts=scss_opts, + # This is rather nice. We can pass along the filename, + # but also give it already preprocessed content. + scss_files={source_path: _in.read()}) + + # Compile + # Note: This will not throw an error when certain things + # are wrong, like an include file missing. It merely outputs + # to stdout, via logging. We might have to do something about + # this, and evaluate such problems to an exception. + out.write(scss.compile()) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/rcssmin.py b/pelican/plugins/webassets/vendor/webassets/filter/rcssmin.py new file mode 100644 index 0000000..b76325c --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/rcssmin.py @@ -0,0 +1,36 @@ +from __future__ import absolute_import +from webassets.filter import Filter + + +__all__ = ('RCSSMin',) + + +class RCSSMin(Filter): + """Minifies CSS. + + Requires the ``rcssmin`` package (https://github.com/ndparker/rcssmin). + Alike 'cssmin' it is a port of the YUI CSS compression algorithm but aiming + for speed instead of maximum compression. + + Supported configuration options: + RCSSMIN_KEEP_BANG_COMMENTS (boolean) + Keep bang-comments (comments starting with an exclamation mark). + """ + + name = 'rcssmin' + options = { + 'keep_bang_comments': 'RCSSMIN_KEEP_BANG_COMMENTS', + } + + def setup(self): + super(RCSSMin, self).setup() + try: + import rcssmin + except ImportError: + raise EnvironmentError('The "rcssmin" package is not installed.') + else: + self.rcssmin = rcssmin + + def output(self, _in, out, **kw): + keep = self.keep_bang_comments or False + out.write(self.rcssmin.cssmin(_in.read(), keep_bang_comments=keep)) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/replace.py b/pelican/plugins/webassets/vendor/webassets/filter/replace.py new file mode 100644 index 0000000..829face --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/replace.py @@ -0,0 +1,52 @@ +import re +from webassets.filter import ( + Filter, + register_filter +) + + +class ReplaceFilter(Filter): + """ + A filter that allows arbitrary search/replace of strings using a source + regex and a replacement string. Unlike cssrewrite this works on strings + which are not paths and can be used as an output filter. + + Usage: + + replace_static_urls = ReplaceFilter( + pattern=r'\\s*{{\\s*STATIC_URL\\s*}}\\s*', + repl=settings.STATIC_URL, + ) + """ + + name = 'replace' + max_debug_level = None + + def __init__(self, pattern=None, repl=None, as_output=True, **kwargs): + self.pattern = pattern + self.repl = repl + self.as_output = as_output + + super(ReplaceFilter, self).__init__(**kwargs) + + def unique(self): + """ Return a hashable representation of the parameters to allow different instances of this filter. 
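+
+        Without this, two ``ReplaceFilter`` instances with different
+        ``pattern``/``repl`` values would hash identically, and webassets
+        could reuse a cached build made with the wrong parameters.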
""" + return self.pattern, self.repl + + def _process(self, _in, out, **kwargs): + out.write(re.sub(self.pattern, self.repl, _in.read())) + + def output(self, _in, out, **kwargs): + if self.as_output: + self._process(_in, out, **kwargs) + else: + out.write(_in.read()) + + def input(self, _in, out, **kwargs): + if self.as_output: + out.write(_in.read()) + else: + self._process(_in, out, **kwargs) + + +register_filter(ReplaceFilter) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/requirejs.py b/pelican/plugins/webassets/vendor/webassets/filter/requirejs.py new file mode 100644 index 0000000..536a0e3 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/requirejs.py @@ -0,0 +1,168 @@ +from __future__ import with_statement + +import shlex +from os import path, getcwd + +from webassets.filter import ExternalTool + + +class RequireJSFilter(ExternalTool): + ''' + Optimizes AMD-style modularized JavaScript into a single asset + using `RequireJS `_. + + This depends on the NodeJS executable ``r.js``; install via npm:: + + $ npm install -g requirejs + + Details on configuring r.js can be found at + http://requirejs.org/docs/optimization.html#basics. + + *Supported configuration options*: + + executable (env: REQUIREJS_BIN) + + Path to the RequireJS executable used to compile source + files. By default, the filter will attempt to run ``r.js`` via + the system path. + + config (env: REQUIREJS_CONFIG) + + The RequireJS options file. The path is taken to be relative + to the Environment.directory (by default is /static). + + + baseUrl (env: REQUIREJS_BASEURL) + + The ``baseUrl`` parameter to r.js; this is the directory that + AMD modules will be loaded from. The path is taken relative + to the Environment.directory (by default is /static). + Typically, this is used in + conjunction with a ``baseUrl`` parameter set in the `config` + options file, where the baseUrl value in the config file is + used for client-side processing, and the value here is for + server-side processing. + + optimize (env: REQUIREJS_OPTIMIZE) + + The ``optimize`` parameter to r.js; controls whether or not + r.js minifies the output. By default, it is enabled, but can + be set to ``none`` to disable minification. The typical + scenario to disable minification is if you do some additional + processing of the JavaScript (such as removing + ``console.log()`` lines) before minification by the ``rjsmin`` + filter. + + extras (env: REQUIREJS_EXTRAS) + + Any other command-line parameters to be passed to r.js. The + string is expected to be in unix shell-style format, meaning + that quotes can be used to escape spaces, etc. + + run_in_debug (env: REQUIREJS_RUN_IN_DEBUG) + + Boolean which controls if the AMD requirejs is evaluated + client-side or server-side in debug mode. If set to a truthy + value (e.g. 'yes'), then server-side compilation is done, even + in debug mode. The default is false. + + .. admonition:: Client-side AMD evaluation + + AMD modules can be loaded client-side without any processing + done on the server-side. The advantage to this is that + debugging is easier because the browser can tell you which + source file is responsible for a particular line of code. The + disadvantage is that it means that each loaded AMD module is a + separate HTTP request. When running client-side, the client + needs access to the `config` -- for this reason, when running + in client-side mode, the webassets environment must be + adjusted to include a reference to this + configuration. 
Typically, this is done by adding something + similar to the following during webassets initialization: + + .. code-block:: python + + if env.debug and not env.config.get('requirejs_run_in_debug', True): + env['requirejs'].contents += ('requirejs-browser-config.js',) + + And the file ``requirejs-browser-config.js`` will look + something like: + + .. code-block:: js + + require.config({baseUrl: '/static/script/'}); + + Set the `run_in_debug` option to control client-side or + server-side compilation in debug. + ''' + + name = 'requirejs' + method = 'open' + options = { + 'executable' : ('executable', 'REQUIREJS_BIN'), + 'config' : ('config', 'REQUIREJS_CONFIG'), + 'baseUrl' : ('baseUrl', 'REQUIREJS_BASEURL'), + 'optimize' : ('optimize', 'REQUIREJS_OPTIMIZE'), + 'extras' : ('extras', 'REQUIREJS_EXTRAS'), + 'run_in_debug' : ('run_in_debug', 'REQUIREJS_RUN_IN_DEBUG'), + } + + max_debug_level = None + + def setup(self): + super(RequireJSFilter, self).setup() + # todo: detect if `r.js` is installed... + if not self.run_in_debug: + # Disable running in debug mode for this instance. + self.max_debug_level = False + + if self.executable: + self.argv = shlex.split(self.executable) + else: + self.argv = ['r.js'] + + if self.config: + rel_config = path.join( + path.relpath( + self.ctx.directory, + getcwd() + ), + self.config + ) + if not self.baseUrl: + self.baseUrl = path.relpath( + self.ctx.directory, + getcwd() + ) + + self.argv.extend( + filter( + None, + ['-o', + rel_config if self.config else None, + 'name={modname}', + 'out={{output}}', + 'baseUrl=' + self.baseUrl if self.baseUrl else None, + 'optimize=' + self.optimize if self.optimize else None, + ]) + ) + if self.extras: + self.argv.extend(shlex.split(self.extras)) + + def open(self, out, source_path, **kw): + if self.ctx.debug and not self.run_in_debug: + with open(source_path, 'rb') as fp: + out.write(fp.read()) + return + # extract the AMD module name + name = kw.get('source') + if not name: + base = path.abspath(self.baseUrl) + name = path.abspath(source_path) + if not name.startswith(base): + raise ValueError( + 'requested AMD script "%s" does not exist in baseUrl "%s"' + % (source_path, self.baseUrl)) + name = name[len(base) + 1:] + kw['modname'] = path.splitext(name)[0] + return super(RequireJSFilter, self).open(out, source_path, **kw) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/rjsmin/__init__.py b/pelican/plugins/webassets/vendor/webassets/filter/rjsmin/__init__.py new file mode 100644 index 0000000..ff32865 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/rjsmin/__init__.py @@ -0,0 +1,35 @@ +from __future__ import absolute_import +try: + import rjsmin +except ImportError: + from . import rjsmin + + +from webassets.filter import Filter + + +__all__ = ('RJSMin',) + + +class RJSMin(Filter): + """Minifies Javascript by removing whitespace, comments, etc. + + Uses the `rJSmin library `_, + which is included with webassets. However, if you have the external + package installed, it will be used instead. You may want to do this + to get access to the faster C-extension. + + Supported configuration options: + + RJSMIN_KEEP_BANG_COMMENTS (boolean) + Keep bang-comments (comments starting with an exclamation mark). 
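+
+    A hypothetical usage sketch (bundle and file names are examples only)::
+
+        from webassets import Bundle
+
+        js = Bundle('js/app.js', filters='rjsmin', output='gen/app.min.js')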
+ """ + + name = 'rjsmin' + options = { + 'keep_bang_comments': 'RJSMIN_KEEP_BANG_COMMENTS', + } + + def output(self, _in, out, **kw): + keep = self.keep_bang_comments or False + out.write(rjsmin.jsmin(_in.read(), keep_bang_comments=keep)) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/rjsmin/rjsmin.py b/pelican/plugins/webassets/vendor/webassets/filter/rjsmin/rjsmin.py new file mode 100755 index 0000000..ef30102 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/rjsmin/rjsmin.py @@ -0,0 +1,543 @@ +#!/usr/bin/env python +# -*- coding: ascii -*- +r""" +===================== + Javascript Minifier +===================== + +rJSmin is a javascript minifier written in python. + +The minifier is based on the semantics of `jsmin.c by Douglas Crockford`_\\. + +:Copyright: + + Copyright 2011 - 2019 + Andr\xe9 Malo or his licensors, as applicable + +:License: + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +The module is a re-implementation aiming for speed, so it can be used at +runtime (rather than during a preprocessing step). Usually it produces the +same results as the original ``jsmin.c``. It differs in the following ways: + +- there is no error detection: unterminated string, regex and comment + literals are treated as regular javascript code and minified as such. +- Control characters inside string and regex literals are left untouched; they + are not converted to spaces (nor to \\n) +- Newline characters are not allowed inside string and regex literals, except + for line continuations in string literals (ECMA-5). +- "return /regex/" is recognized correctly. +- More characters are allowed before regexes. +- Line terminators after regex literals are handled more sensibly +- "+ +" and "- -" sequences are not collapsed to '++' or '--' +- Newlines before ! operators are removed more sensibly +- (Unnested) template literals are supported (ECMA-6) +- Comments starting with an exclamation mark (``!``) can be kept optionally +- rJSmin does not handle streams, but only complete strings. (However, the + module provides a "streamy" interface). + +Since most parts of the logic are handled by the regex engine it's way faster +than the original python port of ``jsmin.c`` by Baruch Even. The speed factor +varies between about 6 and 55 depending on input and python version (it gets +faster the more compressed the input already is). Compared to the +speed-refactored python port by Dave St.Germain the performance gain is less +dramatic but still between 3 and 50 (for huge inputs). See the docs/BENCHMARKS +file for details. + +rjsmin.c is a reimplementation of rjsmin.py in C and speeds it up even more. + +Supported python versions are 2.7 and 3.4+. + +.. 
_jsmin.c by Douglas Crockford: + http://www.crockford.com/javascript/jsmin.c +""" +__author__ = u"Andr\xe9 Malo" +__docformat__ = "restructuredtext en" +__license__ = "Apache License, Version 2.0" +__version__ = '1.1.0' +__all__ = ['jsmin'] + +import functools as _ft +import re as _re + + +def _make_jsmin(python_only=False): + """ + Generate JS minifier based on `jsmin.c by Douglas Crockford`_ + + .. _jsmin.c by Douglas Crockford: + http://www.crockford.com/javascript/jsmin.c + + :Parameters: + `python_only` : ``bool`` + Use only the python variant. If true, the c extension is not even + tried to be loaded. + + :Return: Minifier + :Rtype: ``callable`` + """ + # pylint: disable = unused-variable + # pylint: disable = too-many-locals + + if not python_only: + try: + import _rjsmin + except ImportError: + pass + else: + # Ensure that the C version is in sync + # https://github.com/ndparker/rjsmin/issues/11 + if getattr(_rjsmin, '__version__', None) == __version__: + return _rjsmin.jsmin + try: + xrange + except NameError: + xrange = range # pylint: disable = redefined-builtin + + space_chars = r'[\000-\011\013\014\016-\040]' + + line_comment = r'(?://[^\r\n]*)' + space_comment = r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)' + space_comment_nobang = r'(?:/\*(?!!)[^*]*\*+(?:[^/*][^*]*\*+)*/)' + bang_comment = r'(?:/\*![^*]*\*+(?:[^/*][^*]*\*+)*/)' + + string1 = r"(?:'[^'\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^'\\\r\n]*)*')" + string1 = string1.replace("'", r'\047') # portability + string2 = r'(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^"\\\r\n]*)*")' + string3 = r'(?:`[^`\\]*(?:\\(?:[^\r\n]|\r?\n|\r)[^`\\]*)*`)' + string3 = string3.replace('`', r'\140') # portability + strings = r'(?:%s|%s|%s)' % (string1, string2, string3) + + charclass = r'(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\])' + nospecial = r'[^/\\\[\r\n]' + regex = r'(?:/(?![\r\n/*])%s*(?:(?:\\[^\r\n]|%s)%s*)*/)' % ( + nospecial, charclass, nospecial + ) + space = r'(?:%s|%s)' % (space_chars, space_comment) + newline = r'(?:%s?[\r\n])' % line_comment + + def fix_charclass(result): + """ Fixup string of chars to fit into a regex char class """ + pos = result.find('-') + if pos >= 0: + result = r'%s%s-' % (result[:pos], result[pos + 1:]) + + def sequentize(string): + """ + Notate consecutive characters as sequence + + (1-4 instead of 1234) + """ + first, last, result = None, None, [] + for char in map(ord, string): + if last is None: + first = last = char + elif last + 1 == char: + last = char + else: + result.append((first, last)) + first = last = char + if last is not None: + result.append((first, last)) + return ''.join(['%s%s%s' % ( + chr(first), + last > first + 1 and '-' or '', + last != first and chr(last) or '' + ) for first, last in result]) # noqa + + return _re.sub( + r"([\000-\040'`])", # ' and ` for better portability + lambda m: '\\%03o' % ord(m.group(1)), ( + sequentize(result) + .replace('\\', '\\\\') + .replace('[', '\\[') + .replace(']', '\\]') + ) + ) + + def id_literal_(what): + """ Make id_literal like char class """ + match = _re.compile(what).match + result = ''.join([ + chr(c) for c in xrange(127) if not match(chr(c)) + ]) + return '[^%s]' % fix_charclass(result) + + def not_id_literal_(keep): + """ Make negated id_literal like char class """ + match = _re.compile(id_literal_(keep)).match + result = ''.join([ + chr(c) for c in xrange(127) if not match(chr(c)) + ]) + return r'[%s]' % fix_charclass(result) + + not_id_literal = not_id_literal_(r'[a-zA-Z0-9_$]') + preregex1 = r'[(,=:\[!&|?{};\r\n+*-]' + preregex2 = 
r'%(not_id_literal)sreturn' % locals() + + id_literal = id_literal_(r'[a-zA-Z0-9_$]') + id_literal_open = id_literal_(r'[a-zA-Z0-9_${\[(!+-]') + id_literal_close = id_literal_(r'[a-zA-Z0-9_$}\])"\047\140+-]') + post_regex_off = id_literal_(r'[^\000-\040}\])?:|,;.&=+-]') + + dull = r'[^\047"\140/\000-\040]' + + space_sub_simple = _re.compile(( + # noqa pylint: disable = bad-continuation + + r'(%(dull)s+)' # 0 + r'|(%(strings)s%(dull)s*)' # 1 + r'|(?<=%(preregex1)s)' + r'%(space)s*(?:%(newline)s%(space)s*)*' + r'(%(regex)s)' # 2 + r'(%(space)s*(?:%(newline)s%(space)s*)+' # 3 + r'(?=%(post_regex_off)s))?' + r'|(?<=%(preregex2)s)' + r'%(space)s*(?:(%(newline)s)%(space)s*)*' # 4 + r'(%(regex)s)' # 5 + r'(%(space)s*(?:%(newline)s%(space)s*)+' # 6 + r'(?=%(post_regex_off)s))?' + r'|(?<=%(id_literal_close)s)' + r'%(space)s*(?:(%(newline)s)%(space)s*)+' # 7 + r'(?=%(id_literal_open)s)' + r'|(?<=%(id_literal)s)(%(space)s)+(?=%(id_literal)s)' # 8 + r'|(?<=\+)(%(space)s)+(?=\+)' # 9 + r'|(?<=-)(%(space)s)+(?=-)' # 10 + r'|%(space)s+' + r'|(?:%(newline)s%(space)s*)+' + ) % locals()).sub + + # print(space_sub_simple.__self__.pattern) + + def space_subber_simple(match): + """ Substitution callback """ + # pylint: disable = too-many-return-statements + + groups = match.groups() + if groups[0]: + return groups[0] + elif groups[1]: + return groups[1] + elif groups[2]: + if groups[3]: + return groups[2] + '\n' + return groups[2] + elif groups[5]: + return "%s%s%s" % ( + groups[4] and '\n' or '', + groups[5], + groups[6] and '\n' or '', + ) + elif groups[7]: + return '\n' + elif groups[8] or groups[9] or groups[10]: + return ' ' + else: + return '' + + space_sub_banged = _re.compile(( + # noqa pylint: disable = bad-continuation + + r'(%(dull)s+)' # 0 + r'|(%(strings)s%(dull)s*)' # 1 + r'|(?<=%(preregex1)s)' + r'(%(space)s*(?:%(newline)s%(space)s*)*)' # 2 + r'(%(regex)s)' # 3 + r'(%(space)s*(?:%(newline)s%(space)s*)+' # 4 + r'(?=%(post_regex_off)s))?' + r'|(?<=%(preregex2)s)' + r'(%(space)s*(?:(%(newline)s)%(space)s*)*)' # 5, 6 + r'(%(regex)s)' # 7 + r'(%(space)s*(?:%(newline)s%(space)s*)+' # 8 + r'(?=%(post_regex_off)s))?' 
+ r'|(?<=%(id_literal_close)s)' + r'(%(space)s*(?:%(newline)s%(space)s*)+)' # 9 + r'(?=%(id_literal_open)s)' + r'|(?<=%(id_literal)s)(%(space)s+)(?=%(id_literal)s)' # 10 + r'|(?<=\+)(%(space)s+)(?=\+)' # 11 + r'|(?<=-)(%(space)s+)(?=-)' # 12 + r'|(%(space)s+)' # 13 + r'|((?:%(newline)s%(space)s*)+)' # 14 + ) % locals()).sub + + # print(space_sub_banged.__self__.pattern) + + keep = _re.compile(( + r'%(space_chars)s+|%(space_comment_nobang)s+|%(newline)s+' + r'|(%(bang_comment)s+)' + ) % locals()).sub + keeper = lambda m: m.groups()[0] or '' + + # print(keep.__self__.pattern) + + def space_subber_banged(match): + """ Substitution callback """ + # pylint: disable = too-many-return-statements + + groups = match.groups() + if groups[0]: + return groups[0] + elif groups[1]: + return groups[1] + elif groups[3]: + return "%s%s%s%s" % ( + keep(keeper, groups[2]), + groups[3], + keep(keeper, groups[4] or ''), + groups[4] and '\n' or '', + ) + elif groups[7]: + return "%s%s%s%s%s" % ( + keep(keeper, groups[5]), + groups[6] and '\n' or '', + groups[7], + keep(keeper, groups[8] or ''), + groups[8] and '\n' or '', + ) + elif groups[9]: + return keep(keeper, groups[9]) + '\n' + elif groups[10] or groups[11] or groups[12]: + return keep(keeper, groups[10] or groups[11] or groups[12]) or ' ' + else: + return keep(keeper, groups[13] or groups[14]) + + banged = _ft.partial(space_sub_banged, space_subber_banged) + simple = _ft.partial(space_sub_simple, space_subber_simple) + + def jsmin(script, keep_bang_comments=False): + r""" + Minify javascript based on `jsmin.c by Douglas Crockford`_\. + + Instead of parsing the stream char by char, it uses a regular + expression approach which minifies the whole script with one big + substitution regex. + + .. _jsmin.c by Douglas Crockford: + http://www.crockford.com/javascript/jsmin.c + + :Parameters: + `script` : ``str`` + Script to minify + + `keep_bang_comments` : ``bool`` + Keep comments starting with an exclamation mark? (``/*!...*/``) + + :Return: Minified script + :Rtype: ``str`` + """ + # pylint: disable = redefined-outer-name + + is_bytes, script = _as_str(script) + script = (banged if keep_bang_comments else simple)( + '\n%s\n' % script + ).strip() + if is_bytes: + return script.encode('latin-1') + return script + + return jsmin + +jsmin = _make_jsmin() + + +def _as_str(script): + """ Make sure the script is a text string """ + is_bytes = False + if str is bytes: + if not isinstance(script, basestring): # noqa pylint: disable = undefined-variable + raise TypeError("Unexpected type") + elif isinstance(script, (bytes, bytearray)): + is_bytes = True + script = script.decode('latin-1') + elif not isinstance(script, str): + raise TypeError("Unexpected type") + + return is_bytes, script + + +def jsmin_for_posers(script, keep_bang_comments=False): + r""" + Minify javascript based on `jsmin.c by Douglas Crockford`_\. + + Instead of parsing the stream char by char, it uses a regular + expression approach which minifies the whole script with one big + substitution regex. + + .. _jsmin.c by Douglas Crockford: + http://www.crockford.com/javascript/jsmin.c + + :Warning: This function is the digest of a _make_jsmin() call. It just + utilizes the resulting regexes. It's here for fun and may + vanish any time. Use the `jsmin` function instead. + + :Parameters: + `script` : ``str`` + Script to minify + + `keep_bang_comments` : ``bool`` + Keep comments starting with an exclamation mark? 
(``/*!...*/``) + + :Return: Minified script + :Rtype: ``str`` + """ + if not keep_bang_comments: + rex = ( + r'([^\047"\140/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^' + r'\r\n]|\r?\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^' + r'\r\n]|\r?\n|\r)[^"\\\r\n]*)*")|(?:\140[^\140\\]*(?:\\(?:[^\r\n' + r']|\r?\n|\r)[^\140\\]*)*\140))[^\047"\140/\000-\040]*)|(?<=[(,=' + r':\[!&|?{};\r\n+*-])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*' + r'\*+(?:[^/*][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-' + r'\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)*(' + r'(?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*' + r'(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/))((?:[\000-\011' + r'\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:(?:(' + r'?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*' + r']*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040&)+,.:;=?\]|}-]))?|' + r'(?<=[\000-#%-,./:-@\[-^\140{-~-]return)(?:[\000-\011\013\014\0' + r'16-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:((?:(?://[^\r' + r'\n]*)?[\r\n]))(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?' + r':[^/*][^*]*\*+)*/))*)*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^' + r'\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r' + r'\n]*)*/))((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/' + r'*][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013' + r'\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000' + r'-\040&)+,.:;=?\]|}-]))?|(?<=[^\000-!#%&(*,./:-@\[\\^{|~])(?:[' + r'\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)' + r')*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\014\016-\040' + r']|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040"#%-\047' + r')*,./:-@\\-^\140|-~])|(?<=[^\000-#%-,./:-@\[-^\140{-~-])((?:[' + r'\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)' + r'))+(?=[^\000-#%-,./:-@\[-^\140{-~-])|(?<=\+)((?:[\000-\011\013' + r'\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=\+)|(?<' + r'=-)((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]' + r'*\*+)*/)))+(?=-)|(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*' + r'+(?:[^/*][^*]*\*+)*/))+|(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-' + r'\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+' + ) + + def subber(match): + """ Substitution callback """ + groups = match.groups() + return ( + groups[0] or + groups[1] or + (groups[3] and (groups[2] + '\n')) or + groups[2] or + (groups[5] and "%s%s%s" % ( + groups[4] and '\n' or '', + groups[5], + groups[6] and '\n' or '', + )) or + (groups[7] and '\n') or + (groups[8] and ' ') or + (groups[9] and ' ') or + (groups[10] and ' ') or + '' + ) + else: + rex = ( + r'([^\047"\140/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^' + r'\r\n]|\r?\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^' + r'\r\n]|\r?\n|\r)[^"\\\r\n]*)*")|(?:\140[^\140\\]*(?:\\(?:[^\r\n' + r']|\r?\n|\r)[^\140\\]*)*\140))[^\047"\140/\000-\040]*)|(?<=[(,=' + r':\[!&|?{};\r\n+*-])((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]' + r'*\*+(?:[^/*][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000' + r'-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)*' + r')((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n' + r']*(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/))((?:[\000-\0' + r'11\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:(?' 
+ r':(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[' + r'^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040&)+,.:;=?\]|}-]))' + r'?|(?<=[\000-#%-,./:-@\[-^\140{-~-]return)((?:[\000-\011\013\01' + r'4\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:((?:(?://[^' + r'\r\n]*)?[\r\n]))(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+' + r'(?:[^/*][^*]*\*+)*/))*)*)((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:' + r'\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/' + r'\\\[\r\n]*)*/))((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+' + r'(?:[^/*][^*]*\*+)*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\01' + r'1\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[' + r'^\000-\040&)+,.:;=?\]|}-]))?|(?<=[^\000-!#%&(*,./:-@\[\\^{|~])' + r'((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*' + r'+)*/))*(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-' + r'\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+)(?=[^\000-\040"#%' + r'-\047)*,./:-@\\-^\140|-~])|(?<=[^\000-#%-,./:-@\[-^\140{-~-])(' + r'(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+' + r')*/))+)(?=[^\000-#%-,./:-@\[-^\140{-~-])|(?<=\+)((?:[\000-\011' + r'\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+)(?=\+)' + r'|(?<=-)((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*]' + r'[^*]*\*+)*/))+)(?=-)|((?:[\000-\011\013\014\016-\040]|(?:/\*[^' + r'*]*\*+(?:[^/*][^*]*\*+)*/))+)|((?:(?:(?://[^\r\n]*)?[\r\n])(?:' + r'[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/' + r'))*)+)' + ) + + keep = _re.compile( + r'[\000-\011\013\014\016-\040]+|(?:/\*(?!!)[^*]*\*+(?:[^/*][^*]*' + r'\*+)*/)+|(?:(?://[^\r\n]*)?[\r\n])+|((?:/\*![^*]*\*+(?:[^/*][^' + r'*]*\*+)*/)+)' + ).sub + keeper = lambda m: m.groups()[0] or '' + + def subber(match): + """ Substitution callback """ + groups = match.groups() + return ( + groups[0] or + groups[1] or + groups[3] and "%s%s%s%s" % ( + keep(keeper, groups[2]), + groups[3], + keep(keeper, groups[4] or ''), + groups[4] and '\n' or '', + ) or + groups[7] and "%s%s%s%s%s" % ( + keep(keeper, groups[5]), + groups[6] and '\n' or '', + groups[7], + keep(keeper, groups[8] or ''), + groups[8] and '\n' or '', + ) or + groups[9] and (keep(keeper, groups[9]) + '\n') or + groups[10] and (keep(keeper, groups[10]) or ' ') or + groups[11] and (keep(keeper, groups[11]) or ' ') or + groups[12] and (keep(keeper, groups[12]) or ' ') or + keep(keeper, groups[13] or groups[14]) + ) + + is_bytes, script = _as_str(script) + script = _re.sub(rex, subber, '\n%s\n' % script).strip() + if is_bytes: + return script.encode('latin-1') + return script + + +if __name__ == '__main__': + def main(): + """ Main """ + import sys as _sys + + argv = _sys.argv[1:] + keep_bang_comments = '-b' in argv or '-bp' in argv or '-pb' in argv + if '-p' in argv or '-bp' in argv or '-pb' in argv: + xjsmin = _make_jsmin(python_only=True) + else: + xjsmin = jsmin + + _sys.stdout.write(xjsmin( + _sys.stdin.read(), keep_bang_comments=keep_bang_comments + )) + + main() diff --git a/pelican/plugins/webassets/vendor/webassets/filter/sass.py b/pelican/plugins/webassets/vendor/webassets/filter/sass.py new file mode 100644 index 0000000..02854d6 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/sass.py @@ -0,0 +1,166 @@ +from __future__ import print_function + +import os + +from webassets.filter import ExternalTool + +__all__ = ('Sass', 'SCSS') + + +class Sass(ExternalTool): + """Converts `Sass `_ markup to + real CSS. + + Requires the Sass executable to be available externally. 
To install
+    it, you might be able to do::
+
+        $ sudo npm install -g sass
+
+    By default, this works as an "input filter", meaning ``sass`` is
+    called for each source file in the bundle. This is because the
+    path of the source file is required so that @import directives
+    within the Sass file can be correctly resolved.
+
+    However, it is possible to use this filter as an "output filter",
+    meaning the source files will first be concatenated, and then the
+    Sass filter is applied in one go. This can provide a speedup for
+    bigger projects.
+
+    To use Sass as an output filter::
+
+        from webassets.filter import get_filter
+        sass = get_filter('sass', as_output=True)
+        Bundle(...., filters=(sass,))
+
+    However, if you want to use the output filter mode and still also
+    use the @import directive in your Sass files, you will need to
+    pass along the ``load_paths`` argument, which specifies the path
+    to which the imports are relative (this is implemented by
+    changing the working directory before calling the ``sass``
+    executable)::
+
+        sass = get_filter('sass', as_output=True, load_paths='/tmp')
+
+    With ``as_output=True``, the resulting concatenation of the Sass
+    files is piped to Sass via stdin (``cat ... | sass --stdin ...``),
+    which may cause compilation to fail if import statements are
+    given as relative paths.
+
+    For example, if a file ``foo/bar/baz.scss`` imports file
+    ``foo/bar/bat.scss`` (same directory) and the import is defined as
+    ``@import "bat";`` then Sass will fail to compile, because Sass
+    naturally has no information on where ``baz.scss`` is located on
+    disk (since the data was passed via stdin), and therefore cannot
+    resolve the location of ``bat.scss``::
+
+        Traceback (most recent call last):
+        ...
+        webassets.exceptions.FilterError: sass: subprocess had error: stderr=(sass):1: File to import not found or unreadable: bat. (Sass::SyntaxError)
+        Load paths:
+          /path/to/project-foo
+        on line 1 of standard input
+        Use --trace for backtrace.
+        , stdout=, returncode=65
+
+    To overcome this issue, the full path must be provided in the
+    import statement, ``@import "foo/bar/bat"``; webassets will then
+    pass the ``load_paths`` argument (e.g.,
+    ``/path/to/project-foo``) to Sass via its ``-I`` flags so Sass can
+    resolve the full path to the file to be imported:
+    ``/path/to/project-foo/foo/bar/bat``
+
+    Supported configuration options:
+
+    SASS_BIN
+        The path to the Sass binary. If not set, the filter will
+        try to run ``sass`` as if it's in the system path.
+
+    SASS_STYLE
+        The style for the output CSS. Can be one of ``expanded`` (default)
+        or ``compressed``.
+
+    SASS_AS_OUTPUT
+        By default, this works as an "input filter", meaning ``sass`` is
+        called for each source file in the bundle. This is because the
+        path of the source file is required so that @import directives
+        within the Sass file can be correctly resolved.
+
+        However, it is possible to use this filter as an "output filter",
+        meaning the source files will first be concatenated, and then the
+        Sass filter is applied in one go. This can provide a speedup for
+        bigger projects.
+
+        It will also allow you to share variables between files.
+
+    SASS_LOAD_PATHS
+        A list of paths relative to ``Environment.directory``, or absolute
+        paths. Order matters, as Sass will pick the first file found in
+        path order. These are fed into the ``-I`` flag of the ``sass``
+        command and are used to control where Sass imports code from.
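+
+    A sketch of how these options combine (the bundle, directory, and
+    file names here are hypothetical)::
+
+        from webassets import Bundle, Environment
+        from webassets.filter import get_filter
+
+        env = Environment(directory='static', url='/static')
+        scss = get_filter('scss', as_output=True, load_paths=['scss'])
+        env.register('css_all', Bundle('scss/main.scss', filters=(scss,),
+                                       output='gen/packed.css'))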
+ """ + # TODO: If an output filter could be passed the list of all input + # files, the filter might be able to do something interesting with + # it (for example, determine that all source files are in the same + # directory). + + name = 'sass' + options = { + 'binary': 'SASS_BIN', + 'use_scss': ('scss', 'SASS_USE_SCSS'), + 'as_output': 'SASS_AS_OUTPUT', + 'load_paths': 'SASS_LOAD_PATHS', + 'style': 'SASS_STYLE', + } + max_debug_level = None + + def resolve_path(self, path): + return self.ctx.resolver.resolve_source(self.ctx, path) + + def _apply_sass(self, _in, out, cd=None): + # Switch to source file directory if asked, so that this directory + # is by default on the load path. We could pass it via -I, but then + # files in the (undefined) wd could shadow the correct files. + orig_cwd = os.getcwd() + child_cwd = orig_cwd + if cd: + child_cwd = cd + + args = [self.binary or 'sass', + '--stdin', + '--style', self.style or 'expanded'] + + if not self.use_scss: + args.append("--indented") + + for path in self.load_paths or []: + if os.path.isabs(path): + abs_path = path + else: + abs_path = self.resolve_path(path) + args.extend(['-I', abs_path]) + + return self.subprocess(args, out, _in, cwd=child_cwd) + + def input(self, _in, out, source_path, output_path, **kw): + if self.as_output: + out.write(_in.read()) + else: + self._apply_sass(_in, out, os.path.dirname(source_path)) + + def output(self, _in, out, **kwargs): + if not self.as_output: + out.write(_in.read()) + else: + self._apply_sass(_in, out) + + +class SCSS(Sass): + """Version of the ``sass`` filter that uses the SCSS syntax. + """ + + name = 'scss' + + def __init__(self, *a, **kw): + assert 'scss' not in kw + kw['scss'] = True + super(SCSS, self).__init__(*a, **kw) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/sass_ruby.py b/pelican/plugins/webassets/vendor/webassets/filter/sass_ruby.py new file mode 100644 index 0000000..63b07b0 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/sass_ruby.py @@ -0,0 +1,225 @@ +from __future__ import print_function +import os, subprocess + +from webassets.filter import ExternalTool +from webassets.cache import FilesystemCache + + +__all__ = ('RubySass', 'RubySCSS') + + +class RubySass(ExternalTool): + """Converts `Sass `_ markup to real CSS. + + This filter uses the legacy ruby Sass compiler, which has been + replaced by the dart version in use in the ``sass`` filter. + + Requires the Sass executable to be available externally. To install + it, you might be able to do:: + + $ sudo gem install sass + + By default, this works as an "input filter", meaning ``sass`` is + called for each source file in the bundle. This is because the + path of the source file is required so that @import directives + within the Sass file can be correctly resolved. + + However, it is possible to use this filter as an "output filter", + meaning the source files will first be concatenated, and then the + Sass filter is applied in one go. This can provide a speedup for + bigger projects. 
+
+    To use Sass as an output filter::
+
+        from webassets.filter import get_filter
+        sass = get_filter('sass_ruby', as_output=True)
+        Bundle(...., filters=(sass,))
+
+    However, if you want to use the output filter mode and still also
+    use the @import directive in your Sass files, you will need to
+    pass along the ``load_paths`` argument, which specifies the path
+    to which the imports are relative (this is implemented by
+    changing the working directory before calling the ``sass``
+    executable)::
+
+        sass = get_filter('sass_ruby', as_output=True, load_paths='/tmp')
+
+    With ``as_output=True``, the resulting concatenation of the Sass
+    files is piped to Sass via stdin (``cat ... | sass --stdin ...``),
+    which may cause compilation to fail if import statements are
+    given as relative paths.
+
+    For example, if a file ``foo/bar/baz.scss`` imports file
+    ``foo/bar/bat.scss`` (same directory) and the import is defined as
+    ``@import "bat";`` then Sass will fail to compile, because Sass
+    naturally has no information on where ``baz.scss`` is located on
+    disk (since the data was passed via stdin), and therefore cannot
+    resolve the location of ``bat.scss``::
+
+        Traceback (most recent call last):
+        ...
+        webassets.exceptions.FilterError: sass: subprocess had error: stderr=(sass):1: File to import not found or unreadable: bat. (Sass::SyntaxError)
+        Load paths:
+          /path/to/project-foo
+        on line 1 of standard input
+        Use --trace for backtrace.
+        , stdout=, returncode=65
+
+    To overcome this issue, the full path must be provided in the
+    import statement, ``@import "foo/bar/bat"``; webassets will then
+    pass the ``load_paths`` argument (e.g.,
+    ``/path/to/project-foo``) to Sass via its ``-I`` flags so Sass can
+    resolve the full path to the file to be imported:
+    ``/path/to/project-foo/foo/bar/bat``
+
+    Supported configuration options:
+
+    SASS_BIN
+        The path to the Sass binary. If not set, the filter will
+        try to run ``sass`` as if it's in the system path.
+
+    SASS_STYLE
+        The style for the output CSS. Can be one of ``expanded`` (default),
+        ``nested``, ``compact`` or ``compressed``.
+
+    SASS_DEBUG_INFO
+        If set to ``True``, will cause Sass to output debug information
+        to be used by the FireSass Firebug plugin. Corresponds to the
+        ``--debug-info`` command line option of Sass.
+
+        Note that for this, Sass uses ``@media`` rules, which are
+        not removed by a CSS compressor. You will thus want to make
+        sure that this option is disabled in production.
+
+        By default, the value of this option will depend on the
+        environment ``DEBUG`` setting.
+
+    SASS_LINE_COMMENTS
+        Passes the ``--line-comments`` flag to sass, which emits comments
+        in the generated CSS indicating the corresponding source line.
+
+        Note that this option is disabled by Sass if ``--style compressed``
+        or ``--debug-info`` options are provided.
+
+        Enabled by default. To disable, set empty environment variable
+        ``SASS_LINE_COMMENTS=`` or pass ``line_comments=False`` to this
+        filter.
+
+    SASS_AS_OUTPUT
+        By default, this works as an "input filter", meaning ``sass`` is
+        called for each source file in the bundle. This is because the
+        path of the source file is required so that @import directives
+        within the Sass file can be correctly resolved.
+
+        However, it is possible to use this filter as an "output filter",
+        meaning the source files will first be concatenated, and then the
+        Sass filter is applied in one go. This can provide a speedup for
+        bigger projects.
+
+        It will also allow you to share variables between files.
+
+    SASS_SOURCE_MAP
+        If provided, this will generate source maps in the output depending
+        on the type specified. By default this will use Sass's ``auto``.
+        Possible values are ``auto``, ``file``, ``inline``, or ``none``.
+
+    SASS_LOAD_PATHS
+        A list of paths relative to ``Environment.directory``, or absolute
+        paths. Order matters, as sass will pick the first file found in
+        path order. These are fed into the ``-I`` flag of the sass command
+        and are used to control where sass imports code from.
+
+    SASS_LIBS
+        A list of paths relative to ``Environment.directory``, or absolute
+        paths. These are fed into the ``-r`` flag of the sass command and
+        are used to require Ruby libraries before running sass.
+    """
+    # TODO: If an output filter could be passed the list of all input
+    # files, the filter might be able to do something interesting with
+    # it (for example, determine that all source files are in the same
+    # directory).
+
+    name = 'sass_ruby'
+    options = {
+        'binary': 'SASS_BIN',
+        'use_scss': ('scss', 'SASS_USE_SCSS'),
+        'use_compass': ('use_compass', 'SASS_COMPASS'),
+        'debug_info': 'SASS_DEBUG_INFO',
+        'as_output': 'SASS_AS_OUTPUT',
+        'load_paths': 'SASS_LOAD_PATHS',
+        'libs': 'SASS_LIBS',
+        'style': 'SASS_STYLE',
+        'source_map': 'SASS_SOURCE_MAP',
+        'line_comments': 'SASS_LINE_COMMENTS',
+    }
+    max_debug_level = None
+
+    def resolve_path(self, path):
+        return self.ctx.resolver.resolve_source(self.ctx, path)
+
+    def _apply_sass(self, _in, out, cd=None):
+        # Switch to source file directory if asked, so that this directory
+        # is by default on the load path. We could pass it via -I, but then
+        # files in the (undefined) wd could shadow the correct files.
+        orig_cwd = os.getcwd()
+        child_cwd = orig_cwd
+        if cd:
+            child_cwd = cd
+
+        args = [self.binary or 'sass',
+                '--stdin',
+                '--style', self.style or 'expanded']
+        if self.line_comments is None or self.line_comments:
+            args.append('--line-comments')
+        if isinstance(self.ctx.cache, FilesystemCache):
+            args.extend(['--cache-location',
+                         os.path.join(orig_cwd, self.ctx.cache.directory, 'sass')])
+        elif not cd:
+            # Without a fixed working directory, the location of the cache
+            # is basically undefined, so prefer not to use one at all.
+            args.extend(['--no-cache'])
+        if (self.ctx.environment.debug if self.debug_info is None else self.debug_info):
+            args.append('--debug-info')
+        if self.use_scss:
+            args.append('--scss')
+        if self.use_compass:
+            args.append('--compass')
+        if self.source_map:
+            args.append('--sourcemap=' + self.source_map)
+        for path in self.load_paths or []:
+            if os.path.isabs(path):
+                abs_path = path
+            else:
+                abs_path = self.resolve_path(path)
+            args.extend(['-I', abs_path])
+        for lib in self.libs or []:
+            if os.path.isabs(lib):
+                abs_path = lib
+            else:
+                abs_path = self.resolve_path(lib)
+            args.extend(['-r', abs_path])
+
+        return self.subprocess(args, out, _in, cwd=child_cwd)
+
+    def input(self, _in, out, source_path, output_path, **kw):
+        if self.as_output:
+            out.write(_in.read())
+        else:
+            self._apply_sass(_in, out, os.path.dirname(source_path))
+
+    def output(self, _in, out, **kwargs):
+        if not self.as_output:
+            out.write(_in.read())
+        else:
+            self._apply_sass(_in, out)
+
+
+class RubySCSS(RubySass):
+    """Version of the ``sass_ruby`` filter that uses the SCSS syntax.
+ """ + + name = 'scss_ruby' + + def __init__(self, *a, **kw): + assert not 'scss' in kw + kw['scss'] = True + super(RubySCSS, self).__init__(*a, **kw) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/slimit.py b/pelican/plugins/webassets/vendor/webassets/filter/slimit.py new file mode 100644 index 0000000..0b8fae0 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/slimit.py @@ -0,0 +1,31 @@ +from __future__ import absolute_import +from webassets.filter import Filter + + +__all__ = ('Slimit',) + + +class Slimit(Filter): + """Minifies JS. + + Requires the ``slimit`` package (https://github.com/rspivak/slimit), + which is a JavaScript minifier written in Python. It compiles JavaScript + into more compact code so that it downloads and runs faster. + + It offers mangle and mangle_toplevel options through SLIMIT_MANGLE and SLIMIT_MANGLE_TOPLEVEL + """ + + name = 'slimit' + options = {"mangle": "SLIMIT_MANGLE", "mangle_toplevel": "SLIMIT_MANGLE_TOPLEVEL"} + + def setup(self): + try: + import slimit + except ImportError: + raise EnvironmentError('The "slimit" package is not installed.') + else: + self.slimit = slimit + + def output(self, _in, out, **kw): + out.write(self.slimit.minify(_in.read(), + mangle=self.mangle, mangle_toplevel=self.mangle_toplevel)) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/slimmer.py b/pelican/plugins/webassets/vendor/webassets/filter/slimmer.py new file mode 100644 index 0000000..7e98bfd --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/slimmer.py @@ -0,0 +1,26 @@ +from __future__ import absolute_import + +from webassets.filter import Filter + + +__all__ = ('CSSSlimmer',) + + +class Slimmer(Filter): + + def setup(self): + super(Slimmer, self).setup() + import slimmer + self.slimmer = slimmer + + +class CSSSlimmer(Slimmer): + """Minifies CSS by removing whitespace, comments etc., using the Python + `slimmer `_ library. + """ + + name = 'css_slimmer' + + def output(self, _in, out, **kw): + out.write(self.slimmer.css_slimmer(_in.read())) + diff --git a/pelican/plugins/webassets/vendor/webassets/filter/spritemapper.py b/pelican/plugins/webassets/vendor/webassets/filter/spritemapper.py new file mode 100644 index 0000000..8ee8465 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/spritemapper.py @@ -0,0 +1,125 @@ +from __future__ import print_function +from __future__ import absolute_import +from webassets.six import StringIO +from contextlib import contextmanager +from webassets.filter import Filter + +try: + from spritecss.main import CSSFile + from spritecss.css import CSSParser + from spritecss.css.parser import iter_print_css + from spritecss.config import CSSConfig + from spritecss.mapper import SpriteMapCollector + from spritecss.packing import PackedBoxes, print_packed_size + from spritecss.packing.sprites import open_sprites + from spritecss.stitch import stitch + from spritecss.replacer import SpriteReplacer + +except ImportError: + spritecss_loaded = False + +else: + spritecss_loaded = True + + class FakeCSSFile(CSSFile): + """ + A custom subclass of spritecss.main.CSSFile that accepts CSS input + as string data, instead of requiring that a CSS file be read from + disk. 
+ """ + + def __init__(self, fname, conf=None, data=''): + super(FakeCSSFile, self).__init__(fname, conf=conf) + self.data = StringIO(data) + + @contextmanager + def open_parser(self): + yield CSSParser.read_file(self.data) + + +__all__ = ('Spritemapper',) + + +class Spritemapper(Filter): + """ + Generate CSS spritemaps using + `Spritemapper `_, a Python + utility that merges multiple images into one and generates CSS positioning + for the corresponding slices. Installation is easy:: + + pip install spritemapper + + Supported configuration options: + + SPRITEMAPPER_PADDING + A tuple of integers indicating the number of pixels of padding to + place between sprites + + SPRITEMAPPER_ANNEAL_STEPS + Affects the number of combinations to be attempted by the box packer + algorithm + + **Note:** Since the ``spritemapper`` command-line utility expects source + and output files to be on the filesystem, this filter interfaces directly + with library internals instead. It has been tested to work with + Spritemapper version 1.0. + """ + + name = 'spritemapper' + + def setup(self): + + if not spritecss_loaded: + raise EnvironmentError( + "The spritemapper package could not be found." + ) + + self.options = {} + padding = self.get_config('SPRITEMAPPER_PADDING', require=False) + if padding: + self.options['padding'] = padding + anneal_steps = self.get_config('SPRITEMAPPER_ANNEAL_STEPS', require=False) + if anneal_steps: + self.options['anneal_steps'] = anneal_steps + + def input(self, _in, out, **kw): + + source_path = kw['source_path'] + + # Save the input data for later + css = _in.read() + + # Build config object + conf = CSSConfig(base=self.options, fname=source_path) + + # Instantiate a dummy file instance + cssfile = FakeCSSFile(fname=source_path, conf=conf, data=css) + + # Find spritemaps + smaps = SpriteMapCollector(conf=conf) + smaps.collect(cssfile.map_sprites()) + + # Weed out single-image spritemaps + smaps = [sm for sm in smaps if len(sm) > 1] + + # Generate spritemapped image + # This code is almost verbatim from spritecss.main.spritemap + sm_plcs = [] + for smap in smaps: + with open_sprites(smap, pad=conf.padding) as sprites: + print(("Packing sprites in mapping %s" % (smap.fname,))) + packed = PackedBoxes(sprites, anneal_steps=conf.anneal_steps) + print_packed_size(packed) + sm_plcs.append((smap, packed.placements)) + print(("Writing spritemap image at %s" % (smap.fname,))) + im = stitch(packed) + with open(smap.fname, "wb") as fp: + im.save(fp) + + # Instantiate a fake file instance again + cssfile = FakeCSSFile(fname=source_path, conf=conf, data=css) + + # Output rewritten CSS with spritemapped URLs + replacer = SpriteReplacer(sm_plcs) + for data in iter_print_css(replacer(cssfile)): + out.write(data) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/stylus.py b/pelican/plugins/webassets/vendor/webassets/filter/stylus.py new file mode 100644 index 0000000..63d9205 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/stylus.py @@ -0,0 +1,52 @@ +import os +from webassets.filter import ExternalTool, option + + +__all__ = ('Stylus',) + + +class Stylus(ExternalTool): + """Converts `Stylus `_ markup to CSS. + + Requires the Stylus executable to be available externally. You can install + it using the `Node Package Manager `_:: + + $ npm install -g stylus + + Supported configuration options: + + STYLUS_BIN + The path to the Stylus binary. If not set, assumes ``stylus`` is in the + system path. + + STYLUS_PLUGINS + A Python list of Stylus plugins to use. 
Each plugin will be included + via Stylus's command-line ``--use`` argument. + + STYLUS_EXTRA_ARGS + A Python list of any additional command-line arguments. + + STYLUS_EXTRA_PATHS + A Python list of any additional import paths. + """ + + name = 'stylus' + options = { + 'stylus': 'STYLUS_BIN', + 'plugins': option('STYLUS_PLUGINS', type=list), + 'extra_args': option('STYLUS_EXTRA_ARGS', type=list), + 'extra_paths': option('STYLUS_EXTRA_PATHS', type=list), + } + max_debug_level = None + + def input(self, _in, out, **kwargs): + args = [self.stylus or 'stylus'] + source_dir = os.path.dirname(kwargs['source_path']) + paths = [source_dir] + (self.extra_paths or []) + for path in paths: + args.extend(('--include', path)) + for plugin in self.plugins or []: + args.extend(('--use', plugin)) + if self.extra_args: + args.extend(self.extra_args) + self.subprocess(args, out, _in) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/typescript.py b/pelican/plugins/webassets/vendor/webassets/filter/typescript.py new file mode 100644 index 0000000..bed10ae --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/typescript.py @@ -0,0 +1,60 @@ +import os +import subprocess +import tempfile +from io import open # Give 2 and 3 use same newline behaviour. + +from webassets.filter import Filter +from webassets.exceptions import FilterError + + +__all__ = ('TypeScript',) + + + +class TypeScript(Filter): + """Compile `TypeScript `_ to JavaScript. + + TypeScript is an external tool written for NodeJS. + This filter assumes that the ``tsc`` executable is in the path. Otherwise, you + may define the ``TYPESCRIPT_BIN`` setting. + + To specify TypeScript compiler options, ``TYPESCRIPT_CONFIG`` may be defined. + E.g.: ``--removeComments true --target ES6``. + """ + + name = 'typescript' + max_debug_level = None + options = { + 'binary': 'TYPESCRIPT_BIN', + 'config': 'TYPESCRIPT_CONFIG' + } + + def output(self, _in, out, **kw): + # The typescript compiler cannot read a file which does not have + # the .ts extension. The output file needs to have an extension, + # or the compiler will want to create a directory in its place. + input_filename = tempfile.mktemp() + ".ts" + output_filename = tempfile.mktemp() + ".js" + + with open(input_filename, 'w') as f: + f.write(_in.read()) + + args = [self.binary or 'tsc', '--out', output_filename, input_filename] + if self.config: + args += self.config.split() + proc = subprocess.Popen( + args, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=(os.name == 'nt')) + stdout, stderr = proc.communicate() + if proc.returncode != 0: + raise FilterError("typescript: subprocess had error: stderr=%s," % stderr + + "stdout=%s, returncode=%s" % (stdout, proc.returncode)) + + with open(output_filename, 'r') as f: + out.write(f.read()) + + os.unlink(input_filename) + os.unlink(output_filename) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/uglifyjs.py b/pelican/plugins/webassets/vendor/webassets/filter/uglifyjs.py new file mode 100644 index 0000000..7e35255 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/uglifyjs.py @@ -0,0 +1,32 @@ +from webassets.filter import ExternalTool + + +__all__ = ('UglifyJS',) + + +class UglifyJS(ExternalTool): + """ + Minify Javascript using `UglifyJS `_. + + The filter requires version 2 of UglifyJS. + + UglifyJS is an external tool written for NodeJS; this filter assumes that + the ``uglifyjs`` executable is in the path. 
Otherwise, you may define + a ``UGLIFYJS_BIN`` setting. + + Additional options may be passed to ``uglifyjs`` using the setting + ``UGLIFYJS_EXTRA_ARGS``, which expects a list of strings. + """ + + name = 'uglifyjs' + options = { + 'binary': 'UGLIFYJS_BIN', + 'extra_args': 'UGLIFYJS_EXTRA_ARGS', + } + + def output(self, _in, out, **kw): + # UglifyJS 2 doesn't properly read data from stdin (#212). + args = [self.binary or 'uglifyjs', '{input}', '--output', '{output}'] + if self.extra_args: + args.extend(self.extra_args) + self.subprocess(args, out, _in) diff --git a/pelican/plugins/webassets/vendor/webassets/filter/yui.py b/pelican/plugins/webassets/vendor/webassets/filter/yui.py new file mode 100644 index 0000000..0c67de4 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/filter/yui.py @@ -0,0 +1,54 @@ +"""Minify Javascript and CSS with +`YUI Compressor `_. + +YUI Compressor is an external tool written in Java, which needs to be +available. One way to get it is to install the +`yuicompressor `_ package:: + + pip install yuicompressor + +No configuration is necessary in this case. + +You can also get YUI compressor a different way and define +a ``YUI_COMPRESSOR_PATH`` setting that points to the ``.jar`` file. +Otherwise, an environment variable by the same name is tried. The +filter will also look for a ``JAVA_HOME`` environment variable to +run the ``.jar`` file, or will otherwise assume that ``java`` is +on the system path. +""" + +from webassets.filter import JavaTool + + +__all__ = ('YUIJS', 'YUICSS',) + + +class YUIBase(JavaTool): + + def setup(self): + super(YUIBase, self).setup() + + try: + self.jar = self.get_config('YUI_COMPRESSOR_PATH', + what='YUI Compressor') + except EnvironmentError: + raise EnvironmentError( + "\nYUI Compressor jar can't be found." + "\nPlease provide a YUI_COMPRESSOR_PATH setting or an " + "environment variable with the full path to the " + "YUI compressor jar." + ) + + def output(self, _in, out, **kw): + self.subprocess( + ['--charset=utf-8', '--type=%s' % self.mode], out, _in) + + +class YUIJS(YUIBase): + name = 'yui_js' + mode = 'js' + + +class YUICSS(YUIBase): + name = 'yui_css' + mode = 'css' diff --git a/pelican/plugins/webassets/vendor/webassets/importlib.py b/pelican/plugins/webassets/vendor/webassets/importlib.py new file mode 100644 index 0000000..48846f7 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/importlib.py @@ -0,0 +1,38 @@ +# From Python 2.7. + +import sys + +def _resolve_name(name, package, level): + """Return the absolute name of the module to be imported.""" + if not hasattr(package, 'rindex'): + raise ValueError("'package' not set to a string") + dot = len(package) + for x in range(level, 1, -1): + try: + dot = package.rindex('.', 0, dot) + except ValueError: + raise ValueError("attempted relative import beyond top-level " + "package") + return "%s.%s" % (package[:dot], name) + + +def import_module(name, package=None): + """Import a module. + + The 'package' argument is required when performing a relative import. It + specifies the package to use as the anchor point from which to resolve the + relative import to an absolute import. 
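+
+    A quick sketch of both call styles, which here resolve to the same
+    module::
+
+        mod = import_module('webassets.filter')               # absolute
+        mod = import_module('.filter', package='webassets')   # relative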
+ + """ + if name.startswith('.'): + if not package: + raise TypeError("relative imports require the 'package' argument") + level = 0 + for character in name: + if character != '.': + break + level += 1 + name = _resolve_name(name[level:], package, level) + __import__(name) + return sys.modules[name] + diff --git a/pelican/plugins/webassets/vendor/webassets/loaders.py b/pelican/plugins/webassets/vendor/webassets/loaders.py new file mode 100644 index 0000000..500ab57 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/loaders.py @@ -0,0 +1,338 @@ +"""Loaders are helper classes which will read environments and/or +bundles from a source, like a configuration file. + +This can be used as an alternative to an imperative setup. +""" + +import os, sys +from os import path +import glob, fnmatch +import inspect +import types +from webassets import six +try: + import yaml +except ImportError: + pass + +from webassets import six +from webassets import Environment +from webassets.bundle import Bundle +from webassets.exceptions import EnvironmentError +from webassets.filter import register_filter +from webassets.importlib import import_module + + +__all__ = ('Loader', 'LoaderError', 'PythonLoader', 'YAMLLoader', + 'GlobLoader',) + + + +class LoaderError(Exception): + """Loaders should raise this when they can't deal with a given file. + """ + + +class YAMLLoader(object): + """Will load an environment or a set of bundles from + `YAML `_ files. + """ + + def __init__(self, file_or_filename): + try: + yaml + except NameError: + raise EnvironmentError('PyYAML is not installed') + else: + self.yaml = yaml + self.file_or_filename = file_or_filename + + def _yield_bundle_contents(self, data): + """Yield bundle contents from the given dict. + + Each item yielded will be either a string representing a file path + or a bundle.""" + contents = data.get('contents', []) + if isinstance(contents, six.string_types): + contents = contents, + for content in contents: + if isinstance(content, dict): + content = self._get_bundle(content) + yield content + + def _get_bundle(self, data): + """Return a bundle initialised by the given dict.""" + kwargs = dict( + filters=data.get('filters', None), + output=data.get('output', None), + debug=data.get('debug', None), + extra=data.get('extra', {}), + config=data.get('config', {}), + depends=data.get('depends', None)) + return Bundle(*list(self._yield_bundle_contents(data)), **kwargs) + + def _get_bundles(self, obj, known_bundles=None): + """Return a dict that keys bundle names to bundles.""" + bundles = {} + for key, data in six.iteritems(obj): + if data is None: + data = {} + bundles[key] = self._get_bundle(data) + + # now we need to recurse through the bundles and get any that + # are included in each other. + for bundle_name, bundle in bundles.items(): + # copy contents + contents = list(bundle.contents) + for i, item in enumerate(bundle.contents): + if item in bundles: + contents[i] = bundles[item] + elif known_bundles and item in known_bundles: + contents[i] = known_bundles[item] + # cast back to a tuple + contents = tuple(contents) + if contents != bundle.contents: + bundle.contents = contents + return bundles + + def _open(self): + """Returns a (fileobj, filename) tuple. + + The filename can be False if it is unknown. 
+ """ + if isinstance(self.file_or_filename, six.string_types): + return open(self.file_or_filename), self.file_or_filename + + file = self.file_or_filename + return file, getattr(file, 'name', False) + + @classmethod + def _get_import_resolver(cls): + """ method that can be overridden in tests """ + from zope.dottedname.resolve import resolve as resolve_dotted + return resolve_dotted + + def load_bundles(self, environment=None): + """Load a list of :class:`Bundle` instances defined in the YAML file. + + Expects the following format: + + .. code-block:: yaml + + bundle-name: + filters: sass,cssutils + output: cache/default.css + contents: + - css/jquery.ui.calendar.css + - css/jquery.ui.slider.css + another-bundle: + # ... + + Bundles may reference each other: + + .. code-block:: yaml + + js-all: + contents: + - jquery.js + - jquery-ui # This is a bundle reference + jquery-ui: + contents: jqueryui/*.js + + If an ``environment`` argument is given, it's bundles + may be referenced as well. Note that you may pass any + compatibly dict-like object. + + Finally, you may also use nesting: + + .. code-block:: yaml + + js-all: + contents: + - jquery.js + # This is a nested bundle + - contents: "*.coffee" + filters: coffeescript + + """ + # TODO: Support a "consider paths relative to YAML location, return + # as absolute paths" option? + f, _ = self._open() + try: + obj = self.yaml.safe_load(f) or {} + return self._get_bundles(obj, environment) + finally: + f.close() + + def load_environment(self): + """Load an :class:`Environment` instance defined in the YAML file. + + Expects the following format: + + .. code-block:: yaml + + directory: ../static + url: /media + debug: True + updater: timestamp + filters: + - my_custom_package.my_filter + config: + compass_bin: /opt/compass + another_custom_config_value: foo + + bundles: + # ... + + All values, including ``directory`` and ``url`` are optional. The + syntax for defining bundles is the same as for + :meth:`~.YAMLLoader.load_bundles`. + + Sample usage:: + + from webassets.loaders import YAMLLoader + loader = YAMLLoader('asset.yml') + env = loader.load_environment() + + env['some-bundle'].urls() + """ + f, filename = self._open() + try: + obj = self.yaml.safe_load(f) or {} + + env = Environment() + + # Load environment settings + for setting in ('debug', 'cache', 'versions', 'url_expire', + 'auto_build', 'url', 'directory', 'manifest', 'load_path', + 'cache_file_mode', + # TODO: The deprecated values; remove at some point + 'expire', 'updater'): + if setting in obj: + setattr(env, setting, obj[setting]) + + # Treat the 'directory' option special, make it relative to the + # path of the YAML file, if we know it. 
+ if filename and 'directory' in env.config: + env.directory = path.normpath( + path.join(path.dirname(filename), + env.config['directory'])) + + # Treat the 'filters' option special, it should resolve the + # entries as classes and register them to the environment + if 'filters' in obj: + try: + resolve_dotted = self._get_import_resolver() + except ImportError: + raise EnvironmentError( + "In order to use custom filters in the YAMLLoader " + "you must install the zope.dottedname package") + for filter_class in obj['filters']: + try: + cls = resolve_dotted(filter_class) + except ImportError: + raise LoaderError("Unable to resolve class %s" % filter_class) + if inspect.isclass(cls): + register_filter(cls) + else: + raise LoaderError("Custom filters must be classes " + "not modules or functions") + + # Load custom config options + if 'config' in obj: + env.config.update(obj['config']) + + # Load bundles + bundles = self._get_bundles(obj.get('bundles', {})) + for name, bundle in six.iteritems(bundles): + env.register(name, bundle) + + return env + finally: + f.close() + + +class PythonLoader(object): + """Basically just a simple helper to import a Python file and + retrieve the bundles defined there. + """ + + environment = "environment" + + def __init__(self, module_name): + if isinstance(module_name, types.ModuleType): + self.module = module_name + else: + sys.path.insert(0, '') # Ensure the current directory is on the path + try: + try: + if ":" in module_name: + module_name, env = module_name.split(":") + self.environment = env + self.module = import_module(module_name) + except ImportError as e: + raise LoaderError(e) + finally: + sys.path.pop(0) + + def load_bundles(self): + """Load ``Bundle`` objects defined in the Python module. + + Collects all bundles in the global namespace. + """ + bundles = {} + for name in dir(self.module): + value = getattr(self.module, name) + if isinstance(value, Bundle): + bundles[name] = value + return bundles + + def load_environment(self): + """Load an ``Environment`` defined in the Python module. + + Expects as default a global name ``environment`` to be defined, + or overridden by passing a string ``module:environment`` to the + constructor. + """ + try: + return getattr(self.module, self.environment) + except AttributeError as e: + raise LoaderError(e) + + +def recursive_glob(treeroot, pattern): + """ + From: + http://stackoverflow.com/questions/2186525/2186639#2186639 + """ + results = [] + for base, dirs, files in os.walk(treeroot): + goodfiles = fnmatch.filter(files, pattern) + results.extend(os.path.join(base, f) for f in goodfiles) + return results + + +class GlobLoader(object): + """Base class with some helpers for loaders which need to search + for files. + """ + + def glob_files(self, f, recursive=False): + if isinstance(f, tuple): + return iter(recursive_glob(f[0], f[1])) + else: + return iter(glob.glob(f)) + + def with_file(self, filename, then_run): + """Call ``then_run`` with the file contents. + """ + file = open(filename, 'rb') + try: + contents = file.read() + try: + return then_run(filename, contents) + except LoaderError: + # We can't handle this file. + pass + finally: + file.close() diff --git a/pelican/plugins/webassets/vendor/webassets/merge.py b/pelican/plugins/webassets/vendor/webassets/merge.py new file mode 100644 index 0000000..3d70bff --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/merge.py @@ -0,0 +1,356 @@ +"""Contains the core functionality that manages merging of assets. 
+""" +from __future__ import with_statement +import contextlib + +try: + from urllib.request import Request as URLRequest, urlopen + from urllib.error import HTTPError +except ImportError: + from urllib2 import Request as URLRequest, urlopen + from urllib2 import HTTPError +import logging +from io import open +from webassets import six +from webassets.six.moves import filter + +from .utils import cmp_debug_levels, StringIO, hash_func + + +__all__ = ('FileHunk', 'MemoryHunk', 'merge', 'FilterTool', + 'MoreThanOneFilterError', 'NoFilters') + + +# Log which is used to output low-level information about what the build does. +# This is setup such that it does not output just because the root level +# "webassets" logger is set to level DEBUG (for example via the commandline +# --verbose option). Instead, the messages are only shown when an environment +# variable is set. +# However, we might want to change this in the future. The CLI --verbose option +# could instead just set the level to NOTICE, for example. +log = logging.getLogger('webassets.debug') +log.addHandler(logging.StreamHandler()) +import os +if os.environ.get('WEBASSETS_DEBUG'): + log.setLevel(logging.DEBUG) +else: + log.setLevel(logging.ERROR) + + +class BaseHunk(object): + """Abstract base class. + """ + + def mtime(self): + raise NotImplementedError() + + def id(self): + return hash_func(self.data()) + + def __eq__(self, other): + if isinstance(other, BaseHunk): + # Allow class to be used as a unique dict key. + return hash_func(self) == hash_func(other) + return False + + def data(self): + raise NotImplementedError() + + def save(self, filename): + with open(filename, 'w', encoding='utf-8') as f: + f.write(self.data()) + + +class FileHunk(BaseHunk): + """Exposes a single file through as a hunk. + """ + + def __init__(self, filename): + self.filename = filename + + def __repr__(self): + return '<%s %s>' % (self.__class__.__name__, self.filename) + + def mtime(self): + pass + + def data(self): + f = open(self.filename, 'r', encoding='utf-8') + try: + return f.read() + finally: + f.close() + + +class UrlHunk(BaseHunk): + """Represents a file that is referenced by an Url. + + If an environment is given, it's cache will be used to cache the url + contents, and to access it, as allowed by the etag/last modified headers. 
+ """ + + def __init__(self, url, env=None): + self.url = url + self.env = env + + def __repr__(self): + return '<%s %s>' % (self.__class__.__name__, self.url) + + def data(self): + if not hasattr(self, '_data'): + request = URLRequest(self.url) + + # Look in the cache for etag / last modified headers to use + # TODO: "expires" header could be supported + if self.env and self.env.cache: + headers = self.env.cache.get( + ('url', 'headers', self.url)) + if headers: + etag, lmod = headers + if etag: request.add_header('If-None-Match', etag) + if lmod: request.add_header('If-Modified-Since', lmod) + + # Make a request + try: + response = urlopen(request) + except HTTPError as e: + if e.code != 304: + raise + # Use the cached version of the url + self._data = self.env.cache.get(('url', 'contents', self.url)) + else: + with contextlib.closing(response): + data = response.read() + if isinstance(data, six.binary_type): + data = data.decode('utf-8') + self._data = data + + # Cache the info from this request + if self.env and self.env.cache: + self.env.cache.set( + ('url', 'headers', self.url), + (response.headers.get("ETag"), + response.headers.get("Last-Modified"))) + self.env.cache.set(('url', 'contents', self.url), self._data) + return self._data + + +class MemoryHunk(BaseHunk): + """Content that is no longer a direct representation of a source file. It + might have filters applied, and is probably the result of merging multiple + individual source files together. + """ + + def __init__(self, data, files=None): + self._data = data + self.files = files or [] + + def __repr__(self): + # Include a has of the data. We want this during logging, so we + # can see which hunks contain identical content. Because this is + # a question of performance, make sure to log in such a way that + # when logging is disabled, this won't be called, i.e.: don't + # %s-format yourself, let logging do it as needed. + return '<%s %s>' % (self.__class__.__name__, hash_func(self)) + + def mtime(self): + pass + + def data(self): + if hasattr(self._data, 'read'): + return self._data.read() + return self._data + + def save(self, filename): + f = open(filename, 'w', encoding='utf-8') + try: + f.write(self.data()) + finally: + f.close() + + +def merge(hunks, separator=None): + """Merge the given list of hunks, returning a new ``MemoryHunk`` object. + """ + # TODO: combine the list of source files, we'd like to collect them + # The linebreak is important in certain cases for Javascript + # files, like when a last line is a //-comment. + if not separator: + separator = '\n' + return MemoryHunk(separator.join([h.data() for h in hunks])) + + +class MoreThanOneFilterError(Exception): + + def __init__(self, message, filters): + Exception.__init__(self, message) + self.filters = filters + + +class NoFilters(Exception): + pass + + +class FilterTool(object): + """Can apply filters to hunk objects, while using the cache. + + If ``no_cache_read`` is given, then the cache will not be considered for + this operation (though the result will still be written to the cache). + + ``kwargs`` are options that should be passed along to the filters. + """ + + VALID_TRANSFORMS = ('input', 'output',) + VALID_FUNCS = ('open', 'concat',) + + def __init__(self, cache=None, no_cache_read=False, kwargs=None): + self.cache = cache + self.no_cache_read = no_cache_read + self.kwargs = kwargs or {} + + def _wrap_cache(self, key, func): + """Return cache value ``key``, or run ``func``. 
+ """ + if self.cache: + if not self.no_cache_read: + log.debug('Checking cache for key %s', key) + content = self.cache.get(key) + if not content in (False, None): + log.debug('Using cached result for %s', key) + return MemoryHunk(content) + + content = func().getvalue() + if self.cache: + log.debug('Storing result in cache with key %s', key,) + self.cache.set(key, content) + return MemoryHunk(content) + + def apply(self, hunk, filters, type, kwargs=None): + """Apply the given list of filters to the hunk, returning a new + ``MemoryHunk`` object. + + ``kwargs`` are options that should be passed along to the filters. + If ``hunk`` is a file hunk, a ``source_path`` key will automatically + be added to ``kwargs``. + """ + assert type in self.VALID_TRANSFORMS + log.debug('Need to run method "%s" of filters (%s) on hunk %s with ' + 'kwargs=%s', type, filters, hunk, kwargs) + + filters = [f for f in filters if getattr(f, type, None)] + if not filters: # Short-circuit + log.debug('No filters have "%s" methods, returning hunk ' + 'unchanged' % (type,)) + return hunk + + kwargs_final = self.kwargs.copy() + kwargs_final.update(kwargs or {}) + + def func(): + data = StringIO(hunk.data()) + for filter in filters: + log.debug('Running method "%s" of %s with kwargs=%s', + type, filter, kwargs_final) + out = StringIO(u'') # For 2.x, StringIO().getvalue() returns str + getattr(filter, type)(data, out, **kwargs_final) + data = out + data.seek(0) + + return data + + additional_cache_keys = [] + if kwargs_final: + for filter in filters: + additional_cache_keys += filter.get_additional_cache_keys(**kwargs_final) + + # Note that the key used to cache this hunk is different from the key + # the hunk will expose to subsequent merges, i.e. hunk.key() is always + # based on the actual content, and does not match the cache key. The + # latter also includes information about for example the filters used. + # + # It wouldn't have to be this way. Hunk could subsequently expose their + # cache key through hunk.key(). This would work as well, but would be + # an inferior solution: Imagine a source file which receives + # non-substantial changes, in the sense that they do not affect the + # filter output, for example whitespace. If a hunk's key is the cache + # key, such a change would invalidate the caches for all subsequent + # operations on this hunk as well, even though it didn't actually + # change after all. + key = ("hunk", hunk, tuple(filters), type, additional_cache_keys) + return self._wrap_cache(key, func) + + def apply_func(self, filters, type, args, kwargs=None, cache_key=None): + """Apply a filter that is not a "stream in, stream out" transform (i.e. + like the input() and output() filter methods). Instead, the filter + method is given the arguments in ``args`` and should then produce an + output stream. This is used, e.g., for the concat() and open() filter + methods. + + Only one such filter can run per operation. + + ``cache_key`` may be a list of additional values to use as the cache + key, in addition to the default key (the filter and arguments). 
+ """ + assert type in self.VALID_FUNCS + log.debug('Need to run method "%s" of one of the filters (%s) ' + 'with args=%s, kwargs=%s', type, filters, args, kwargs) + + filters = [f for f in filters if getattr(f, type, None)] + if not filters: # Short-circuit + log.debug('No filters have a "%s" method' % type) + raise NoFilters() + + if len(filters) > 1: + raise MoreThanOneFilterError( + 'These filters cannot be combined: %s' % ( + ', '.join([f.name for f in filters])), filters) + + kwargs_final = self.kwargs.copy() + kwargs_final.update(kwargs or {}) + + def func(): + filter = filters[0] + out = StringIO(u'') # For 2.x, StringIO().getvalue() returns str + log.debug('Running method "%s" of %s with args=%s, kwargs=%s', + type, filter, args, kwargs) + getattr(filter, type)(out, *args, **kwargs_final) + return out + + additional_cache_keys = [] + if kwargs_final: + for filter in filters: + additional_cache_keys += filter.get_additional_cache_keys(**kwargs_final) + + key = ("hunk", args, tuple(filters), type, cache_key or [], additional_cache_keys) + return self._wrap_cache(key, func) + + +def merge_filters(filters1, filters2): + """Merge two filter lists into one. + + Duplicate filters are removed. Since filter order is important, the order + of the arguments to this function also matter. Duplicates are always + removed from the second filter set if they exist in the first. + + The result will always be ``filters1``, with additional unique filters + from ``filters2`` appended. Within the context of a hierarchy, you want + ``filters2`` to be the parent. + + This function presumes that all the given filters inherit from ``Filter``, + which properly implements operators to determine duplicate filters. + """ + result = list(filters1[:]) + if filters2: + for f in filters2: + if not f in result: + result.append(f) + return result + + +def select_filters(filters, level): + """Return from the list in ``filters`` those filters which indicate that + they should run for the given debug level. + """ + return [f for f in filters + if f.max_debug_level is None or + cmp_debug_levels(level, f.max_debug_level) <= 0] diff --git a/pelican/plugins/webassets/vendor/webassets/py.typed b/pelican/plugins/webassets/vendor/webassets/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/pelican/plugins/webassets/vendor/webassets/script.py b/pelican/plugins/webassets/vendor/webassets/script.py new file mode 100644 index 0000000..102a421 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/script.py @@ -0,0 +1,582 @@ +from __future__ import print_function +import shutil +import os, sys +import time +import logging + +from webassets.loaders import PythonLoader, YAMLLoader +from webassets.bundle import get_all_bundle_files +from webassets.exceptions import BuildError +from webassets.updater import TimestampUpdater +from webassets.merge import MemoryHunk +from webassets.version import get_manifest +from webassets.cache import FilesystemCache +from webassets.utils import set, StringIO + + +__all__ = ('CommandError', 'CommandLineEnvironment', 'main') + + +# logging has WARNING as default level, for the CLI we want INFO. Set this +# as early as possible, so that user customizations will not be overwritten. +logging.getLogger('webassets.script').setLevel(logging.INFO) + + +class CommandError(Exception): + pass + + +class Command(object): + """Base-class for a command used by :class:`CommandLineEnvironment`. 
+ + Each command being a class opens up certain possibilities with respect to + subclassing and customizing the default CLI. + """ + + def __init__(self, cmd_env): + self.cmd = cmd_env + + def __getattr__(self, name): + # Make stuff from cmd environment easier to access + return getattr(self.cmd, name) + + def __call__(self, *args, **kwargs): + raise NotImplementedError() + + +class BuildCommand(Command): + + def __call__(self, bundles=None, output=None, directory=None, no_cache=None, + manifest=None, production=None): + """Build assets. + + ``bundles`` + A list of bundle names. If given, only this list of bundles + should be built. + + ``output`` + List of (bundle, filename) 2-tuples. If given, only these + bundles will be built, using the custom output filenames. + Cannot be used with ``bundles``. + + ``directory`` + Custom output directory to use for the bundles. The original + basenames defined in the bundle ``output`` attribute will be + used. If the ``output`` of the bundles are pointing to different + directories, they will be offset by their common prefix. + Cannot be used with ``output``. + + ``no_cache`` + If set, a cache (if one is configured) will not be used. + + ``manifest`` + If set, the given manifest instance will be used, instead of + any that might have been configured in the Environment. The value + passed will be resolved through ``get_manifest()``. If this fails, + a file-based manifest will be used using the given value as the + filename. + + ``production`` + If set to ``True``, then :attr:`Environment.debug`` will forcibly + be disabled (set to ``False``) during the build. + """ + + # Validate arguments + if bundles and output: + raise CommandError( + 'When specifying explicit output filenames you must ' + 'do so for all bundles you want to build.') + if directory and output: + raise CommandError('A custom output directory cannot be ' + 'combined with explicit output filenames ' + 'for individual bundles.') + + if production: + # TODO: Reset again (refactor commands to be classes) + self.environment.debug = False + + # TODO: Oh how nice it would be to use the future options stack. + if manifest is not None: + try: + manifest = get_manifest(manifest, env=self.environment) + except ValueError: + manifest = get_manifest( + # abspath() is important, or this will be considered + # relative to Environment.directory. + "file:%s" % os.path.abspath(manifest), + env=self.environment) + self.environment.manifest = manifest + + # Use output as a dict. + if output: + output = dict(output) + + # Validate bundle names + bundle_names = bundles if bundles else (output.keys() if output else []) + for name in bundle_names: + if not name in self.environment: + raise CommandError( + 'I do not know a bundle name named "%s".' % name) + + # Make a list of bundles to build, and the filename to write to. + if bundle_names: + # TODO: It's not ok to use an internal property here. + bundles = [(n,b) for n, b in self.environment._named_bundles.items() + if n in bundle_names] + else: + # Includes unnamed bundles as well. + bundles = [(None, b) for b in self.environment] + + # Determine common prefix for use with ``directory`` option. + if directory: + prefix = os.path.commonprefix( + [os.path.normpath(b.resolve_output()) + for _, b in bundles if b.output]) + # dirname() gives the right value for a single file. + prefix = os.path.dirname(prefix) + + to_build = [] + for name, bundle in bundles: + # TODO: We really should support this. 
This error here
+            # is just in place of a less understandable error that would
+            # otherwise occur.
+            if bundle.is_container and directory:
+                raise CommandError(
+                    'A custom output directory cannot currently be '
+                    'used with container bundles.')
+
+            # Determine which filename to use, if not the default.
+            overwrite_filename = None
+            if output:
+                overwrite_filename = output[name]
+            elif directory:
+                offset = os.path.normpath(
+                    bundle.resolve_output())[len(prefix)+1:]
+                overwrite_filename = os.path.join(directory, offset)
+            to_build.append((bundle, overwrite_filename, name,))
+
+        # Build.
+        built = []
+        for bundle, overwrite_filename, name in to_build:
+            if name:
+                # A name is not necessarily available if the bundle was
+                # registered without one.
+                self.log.info("Building bundle: %s (to %s)" % (
+                    name, overwrite_filename or bundle.output))
+            else:
+                self.log.info("Building bundle: %s" % bundle.output)
+
+            try:
+                if not overwrite_filename:
+                    with bundle.bind(self.environment):
+                        bundle.build(force=True, disable_cache=no_cache)
+                else:
+                    # TODO: Rethink how we deal with container bundles here.
+                    # As it currently stands, we write all child bundles
+                    # to the target output, merged (which is also why we
+                    # create and force writing to a StringIO instead of just
+                    # using the ``Hunk`` objects that build() would return
+                    # anyway).
+                    output = StringIO()
+                    with bundle.bind(self.environment):
+                        bundle.build(force=True, output=output,
+                                     disable_cache=no_cache)
+                    if directory:
+                        # Only auto-create directories in this mode.
+                        output_dir = os.path.dirname(overwrite_filename)
+                        if not os.path.exists(output_dir):
+                            os.makedirs(output_dir)
+                    MemoryHunk(output.getvalue()).save(overwrite_filename)
+                built.append(bundle)
+            except BuildError as e:
+                self.log.error("Failed, error was: %s" % e)
+        if len(built):
+            self.event_handlers['post_build']()
+        if len(built) != len(to_build):
+            return 2
+
+
+class WatchCommand(Command):
+
+    def __call__(self, loop=None):
+        """Watch assets for changes.
+
+        ``loop``
+            A callback, taking no arguments, to be called once every loop
+            iteration. Can be useful to integrate the command with other code.
+            If not specified, the loop will call ``time.sleep()``.
+        """
+        # TODO: This should probably also restart when the code changes.
+        mtimes = {}
+
+        try:
+            # Before starting to watch for changes, also recognize changes
+            # made while we did not run, and apply those immediately.
+            for bundle in self.environment:
+                print('Bringing up to date: %s' % bundle.output)
+                bundle.build(force=False)
+
+            self.log.info("Watching %d bundles for changes..." %
+                          len(self.environment))
+
+            while True:
+                changed_bundles = self.check_for_changes(mtimes)
+
+                built = []
+                for bundle in changed_bundles:
+                    print("Building bundle: %s ..." % bundle.output, end=' ')
+                    sys.stdout.flush()
+                    try:
+                        bundle.build(force=True)
+                        built.append(bundle)
+                    except BuildError as e:
+                        print("")
+                        print("Failed: %s" % e)
+                    else:
+                        print("done")
+
+                if len(built):
+                    self.event_handlers['post_build']()
+
+                do_end = loop() if loop else time.sleep(0.1)
+                if do_end:
+                    break
+        except KeyboardInterrupt:
+            pass
+
+    def check_for_changes(self, mtimes):
+        # Do not update original mtimes dict right away, so that we detect
+        # all bundle changes if a file is in multiple bundles.
+        _new_mtimes = mtimes.copy()
+
+        changed_bundles = set()
+        # TODO: An optimization was lost here, skipping a bundle once
+        # a single file has been found to have changed. Bring back.
+ for filename, bundles_to_update in self.yield_files_to_watch(): + stat = os.stat(filename) + mtime = stat.st_mtime + if sys.platform == "win32": + mtime -= stat.st_ctime + + if mtimes.get(filename, mtime) != mtime: + if callable(bundles_to_update): + # Hook for when file has changed + try: + bundles_to_update = bundles_to_update() + except EnvironmentError: + # EnvironmentError is what the hooks is allowed to + # raise for a temporary problem, like an invalid config + import traceback + traceback.print_exc() + # Don't update anything, wait for another change + bundles_to_update = set() + + if bundles_to_update is True: + # Indicates all bundles should be rebuilt for the change + bundles_to_update = set(self.environment) + changed_bundles |= bundles_to_update + _new_mtimes[filename] = mtime + _new_mtimes[filename] = mtime + + mtimes.update(_new_mtimes) + return changed_bundles + + def yield_files_to_watch(self): + for bundle in self.environment: + for filename in get_all_bundle_files(bundle): + yield filename, set([bundle]) + + +class CleanCommand(Command): + + def __call__(self): + """Delete generated assets. + """ + self.log.info('Cleaning generated assets...') + for bundle in self.environment: + if not bundle.output: + continue + file_path = bundle.resolve_output(self.environment) + if os.path.exists(file_path): + os.unlink(file_path) + self.log.info("Deleted asset: %s" % bundle.output) + if isinstance(self.environment.cache, FilesystemCache): + shutil.rmtree(self.environment.cache.directory) + + +class CheckCommand(Command): + + def __call__(self): + """Check to see if assets need to be rebuilt. + + A non-zero exit status will be returned if any of the input files are + newer (based on mtime) than their output file. This is intended to be + used in pre-commit hooks. + """ + needsupdate = False + updater = self.environment.updater + if not updater: + self.log.debug('no updater configured, using TimestampUpdater') + updater = TimestampUpdater() + for bundle in self.environment: + self.log.info('Checking asset: %s', bundle.output) + if updater.needs_rebuild(bundle, self.environment): + self.log.info(' needs update') + needsupdate = True + if needsupdate: + sys.exit(-1) + + +class CommandLineEnvironment(object): + """Implements the core functionality for a command line frontend to + ``webassets``, abstracted in a way to allow frameworks to integrate the + functionality into their own tools, for example, as a Django management + command, or a command for ``Flask-Script``. + """ + + def __init__(self, env, log, post_build=None, commands=None): + self.environment = env + self.log = log + self.event_handlers = dict(post_build=lambda: True) + if callable(post_build): + self.event_handlers['post_build'] = post_build + + # Instantiate each command + command_def = self.DefaultCommands.copy() + command_def.update(commands or {}) + self.commands = {} + for name, construct in command_def.items(): + if not construct: + continue + if not isinstance(construct, (list, tuple)): + construct = [construct, (), {}] + self.commands[name] = construct[0]( + self, *construct[1], **construct[2]) + + def __getattr__(self, item): + # Allow method-like access to commands. + if item in self.commands: + return self.commands[item] + raise AttributeError(item) + + def invoke(self, command, args): + """Invoke ``command``, or throw a CommandError. + + This is essentially a simple validation mechanism. Feel free + to call the individual command methods manually. 
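+
+        For example (a sketch; ``assets_env`` and ``log`` come from the
+        host application)::
+
+            cli = CommandLineEnvironment(assets_env, log)
+            cli.invoke('build', {})                    # build all bundles
+            cli.invoke('build', {'production': True})  # force debug off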
+        """
+        try:
+            function = self.commands[command]
+        except KeyError as e:
+            raise CommandError('unknown command: %s' % e)
+        else:
+            return function(**args)
+
+    # List of commands installed
+    DefaultCommands = {
+        'build': BuildCommand,
+        'watch': WatchCommand,
+        'clean': CleanCommand,
+        'check': CheckCommand
+    }
+
+
+class GenericArgparseImplementation(object):
+    """Generic command line utility to interact with a webassets
+    environment.
+
+    This is effectively a reference implementation of a command line utility
+    based on the ``CommandLineEnvironment`` class. Implementers may find it
+    feasible to simply base their own command line utility on this, rather
+    than implementing something custom on top of ``CommandLineEnvironment``.
+    In fact, if that is possible, you are encouraged to do so for greater
+    consistency across implementations.
+    """
+
+    class WatchCommand(WatchCommand):
+        """Extended watch command that also looks at the config file
+        itself."""
+
+        def __init__(self, cmd_env, argparse_ns):
+            WatchCommand.__init__(self, cmd_env)
+            self.ns = argparse_ns
+
+        def yield_files_to_watch(self):
+            for result in WatchCommand.yield_files_to_watch(self):
+                yield result
+            # If the config changes, rebuild all bundles
+            if getattr(self.ns, 'config', None):
+                yield self.ns.config, self.reload_config
+
+        def reload_config(self):
+            try:
+                self.cmd.environment = \
+                    YAMLLoader(self.ns.config).load_environment()
+            except Exception as e:
+                raise EnvironmentError(e)
+            return True
+
+    def __init__(self, env=None, log=None, prog=None, no_global_options=False):
+        try:
+            import argparse
+        except ImportError:
+            raise RuntimeError(
+                'The webassets command line now requires the '
+                '"argparse" library on Python versions <= 2.6.')
+        else:
+            self.argparse = argparse
+        self.env = env
+        self.log = log
+        self._construct_parser(prog, no_global_options)
+
+    def _construct_parser(self, prog=None, no_global_options=False):
+        self.parser = parser = self.argparse.ArgumentParser(
+            description="Manage assets.",
+            prog=prog)
+
+        if not no_global_options:
+            # Start with the base arguments that are valid for any command.
+            # XXX: Add those to the subparser?
+            parser.add_argument("-v", dest="verbose", action="store_true",
+                                help="be verbose")
+            parser.add_argument("-q", action="store_true", dest="quiet",
+                                help="be quiet")
+            if self.env is None:
+                loadenv = parser.add_mutually_exclusive_group()
+                loadenv.add_argument("-c", "--config", dest="config",
+                                     help="read environment from a YAML file")
+                loadenv.add_argument("-m", "--module", dest="module",
+                                     help="read environment from a Python module")
+
+        # Add subparsers.
+        subparsers = parser.add_subparsers(dest='command')
+        for command in CommandLineEnvironment.DefaultCommands.keys():
+            command_parser = subparsers.add_parser(command)
+            maker = getattr(self, 'make_%s_parser' % command, False)
+            if maker:
+                maker(command_parser)
+
+    @staticmethod
+    def make_build_parser(parser):
+        parser.add_argument(
+            'bundles', nargs='*', metavar='BUNDLE',
+            help='Optional bundle names to process. If none are '
+                 'specified, then all known bundles will be built.')
+        parser.add_argument(
+            '--output', '-o', nargs=2, action='append',
+            metavar=('BUNDLE', 'FILE'),
+            help='Build the given bundle, and use a custom output '
+                 'file. Can be given multiple times.')
+        parser.add_argument(
+            '--directory', '-d',
+            help='Write built files to this directory, using the '
+                 'basename defined by the bundle. Will offset '
+                 'the original bundle output paths on their common '
+                 'prefix. Cannot be used with --output.')
+        parser.add_argument(
+            '--no-cache', action='store_true',
+            help='Do not use a cache that might be configured.')
+        parser.add_argument(
+            '--manifest',
+            help='Write a manifest to the given file. Also supports '
+                 'the id:arg format, if you want to use a different '
+                 'manifest implementation.')
+        parser.add_argument(
+            '--production', action='store_true',
+            help='Forcibly turn off debug mode for the build. This '
+                 'only has an effect if debug is set to "merge".')
+
+    def _setup_logging(self, ns):
+        if self.log:
+            log = self.log
+        else:
+            log = logging.getLogger('webassets.script')
+            if not log.handlers:
+                # In theory, this could run multiple times (e.g. tests)
+                handler = logging.StreamHandler()
+                log.addHandler(handler)
+                # Note that setting the level filter at the handler level is
+                # better than at the logger level, since this is "our"
+                # handler: we create it, for the purpose of having a default
+                # output. The logger itself may be modified by the user.
+                handler.setLevel(logging.DEBUG if ns.verbose else (
+                    logging.WARNING if ns.quiet else logging.INFO))
+        return log
+
+    def _setup_assets_env(self, ns, log):
+        env = self.env
+        if env is None:
+            assert not (ns.module and ns.config)
+            if ns.module:
+                env = PythonLoader(ns.module).load_environment()
+            if ns.config:
+                env = YAMLLoader(ns.config).load_environment()
+        return env
+
+    def _setup_cmd_env(self, assets_env, log, ns):
+        return CommandLineEnvironment(assets_env, log, commands={
+            'watch': (GenericArgparseImplementation.WatchCommand, (ns,), {})
+        })
+
+    def _prepare_command_args(self, ns):
+        # Prepare a dict of arguments cleaned of values that are not
+        # command-specific, and which the command method would not accept.
+        args = vars(ns).copy()
+        for action in self.parser._actions:
+            dest = action.dest
+            if dest in args:
+                del args[dest]
+        return args
+
+    def run_with_ns(self, ns):
+        log = self._setup_logging(ns)
+        env = self._setup_assets_env(ns, log)
+        if env is None:
+            raise CommandError(
+                "Error: No environment given or found. Maybe use -m?")
+        cmd = self._setup_cmd_env(env, log, ns)
+
+        # Run the selected command
+        args = self._prepare_command_args(ns)
+        return cmd.invoke(ns.command, args)
+
+    def run_with_argv(self, argv):
+        try:
+            ns = self.parser.parse_args(argv)
+        except SystemExit as e:
+            # We do not want the main() function to exit the program.
+            # See run() instead.
+            return e.args[0]
+
+        return self.run_with_ns(ns)
+
+    def main(self, argv):
+        """Parse the given command line.
+
+        The command line is expected to NOT include what would be
+        ``sys.argv[0]``.
+        """
+        try:
+            return self.run_with_argv(argv)
+        except CommandError as e:
+            print(e)
+            return 1
+
+
+def main(argv, env=None):
+    """Execute the generic version of the command line interface.
+
+    You only need to work directly with ``GenericArgparseImplementation``
+    if you desire to customize things.
+
+    If no environment is given, additional arguments will be supported to
+    allow the user to specify/construct the environment on the command line.
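+
+    A minimal sketch (``my_env`` is assumed; the import path shown is
+    upstream's, while this vendored copy lives under
+    ``pelican.plugins.webassets.vendor``)::
+
+        from webassets.script import main
+        main(['build'], env=my_env)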
+ """ + return GenericArgparseImplementation(env).main(argv) + + +def run(): + """Runs the command line interface via ``main``, then exits the process + with a proper return code.""" + sys.exit(main(sys.argv[1:]) or 0) + + +if __name__ == '__main__': + run() diff --git a/pelican/plugins/webassets/vendor/webassets/six.py b/pelican/plugins/webassets/vendor/webassets/six.py new file mode 100644 index 0000000..5e0119a --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/six.py @@ -0,0 +1,417 @@ +"""Utilities for writing code that runs on Python 2 and 3""" + +# Copyright (c) 2010-2013 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import operator +import sys +import types + +__author__ = "Benjamin Peterson " +__version__ = "1.3.0" + + +# True if we are running on Python 3. +PY3 = sys.version_info[0] == 3 + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) + # This is a bit ugly, but it avoids running this again. 
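+        # (The instance attribute set above shadows this descriptor, and
+        # removing the descriptor from the class below means subsequent
+        # lookups resolve the cached object directly, without __get__.)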
+ delattr(tp, self.name) + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + + +class _MovedItems(types.ModuleType): + """Lazy loading of moved objects""" + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", 
"tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("winreg", "_winreg"), +] +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) +del attr + +moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from webassets.six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" + + _iterkeys = "keys" + _itervalues = "values" + _iteritems = "items" + _iterlists = "lists" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + _iterkeys = "iterkeys" + _itervalues = "itervalues" + _iteritems = "iteritems" + _iterlists = "iterlists" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +def iterkeys(d, **kw): + """Return an iterator over the keys of a dictionary.""" + return iter(getattr(d, _iterkeys)(**kw)) + +def itervalues(d, **kw): + """Return an iterator over the values of a dictionary.""" + return iter(getattr(d, _itervalues)(**kw)) + +def iteritems(d, **kw): + """Return an iterator over the (key, value) pairs of a dictionary.""" + return iter(getattr(d, _iteritems)(**kw)) + +def iterlists(d, **kw): + """Return an iterator over the (key, [values]) pairs of a dictionary.""" + return iter(getattr(d, _iterlists)(**kw)) + + +if PY3: + def b(s): + return s.encode("latin-1") + def u(s): + return s + if sys.version_info[1] <= 1: + def int2byte(i): + return bytes((i,)) + else: + # This is about 2x faster than the implementation above on 3.2+ + int2byte = operator.methodcaller("to_bytes", 1, "big") + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO +else: + def b(s): + return s + def u(s): + return unicode(s, "unicode_escape") + int2byte = chr + def indexbytes(buf, i): + return ord(buf[i]) + def iterbytes(buf): + return (ord(byte) for byte in buf) + import StringIO + StringIO = BytesIO = StringIO.StringIO +_add_doc(b, 
"""Byte literal""") +_add_doc(u, """Text literal""") + + +if PY3: + import builtins + exec_ = getattr(builtins, "exec") + + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + + + print_ = getattr(builtins, "print") + del builtins + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + + + exec_("""def reraise(tp, value, tb=None): + raise tp, value, tb +""") + + + def print_(*args, **kwargs): + """The new-style print function.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + def write(data): + if not isinstance(data, basestring): + data = str(data) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) + +_add_doc(reraise, """Reraise an exception.""") + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + return meta("NewBase", bases, {}) diff --git a/pelican/plugins/webassets/vendor/webassets/test.py b/pelican/plugins/webassets/vendor/webassets/test.py new file mode 100644 index 0000000..359aef5 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/test.py @@ -0,0 +1,154 @@ +"""Helpers for testing webassets. + +This is included in the webassets package because it is useful for testing +external libraries that use webassets (like the flask-assets wrapper). +""" +from __future__ import print_function + +import tempfile +import shutil +import os +from os import path +import time + +from webassets import Environment, Bundle +from webassets.six.moves import map +from webassets.six.moves import zip + + +__all__ = ('TempDirHelper', 'TempEnvironmentHelper',) + + +class TempDirHelper(object): + """Base-class for tests which provides a temporary directory + (which is properly deleted after the test is done), and various + helper methods to do filesystem operations within that directory. + """ + + default_files = {} + + def setup_method(self): + self._tempdir_created = tempfile.mkdtemp() + self.create_files(self.default_files) + + def teardown_method(self): + shutil.rmtree(self._tempdir_created) + + def __enter__(self): + self.setup_method() + return self + + def __exit__(self, type, value, traceback): + self.teardown_method() + + @property + def tempdir(self): + # Use a read-only property here, so the user is + # less likely to modify the attribute, and have + # his data deleted on teardown. 
+ return self._tempdir_created + + def create_files(self, files): + """Helper that allows to quickly create a bunch of files in + the media directory of the current test run. + """ + import codecs + # Allow passing a list of filenames to create empty files + if not hasattr(files, 'items'): + files = dict(map(lambda n: (n, ''), files)) + for name, data in files.items(): + dirs = path.dirname(self.path(name)) + if not path.exists(dirs): + os.makedirs(dirs) + f = codecs.open(self.path(name), 'w', 'utf-8') + f.write(data) + f.close() + + def create_directories(self, *dirs): + """Helper to create directories within the media directory + of the current test's environment. + """ + result = [] + for dir in dirs: + full_path = self.path(dir) + result.append(full_path) + os.makedirs(full_path) + return result + + def exists(self, name): + """Ensure the given file exists within the current test run's + media directory. + """ + return path.exists(self.path(name)) + + def get(self, name): + """Return the given file's contents. + """ + with open(self.path(name)) as f: + r = f.read() + print(repr(r)) + return r + + def unlink(self, name): + os.unlink(self.path(name)) + + def path(self, name): + """Return the given file's full path.""" + return path.join(self._tempdir_created, name) + + def setmtime(self, *files, **kwargs): + """Set the mtime of the given files. Useful helper when + needing to test things like the timestamp updater. + + Specify ``mtime`` as a keyword argument, or time.time() + will automatically be used. Returns the mtime used. + + Specify ``mod`` as a keyword argument, and the modifier + will be added to the ``mtime`` used. + """ + mtime = kwargs.pop('mtime', time.time()) + mtime += kwargs.pop('mod', 0) + assert not kwargs, "Unsupported kwargs: %s" % ', '.join(kwargs.keys()) + for f in files: + os.utime(self.path(f), (mtime, mtime)) + return mtime + + def p(self, *files): + """Print the contents of the given files to stdout; useful + for some quick debugging. + """ + if not files: + files = ['out'] # This is a often used output filename + for f in files: + content = self.get(f) + print(f) + print("-" * len(f)) + print(repr(content)) + print(content) + print() + + +class TempEnvironmentHelper(TempDirHelper): + """Base-class for tests which provides a pre-created + environment, based in a temporary directory, and utility + methods to do filesystem operations within that directory. + """ + + default_files = {'in1': 'A', 'in2': 'B', 'in3': 'C', 'in4': 'D'} + + def setup_method(self): + TempDirHelper.setup_method(self) + + self.env = self._create_environment() + # Unless we explicitly test it, we don't want to use the cache + # during testing. + self.env.cache = False + self.env.manifest = False + + def _create_environment(self): + return Environment(self._tempdir_created, '') + + def mkbundle(self, *a, **kw): + b = Bundle(*a, **kw) + b.env = self.env + return b diff --git a/pelican/plugins/webassets/vendor/webassets/updater.py b/pelican/plugins/webassets/vendor/webassets/updater.py new file mode 100644 index 0000000..0153cc7 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/updater.py @@ -0,0 +1,192 @@ +"""The auto-rebuild system is an optional part of webassets that can be used +during development, and can also be quite convenient on small sites that don't +have the performance requirements where a rebuild-check on every request is +fatal. + +This module contains classes that help determine whether a rebuild is required +for a bundle. 
This is more complicated than simply comparing the timestamps of +the source and output files. + +First, certain filters, in particular CSS compilers like SASS, allow bundle +source files to reference additional files which the user may not have listed +in the bundle definition. The bundles support an additional ``depends`` +argument that can list files that should be watched for modification. + +Second, if the bundle definition itself changes, i.e., source files being added +or removed, or the list of applied filters modified, the bundle needs to be +rebuilt also. Since there is no single fixed place where bundles are defined, +simply watching the timestamp of that bundle definition file is not good enough. + +To solve the latter problem, we employ an environment-specific cache of bundle +definitions. + +Note that there is no ``HashUpdater``. This doesn't make sense for two reasons. +First, for a live system, it isn't fast enough. Second, for prebuilding assets, +the cache is a superior solution for getting essentially the same speed +increase as using the hash to reliably determine which bundles to skip. +""" + +from webassets import six +from webassets.six.moves import map +from webassets.six.moves import zip +from webassets.exceptions import BundleError, BuildError +from webassets.utils import RegistryMetaclass, is_url, hash_func + + +__all__ = ('get_updater', 'SKIP_CACHE', + 'TimestampUpdater', 'AlwaysUpdater',) + + +SKIP_CACHE = object() +"""An updater can return this value as hint that a cache, if enabled, +should probably not be used for the rebuild; This is currently used +as a return value when a bundle's dependencies have changed, which +would currently not cause a different cache key to be used. + +This is marked a hint, because in the future, the bundle may be smart +enough to make this decision by itself. +""" + + +class BaseUpdater(six.with_metaclass(RegistryMetaclass( + clazz=lambda: BaseUpdater, attribute='needs_rebuild', + desc='an updater implementation'))): + """Base updater class. + + Child classes that define an ``id`` attribute are accessible via their + string id in the configuration. + + A single instance can be used with different environments. + """ + + def needs_rebuild(self, bundle, ctx): + """Returns ``True`` if the given bundle needs to be rebuilt, + ``False`` otherwise. + """ + raise NotImplementedError() + + def build_done(self, bundle, ctx): + """This will be called once a bundle has been successfully built. + """ + + +get_updater = BaseUpdater.resolve + + +class BundleDefUpdater(BaseUpdater): + """Supports the bundle definition cache update check that child + classes are usually going to want to use also. + """ + + def check_bundle_definition(self, bundle, ctx): + if not ctx.cache: + # If no global cache is configured, we could always + # fall back to a memory-cache specific for the rebuild + # process (store as env._update_cache); however, + # whenever a bundle definition changes, it's likely that + # a process restart will be required also, so in most cases + # this would make no sense. + return False + + cache_key = ('bdef', bundle.output) + current_hash = "%s" % hash_func(bundle) + cached_hash = ctx.cache.get(cache_key) + # This may seem counter-intuitive, but if no cache entry is found + # then we actually return "no update needed". This is because + # otherwise if no cache / a dummy cache is used, then we would be + # rebuilding every single time. 
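+        # Illustrative shape of the cached data (names assumed): the cache
+        # maps ('bdef', 'gen/packed.js') to a hash of the bundle definition,
+        # so a changed definition yields a different ``current_hash`` and
+        # the comparison below reports that a rebuild is needed.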
+        if cached_hash is not None:
+            return cached_hash != current_hash
+        return False
+
+    def needs_rebuild(self, bundle, ctx):
+        return self.check_bundle_definition(bundle, ctx)
+
+    def build_done(self, bundle, ctx):
+        if not ctx.cache:
+            return False
+        cache_key = ('bdef', bundle.output)
+        cache_value = "%s" % hash_func(bundle)
+        ctx.cache.set(cache_key, cache_value)
+
+
+class TimestampUpdater(BundleDefUpdater):
+
+    id = 'timestamp'
+
+    def check_timestamps(self, bundle, ctx, o_modified=None):
+        from .bundle import Bundle
+        from webassets.version import TimestampVersion
+
+        if not o_modified:
+            try:
+                resolved_output = bundle.resolve_output(ctx)
+            except BundleError:
+                # This exception will occur when the bundle output has a
+                # placeholder, but a version cannot be found. If the
+                # user has defined a manifest, this will just be the first
+                # build. Return True to let it happen.
+                # However, if no manifest is defined, raise an error,
+                # because otherwise, this updater would always return True,
+                # and thus not do its job at all.
+                if ctx.manifest is None:
+                    raise BuildError((
+                        '%s uses a version placeholder, and you are '
+                        'using "%s" versions. To use automatic '
+                        'building in this configuration, you need to '
+                        'define a manifest.' % (bundle, ctx.versions)))
+                return True
+
+            try:
+                o_modified = TimestampVersion.get_timestamp(resolved_output)
+            except OSError:
+                # If the output file does not exist, we'll have to rebuild
+                return True
+
+        # Recurse through the bundle hierarchy. Check the timestamp of all
+        # the bundle source files, as well as any additional
+        # dependencies that we are supposed to watch.
+        from webassets.bundle import wrap
+        for iterator, result in (
+            (lambda e: map(lambda s: s[1], bundle.resolve_contents(e)), True),
+            (bundle.resolve_depends, SKIP_CACHE)
+        ):
+            for item in iterator(ctx):
+                if isinstance(item, Bundle):
+                    nested_result = self.check_timestamps(
+                        item, wrap(ctx, item), o_modified)
+                    if nested_result:
+                        return nested_result
+                elif not is_url(item):
+                    try:
+                        s_modified = TimestampVersion.get_timestamp(item)
+                    except OSError:
+                        # If a file goes missing, always require
+                        # a rebuild.
+                        return result
+                    else:
+                        if s_modified > o_modified:
+                            return result
+        return False
+
+    def needs_rebuild(self, bundle, ctx):
+        return \
+            super(TimestampUpdater, self).needs_rebuild(bundle, ctx) or \
+            self.check_timestamps(bundle, ctx)
+
+    def build_done(self, bundle, ctx):
+        # Reset the resolved dependencies, so any globs will be
+        # re-resolved the next time we check if a rebuild is
+        # required. This ensures that we begin watching new files
+        # that are created, while still caching the globs as long
+        # as no changes happen.
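+        # (``_resolved_depends`` is the bundle's internal cache of its
+        # resolved ``depends`` globs; clearing it is what forces the
+        # re-resolution described above.)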
+        bundle._resolved_depends = None
+        super(TimestampUpdater, self).build_done(bundle, ctx)
+
+
+class AlwaysUpdater(BaseUpdater):
+
+    id = 'always'
+
+    def needs_rebuild(self, bundle, ctx):
+        return True
+
diff --git a/pelican/plugins/webassets/vendor/webassets/utils.py b/pelican/plugins/webassets/vendor/webassets/utils.py
new file mode 100644
index 0000000..985f5ce
--- /dev/null
+++ b/pelican/plugins/webassets/vendor/webassets/utils.py
@@ -0,0 +1,249 @@
+from webassets import six
+import contextlib
+import os
+import sys
+import re
+from itertools import takewhile
+
+from .exceptions import BundleError
+
+
+__all__ = ('md5_constructor', 'pickle', 'set', 'StringIO',
+           'common_path_prefix', 'working_directory', 'is_url')
+
+
+import base64
+
+if sys.version_info >= (2, 5):
+    import hashlib
+    md5_constructor = hashlib.md5
+else:
+    import md5
+    md5_constructor = md5.new
+
+
+try:
+    import cPickle as pickle
+except ImportError:
+    import pickle
+
+
+try:
+    set
+except NameError:
+    from sets import Set as set
+else:
+    set = set
+
+
+try:
+    FileNotFoundError
+except NameError:
+    FileNotFoundError = IOError
+else:
+    FileNotFoundError = FileNotFoundError
+
+
+from webassets.six import StringIO
+
+
+try:
+    from urllib import parse as urlparse
+except ImportError:  # Python 2
+    import urlparse
+    import urllib
+
+
+def hash_func(data):
+    from .cache import make_md5
+    return make_md5(data)
+
+
+_directory_separator_re = re.compile(r"[/\\]+")
+
+
+def common_path_prefix(paths, sep=os.path.sep):
+    """os.path.commonprefix() is completely in the wrong place; it's
+    useless with paths since it only looks at one character at a time,
+    see http://bugs.python.org/issue10395
+
+    This replacement is from:
+        http://rosettacode.org/wiki/Find_Common_Directory_Path#Python
+    """
+    def allnamesequal(name):
+        return all(n == name[0] for n in name[1:])
+
+    # The regex splits the paths on both / and \ characters, whereas the
+    # rosettacode.org algorithm only uses os.path.sep
+    bydirectorylevels = zip(*[_directory_separator_re.split(p) for p in paths])
+    return sep.join(x[0] for x in takewhile(allnamesequal, bydirectorylevels))
+
+
+@contextlib.contextmanager
+def working_directory(directory=None, filename=None):
+    """A context manager which changes the working directory to the given
+    path, and then changes it back to its previous value on exit.
+
+    Filters will often find this helpful.
+
+    Instead of a ``directory``, you may also give a ``filename``, and the
+    working directory will be set to the directory that file is in.
+    """
+    assert bool(directory) != bool(filename)   # xor
+    if not directory:
+        directory = os.path.dirname(filename)
+    prev_cwd = os.getcwd()
+    os.chdir(directory)
+    try:
+        yield
+    finally:
+        os.chdir(prev_cwd)
+
+
+def make_option_resolver(clazz=None, attribute=None, classes=None,
+                         allow_none=True, desc=None):
+    """Returns a function which can resolve an option to an object.
+
+    The option may be given as an instance or a class (of ``clazz``, or
+    duck-typed with an attribute ``attribute``), or a string value referring
+    to a class as defined by the registry in ``classes``.
+
+    This supports arguments, so an option may look like this:
+
+        cache:/tmp/cachedir
+
+    If this must instantiate a class, it will pass such an argument along,
+    if given. In addition, if the class to be instantiated has a classmethod
+    ``make()``, this method will be used as a factory, and will be given an
+    Environment object (if one has been passed to the resolver). This allows
+    classes that need it to initialize themselves based on an Environment.
+    """
+    assert clazz or attribute or classes
+    desc_string = ' to %s' % desc if desc else ''
+
+    def instantiate(clazz, env, *a, **kw):
+        # Create an instance of clazz, via the Factory if one is defined,
+        # passing along the Environment, or creating the class directly.
+        if hasattr(clazz, 'make'):
+            # make() protocol is that if e.g. the get_manifest() resolver
+            # takes an env, then the first argument of the factory is the
+            # env.
+            args = (env,) + a if env is not None else a
+            return clazz.make(*args, **kw)
+        return clazz(*a, **kw)
+
+    def resolve_option(option, env=None):
+        the_clazz = clazz() if callable(clazz) and not isinstance(option, type) else clazz
+
+        if not option and allow_none:
+            return None
+
+        # If the value has one of the supported attributes (duck-typing).
+        if attribute and hasattr(option, attribute):
+            if isinstance(option, type):
+                return instantiate(option, env)
+            return option
+
+        # If it is the class we support.
+        if the_clazz and isinstance(option, the_clazz):
+            return option
+        elif isinstance(option, type) and issubclass(option, the_clazz):
+            return instantiate(option, env)
+
+        # If it is a string
+        elif isinstance(option, six.string_types):
+            parts = option.split(':', 1)
+            key = parts[0]
+            arg = parts[1] if len(parts) > 1 else None
+            if key in classes:
+                return instantiate(classes[key], env, *([arg] if arg else []))
+
+        raise ValueError('%s cannot be resolved%s' % (option, desc_string))
+    resolve_option.__doc__ = """Resolve ``option``%s.""" % desc_string
+
+    return resolve_option
+
+
+def RegistryMetaclass(clazz=None, attribute=None, allow_none=True, desc=None):
+    """Returns a metaclass which will keep a registry of all subclasses,
+    keyed by their ``id`` attribute.
+
+    The metaclass will also have a ``resolve`` method which can turn a
+    string into an instance of one of the classes (based on
+    ``make_option_resolver``).
+    """
+    def eq(self, other):
+        """Return equality with config values that instantiate this."""
+        return (hasattr(self, 'id') and self.id == other) or \
+            id(self) == id(other)
+
+    def unicode(self):
+        return "%s" % (self.id if hasattr(self, 'id') else repr(self))
+
+    class Metaclass(type):
+        REGISTRY = {}
+
+        def __new__(mcs, name, bases, attrs):
+            if '__eq__' not in attrs:
+                attrs['__eq__'] = eq
+            if '__unicode__' not in attrs:
+                attrs['__unicode__'] = unicode
+            if '__str__' not in attrs:
+                attrs['__str__'] = unicode
+            new_klass = type.__new__(mcs, name, bases, attrs)
+            if hasattr(new_klass, 'id'):
+                mcs.REGISTRY[new_klass.id] = new_klass
+            return new_klass
+
+        resolve = staticmethod(make_option_resolver(
+            clazz=clazz,
+            attribute=attribute,
+            allow_none=allow_none,
+            desc=desc,
+            classes=REGISTRY
+        ))
+    return Metaclass
+
+
+def cmp_debug_levels(level1, level2):
+    """``cmp()`` for debug levels; returns a positive number if ``level1``
+    is higher than ``level2``, zero if they are equal, and a negative
+    number otherwise."""
+    level_ints = {False: 0, 'merge': 1, True: 2}
+    try:
+        cmp = lambda a, b: (a > b) - (a < b)  # 333
+        return cmp(level_ints[level1], level_ints[level2])
+    except KeyError as e:
+        # Not sure if a dependency on BundleError is proper here. Validating
+        # debug values should probably be done on assign. But because this
+        # needs to happen in two places (Environment and Bundle), we do it
+        # here.
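+        # ``e`` is the KeyError raised for the unknown level, so the
+        # message below ends up naming the offending value.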
+ raise BundleError('Invalid debug value: %s' % e) + + +def is_url(s): + if not isinstance(s, str): + return False + parsed = urlparse.urlsplit(s) + return bool(parsed.scheme and parsed.netloc) and len(parsed.scheme) > 1 + + +def calculate_sri(data): + """Calculate SRI string for data buffer.""" + hash = hashlib.sha384() + hash.update(data) + hash = hash.digest() + hash_base64 = base64.b64encode(hash).decode() + return 'sha384-{}'.format(hash_base64) + + +def calculate_sri_on_file(file_name): + """Calculate SRI string if file can be found. Otherwise silently return None""" + BUF_SIZE = 65536 + hash = hashlib.sha384() + try: + with open(file_name, 'rb') as f: + while True: + data = f.read(BUF_SIZE) + if not data: + break + hash.update(data) + hash = hash.digest() + hash_base64 = base64.b64encode(hash).decode() + return 'sha384-{}'.format(hash_base64) + except FileNotFoundError: + return None diff --git a/pelican/plugins/webassets/vendor/webassets/version.py b/pelican/plugins/webassets/vendor/webassets/version.py new file mode 100644 index 0000000..5bb7160 --- /dev/null +++ b/pelican/plugins/webassets/vendor/webassets/version.py @@ -0,0 +1,323 @@ +"""This module defines the Version classes, and the related Manifest +implementations. +""" + +from __future__ import with_statement + +import os +import pickle +from webassets import six + +from webassets.merge import FileHunk +from webassets.utils import md5_constructor, RegistryMetaclass, is_url + + +__all__ = ('get_versioner', 'VersionIndeterminableError', + 'Version', 'TimestampVersion', + 'get_manifest', 'HashVersion', 'Manifest', 'FileManifest',) + + +class VersionIndeterminableError(Exception): + pass + + +class Version(six.with_metaclass(RegistryMetaclass( + clazz=lambda: Version, attribute='determine_version', + desc='a version implementation'))): + """A Version class that can be assigned to the ``Environment.versioner`` + attribute. + + Given a bundle, this must determine its "version". This version can then + be used in the output filename of the bundle, or appended to the url as a + query string, in order to expire cached assets. + + A version could be a timestamp, a content hash, or a git revision etc. + + As a user, all you need to care about, in most cases, is whether you want + to set the ``Environment.versioner`` attribute to ``hash`` or ``timestamp``. + + A single instance can be used with different environments. + """ + + def determine_version(self, bundle, ctx, hunk=None): + """Return a string that represents the current version of the given + bundle. + + This method is called on two separate occasions: + + 1) After a bundle has been built and is about to be saved. If the + output filename contains a placeholder, this method is asked for the + version. This mode is indicated by the ``hunk`` argument being + available. + + 2) When a version is required for an already built file, either + because: + + *) An URL needs to be constructed. + *) It needs to be determined if a bundle needs an update. + + *This will only occur* if *no manifest* is used. If there is a + manifest, it would be used to determine the version instead. + + Support for option (2) is optional. If not supported, then in those + cases a manifest needs to be configured. ``VersionIndeterminableError`` + should be raised with a message why. + """ + raise NotImplementedError() + + def set_version(self, bundle, ctx, filename, version): + """Hook called after a bundle has been built. Some version classes + may need this. 
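+
+        ``TimestampVersion`` below, for example, uses this hook to stamp
+        the newly written file's mtime with the version it reported.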
+ """ + + +get_versioner = Version.resolve + + +class TimestampVersion(Version): + """Uses the most recent 'last modified' timestamp of all source files + as the version. + + Uses second-precision. + """ + + id = 'timestamp' + + def determine_version(self, bundle, ctx, hunk=None): + # Only look at an existing output file if we are not about to + # overwrite it with a new version. But if we can, simply using the + # timestamp of the final file is the fastest way to do this. + # Note that this works because of our ``save_done`` hook. + if not hunk: + from webassets.bundle import has_placeholder + if not has_placeholder(bundle.output): + return self.get_timestamp(bundle.resolve_output(ctx)) + + # If we need the timestamp for the file we just built (hunk!=None), + # or if we need the timestamp for a bundle with a placeholder, + # the way to get it is by looking at the source files. + try: + return self.find_recent_most_timestamp(bundle, ctx) + except OSError: + # Source files are missing. Under these circumstances, we cannot + # return a proper version. + assert hunk is None + raise VersionIndeterminableError( + 'source files are missing and output target has a ' + 'placeholder') + + def set_version(self, bundle, ctx, filename, version): + # Update the mtime of the newly created file with the version + os.utime(filename, (-1, version)) + + @classmethod + def get_timestamp(cls, filename): + return int(os.stat(filename).st_mtime) # Let OSError pass + + @classmethod + def find_recent_most_timestamp(cls, bundle, ctx): + from webassets.bundle import get_all_bundle_files + # Recurse through the bundle hierarchy. Check the timestamp of all + # the bundle source files, as well as any additional + # dependencies that we are supposed to watch. + most_recent = None + for filename in get_all_bundle_files(bundle, ctx): + if is_url(filename): + continue + timestamp = cls.get_timestamp(filename) + if most_recent is None or timestamp > most_recent: + most_recent = timestamp + return most_recent + + +class HashVersion(Version): + """Uses the MD5 hash of the content as the version. + + By default, only the first 8 characters of the hash are used, which + should be sufficient. This can be changed by passing the appropriate + ``length`` value to ``__init__`` (or ``None`` to use the full hash). + + You can also customize the hash used by passing the ``hash`` argument. + All constructors from ``hashlib`` are supported. + """ + + id = 'hash' + + @classmethod + def make(cls, length=None): + args = [int(length)] if length else [] + return cls(*args) + + def __init__(self, length=8, hash=md5_constructor): + self.length = length + self.hasher = hash + + def determine_version(self, bundle, ctx, hunk=None): + if not hunk: + from webassets.bundle import has_placeholder + if not has_placeholder(bundle.output): + hunk = FileHunk(bundle.resolve_output(ctx)) + else: + # Can cannot determine the version of placeholder files. + raise VersionIndeterminableError( + 'output target has a placeholder') + + hasher = self.hasher() + hasher.update(hunk.data().encode('utf-8')) + return hasher.hexdigest()[:self.length] + + +class Manifest(six.with_metaclass(RegistryMetaclass( + clazz=lambda: Manifest, desc='a manifest implementation'))): + """Persists information about the versions bundles are at. + + The Manifest plays a role only if you insert the bundle version in your + output filenames, or append the version as a querystring to the url (via + the url_expire option). 
It serves two purposes: + + - Without a manifest, it may be impossible to determine the version + at runtime. In a deployed app, the media files may be stored on + a different server entirely, and be inaccessible from the application + code. The manifest, if shipped with your application, is what still + allows to construct the proper URLs. + + - Even if it were possible to determine the version at runtime without + a manifest, it may be a costly process, and using a manifest may + give you better performance. If you use a hash-based version for + example, this hash would need to be recalculated every time a new + process is started. (*) + + (*) It needs to happen only once per process, because Bundles are smart + enough to cache their own version in memory. + + A special case is the ``Environment.auto_build`` option. A manifest + implementation should re-read its data from its out-of-process data + source on every request, if ``auto_build`` is enabled. Otherwise, if your + application is served by multiple processes, then after an automatic + rebuild in one process all other processes would continue to serve an old + version of the file (or attach an old version to the query string). + + A manifest instance is currently not guaranteed to function correctly + with multiple Environment instances. + """ + + def remember(self, bundle, ctx, version): + raise NotImplementedError() + + def query(self, bundle, ctx): + raise NotImplementedError() + + +get_manifest = Manifest.resolve + + +class FileManifest(Manifest): + """Stores version data in a single file. + + Uses Python's pickle module to store a dict data structure. You should + only use this when the manifest is read-only in production, since it is + not multi-process safe. If you use ``auto_build`` in production, use + ``CacheManifest`` instead. + + By default, the file is named ".webassets-manifest" and stored in + ``Environment.directory``. + """ + + id = 'file' + + @classmethod + def make(cls, ctx, filename=None): + if not filename: + filename = '.webassets-manifest' + return cls(os.path.join(ctx.directory, filename)) + + def __init__(self, filename): + self.filename = filename + self._load_manifest() + + def remember(self, bundle, ctx, version): + self.manifest[bundle.output] = version + self._save_manifest() + + def query(self, bundle, ctx): + if ctx.auto_build: + self._load_manifest() + return self.manifest.get(bundle.output, None) + + def _load_manifest(self): + if os.path.exists(self.filename): + with open(self.filename, 'rb') as f: + self.manifest = pickle.load(f) + else: + self.manifest = {} + + def _save_manifest(self): + with open(self.filename, 'wb') as f: + pickle.dump(self.manifest, f, protocol=2) + + +class JsonManifest(FileManifest): + """Same as ``FileManifest``, but uses JSON instead of pickle.""" + + id = 'json' + + def __init__(self, *a, **kw): + try: + import json + except ImportError: + import simplejson as json + self.json = json + super(JsonManifest, self).__init__(*a, **kw) + + def _load_manifest(self): + if os.path.exists(self.filename): + with open(self.filename, 'r') as f: + self.manifest = self.json.load(f) + else: + self.manifest = {} + + def _save_manifest(self): + with open(self.filename, 'w') as f: + self.json.dump(self.manifest, f, indent=4, sort_keys=True) + + +class CacheManifest(Manifest): + """Stores version data in the webassets cache. 
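+
+    A minimal sketch of enabling it (values illustrative)::
+
+        env.cache = True         # or a custom cache directory
+        env.manifest = 'cache'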
+
+    Since this has bad portability (you hardly want to copy your cache
+    between machines), this only makes sense when you are building on the
+    same machine where your application code runs.
+
+    When you are using ``auto_build`` in production, this is exactly what
+    you want to use, since it is multi-process safe.
+    """
+
+    id = 'cache'
+
+    def _check(self, ctx):
+        if not ctx.cache:
+            raise EnvironmentError(
+                'You are using the cache manifest, but have not '
+                'enabled the cache.')
+
+    def remember(self, bundle, ctx, version):
+        self._check(ctx)
+        ctx.cache.set(('manifest', bundle.output), version)
+
+    def query(self, bundle, ctx):
+        self._check(ctx)
+        return ctx.cache.get(('manifest', bundle.output))
+
+
+class SymlinkManifest(Manifest):
+    """Creates a symlink to the actual file.
+
+    E.g. compressed-current.js -> compressed-1ebcdc5.js
+    """
+
+    # Implementation notes: Would presumably be Linux only initially,
+    # could clean up after itself, may be hard to implement and maybe
+    # shouldn't, would only be usable to resolve placeholders in filenames.
+
+    def __init__(self):
+        raise NotImplementedError()  # TODO