Diffstat (limited to 'jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages')
-rw-r--r--  jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/__init__.py     12
-rw-r--r--  jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/catalog.py     820
-rw-r--r--  jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/checkers.py    173
-rw-r--r--  jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/extract.py     632
-rw-r--r--  jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/frontend.py   1018
-rw-r--r--  jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/jslexer.py     185
-rw-r--r--  jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/mofile.py      213
-rw-r--r--  jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/plurals.py     252
-rw-r--r--  jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/pofile.py      507
9 files changed, 0 insertions, 3812 deletions
diff --git a/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/__init__.py b/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/__init__.py
deleted file mode 100644
index 1b63bae..0000000
--- a/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- babel.messages
- ~~~~~~~~~~~~~~
-
- Support for ``gettext`` message catalogs.
-
- :copyright: (c) 2013 by the Babel Team.
- :license: BSD, see LICENSE for more details.
-"""
-
-from babel.messages.catalog import *
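Because the deleted __init__.py re-exported babel.messages.catalog wholesale (whose __all__ lists Message, Catalog and TranslationError), callers could import the catalog classes from the package level. A minimal sketch of the import surface this provided, assuming Babel 2.3.4 is installed:

    # Both import paths resolve to the same classes; the package __init__
    # simply re-exported everything named in catalog.__all__.
    from babel.messages import Catalog, Message, TranslationError
    from babel.messages.catalog import Catalog as CatalogDirect

    assert Catalog is CatalogDirect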
diff --git a/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/catalog.py b/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/catalog.py
deleted file mode 100644
index 8c807f8..0000000
--- a/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/catalog.py
+++ /dev/null
@@ -1,820 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- babel.messages.catalog
- ~~~~~~~~~~~~~~~~~~~~~~
-
- Data structures for message catalogs.
-
- :copyright: (c) 2013 by the Babel Team.
- :license: BSD, see LICENSE for more details.
-"""
-
-import re
-import time
-
-from cgi import parse_header
-from datetime import datetime, time as time_
-from difflib import get_close_matches
-from email import message_from_string
-from copy import copy
-
-from babel import __version__ as VERSION
-from babel.core import Locale
-from babel.dates import format_datetime
-from babel.messages.plurals import get_plural
-from babel.util import odict, distinct, LOCALTZ, FixedOffsetTimezone
-from babel._compat import string_types, number_types, PY2, cmp
-
-__all__ = ['Message', 'Catalog', 'TranslationError']
-
-
-PYTHON_FORMAT = re.compile(r'''(?x)
- \%
- (?:\(([\w]*)\))?
- (
- [-#0\ +]?(?:\*|[\d]+)?
- (?:\.(?:\*|[\d]+))?
- [hlL]?
- )
- ([diouxXeEfFgGcrs%])
-''')
-
-
-def _parse_datetime_header(value):
- match = re.match(r'^(?P<datetime>.*?)(?P<tzoffset>[+-]\d{4})?$', value)
-
- tt = time.strptime(match.group('datetime'), '%Y-%m-%d %H:%M')
- ts = time.mktime(tt)
- dt = datetime.fromtimestamp(ts)
-
- # Separate the offset into a sign component, hours, and minutes
- tzoffset = match.group('tzoffset')
- if tzoffset is not None:
- plus_minus_s, rest = tzoffset[0], tzoffset[1:]
- hours_offset_s, mins_offset_s = rest[:2], rest[2:]
-
- # Make them all integers
- plus_minus = int(plus_minus_s + '1')
- hours_offset = int(hours_offset_s)
- mins_offset = int(mins_offset_s)
-
- # Calculate net offset
- net_mins_offset = hours_offset * 60
- net_mins_offset += mins_offset
- net_mins_offset *= plus_minus
-
- # Create an offset object
- tzoffset = FixedOffsetTimezone(net_mins_offset)
-
- # Store the offset in a datetime object
- dt = dt.replace(tzinfo=tzoffset)
-
- return dt
-
-
-class Message(object):
- """Representation of a single message in a catalog."""
-
- def __init__(self, id, string=u'', locations=(), flags=(), auto_comments=(),
- user_comments=(), previous_id=(), lineno=None, context=None):
- """Create the message object.
-
- :param id: the message ID, or a ``(singular, plural)`` tuple for
- pluralizable messages
- :param string: the translated message string, or a
- ``(singular, plural)`` tuple for pluralizable messages
- :param locations: a sequence of ``(filename, lineno)`` tuples
- :param flags: a set or sequence of flags
- :param auto_comments: a sequence of automatic comments for the message
- :param user_comments: a sequence of user comments for the message
- :param previous_id: the previous message ID, or a ``(singular, plural)``
- tuple for pluralizable messages
- :param lineno: the line number on which the msgid line was found in the
- PO file, if any
- :param context: the message context
- """
- self.id = id
- if not string and self.pluralizable:
- string = (u'', u'')
- self.string = string
- self.locations = list(distinct(locations))
- self.flags = set(flags)
- if id and self.python_format:
- self.flags.add('python-format')
- else:
- self.flags.discard('python-format')
- self.auto_comments = list(distinct(auto_comments))
- self.user_comments = list(distinct(user_comments))
- if isinstance(previous_id, string_types):
- self.previous_id = [previous_id]
- else:
- self.previous_id = list(previous_id)
- self.lineno = lineno
- self.context = context
-
- def __repr__(self):
- return '<%s %r (flags: %r)>' % (type(self).__name__, self.id,
- list(self.flags))
-
- def __cmp__(self, obj):
- """Compare Messages, taking into account plural ids"""
- def values_to_compare():
- if isinstance(obj, Message):
- plural = self.pluralizable
- obj_plural = obj.pluralizable
- if plural and obj_plural:
- return self.id[0], obj.id[0]
- elif plural:
- return self.id[0], obj.id
- elif obj_plural:
- return self.id, obj.id[0]
- return self.id, obj.id
- this, other = values_to_compare()
- return cmp(this, other)
-
- def __gt__(self, other):
- return self.__cmp__(other) > 0
-
- def __lt__(self, other):
- return self.__cmp__(other) < 0
-
- def __ge__(self, other):
- return self.__cmp__(other) >= 0
-
- def __le__(self, other):
- return self.__cmp__(other) <= 0
-
- def __eq__(self, other):
- return self.__cmp__(other) == 0
-
- def __ne__(self, other):
- return self.__cmp__(other) != 0
-
- def clone(self):
- return Message(*map(copy, (self.id, self.string, self.locations,
- self.flags, self.auto_comments,
- self.user_comments, self.previous_id,
- self.lineno, self.context)))
-
- def check(self, catalog=None):
- """Run various validation checks on the message. Some validations
- are only performed if the catalog is provided. This method returns
- a sequence of `TranslationError` objects.
-
- :rtype: ``iterator``
- :param catalog: A catalog instance that is passed to the checkers
- :see: `Catalog.check` for a way to perform checks for all messages
- in a catalog.
- """
- from babel.messages.checkers import checkers
- errors = []
- for checker in checkers:
- try:
- checker(catalog, self)
- except TranslationError as e:
- errors.append(e)
- return errors
-
- @property
- def fuzzy(self):
- """Whether the translation is fuzzy.
-
- >>> Message('foo').fuzzy
- False
- >>> msg = Message('foo', 'foo', flags=['fuzzy'])
- >>> msg.fuzzy
- True
- >>> msg
- <Message 'foo' (flags: ['fuzzy'])>
-
- :type: `bool`"""
- return 'fuzzy' in self.flags
-
- @property
- def pluralizable(self):
- """Whether the message is plurizable.
-
- >>> Message('foo').pluralizable
- False
- >>> Message(('foo', 'bar')).pluralizable
- True
-
- :type: `bool`"""
- return isinstance(self.id, (list, tuple))
-
- @property
- def python_format(self):
- """Whether the message contains Python-style parameters.
-
- >>> Message('foo %(name)s bar').python_format
- True
- >>> Message(('foo %(name)s', 'foo %(name)s')).python_format
- True
-
- :type: `bool`"""
- ids = self.id
- if not isinstance(ids, (list, tuple)):
- ids = [ids]
- return any(PYTHON_FORMAT.search(id) for id in ids)
-
-
-class TranslationError(Exception):
- """Exception thrown by translation checkers when invalid message
- translations are encountered."""
-
-
-DEFAULT_HEADER = u"""\
-# Translations template for PROJECT.
-# Copyright (C) YEAR ORGANIZATION
-# This file is distributed under the same license as the PROJECT project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
-#"""
-
-
-if PY2:
- def _parse_header(header_string):
- # message_from_string only works for str, not for unicode
- headers = message_from_string(header_string.encode('utf8'))
- decoded_headers = {}
- for name, value in headers.items():
- name = name.decode('utf8')
- value = value.decode('utf8')
- decoded_headers[name] = value
- return decoded_headers
-
-else:
- _parse_header = message_from_string
-
-
-class Catalog(object):
- """Representation of a message catalog."""
-
- def __init__(self, locale=None, domain=None, header_comment=DEFAULT_HEADER,
- project=None, version=None, copyright_holder=None,
- msgid_bugs_address=None, creation_date=None,
- revision_date=None, last_translator=None, language_team=None,
- charset=None, fuzzy=True):
- """Initialize the catalog object.
-
- :param locale: the locale identifier or `Locale` object, or `None`
- if the catalog is not bound to a locale (which basically
- means it's a template)
- :param domain: the message domain
- :param header_comment: the header comment as string, or `None` for the
- default header
- :param project: the project's name
- :param version: the project's version
- :param copyright_holder: the copyright holder of the catalog
- :param msgid_bugs_address: the email address or URL to submit bug
- reports to
- :param creation_date: the date the catalog was created
- :param revision_date: the date the catalog was revised
- :param last_translator: the name and email of the last translator
- :param language_team: the name and email of the language team
- :param charset: the encoding to use in the output (defaults to utf-8)
- :param fuzzy: the fuzzy bit on the catalog header
- """
- self.domain = domain
- if locale:
- locale = Locale.parse(locale)
- self.locale = locale
- self._header_comment = header_comment
- self._messages = odict()
-
- self.project = project or 'PROJECT'
- self.version = version or 'VERSION'
- self.copyright_holder = copyright_holder or 'ORGANIZATION'
- self.msgid_bugs_address = msgid_bugs_address or 'EMAIL@ADDRESS'
-
- self.last_translator = last_translator or 'FULL NAME <EMAIL@ADDRESS>'
- """Name and email address of the last translator."""
- self.language_team = language_team or 'LANGUAGE <LL@li.org>'
- """Name and email address of the language team."""
-
- self.charset = charset or 'utf-8'
-
- if creation_date is None:
- creation_date = datetime.now(LOCALTZ)
- elif isinstance(creation_date, datetime) and not creation_date.tzinfo:
- creation_date = creation_date.replace(tzinfo=LOCALTZ)
- self.creation_date = creation_date
- if revision_date is None:
- revision_date = 'YEAR-MO-DA HO:MI+ZONE'
- elif isinstance(revision_date, datetime) and not revision_date.tzinfo:
- revision_date = revision_date.replace(tzinfo=LOCALTZ)
- self.revision_date = revision_date
- self.fuzzy = fuzzy
-
- self.obsolete = odict() # Dictionary of obsolete messages
- self._num_plurals = None
- self._plural_expr = None
-
- def _get_header_comment(self):
- comment = self._header_comment
- year = datetime.now(LOCALTZ).strftime('%Y')
- if hasattr(self.revision_date, 'strftime'):
- year = self.revision_date.strftime('%Y')
- comment = comment.replace('PROJECT', self.project) \
- .replace('VERSION', self.version) \
- .replace('YEAR', year) \
- .replace('ORGANIZATION', self.copyright_holder)
- if self.locale:
- comment = comment.replace('Translations template', '%s translations'
- % self.locale.english_name)
- return comment
-
- def _set_header_comment(self, string):
- self._header_comment = string
-
- header_comment = property(_get_header_comment, _set_header_comment, doc="""\
- The header comment for the catalog.
-
- >>> catalog = Catalog(project='Foobar', version='1.0',
- ... copyright_holder='Foo Company')
- >>> print(catalog.header_comment) #doctest: +ELLIPSIS
- # Translations template for Foobar.
- # Copyright (C) ... Foo Company
- # This file is distributed under the same license as the Foobar project.
- # FIRST AUTHOR <EMAIL@ADDRESS>, ....
- #
-
- The header can also be set from a string. Any known upper-case variables
- will be replaced when the header is retrieved again:
-
- >>> catalog = Catalog(project='Foobar', version='1.0',
- ... copyright_holder='Foo Company')
- >>> catalog.header_comment = '''\\
- ... # The POT for my really cool PROJECT project.
- ... # Copyright (C) 1990-2003 ORGANIZATION
- ... # This file is distributed under the same license as the PROJECT
- ... # project.
- ... #'''
- >>> print(catalog.header_comment)
- # The POT for my really cool Foobar project.
- # Copyright (C) 1990-2003 Foo Company
- # This file is distributed under the same license as the Foobar
- # project.
- #
-
- :type: `unicode`
- """)
-
- def _get_mime_headers(self):
- headers = []
- headers.append(('Project-Id-Version',
- '%s %s' % (self.project, self.version)))
- headers.append(('Report-Msgid-Bugs-To', self.msgid_bugs_address))
- headers.append(('POT-Creation-Date',
- format_datetime(self.creation_date, 'yyyy-MM-dd HH:mmZ',
- locale='en')))
- if isinstance(self.revision_date, (datetime, time_) + number_types):
- headers.append(('PO-Revision-Date',
- format_datetime(self.revision_date,
- 'yyyy-MM-dd HH:mmZ', locale='en')))
- else:
- headers.append(('PO-Revision-Date', self.revision_date))
- headers.append(('Last-Translator', self.last_translator))
- if self.locale is not None:
- headers.append(('Language', str(self.locale)))
- if (self.locale is not None) and ('LANGUAGE' in self.language_team):
- headers.append(('Language-Team',
- self.language_team.replace('LANGUAGE',
- str(self.locale))))
- else:
- headers.append(('Language-Team', self.language_team))
- if self.locale is not None:
- headers.append(('Plural-Forms', self.plural_forms))
- headers.append(('MIME-Version', '1.0'))
- headers.append(('Content-Type',
- 'text/plain; charset=%s' % self.charset))
- headers.append(('Content-Transfer-Encoding', '8bit'))
- headers.append(('Generated-By', 'Babel %s\n' % VERSION))
- return headers
-
- def _set_mime_headers(self, headers):
- for name, value in headers:
- name = name.lower()
- if name == 'project-id-version':
- parts = value.split(' ')
- self.project = u' '.join(parts[:-1])
- self.version = parts[-1]
- elif name == 'report-msgid-bugs-to':
- self.msgid_bugs_address = value
- elif name == 'last-translator':
- self.last_translator = value
- elif name == 'language-team':
- self.language_team = value
- elif name == 'content-type':
- mimetype, params = parse_header(value)
- if 'charset' in params:
- self.charset = params['charset'].lower()
- elif name == 'plural-forms':
- _, params = parse_header(' ;' + value)
- self._num_plurals = int(params.get('nplurals', 2))
- self._plural_expr = params.get('plural', '(n != 1)')
- elif name == 'pot-creation-date':
- self.creation_date = _parse_datetime_header(value)
- elif name == 'po-revision-date':
- # Keep the value if it's not the default one
- if 'YEAR' not in value:
- self.revision_date = _parse_datetime_header(value)
-
- mime_headers = property(_get_mime_headers, _set_mime_headers, doc="""\
- The MIME headers of the catalog, used for the special ``msgid ""`` entry.
-
- The behavior of this property changes slightly depending on whether a locale
- is set or not; when no locale is set, the catalog is treated as a template
- for actual translations.
-
- Here's an example of the output for such a catalog template:
-
- >>> from babel.dates import UTC
- >>> created = datetime(1990, 4, 1, 15, 30, tzinfo=UTC)
- >>> catalog = Catalog(project='Foobar', version='1.0',
- ... creation_date=created)
- >>> for name, value in catalog.mime_headers:
- ... print('%s: %s' % (name, value))
- Project-Id-Version: Foobar 1.0
- Report-Msgid-Bugs-To: EMAIL@ADDRESS
- POT-Creation-Date: 1990-04-01 15:30+0000
- PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
- Last-Translator: FULL NAME <EMAIL@ADDRESS>
- Language-Team: LANGUAGE <LL@li.org>
- MIME-Version: 1.0
- Content-Type: text/plain; charset=utf-8
- Content-Transfer-Encoding: 8bit
- Generated-By: Babel ...
-
- And here's an example of the output when the locale is set:
-
- >>> revised = datetime(1990, 8, 3, 12, 0, tzinfo=UTC)
- >>> catalog = Catalog(locale='de_DE', project='Foobar', version='1.0',
- ... creation_date=created, revision_date=revised,
- ... last_translator='John Doe <jd@example.com>',
- ... language_team='de_DE <de@example.com>')
- >>> for name, value in catalog.mime_headers:
- ... print('%s: %s' % (name, value))
- Project-Id-Version: Foobar 1.0
- Report-Msgid-Bugs-To: EMAIL@ADDRESS
- POT-Creation-Date: 1990-04-01 15:30+0000
- PO-Revision-Date: 1990-08-03 12:00+0000
- Last-Translator: John Doe <jd@example.com>
- Language: de_DE
- Language-Team: de_DE <de@example.com>
- Plural-Forms: nplurals=2; plural=(n != 1)
- MIME-Version: 1.0
- Content-Type: text/plain; charset=utf-8
- Content-Transfer-Encoding: 8bit
- Generated-By: Babel ...
-
- :type: `list`
- """)
-
- @property
- def num_plurals(self):
- """The number of plurals used by the catalog or locale.
-
- >>> Catalog(locale='en').num_plurals
- 2
- >>> Catalog(locale='ga').num_plurals
- 3
-
- :type: `int`"""
- if self._num_plurals is None:
- num = 2
- if self.locale:
- num = get_plural(self.locale)[0]
- self._num_plurals = num
- return self._num_plurals
-
- @property
- def plural_expr(self):
- """The plural expression used by the catalog or locale.
-
- >>> Catalog(locale='en').plural_expr
- '(n != 1)'
- >>> Catalog(locale='ga').plural_expr
- '(n==1 ? 0 : n==2 ? 1 : 2)'
-
- :type: `string_types`"""
- if self._plural_expr is None:
- expr = '(n != 1)'
- if self.locale:
- expr = get_plural(self.locale)[1]
- self._plural_expr = expr
- return self._plural_expr
-
- @property
- def plural_forms(self):
- """Return the plural forms declaration for the locale.
-
- >>> Catalog(locale='en').plural_forms
- 'nplurals=2; plural=(n != 1)'
- >>> Catalog(locale='pt_BR').plural_forms
- 'nplurals=2; plural=(n > 1)'
-
- :type: `str`"""
- return 'nplurals=%s; plural=%s' % (self.num_plurals, self.plural_expr)
-
- def __contains__(self, id):
- """Return whether the catalog has a message with the specified ID."""
- return self._key_for(id) in self._messages
-
- def __len__(self):
- """The number of messages in the catalog.
-
- This does not include the special ``msgid ""`` entry."""
- return len(self._messages)
-
- def __iter__(self):
- """Iterates through all the entries in the catalog, in the order they
- were added, yielding a `Message` object for every entry.
-
- :rtype: ``iterator``"""
- buf = []
- for name, value in self.mime_headers:
- buf.append('%s: %s' % (name, value))
- flags = set()
- if self.fuzzy:
- flags |= set(['fuzzy'])
- yield Message(u'', '\n'.join(buf), flags=flags)
- for key in self._messages:
- yield self._messages[key]
-
- def __repr__(self):
- locale = ''
- if self.locale:
- locale = ' %s' % self.locale
- return '<%s %r%s>' % (type(self).__name__, self.domain, locale)
-
- def __delitem__(self, id):
- """Delete the message with the specified ID."""
- self.delete(id)
-
- def __getitem__(self, id):
- """Return the message with the specified ID.
-
- :param id: the message ID
- """
- return self.get(id)
-
- def __setitem__(self, id, message):
- """Add or update the message with the specified ID.
-
- >>> catalog = Catalog()
- >>> catalog[u'foo'] = Message(u'foo')
- >>> catalog[u'foo']
- <Message u'foo' (flags: [])>
-
- If a message with that ID is already in the catalog, it is updated
- to include the locations and flags of the new message.
-
- >>> catalog = Catalog()
- >>> catalog[u'foo'] = Message(u'foo', locations=[('main.py', 1)])
- >>> catalog[u'foo'].locations
- [('main.py', 1)]
- >>> catalog[u'foo'] = Message(u'foo', locations=[('utils.py', 5)])
- >>> catalog[u'foo'].locations
- [('main.py', 1), ('utils.py', 5)]
-
- :param id: the message ID
- :param message: the `Message` object
- """
- assert isinstance(message, Message), 'expected a Message object'
- key = self._key_for(id, message.context)
- current = self._messages.get(key)
- if current:
- if message.pluralizable and not current.pluralizable:
- # The new message adds pluralization
- current.id = message.id
- current.string = message.string
- current.locations = list(distinct(current.locations +
- message.locations))
- current.auto_comments = list(distinct(current.auto_comments +
- message.auto_comments))
- current.user_comments = list(distinct(current.user_comments +
- message.user_comments))
- current.flags |= message.flags
- message = current
- elif id == '':
- # special treatment for the header message
- self.mime_headers = _parse_header(message.string).items()
- self.header_comment = '\n'.join([('# %s' % c).rstrip() for c
- in message.user_comments])
- self.fuzzy = message.fuzzy
- else:
- if isinstance(id, (list, tuple)):
- assert isinstance(message.string, (list, tuple)), \
- 'Expected sequence but got %s' % type(message.string)
- self._messages[key] = message
-
- def add(self, id, string=None, locations=(), flags=(), auto_comments=(),
- user_comments=(), previous_id=(), lineno=None, context=None):
- """Add or update the message with the specified ID.
-
- >>> catalog = Catalog()
- >>> catalog.add(u'foo')
- <Message ...>
- >>> catalog[u'foo']
- <Message u'foo' (flags: [])>
-
- This method simply constructs a `Message` object with the given
- arguments and invokes `__setitem__` with that object.
-
- :param id: the message ID, or a ``(singular, plural)`` tuple for
- pluralizable messages
- :param string: the translated message string, or a
- ``(singular, plural)`` tuple for pluralizable messages
- :param locations: a sequence of ``(filename, lineno)`` tuples
- :param flags: a set or sequence of flags
- :param auto_comments: a sequence of automatic comments
- :param user_comments: a sequence of user comments
- :param previous_id: the previous message ID, or a ``(singular, plural)``
- tuple for pluralizable messages
- :param lineno: the line number on which the msgid line was found in the
- PO file, if any
- :param context: the message context
- """
- message = Message(id, string, list(locations), flags, auto_comments,
- user_comments, previous_id, lineno=lineno,
- context=context)
- self[id] = message
- return message
-
- def check(self):
- """Run various validation checks on the translations in the catalog.
-
- For every message which fails validation, this method yields a
- ``(message, errors)`` tuple, where ``message`` is the `Message` object
- and ``errors`` is a sequence of `TranslationError` objects.
-
- :rtype: ``iterator``
- """
- for message in self._messages.values():
- errors = message.check(catalog=self)
- if errors:
- yield message, errors
-
- def get(self, id, context=None):
- """Return the message with the specified ID and context.
-
- :param id: the message ID
- :param context: the message context, or ``None`` for no context
- """
- return self._messages.get(self._key_for(id, context))
-
- def delete(self, id, context=None):
- """Delete the message with the specified ID and context.
-
- :param id: the message ID
- :param context: the message context, or ``None`` for no context
- """
- key = self._key_for(id, context)
- if key in self._messages:
- del self._messages[key]
-
- def update(self, template, no_fuzzy_matching=False, update_header_comment=False):
- """Update the catalog based on the given template catalog.
-
- >>> from babel.messages import Catalog
- >>> template = Catalog()
- >>> template.add('green', locations=[('main.py', 99)])
- <Message ...>
- >>> template.add('blue', locations=[('main.py', 100)])
- <Message ...>
- >>> template.add(('salad', 'salads'), locations=[('util.py', 42)])
- <Message ...>
- >>> catalog = Catalog(locale='de_DE')
- >>> catalog.add('blue', u'blau', locations=[('main.py', 98)])
- <Message ...>
- >>> catalog.add('head', u'Kopf', locations=[('util.py', 33)])
- <Message ...>
- >>> catalog.add(('salad', 'salads'), (u'Salat', u'Salate'),
- ... locations=[('util.py', 38)])
- <Message ...>
-
- >>> catalog.update(template)
- >>> len(catalog)
- 3
-
- >>> msg1 = catalog['green']
- >>> msg1.string
- >>> msg1.locations
- [('main.py', 99)]
-
- >>> msg2 = catalog['blue']
- >>> msg2.string
- u'blau'
- >>> msg2.locations
- [('main.py', 100)]
-
- >>> msg3 = catalog['salad']
- >>> msg3.string
- (u'Salat', u'Salate')
- >>> msg3.locations
- [('util.py', 42)]
-
- Messages that are in the catalog but not in the template are removed
- from the main collection, but can still be accessed via the `obsolete`
- member:
-
- >>> 'head' in catalog
- False
- >>> list(catalog.obsolete.values())
- [<Message 'head' (flags: [])>]
-
- :param template: the reference catalog, usually read from a POT file
- :param no_fuzzy_matching: whether to use fuzzy matching of message IDs
- """
- messages = self._messages
- remaining = messages.copy()
- self._messages = odict()
-
- # Prepare for fuzzy matching
- fuzzy_candidates = []
- if not no_fuzzy_matching:
- fuzzy_candidates = dict([
- (self._key_for(msgid), messages[msgid].context)
- for msgid in messages if msgid and messages[msgid].string
- ])
- fuzzy_matches = set()
-
- def _merge(message, oldkey, newkey):
- message = message.clone()
- fuzzy = False
- if oldkey != newkey:
- fuzzy = True
- fuzzy_matches.add(oldkey)
- oldmsg = messages.get(oldkey)
- if isinstance(oldmsg.id, string_types):
- message.previous_id = [oldmsg.id]
- else:
- message.previous_id = list(oldmsg.id)
- else:
- oldmsg = remaining.pop(oldkey, None)
- message.string = oldmsg.string
- if isinstance(message.id, (list, tuple)):
- if not isinstance(message.string, (list, tuple)):
- fuzzy = True
- message.string = tuple(
- [message.string] + ([u''] * (len(message.id) - 1))
- )
- elif len(message.string) != self.num_plurals:
- fuzzy = True
- message.string = tuple(message.string[:len(oldmsg.string)])
- elif isinstance(message.string, (list, tuple)):
- fuzzy = True
- message.string = message.string[0]
- message.flags |= oldmsg.flags
- if fuzzy:
- message.flags |= set([u'fuzzy'])
- self[message.id] = message
-
- for message in template:
- if message.id:
- key = self._key_for(message.id, message.context)
- if key in messages:
- _merge(message, key, key)
- else:
- if no_fuzzy_matching is False:
- # do some fuzzy matching with difflib
- if isinstance(key, tuple):
- matchkey = key[0] # just the msgid, no context
- else:
- matchkey = key
- matches = get_close_matches(matchkey.lower().strip(),
- fuzzy_candidates.keys(), 1)
- if matches:
- newkey = matches[0]
- newctxt = fuzzy_candidates[newkey]
- if newctxt is not None:
- newkey = newkey, newctxt
- _merge(message, newkey, key)
- continue
-
- self[message.id] = message
-
- for msgid in remaining:
- if no_fuzzy_matching or msgid not in fuzzy_matches:
- self.obsolete[msgid] = remaining[msgid]
-
- if update_header_comment:
- # Allow the updated catalog's header to be rewritten based on the
- # template's header
- self.header_comment = template.header_comment
-
- # Make updated catalog's POT-Creation-Date equal to the template
- # used to update the catalog
- self.creation_date = template.creation_date
-
- def _key_for(self, id, context=None):
- """The key for a message is just the singular ID even for pluralizable
- messages, but is a ``(msgid, msgctxt)`` tuple for context-specific
- messages.
- """
- key = id
- if isinstance(key, (list, tuple)):
- key = id[0]
- if context is not None:
- key = (key, context)
- return key
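The docstrings above (``add``, ``__setitem__``, ``update``) already carry doctest examples; the following is a compact, self-contained sketch that ties them together, assuming Babel 2.3.4 is installed:

    from babel.messages.catalog import Catalog

    # A template catalog has no locale, so it behaves as a POT template.
    template = Catalog(project='Foobar', version='1.0')
    template.add('green', locations=[('main.py', 99)])
    template.add(('salad', 'salads'), locations=[('util.py', 42)])

    # A translation catalog bound to a locale; update() merges the template in,
    # keeps existing translations and moves stale entries to catalog.obsolete.
    catalog = Catalog(locale='de_DE')
    catalog.add(('salad', 'salads'), (u'Salat', u'Salate'),
                locations=[('util.py', 38)])
    catalog.update(template)

    for message in catalog:
        if message.id:  # skip the special msgid "" header entry
            print('%r -> %r %r' % (message.id, message.string, message.locations))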
diff --git a/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/checkers.py b/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/checkers.py
deleted file mode 100644
index 24ecdcf..0000000
--- a/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/checkers.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- babel.messages.checkers
- ~~~~~~~~~~~~~~~~~~~~~~~
-
- Various routines that help with validation of translations.
-
- :since: version 0.9
-
- :copyright: (c) 2013 by the Babel Team.
- :license: BSD, see LICENSE for more details.
-"""
-
-from babel.messages.catalog import TranslationError, PYTHON_FORMAT
-from babel._compat import string_types, izip
-
-
-#: list of format chars that are compatible with each other
-_string_format_compatibilities = [
- set(['i', 'd', 'u']),
- set(['x', 'X']),
- set(['f', 'F', 'g', 'G'])
-]
-
-
-def num_plurals(catalog, message):
- """Verify the number of plurals in the translation."""
- if not message.pluralizable:
- if not isinstance(message.string, string_types):
- raise TranslationError("Found plural forms for non-pluralizable "
- "message")
- return
-
- # skip further tests if no catalog is provided.
- elif catalog is None:
- return
-
- msgstrs = message.string
- if not isinstance(msgstrs, (list, tuple)):
- msgstrs = (msgstrs,)
- if len(msgstrs) != catalog.num_plurals:
- raise TranslationError("Wrong number of plural forms (expected %d)" %
- catalog.num_plurals)
-
-
-def python_format(catalog, message):
- """Verify the format string placeholders in the translation."""
- if 'python-format' not in message.flags:
- return
- msgids = message.id
- if not isinstance(msgids, (list, tuple)):
- msgids = (msgids,)
- msgstrs = message.string
- if not isinstance(msgstrs, (list, tuple)):
- msgstrs = (msgstrs,)
-
- for msgid, msgstr in izip(msgids, msgstrs):
- if msgstr:
- _validate_format(msgid, msgstr)
-
-
-def _validate_format(format, alternative):
- """Test format string `alternative` against `format`. `format` can be the
- msgid of a message and `alternative` one of the `msgstr`\s. The two
- arguments are not interchangeable as `alternative` may contain fewer
- placeholders if `format` uses named placeholders.
-
- The behavior of this function is undefined if the string does not use
- string formatting.
-
- If the string formatting of `alternative` is compatible with `format`, the
- function returns `None`, otherwise a `TranslationError` is raised.
-
- Examples of compatible format strings:
-
- >>> _validate_format('Hello %s!', 'Hallo %s!')
- >>> _validate_format('Hello %i!', 'Hallo %d!')
-
- Example of incompatible format strings:
-
- >>> _validate_format('Hello %(name)s!', 'Hallo %s!')
- Traceback (most recent call last):
- ...
- TranslationError: the format strings are of different kinds
-
- This function is used by the `python_format` checker.
-
- :param format: The original format string
- :param alternative: The alternative format string that should be checked
- against format
- :raises TranslationError: on formatting errors
- """
-
- def _parse(string):
- result = []
- for match in PYTHON_FORMAT.finditer(string):
- name, format, typechar = match.groups()
- if typechar == '%' and name is None:
- continue
- result.append((name, str(typechar)))
- return result
-
- def _compatible(a, b):
- if a == b:
- return True
- for set in _string_format_compatibilities:
- if a in set and b in set:
- return True
- return False
-
- def _check_positional(results):
- positional = None
- for name, char in results:
- if positional is None:
- positional = name is None
- else:
- if (name is None) != positional:
- raise TranslationError('format string mixes positional '
- 'and named placeholders')
- return bool(positional)
-
- a, b = map(_parse, (format, alternative))
-
- # now check if both strings are positional or named
- a_positional, b_positional = map(_check_positional, (a, b))
- if a_positional and not b_positional and not b:
- raise TranslationError('placeholders are incompatible')
- elif a_positional != b_positional:
- raise TranslationError('the format strings are of different kinds')
-
- # if we are operating on positional strings both must have the
- # same number of format chars and those must be compatible
- if a_positional:
- if len(a) != len(b):
- raise TranslationError('positional format placeholders are '
- 'unbalanced')
- for idx, ((_, first), (_, second)) in enumerate(izip(a, b)):
- if not _compatible(first, second):
- raise TranslationError('incompatible format for placeholder '
- '%d: %r and %r are not compatible' %
- (idx + 1, first, second))
-
- # otherwise the second string must not have names the first one
- # doesn't have and the types of those included must be compatible
- else:
- type_map = dict(a)
- for name, typechar in b:
- if name not in type_map:
- raise TranslationError('unknown named placeholder %r' % name)
- elif not _compatible(typechar, type_map[name]):
- raise TranslationError('incompatible format for '
- 'placeholder %r: '
- '%r and %r are not compatible' %
- (name, typechar, type_map[name]))
-
-
-def _find_checkers():
- checkers = []
- try:
- from pkg_resources import working_set
- except ImportError:
- pass
- else:
- for entry_point in working_set.iter_entry_points('babel.checkers'):
- checkers.append(entry_point.load())
- if len(checkers) == 0:
- # if pkg_resources is not available or no usable egg-info was found
- # (see #230), just resort to hard-coded checkers
- return [num_plurals, python_format]
- return checkers
-
-
-checkers = _find_checkers()
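These checkers are normally reached through Message.check and Catalog.check rather than being called directly. A short sketch of how a placeholder mismatch surfaces, assuming Babel 2.3.4 is installed:

    from babel.messages.catalog import Catalog

    catalog = Catalog(locale='de_DE')
    # The msgid uses a named placeholder while the msgstr uses a positional one,
    # so the python_format checker reports that the format strings are of
    # different kinds (the python-format flag is set automatically by Message).
    catalog.add('Hello %(name)s!', u'Hallo %s!')

    for message, errors in catalog.check():
        for error in errors:
            print('%s: %s' % (message.id, error))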
diff --git a/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/extract.py b/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/extract.py
deleted file mode 100644
index 7162627..0000000
--- a/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/extract.py
+++ /dev/null
@@ -1,632 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- babel.messages.extract
- ~~~~~~~~~~~~~~~~~~~~~~
-
- Basic infrastructure for extracting localizable messages from source files.
-
- This module defines an extensible system for collecting localizable message
- strings from a variety of sources. A native extractor for Python source
- files is built in; extractors for other sources can be added using very
- simple plugins.
-
- The main entry points into the extraction functionality are the functions
- `extract_from_dir` and `extract_from_file`.
-
- :copyright: (c) 2013 by the Babel Team.
- :license: BSD, see LICENSE for more details.
-"""
-
-import os
-from os.path import relpath
-import sys
-from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
-
-from babel.util import parse_encoding, pathmatch
-from babel._compat import PY2, text_type
-from textwrap import dedent
-
-
-GROUP_NAME = 'babel.extractors'
-
-DEFAULT_KEYWORDS = {
- '_': None,
- 'gettext': None,
- 'ngettext': (1, 2),
- 'ugettext': None,
- 'ungettext': (1, 2),
- 'dgettext': (2,),
- 'dngettext': (2, 3),
- 'N_': None,
- 'pgettext': ((1, 'c'), 2),
- 'npgettext': ((1, 'c'), 2, 3)
-}
-
-DEFAULT_MAPPING = [('**.py', 'python')]
-
-empty_msgid_warning = (
- '%s: warning: Empty msgid. It is reserved by GNU gettext: gettext("") '
- 'returns the header entry with meta information, not the empty string.')
-
-
-def _strip_comment_tags(comments, tags):
- """Helper function for `extract` that strips comment tags from strings
- in a list of comment lines. This function operates in-place.
- """
- def _strip(line):
- for tag in tags:
- if line.startswith(tag):
- return line[len(tag):].strip()
- return line
- comments[:] = map(_strip, comments)
-
-
-def extract_from_dir(dirname=None, method_map=DEFAULT_MAPPING,
- options_map=None, keywords=DEFAULT_KEYWORDS,
- comment_tags=(), callback=None, strip_comment_tags=False):
- """Extract messages from any source files found in the given directory.
-
- This function generates tuples of the form ``(filename, lineno, message,
- comments, context)``.
-
- Which extraction method is used per file is determined by the `method_map`
- parameter, which maps extended glob patterns to extraction method names.
- For example, the following is the default mapping:
-
- >>> method_map = [
- ... ('**.py', 'python')
- ... ]
-
- This basically says that files with the filename extension ".py" at any
- level inside the directory should be processed by the "python" extraction
- method. Files that don't match any of the mapping patterns are ignored. See
- the documentation of the `pathmatch` function for details on the pattern
- syntax.
-
- The following extended mapping would also use the "genshi" extraction
- method on any file in a "templates" subdirectory:
-
- >>> method_map = [
- ... ('**/templates/**.*', 'genshi'),
- ... ('**.py', 'python')
- ... ]
-
- The dictionary provided by the optional `options_map` parameter augments
- these mappings. It uses extended glob patterns as keys, and the values are
- dictionaries mapping option names to option values (both strings).
-
- The glob patterns of the `options_map` do not necessarily need to be the
- same as those used in the method mapping. For example, while all files in
- the ``templates`` folders in an application may be Genshi templates, the
- options for those files may differ based on extension:
-
- >>> options_map = {
- ... '**/templates/**.txt': {
- ... 'template_class': 'genshi.template:TextTemplate',
- ... 'encoding': 'latin-1'
- ... },
- ... '**/templates/**.html': {
- ... 'include_attrs': ''
- ... }
- ... }
-
- :param dirname: the path to the directory to extract messages from. If
- not given the current working directory is used.
- :param method_map: a list of ``(pattern, method)`` tuples that maps
- extended glob patterns to extraction method names
- :param options_map: a dictionary of additional options (optional)
- :param keywords: a dictionary mapping keywords (i.e. names of functions
- that should be recognized as translation functions) to
- tuples that specify which of their arguments contain
- localizable strings
- :param comment_tags: a list of tags of translator comments to search for
- and include in the results
- :param callback: a function that is called for every file that messages are
- extracted from, just before the extraction itself is
- performed; the function is passed the filename, the name
- of the extraction method and the options dictionary as
- positional arguments, in that order
- :param strip_comment_tags: a flag that if set to `True` causes all comment
- tags to be removed from the collected comments.
- :see: `pathmatch`
- """
- if dirname is None:
- dirname = os.getcwd()
- if options_map is None:
- options_map = {}
-
- absname = os.path.abspath(dirname)
- for root, dirnames, filenames in os.walk(absname):
- for subdir in dirnames:
- if subdir.startswith('.') or subdir.startswith('_'):
- dirnames.remove(subdir)
- dirnames.sort()
- filenames.sort()
- for filename in filenames:
- filepath = os.path.join(root, filename).replace(os.sep, '/')
-
- for message_tuple in check_and_call_extract_file(
- filepath,
- method_map,
- options_map,
- callback,
- keywords,
- comment_tags,
- strip_comment_tags,
- dirpath=absname,
- ):
- yield message_tuple
-
-
-def check_and_call_extract_file(filepath, method_map, options_map,
- callback, keywords, comment_tags,
- strip_comment_tags, dirpath=None):
- """Checks if the given file matches an extraction method mapping, and if so, calls extract_from_file.
-
- Note that the extraction method mappings are resolved relative to dirpath.
- So, given an absolute path to a file `filepath`, we want to check using
- just the relative path from `dirpath` to `filepath`.
-
- :param filepath: An absolute path to a file that exists.
- :param method_map: a list of ``(pattern, method)`` tuples that maps
- extended glob patterns to extraction method names
- :param options_map: a dictionary of additional options (optional)
- :param callback: a function that is called for every file that messages are
- extracted from, just before the extraction itself is
- performed; the function is passed the filename, the name
- of the extraction method and the options dictionary as
- positional arguments, in that order
- :param keywords: a dictionary mapping keywords (i.e. names of functions
- that should be recognized as translation functions) to
- tuples that specify which of their arguments contain
- localizable strings
- :param comment_tags: a list of tags of translator comments to search for
- and include in the results
- :param strip_comment_tags: a flag that if set to `True` causes all comment
- tags to be removed from the collected comments.
- :param dirpath: the path to the directory to extract messages from.
- """
- # filename is the relative path from dirpath to the actual file
- filename = relpath(filepath, dirpath)
-
- for pattern, method in method_map:
- if not pathmatch(pattern, filename):
- continue
-
- options = {}
- for opattern, odict in options_map.items():
- if pathmatch(opattern, filename):
- options = odict
- if callback:
- callback(filename, method, options)
- for message_tuple in extract_from_file(
- method, filepath,
- keywords=keywords,
- comment_tags=comment_tags,
- options=options,
- strip_comment_tags=strip_comment_tags
- ):
- yield (filename, ) + message_tuple
-
- break
-
-
-def extract_from_file(method, filename, keywords=DEFAULT_KEYWORDS,
- comment_tags=(), options=None, strip_comment_tags=False):
- """Extract messages from a specific file.
-
- This function returns a list of tuples of the form ``(lineno, message,
- comments, context)``.
-
- :param filename: the path to the file to extract messages from
- :param method: a string specifying the extraction method (e.g. "python")
- :param keywords: a dictionary mapping keywords (i.e. names of functions
- that should be recognized as translation functions) to
- tuples that specify which of their arguments contain
- localizable strings
- :param comment_tags: a list of translator tags to search for and include
- in the results
- :param strip_comment_tags: a flag that if set to `True` causes all comment
- tags to be removed from the collected comments.
- :param options: a dictionary of additional options (optional)
- """
- fileobj = open(filename, 'rb')
- try:
- return list(extract(method, fileobj, keywords, comment_tags, options,
- strip_comment_tags))
- finally:
- fileobj.close()
-
-
-def extract(method, fileobj, keywords=DEFAULT_KEYWORDS, comment_tags=(),
- options=None, strip_comment_tags=False):
- """Extract messages from the given file-like object using the specified
- extraction method.
-
- This function returns tuples of the form ``(lineno, message, comments, context)``.
-
- The implementation dispatches the actual extraction to plugins, based on the
- value of the ``method`` parameter.
-
- >>> source = b'''# foo module
- ... def run(argv):
- ... print(_('Hello, world!'))
- ... '''
-
- >>> from babel._compat import BytesIO
- >>> for message in extract('python', BytesIO(source)):
- ... print(message)
- (3, u'Hello, world!', [], None)
-
- :param method: an extraction method (a callable), or
- a string specifying the extraction method (e.g. "python");
- if this is a simple name, the extraction function will be
- looked up by entry point; if it is an explicit reference
- to a function (of the form ``package.module:funcname`` or
- ``package.module.funcname``), the corresponding function
- will be imported and used
- :param fileobj: the file-like object the messages should be extracted from
- :param keywords: a dictionary mapping keywords (i.e. names of functions
- that should be recognized as translation functions) to
- tuples that specify which of their arguments contain
- localizable strings
- :param comment_tags: a list of translator tags to search for and include
- in the results
- :param options: a dictionary of additional options (optional)
- :param strip_comment_tags: a flag that if set to `True` causes all comment
- tags to be removed from the collected comments.
- :raise ValueError: if the extraction method is not registered
- """
- func = None
- if callable(method):
- func = method
- elif ':' in method or '.' in method:
- if ':' not in method:
- lastdot = method.rfind('.')
- module, attrname = method[:lastdot], method[lastdot + 1:]
- else:
- module, attrname = method.split(':', 1)
- func = getattr(__import__(module, {}, {}, [attrname]), attrname)
- else:
- try:
- from pkg_resources import working_set
- except ImportError:
- pass
- else:
- for entry_point in working_set.iter_entry_points(GROUP_NAME,
- method):
- func = entry_point.load(require=True)
- break
- if func is None:
- # if pkg_resources is not available or no usable egg-info was found
- # (see #230), we resort to looking up the builtin extractors
- # directly
- builtin = {
- 'ignore': extract_nothing,
- 'python': extract_python,
- 'javascript': extract_javascript
- }
- func = builtin.get(method)
-
- if func is None:
- raise ValueError('Unknown extraction method %r' % method)
-
- results = func(fileobj, keywords.keys(), comment_tags,
- options=options or {})
-
- for lineno, funcname, messages, comments in results:
- if funcname:
- spec = keywords[funcname] or (1,)
- else:
- spec = (1,)
- if not isinstance(messages, (list, tuple)):
- messages = [messages]
- if not messages:
- continue
-
- # Validate the messages against the keyword's specification
- context = None
- msgs = []
- invalid = False
- # last_index is 1 based like the keyword spec
- last_index = len(messages)
- for index in spec:
- if isinstance(index, tuple):
- context = messages[index[0] - 1]
- continue
- if last_index < index:
- # Not enough arguments
- invalid = True
- break
- message = messages[index - 1]
- if message is None:
- invalid = True
- break
- msgs.append(message)
- if invalid:
- continue
-
- # keyword spec indexes are 1 based, therefore '-1'
- if isinstance(spec[0], tuple):
- # context-aware *gettext method
- first_msg_index = spec[1] - 1
- else:
- first_msg_index = spec[0] - 1
- if not messages[first_msg_index]:
- # An empty string msgid isn't valid, emit a warning
- where = '%s:%i' % (hasattr(fileobj, 'name') and
- fileobj.name or '(unknown)', lineno)
- sys.stderr.write((empty_msgid_warning % where) + '\n')
- continue
-
- messages = tuple(msgs)
- if len(messages) == 1:
- messages = messages[0]
-
- if strip_comment_tags:
- _strip_comment_tags(comments, comment_tags)
- yield lineno, messages, comments, context
-
-
-def extract_nothing(fileobj, keywords, comment_tags, options):
- """Pseudo extractor that does not actually extract anything, but simply
- returns an empty list.
- """
- return []
-
-
-def extract_python(fileobj, keywords, comment_tags, options):
- """Extract messages from Python source code.
-
- It returns an iterator yielding tuples in the following form ``(lineno,
- funcname, message, comments)``.
-
- :param fileobj: the seekable, file-like object the messages should be
- extracted from
- :param keywords: a list of keywords (i.e. function names) that should be
- recognized as translation functions
- :param comment_tags: a list of translator tags to search for and include
- in the results
- :param options: a dictionary of additional options (optional)
- :rtype: ``iterator``
- """
- funcname = lineno = message_lineno = None
- call_stack = -1
- buf = []
- messages = []
- translator_comments = []
- in_def = in_translator_comments = False
- comment_tag = None
-
- encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
-
- if PY2:
- next_line = fileobj.readline
- else:
- next_line = lambda: fileobj.readline().decode(encoding)
-
- tokens = generate_tokens(next_line)
- for tok, value, (lineno, _), _, _ in tokens:
- if call_stack == -1 and tok == NAME and value in ('def', 'class'):
- in_def = True
- elif tok == OP and value == '(':
- if in_def:
- # Avoid false positives for declarations such as:
- # def gettext(arg='message'):
- in_def = False
- continue
- if funcname:
- message_lineno = lineno
- call_stack += 1
- elif in_def and tok == OP and value == ':':
- # End of a class definition without parens
- in_def = False
- continue
- elif call_stack == -1 and tok == COMMENT:
- # Strip the comment token from the line
- if PY2:
- value = value.decode(encoding)
- value = value[1:].strip()
- if in_translator_comments and \
- translator_comments[-1][0] == lineno - 1:
- # We're already inside a translator comment, continue appending
- translator_comments.append((lineno, value))
- continue
- # If execution reaches this point, let's see if comment line
- # starts with one of the comment tags
- for comment_tag in comment_tags:
- if value.startswith(comment_tag):
- in_translator_comments = True
- translator_comments.append((lineno, value))
- break
- elif funcname and call_stack == 0:
- if tok == OP and value == ')':
- if buf:
- messages.append(''.join(buf))
- del buf[:]
- else:
- messages.append(None)
-
- if len(messages) > 1:
- messages = tuple(messages)
- else:
- messages = messages[0]
- # Comments don't apply unless they immediately precede the
- # message
- if translator_comments and \
- translator_comments[-1][0] < message_lineno - 1:
- translator_comments = []
-
- yield (message_lineno, funcname, messages,
- [comment[1] for comment in translator_comments])
-
- funcname = lineno = message_lineno = None
- call_stack = -1
- messages = []
- translator_comments = []
- in_translator_comments = False
- elif tok == STRING:
- # Unwrap quotes in a safe manner, maintaining the string's
- # encoding
- # https://sourceforge.net/tracker/?func=detail&atid=355470&
- # aid=617979&group_id=5470
- value = eval('# coding=%s\n%s' % (str(encoding), value),
- {'__builtins__': {}}, {})
- if PY2 and not isinstance(value, text_type):
- value = value.decode(encoding)
- buf.append(value)
- elif tok == OP and value == ',':
- if buf:
- messages.append(''.join(buf))
- del buf[:]
- else:
- messages.append(None)
- if translator_comments:
- # We have translator comments, and since we're on a
- # comma (,) the user is allowed to break onto a new line.
- # Increase the last comment's lineno so the comment is
- # still treated as immediately preceding the message
- old_lineno, old_comment = translator_comments.pop()
- translator_comments.append((old_lineno + 1, old_comment))
- elif call_stack > 0 and tok == OP and value == ')':
- call_stack -= 1
- elif funcname and call_stack == -1:
- funcname = None
- elif tok == NAME and value in keywords:
- funcname = value
-
-
-def extract_javascript(fileobj, keywords, comment_tags, options):
- """Extract messages from JavaScript source code.
-
- :param fileobj: the seekable, file-like object the messages should be
- extracted from
- :param keywords: a list of keywords (i.e. function names) that should be
- recognized as translation functions
- :param comment_tags: a list of translator tags to search for and include
- in the results
- :param options: a dictionary of additional options (optional)
- Supported options are:
- * `jsx` -- set to false to disable JSX/E4X support.
- * `template_string` -- set to false to disable ES6
- template string support.
- """
- from babel.messages.jslexer import Token, tokenize, unquote_string
- funcname = message_lineno = None
- messages = []
- last_argument = None
- translator_comments = []
- concatenate_next = False
- encoding = options.get('encoding', 'utf-8')
- last_token = None
- call_stack = -1
- dotted = any('.' in kw for kw in keywords)
-
- for token in tokenize(
- fileobj.read().decode(encoding),
- jsx=options.get("jsx", True),
- template_string=options.get("template_string", True),
- dotted=dotted
- ):
- if ( # Turn keyword`foo` expressions into keyword("foo") calls:
- funcname and # have a keyword...
- (last_token and last_token.type == 'name') and # we've seen nothing after the keyword...
- token.type == 'template_string' # this is a template string
- ):
- message_lineno = token.lineno
- messages = [unquote_string(token.value)]
- call_stack = 0
- token = Token('operator', ')', token.lineno)
-
- if token.type == 'operator' and token.value == '(':
- if funcname:
- message_lineno = token.lineno
- call_stack += 1
-
- elif call_stack == -1 and token.type == 'linecomment':
- value = token.value[2:].strip()
- if translator_comments and \
- translator_comments[-1][0] == token.lineno - 1:
- translator_comments.append((token.lineno, value))
- continue
-
- for comment_tag in comment_tags:
- if value.startswith(comment_tag):
- translator_comments.append((token.lineno, value.strip()))
- break
-
- elif token.type == 'multilinecomment':
- # only one multi-line comment may precede a translation
- translator_comments = []
- value = token.value[2:-2].strip()
- for comment_tag in comment_tags:
- if value.startswith(comment_tag):
- lines = value.splitlines()
- if lines:
- lines[0] = lines[0].strip()
- lines[1:] = dedent('\n'.join(lines[1:])).splitlines()
- for offset, line in enumerate(lines):
- translator_comments.append((token.lineno + offset,
- line))
- break
-
- elif funcname and call_stack == 0:
- if token.type == 'operator' and token.value == ')':
- if last_argument is not None:
- messages.append(last_argument)
- if len(messages) > 1:
- messages = tuple(messages)
- elif messages:
- messages = messages[0]
- else:
- messages = None
-
- # Comments don't apply unless they immediately precede the
- # message
- if translator_comments and \
- translator_comments[-1][0] < message_lineno - 1:
- translator_comments = []
-
- if messages is not None:
- yield (message_lineno, funcname, messages,
- [comment[1] for comment in translator_comments])
-
- funcname = message_lineno = last_argument = None
- concatenate_next = False
- translator_comments = []
- messages = []
- call_stack = -1
-
- elif token.type in ('string', 'template_string'):
- new_value = unquote_string(token.value)
- if concatenate_next:
- last_argument = (last_argument or '') + new_value
- concatenate_next = False
- else:
- last_argument = new_value
-
- elif token.type == 'operator':
- if token.value == ',':
- if last_argument is not None:
- messages.append(last_argument)
- last_argument = None
- else:
- messages.append(None)
- concatenate_next = False
- elif token.value == '+':
- concatenate_next = True
-
- elif call_stack > 0 and token.type == 'operator' \
- and token.value == ')':
- call_stack -= 1
-
- elif funcname and call_stack == -1:
- funcname = None
-
- elif call_stack == -1 and token.type == 'name' and \
- token.value in keywords and \
- (last_token is None or last_token.type != 'name' or
- last_token.value != 'function'):
- funcname = token.value
-
- last_token = token
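The ``extract`` docstring above shows the dispatch by method name; the sketch below drives the built-in Python extractor directly on an in-memory buffer and picks up a tagged translator comment, assuming Babel 2.3.4 is installed:

    from io import BytesIO
    from babel.messages.extract import extract

    source = (
        b"# NOTE: greeting shown on the landing page\n"
        b"print(_('Hello, world!'))\n"
        b"print(ngettext('%(num)d apple', '%(num)d apples', count))\n"
    )

    # Each result is (lineno, message, comments, context); the ngettext call
    # yields a (singular, plural) tuple as its message.
    for lineno, message, comments, context in extract(
            'python', BytesIO(source), comment_tags=('NOTE:',)):
        print('%r' % ((lineno, message, comments, context),))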
diff --git a/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/frontend.py b/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/frontend.py
deleted file mode 100644
index d190a2c..0000000
--- a/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/frontend.py
+++ /dev/null
@@ -1,1018 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- babel.messages.frontend
- ~~~~~~~~~~~~~~~~~~~~~~~
-
- Frontends for the message extraction functionality.
-
- :copyright: (c) 2013 by the Babel Team.
- :license: BSD, see LICENSE for more details.
-"""
-from __future__ import print_function
-
-import logging
-import optparse
-import os
-import re
-import shutil
-import sys
-import tempfile
-from datetime import datetime
-from locale import getpreferredencoding
-
-from babel import __version__ as VERSION
-from babel import Locale, localedata
-from babel._compat import StringIO, string_types, text_type
-from babel.core import UnknownLocaleError
-from babel.messages.catalog import Catalog
-from babel.messages.extract import DEFAULT_KEYWORDS, DEFAULT_MAPPING, check_and_call_extract_file, extract_from_dir
-from babel.messages.mofile import write_mo
-from babel.messages.pofile import read_po, write_po
-from babel.util import LOCALTZ, odict
-from distutils import log as distutils_log
-from distutils.cmd import Command as _Command
-from distutils.errors import DistutilsOptionError, DistutilsSetupError
-
-try:
- from ConfigParser import RawConfigParser
-except ImportError:
- from configparser import RawConfigParser
-
-
-def listify_value(arg, split=None):
- """
- Make a list out of an argument.
-
- Values from `distutils` argument parsing are always single strings;
- values from `optparse` parsing may be lists of strings that may need
- to be further split.
-
- No matter the input, this function returns a flat list of whitespace-trimmed
- strings, with `None` values filtered out.
-
- >>> listify_value("foo bar")
- ['foo', 'bar']
- >>> listify_value(["foo bar"])
- ['foo', 'bar']
- >>> listify_value([["foo"], "bar"])
- ['foo', 'bar']
- >>> listify_value([["foo"], ["bar", None, "foo"]])
- ['foo', 'bar', 'foo']
- >>> listify_value("foo, bar, quux", ",")
- ['foo', 'bar', 'quux']
-
- :param arg: A string or a list of strings
- :param split: The argument to pass to `str.split()`.
- :return: a flat list of strings
- """
- out = []
-
- if not isinstance(arg, (list, tuple)):
- arg = [arg]
-
- for val in arg:
- if val is None:
- continue
- if isinstance(val, (list, tuple)):
- out.extend(listify_value(val, split=split))
- continue
- out.extend(s.strip() for s in text_type(val).split(split))
- assert all(isinstance(val, string_types) for val in out)
- return out
-
-
-class Command(_Command):
- # This class is a small shim between Distutils commands and
- # optparse option parsing in the frontend command line.
-
- #: Option name to be input as `args` on the script command line.
- as_args = None
-
- #: Options which allow multiple values.
- #: This is used by the `optparse` transmogrification code.
- multiple_value_options = ()
-
- #: Options which are booleans.
- #: This is used by the `optparse` transmogrification code.
- # (This is actually used by distutils code too, but is never
- # declared in the base class.)
- boolean_options = ()
-
- #: Option aliases, to retain standalone command compatibility.
- #: Distutils does not support option aliases, but optparse does.
- #: This maps the distutils argument name to an iterable of aliases
- #: that are usable with optparse.
- option_aliases = {}
-
- #: Log object. To allow replacement in the script command line runner.
- log = distutils_log
-
- def __init__(self, dist=None):
- # A less strict version of distutils' `__init__`.
- self.distribution = dist
- self.initialize_options()
- self._dry_run = None
- self.verbose = False
- self.force = None
- self.help = 0
- self.finalized = 0
-
-
-class compile_catalog(Command):
- """Catalog compilation command for use in ``setup.py`` scripts.
-
- If correctly installed, this command is available to Setuptools-using
- setup scripts automatically. For projects using plain old ``distutils``,
- the command needs to be registered explicitly in ``setup.py``::
-
- from babel.messages.frontend import compile_catalog
-
- setup(
- ...
- cmdclass = {'compile_catalog': compile_catalog}
- )
-
- .. versionadded:: 0.9
- """
-
- description = 'compile message catalogs to binary MO files'
- user_options = [
- ('domain=', 'D',
- "domains of PO files (space separated list, default 'messages')"),
- ('directory=', 'd',
- 'path to base directory containing the catalogs'),
- ('input-file=', 'i',
- 'name of the input file'),
- ('output-file=', 'o',
- "name of the output file (default "
- "'<output_dir>/<locale>/LC_MESSAGES/<domain>.mo')"),
- ('locale=', 'l',
- 'locale of the catalog to compile'),
- ('use-fuzzy', 'f',
- 'also include fuzzy translations'),
- ('statistics', None,
- 'print statistics about translations')
- ]
- boolean_options = ['use-fuzzy', 'statistics']
-
- def initialize_options(self):
- self.domain = 'messages'
- self.directory = None
- self.input_file = None
- self.output_file = None
- self.locale = None
- self.use_fuzzy = False
- self.statistics = False
-
- def finalize_options(self):
- self.domain = listify_value(self.domain)
- if not self.input_file and not self.directory:
- raise DistutilsOptionError('you must specify either the input file '
- 'or the base directory')
- if not self.output_file and not self.directory:
- raise DistutilsOptionError('you must specify either the output file '
- 'or the base directory')
-
- def run(self):
- for domain in self.domain:
- self._run_domain(domain)
-
- def _run_domain(self, domain):
- po_files = []
- mo_files = []
-
- if not self.input_file:
- if self.locale:
- po_files.append((self.locale,
- os.path.join(self.directory, self.locale,
- 'LC_MESSAGES',
- domain + '.po')))
- mo_files.append(os.path.join(self.directory, self.locale,
- 'LC_MESSAGES',
- domain + '.mo'))
- else:
- for locale in os.listdir(self.directory):
- po_file = os.path.join(self.directory, locale,
- 'LC_MESSAGES', domain + '.po')
- if os.path.exists(po_file):
- po_files.append((locale, po_file))
- mo_files.append(os.path.join(self.directory, locale,
- 'LC_MESSAGES',
- domain + '.mo'))
- else:
- po_files.append((self.locale, self.input_file))
- if self.output_file:
- mo_files.append(self.output_file)
- else:
- mo_files.append(os.path.join(self.directory, self.locale,
- 'LC_MESSAGES',
- domain + '.mo'))
-
- if not po_files:
- raise DistutilsOptionError('no message catalogs found')
-
- for idx, (locale, po_file) in enumerate(po_files):
- mo_file = mo_files[idx]
- infile = open(po_file, 'rb')
- try:
- catalog = read_po(infile, locale)
- finally:
- infile.close()
-
- if self.statistics:
- translated = 0
- for message in list(catalog)[1:]:
- if message.string:
- translated += 1
- percentage = 0
- if len(catalog):
- percentage = translated * 100 // len(catalog)
- self.log.info(
- '%d of %d messages (%d%%) translated in %s',
- translated, len(catalog), percentage, po_file
- )
-
- if catalog.fuzzy and not self.use_fuzzy:
- self.log.info('catalog %s is marked as fuzzy, skipping', po_file)
- continue
-
- for message, errors in catalog.check():
- for error in errors:
- self.log.error(
- 'error: %s:%d: %s', po_file, message.lineno, error
- )
-
- self.log.info('compiling catalog %s to %s', po_file, mo_file)
-
- outfile = open(mo_file, 'wb')
- try:
- write_mo(outfile, catalog, use_fuzzy=self.use_fuzzy)
- finally:
- outfile.close()
-
-
-class extract_messages(Command):
- """Message extraction command for use in ``setup.py`` scripts.
-
- If correctly installed, this command is available to Setuptools-using
- setup scripts automatically. For projects using plain old ``distutils``,
- the command needs to be registered explicitly in ``setup.py``::
-
- from babel.messages.frontend import extract_messages
-
- setup(
- ...
- cmdclass = {'extract_messages': extract_messages}
- )
- """
-
- description = 'extract localizable strings from the project code'
- user_options = [
- ('charset=', None,
- 'charset to use in the output file (default "utf-8")'),
- ('keywords=', 'k',
- 'space-separated list of keywords to look for in addition to the '
- 'defaults (may be repeated multiple times)'),
- ('no-default-keywords', None,
- 'do not include the default keywords'),
- ('mapping-file=', 'F',
- 'path to the mapping configuration file'),
- ('no-location', None,
- 'do not include location comments with filename and line number'),
- ('omit-header', None,
- 'do not include msgid "" entry in header'),
- ('output-file=', 'o',
- 'name of the output file'),
- ('width=', 'w',
- 'set output line width (default 76)'),
- ('no-wrap', None,
- 'do not break message lines longer than the output line width '
- 'into several lines'),
- ('sort-output', None,
- 'generate sorted output (default False)'),
- ('sort-by-file', None,
- 'sort output by file location (default False)'),
- ('msgid-bugs-address=', None,
- 'set report address for msgid'),
- ('copyright-holder=', None,
- 'set copyright holder in output'),
- ('project=', None,
- 'set project name in output'),
- ('version=', None,
- 'set project version in output'),
- ('add-comments=', 'c',
- 'place comment block with TAG (or those preceding keyword lines) in '
- 'output file. Separate multiple TAGs with commas (,)'), # TODO: Support repetition of this argument
- ('strip-comments', 's',
- 'strip the comment TAGs from the comments.'),
- ('input-paths=', None,
- 'files or directories that should be scanned for messages. Separate multiple '
- 'files or directories with commas (,)'), # TODO: Support repetition of this argument
- ('input-dirs=', None, # TODO (3.x): Remove me.
- 'alias for input-paths (accepts files as well as directories).'),
- ]
- boolean_options = [
- 'no-default-keywords', 'no-location', 'omit-header', 'no-wrap',
- 'sort-output', 'sort-by-file', 'strip-comments'
- ]
- as_args = 'input-paths'
- multiple_value_options = ('add-comments', 'keywords')
- option_aliases = {
- 'keywords': ('--keyword',),
- 'mapping-file': ('--mapping',),
- 'output-file': ('--output',),
- 'strip-comments': ('--strip-comment-tags',),
- }
-
- def initialize_options(self):
- self.charset = 'utf-8'
- self.keywords = None
- self.no_default_keywords = False
- self.mapping_file = None
- self.no_location = False
- self.omit_header = False
- self.output_file = None
- self.input_dirs = None
- self.input_paths = None
- self.width = None
- self.no_wrap = False
- self.sort_output = False
- self.sort_by_file = False
- self.msgid_bugs_address = None
- self.copyright_holder = None
- self.project = None
- self.version = None
- self.add_comments = None
- self.strip_comments = False
-
- def finalize_options(self):
- if self.input_dirs:
- if not self.input_paths:
- self.input_paths = self.input_dirs
- else:
- raise DistutilsOptionError(
- 'input-dirs and input-paths are mutually exclusive'
- )
-
- if self.no_default_keywords:
- keywords = {}
- else:
- keywords = DEFAULT_KEYWORDS.copy()
-
- keywords.update(parse_keywords(listify_value(self.keywords)))
-
- self.keywords = keywords
-
- if not self.keywords:
- raise DistutilsOptionError('you must specify new keywords if you '
- 'disable the default ones')
-
- if not self.output_file:
- raise DistutilsOptionError('no output file specified')
- if self.no_wrap and self.width:
- raise DistutilsOptionError("'--no-wrap' and '--width' are mutually "
- "exclusive")
- if not self.no_wrap and not self.width:
- self.width = 76
- elif self.width is not None:
- self.width = int(self.width)
-
- if self.sort_output and self.sort_by_file:
- raise DistutilsOptionError("'--sort-output' and '--sort-by-file' "
- "are mutually exclusive")
-
- if self.input_paths:
- if isinstance(self.input_paths, string_types):
- self.input_paths = re.split(r',\s*', self.input_paths)
- elif self.distribution is not None:
- self.input_paths = dict.fromkeys([
- k.split('.', 1)[0]
- for k in (self.distribution.packages or ())
- ]).keys()
- else:
- self.input_paths = []
-
- if not self.input_paths:
- raise DistutilsOptionError("no input files or directories specified")
-
- for path in self.input_paths:
- if not os.path.exists(path):
- raise DistutilsOptionError("Input path: %s does not exist" % path)
-
- self.add_comments = listify_value(self.add_comments or (), ",")
-
- if self.distribution:
- if not self.project:
- self.project = self.distribution.get_name()
- if not self.version:
- self.version = self.distribution.get_version()
-
- def run(self):
- mappings = self._get_mappings()
- with open(self.output_file, 'wb') as outfile:
- catalog = Catalog(project=self.project,
- version=self.version,
- msgid_bugs_address=self.msgid_bugs_address,
- copyright_holder=self.copyright_holder,
- charset=self.charset)
-
- for path, (method_map, options_map) in mappings.items():
- def callback(filename, method, options):
- if method == 'ignore':
- return
-
- # If we explicitly provide a full filepath, just use that.
- # Otherwise, path will be the directory path and filename
- # is the relative path from that dir to the file.
- # So we can join those to get the full filepath.
- if os.path.isfile(path):
- filepath = path
- else:
- filepath = os.path.normpath(os.path.join(path, filename))
-
- optstr = ''
- if options:
- optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for
- k, v in options.items()])
- self.log.info('extracting messages from %s%s', filepath, optstr)
-
- if os.path.isfile(path):
- current_dir = os.getcwd()
- extracted = check_and_call_extract_file(
- path, method_map, options_map,
- callback, self.keywords, self.add_comments,
- self.strip_comments, current_dir
- )
- else:
- extracted = extract_from_dir(
- path, method_map, options_map,
- keywords=self.keywords,
- comment_tags=self.add_comments,
- callback=callback,
- strip_comment_tags=self.strip_comments
- )
- for filename, lineno, message, comments, context in extracted:
- if os.path.isfile(path):
- filepath = filename # already normalized
- else:
- filepath = os.path.normpath(os.path.join(path, filename))
-
- catalog.add(message, None, [(filepath, lineno)],
- auto_comments=comments, context=context)
-
- self.log.info('writing PO template file to %s' % self.output_file)
- write_po(outfile, catalog, width=self.width,
- no_location=self.no_location,
- omit_header=self.omit_header,
- sort_output=self.sort_output,
- sort_by_file=self.sort_by_file)
-
- def _get_mappings(self):
- mappings = {}
-
- if self.mapping_file:
- fileobj = open(self.mapping_file, 'U')
- try:
- method_map, options_map = parse_mapping(fileobj)
- for path in self.input_paths:
- mappings[path] = method_map, options_map
- finally:
- fileobj.close()
-
- elif getattr(self.distribution, 'message_extractors', None):
- message_extractors = self.distribution.message_extractors
- for path, mapping in message_extractors.items():
- if isinstance(mapping, string_types):
- method_map, options_map = parse_mapping(StringIO(mapping))
- else:
- method_map, options_map = [], {}
- for pattern, method, options in mapping:
- method_map.append((pattern, method))
- options_map[pattern] = options or {}
- mappings[path] = method_map, options_map
-
- else:
- for path in self.input_paths:
- mappings[path] = DEFAULT_MAPPING, {}
-
- return mappings
-
-
-def check_message_extractors(dist, name, value):
- """Validate the ``message_extractors`` keyword argument to ``setup()``.
-
- :param dist: the distutils/setuptools ``Distribution`` object
- :param name: the name of the keyword argument (should always be
- "message_extractors")
- :param value: the value of the keyword argument
- :raise `DistutilsSetupError`: if the value is not valid
- """
- assert name == 'message_extractors'
- if not isinstance(value, dict):
- raise DistutilsSetupError('the value of the "message_extractors" '
- 'parameter must be a dictionary')
-
-
-class init_catalog(Command):
- """New catalog initialization command for use in ``setup.py`` scripts.
-
- If correctly installed, this command is available to Setuptools-using
- setup scripts automatically. For projects using plain old ``distutils``,
- the command needs to be registered explicitly in ``setup.py``::
-
- from babel.messages.frontend import init_catalog
-
- setup(
- ...
- cmdclass = {'init_catalog': init_catalog}
- )
- """
-
- description = 'create a new catalog based on a POT file'
- user_options = [
- ('domain=', 'D',
- "domain of PO file (default 'messages')"),
- ('input-file=', 'i',
- 'name of the input file'),
- ('output-dir=', 'd',
- 'path to output directory'),
- ('output-file=', 'o',
- "name of the output file (default "
- "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
- ('locale=', 'l',
- 'locale for the new localized catalog'),
- ('width=', 'w',
- 'set output line width (default 76)'),
- ('no-wrap', None,
- 'do not break message lines longer than the output line width '
- 'into several lines'),
- ]
- boolean_options = ['no-wrap']
-
- def initialize_options(self):
- self.output_dir = None
- self.output_file = None
- self.input_file = None
- self.locale = None
- self.domain = 'messages'
- self.no_wrap = False
- self.width = None
-
- def finalize_options(self):
- if not self.input_file:
- raise DistutilsOptionError('you must specify the input file')
-
- if not self.locale:
- raise DistutilsOptionError('you must provide a locale for the '
- 'new catalog')
- try:
- self._locale = Locale.parse(self.locale)
- except UnknownLocaleError as e:
- raise DistutilsOptionError(e)
-
- if not self.output_file and not self.output_dir:
- raise DistutilsOptionError('you must specify the output directory')
- if not self.output_file:
- self.output_file = os.path.join(self.output_dir, self.locale,
- 'LC_MESSAGES', self.domain + '.po')
-
- if not os.path.exists(os.path.dirname(self.output_file)):
- os.makedirs(os.path.dirname(self.output_file))
- if self.no_wrap and self.width:
- raise DistutilsOptionError("'--no-wrap' and '--width' are mutually "
- "exclusive")
- if not self.no_wrap and not self.width:
- self.width = 76
- elif self.width is not None:
- self.width = int(self.width)
-
- def run(self):
- self.log.info(
- 'creating catalog %s based on %s', self.output_file, self.input_file
- )
-
- infile = open(self.input_file, 'rb')
- try:
- # Although we are reading from the catalog template, read_po must
- # be fed the locale in order to correctly calculate plurals.
- catalog = read_po(infile, locale=self.locale)
- finally:
- infile.close()
-
- catalog.locale = self._locale
- catalog.revision_date = datetime.now(LOCALTZ)
- catalog.fuzzy = False
-
- outfile = open(self.output_file, 'wb')
- try:
- write_po(outfile, catalog, width=self.width)
- finally:
- outfile.close()
-
-
-class update_catalog(Command):
- """Catalog merging command for use in ``setup.py`` scripts.
-
- If correctly installed, this command is available to Setuptools-using
- setup scripts automatically. For projects using plain old ``distutils``,
- the command needs to be registered explicitly in ``setup.py``::
-
- from babel.messages.frontend import update_catalog
-
- setup(
- ...
- cmdclass = {'update_catalog': update_catalog}
- )
-
- .. versionadded:: 0.9
- """
-
- description = 'update message catalogs from a POT file'
- user_options = [
- ('domain=', 'D',
- "domain of PO file (default 'messages')"),
- ('input-file=', 'i',
- 'name of the input file'),
- ('output-dir=', 'd',
- 'path to base directory containing the catalogs'),
- ('output-file=', 'o',
- "name of the output file (default "
- "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
- ('locale=', 'l',
- 'locale of the catalog to update'),
- ('width=', 'w',
- 'set output line width (default 76)'),
- ('no-wrap', None,
- 'do not break message lines longer than the output line width '
- 'into several lines'),
- ('ignore-obsolete=', None,
- 'whether to omit obsolete messages from the output'),
- ('no-fuzzy-matching', 'N',
- 'do not use fuzzy matching'),
- ('update-header-comment', None,
- 'update target header comment'),
- ('previous', None,
- 'keep previous msgids of translated messages')
- ]
- boolean_options = ['no-wrap', 'ignore-obsolete', 'no-fuzzy-matching', 'previous', 'update-header-comment']
-
- def initialize_options(self):
- self.domain = 'messages'
- self.input_file = None
- self.output_dir = None
- self.output_file = None
- self.locale = None
- self.width = None
- self.no_wrap = False
- self.ignore_obsolete = False
- self.no_fuzzy_matching = False
- self.update_header_comment = False
- self.previous = False
-
- def finalize_options(self):
- if not self.input_file:
- raise DistutilsOptionError('you must specify the input file')
- if not self.output_file and not self.output_dir:
- raise DistutilsOptionError('you must specify the output file or '
- 'directory')
- if self.output_file and not self.locale:
- raise DistutilsOptionError('you must specify the locale')
- if self.no_wrap and self.width:
- raise DistutilsOptionError("'--no-wrap' and '--width' are mutually "
- "exclusive")
- if not self.no_wrap and not self.width:
- self.width = 76
- elif self.width is not None:
- self.width = int(self.width)
- if self.no_fuzzy_matching and self.previous:
- self.previous = False
-
- def run(self):
- po_files = []
- if not self.output_file:
- if self.locale:
- po_files.append((self.locale,
- os.path.join(self.output_dir, self.locale,
- 'LC_MESSAGES',
- self.domain + '.po')))
- else:
- for locale in os.listdir(self.output_dir):
- po_file = os.path.join(self.output_dir, locale,
- 'LC_MESSAGES',
- self.domain + '.po')
- if os.path.exists(po_file):
- po_files.append((locale, po_file))
- else:
- po_files.append((self.locale, self.output_file))
-
- domain = self.domain
- if not domain:
- domain = os.path.splitext(os.path.basename(self.input_file))[0]
-
- infile = open(self.input_file, 'rb')
- try:
- template = read_po(infile)
- finally:
- infile.close()
-
- if not po_files:
- raise DistutilsOptionError('no message catalogs found')
-
- for locale, filename in po_files:
- self.log.info('updating catalog %s based on %s', filename, self.input_file)
- infile = open(filename, 'rb')
- try:
- catalog = read_po(infile, locale=locale, domain=domain)
- finally:
- infile.close()
-
- catalog.update(
- template, self.no_fuzzy_matching,
- update_header_comment=self.update_header_comment
- )
-
- tmpname = os.path.join(os.path.dirname(filename),
- tempfile.gettempprefix() +
- os.path.basename(filename))
- tmpfile = open(tmpname, 'wb')
- try:
- try:
- write_po(tmpfile, catalog,
- ignore_obsolete=self.ignore_obsolete,
- include_previous=self.previous, width=self.width)
- finally:
- tmpfile.close()
- except:
- os.remove(tmpname)
- raise
-
- try:
- os.rename(tmpname, filename)
- except OSError:
- # We're probably on Windows, which doesn't support atomic
- # renames, at least not through Python. If the error is in
- # fact due to a permissions problem, that same error is going
- # to be raised by one of the following operations.
- os.remove(filename)
- shutil.copy(tmpname, filename)
- os.remove(tmpname)
-
-
-class CommandLineInterface(object):
- """Command-line interface.
-
- This class provides a simple command-line interface to the message
- extraction and PO file generation functionality.
- """
-
- usage = '%%prog %s [options] %s'
- version = '%%prog %s' % VERSION
- commands = {
- 'compile': 'compile message catalogs to MO files',
- 'extract': 'extract messages from source files and generate a POT file',
- 'init': 'create new message catalogs from a POT file',
- 'update': 'update existing message catalogs from a POT file'
- }
-
- command_classes = {
- 'compile': compile_catalog,
- 'extract': extract_messages,
- 'init': init_catalog,
- 'update': update_catalog,
- }
-
- log = None # Replaced on instance level
-
- def run(self, argv=None):
- """Main entry point of the command-line interface.
-
- :param argv: list of arguments passed on the command-line
- """
-
- if argv is None:
- argv = sys.argv
-
- self.parser = optparse.OptionParser(usage=self.usage % ('command', '[args]'),
- version=self.version)
- self.parser.disable_interspersed_args()
- self.parser.print_help = self._help
- self.parser.add_option('--list-locales', dest='list_locales',
- action='store_true',
- help="print all known locales and exit")
- self.parser.add_option('-v', '--verbose', action='store_const',
- dest='loglevel', const=logging.DEBUG,
- help='print as much as possible')
- self.parser.add_option('-q', '--quiet', action='store_const',
- dest='loglevel', const=logging.ERROR,
- help='print as little as possible')
- self.parser.set_defaults(list_locales=False, loglevel=logging.INFO)
-
- options, args = self.parser.parse_args(argv[1:])
-
- self._configure_logging(options.loglevel)
- if options.list_locales:
- identifiers = localedata.locale_identifiers()
- longest = max([len(identifier) for identifier in identifiers])
- identifiers.sort()
- format = u'%%-%ds %%s' % (longest + 1)
- for identifier in identifiers:
- locale = Locale.parse(identifier)
- output = format % (identifier, locale.english_name)
- print(output.encode(sys.stdout.encoding or
- getpreferredencoding() or
- 'ascii', 'replace'))
- return 0
-
- if not args:
- self.parser.error('no valid command or option passed. '
- 'Try the -h/--help option for more information.')
-
- cmdname = args[0]
- if cmdname not in self.commands:
- self.parser.error('unknown command "%s"' % cmdname)
-
- cmdinst = self._configure_command(cmdname, args[1:])
- return cmdinst.run()
-
- def _configure_logging(self, loglevel):
- self.log = logging.getLogger('babel')
- self.log.setLevel(loglevel)
- # Don't add a new handler for every instance initialization (#227);
- # that would cause duplicated output when the CommandLineInterface is
- # used as a normal Python class.
- if self.log.handlers:
- handler = self.log.handlers[0]
- else:
- handler = logging.StreamHandler()
- self.log.addHandler(handler)
- handler.setLevel(loglevel)
- formatter = logging.Formatter('%(message)s')
- handler.setFormatter(formatter)
-
- def _help(self):
- print(self.parser.format_help())
- print("commands:")
- longest = max([len(command) for command in self.commands])
- format = " %%-%ds %%s" % max(8, longest + 1)
- commands = sorted(self.commands.items())
- for name, description in commands:
- print(format % (name, description))
-
- def _configure_command(self, cmdname, argv):
- """
- :type cmdname: str
- :type argv: list[str]
- """
- cmdclass = self.command_classes[cmdname]
- cmdinst = cmdclass()
- if self.log:
- cmdinst.log = self.log # Use our logger, not distutils'.
- assert isinstance(cmdinst, Command)
- cmdinst.initialize_options()
-
- parser = optparse.OptionParser(
- usage=self.usage % (cmdname, ''),
- description=self.commands[cmdname]
- )
- as_args = getattr(cmdclass, "as_args", ())
- for long, short, help in cmdclass.user_options:
- name = long.strip("=")
- default = getattr(cmdinst, name.replace('-', '_'))
- strs = ["--%s" % name]
- if short:
- strs.append("-%s" % short)
- strs.extend(cmdclass.option_aliases.get(name, ()))
- if name == as_args:
- parser.usage += "<%s>" % name
- elif name in cmdclass.boolean_options:
- parser.add_option(*strs, action="store_true", help=help)
- elif name in cmdclass.multiple_value_options:
- parser.add_option(*strs, action="append", help=help)
- else:
- parser.add_option(*strs, help=help, default=default)
- options, args = parser.parse_args(argv)
-
- if as_args:
- setattr(options, as_args.replace('-', '_'), args)
-
- for key, value in vars(options).items():
- setattr(cmdinst, key, value)
-
- try:
- cmdinst.ensure_finalized()
- except DistutilsOptionError as err:
- parser.error(str(err))
-
- return cmdinst
-
-
-def main():
- return CommandLineInterface().run(sys.argv)
-
-
-def parse_mapping(fileobj, filename=None):
- """Parse an extraction method mapping from a file-like object.
-
- >>> buf = StringIO('''
- ... [extractors]
- ... custom = mypackage.module:myfunc
- ...
- ... # Python source files
- ... [python: **.py]
- ...
- ... # Genshi templates
- ... [genshi: **/templates/**.html]
- ... include_attrs =
- ... [genshi: **/templates/**.txt]
- ... template_class = genshi.template:TextTemplate
- ... encoding = latin-1
- ...
- ... # Some custom extractor
- ... [custom: **/custom/*.*]
- ... ''')
-
- >>> method_map, options_map = parse_mapping(buf)
- >>> len(method_map)
- 4
-
- >>> method_map[0]
- ('**.py', 'python')
- >>> options_map['**.py']
- {}
- >>> method_map[1]
- ('**/templates/**.html', 'genshi')
- >>> options_map['**/templates/**.html']['include_attrs']
- ''
- >>> method_map[2]
- ('**/templates/**.txt', 'genshi')
- >>> options_map['**/templates/**.txt']['template_class']
- 'genshi.template:TextTemplate'
- >>> options_map['**/templates/**.txt']['encoding']
- 'latin-1'
-
- >>> method_map[3]
- ('**/custom/*.*', 'mypackage.module:myfunc')
- >>> options_map['**/custom/*.*']
- {}
-
- :param fileobj: a readable file-like object containing the configuration
- text to parse
- :param filename: the name of the mapping configuration file, if known
- :see: `extract_from_dir`
- """
- extractors = {}
- method_map = []
- options_map = {}
-
- parser = RawConfigParser()
- parser._sections = odict(parser._sections) # We need ordered sections
- parser.readfp(fileobj, filename)
- for section in parser.sections():
- if section == 'extractors':
- extractors = dict(parser.items(section))
- else:
- method, pattern = [part.strip() for part in section.split(':', 1)]
- method_map.append((pattern, method))
- options_map[pattern] = dict(parser.items(section))
-
- if extractors:
- for idx, (pattern, method) in enumerate(method_map):
- if method in extractors:
- method = extractors[method]
- method_map[idx] = (pattern, method)
-
- return (method_map, options_map)
-
-
-def parse_keywords(strings=[]):
- """Parse keywords specifications from the given list of strings.
-
- >>> kw = sorted(parse_keywords(['_', 'dgettext:2', 'dngettext:2,3', 'pgettext:1c,2']).items())
- >>> for keyword, indices in kw:
- ... print((keyword, indices))
- ('_', None)
- ('dgettext', (2,))
- ('dngettext', (2, 3))
- ('pgettext', ((1, 'c'), 2))
- """
- keywords = {}
- for string in strings:
- if ':' in string:
- funcname, indices = string.split(':')
- else:
- funcname, indices = string, None
- if funcname not in keywords:
- if indices:
- inds = []
- for x in indices.split(','):
- if x[-1] == 'c':
- inds.append((int(x[:-1]), 'c'))
- else:
- inds.append(int(x))
- indices = tuple(inds)
- keywords[funcname] = indices
- return keywords
-
-
-if __name__ == '__main__':
- main()
diff --git a/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/jslexer.py b/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/jslexer.py
deleted file mode 100644
index aed39f3..0000000
--- a/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/jslexer.py
+++ /dev/null
@@ -1,185 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- babel.messages.jslexer
- ~~~~~~~~~~~~~~~~~~~~~~
-
- A simple JavaScript 1.5 lexer which is used for the JavaScript
- extractor.
-
- :copyright: (c) 2013 by the Babel Team.
- :license: BSD, see LICENSE for more details.
-"""
-from collections import namedtuple
-import re
-from babel._compat import unichr
-
-operators = sorted([
- '+', '-', '*', '%', '!=', '==', '<', '>', '<=', '>=', '=',
- '+=', '-=', '*=', '%=', '<<', '>>', '>>>', '<<=', '>>=',
- '>>>=', '&', '&=', '|', '|=', '&&', '||', '^', '^=', '(', ')',
- '[', ']', '{', '}', '!', '--', '++', '~', ',', ';', '.', ':'
-], key=len, reverse=True)
-
-escapes = {'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t'}
-
-name_re = re.compile(r'[\w$_][\w\d$_]*', re.UNICODE)
-dotted_name_re = re.compile(r'[\w$_][\w\d$_.]*[\w\d$_.]', re.UNICODE)
-division_re = re.compile(r'/=?')
-regex_re = re.compile(r'/(?:[^/\\]*(?:\\.[^/\\]*)*)/[a-zA-Z]*(?s)')
-line_re = re.compile(r'(\r\n|\n|\r)')
-line_join_re = re.compile(r'\\' + line_re.pattern)
-uni_escape_re = re.compile(r'[a-fA-F0-9]{1,4}')
-
-Token = namedtuple('Token', 'type value lineno')
-
-_rules = [
- (None, re.compile(r'\s+(?u)')),
- (None, re.compile(r'<!--.*')),
- ('linecomment', re.compile(r'//.*')),
- ('multilinecomment', re.compile(r'/\*.*?\*/(?us)')),
- ('dotted_name', dotted_name_re),
- ('name', name_re),
- ('number', re.compile(r'''(?x)(
- (?:0|[1-9]\d*)
- (\.\d+)?
- ([eE][-+]?\d+)? |
- (0x[a-fA-F0-9]+)
- )''')),
- ('jsx_tag', re.compile(r'<(?:/?)\w+.+?>', re.I)), # May be mangled in `get_rules`
- ('operator', re.compile(r'(%s)' % '|'.join(map(re.escape, operators)))),
- ('template_string', re.compile(r'''`(?:[^`\\]*(?:\\.[^`\\]*)*)`''', re.UNICODE)),
- ('string', re.compile(r'''(?xs)(
- '(?:[^'\\]*(?:\\.[^'\\]*)*)' |
- "(?:[^"\\]*(?:\\.[^"\\]*)*)"
- )'''))
-]
-
-
-def get_rules(jsx, dotted, template_string):
- """
- Get a tokenization rule list given the passed syntax options.
-
- Internal to this module.
- """
- rules = []
- for token_type, rule in _rules:
- if not jsx and token_type and 'jsx' in token_type:
- continue
- if not template_string and token_type == 'template_string':
- continue
- if token_type == 'dotted_name':
- if not dotted:
- continue
- token_type = 'name'
- rules.append((token_type, rule))
- return rules
-
-
-def indicates_division(token):
- """A helper function that helps the tokenizer to decide if the current
- token may be followed by a division operator.
- """
- if token.type == 'operator':
- return token.value in (')', ']', '}', '++', '--')
- return token.type in ('name', 'number', 'string', 'regexp')
-
-
-def unquote_string(string):
- """Unquote a string with JavaScript rules. The string has to start with
- string delimiters (``'``, ``"`` or the back-tick/grave accent (for template strings).)
- """
- assert string and string[0] == string[-1] and string[0] in '"\'`', \
- 'string provided is not properly delimited'
- string = line_join_re.sub('\\1', string[1:-1])
- result = []
- add = result.append
- pos = 0
-
- while 1:
- # scan for the next escape
- escape_pos = string.find('\\', pos)
- if escape_pos < 0:
- break
- add(string[pos:escape_pos])
-
- # check which character is escaped
- next_char = string[escape_pos + 1]
- if next_char in escapes:
- add(escapes[next_char])
-
- # Unicode escapes: try to consume up to four hexadecimal
- # characters and interpret them as a Unicode code point. If
- # there is no such code point, put all the consumed characters
- # into the string.
- elif next_char in 'uU':
- escaped = uni_escape_re.match(string, escape_pos + 2)
- if escaped is not None:
- escaped_value = escaped.group()
- if len(escaped_value) == 4:
- try:
- add(unichr(int(escaped_value, 16)))
- except ValueError:
- pass
- else:
- pos = escape_pos + 6
- continue
- add(next_char + escaped_value)
- pos = escaped.end()
- continue
- else:
- add(next_char)
-
- # bogus escape. Just remove the backslash.
- else:
- add(next_char)
- pos = escape_pos + 2
-
- if pos < len(string):
- add(string[pos:])
-
- return u''.join(result)
-
-
-def tokenize(source, jsx=True, dotted=True, template_string=True):
- """
- Tokenize JavaScript/JSX source. Returns a generator of tokens.
-
- :param jsx: Enable (limited) JSX parsing.
- :param dotted: Read dotted names as single name token.
- :param template_string: Support ES6 template strings
- """
- may_divide = False
- pos = 0
- lineno = 1
- end = len(source)
- rules = get_rules(jsx=jsx, dotted=dotted, template_string=template_string)
-
- while pos < end:
- # handle regular rules first
- for token_type, rule in rules:
- match = rule.match(source, pos)
- if match is not None:
- break
- # If we don't have a match, we don't give up yet: check for
- # division operators or regular expression literals, based on
- # `may_divide`, which is determined from the last processed
- # non-whitespace token via `indicates_division`.
- else:
- if may_divide:
- match = division_re.match(source, pos)
- token_type = 'operator'
- else:
- match = regex_re.match(source, pos)
- token_type = 'regexp'
- if match is None:
- # whoops, invalid syntax: jump one char ahead and try again.
- pos += 1
- continue
-
- token_value = match.group()
- if token_type is not None:
- token = Token(token_type, token_value, lineno)
- may_divide = indicates_division(token)
- yield token
- lineno += len(line_re.findall(token_value))
- pos = match.end()
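For reference on the deleted lexer: tokenize() yields Token(type, value, lineno) namedtuples and unquote_string() applies the JavaScript escape rules to a quoted literal. A minimal sketch, assuming Babel 2.3.4 is installed:

    from babel.messages.jslexer import tokenize, unquote_string

    source = u"var greeting = gettext('Hello\\u0021'); // translators: greeting"

    # Whitespace is skipped; names, operators, strings and comments come
    # through as typed tokens.
    for token in tokenize(source):
        print(token.type, token.value, token.lineno)

    print(unquote_string(u"'Hello\\u0021'"))   # Hello!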
diff --git a/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/mofile.py b/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/mofile.py
deleted file mode 100644
index 79042e0..0000000
--- a/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/mofile.py
+++ /dev/null
@@ -1,213 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- babel.messages.mofile
- ~~~~~~~~~~~~~~~~~~~~~
-
- Writing of files in the ``gettext`` MO (machine object) format.
-
- :copyright: (c) 2013 by the Babel Team.
- :license: BSD, see LICENSE for more details.
-"""
-
-import array
-import struct
-
-from babel.messages.catalog import Catalog, Message
-from babel._compat import range_type, array_tobytes
-
-
-LE_MAGIC = 0x950412de
-BE_MAGIC = 0xde120495
-
-
-def read_mo(fileobj):
- """Read a binary MO file from the given file-like object and return a
- corresponding `Catalog` object.
-
- :param fileobj: the file-like object to read the MO file from
-
- :note: The implementation of this function is heavily based on the
- ``GNUTranslations._parse`` method of the ``gettext`` module in the
- standard library.
- """
- catalog = Catalog()
- headers = {}
-
- filename = getattr(fileobj, 'name', '')
-
- buf = fileobj.read()
- buflen = len(buf)
- unpack = struct.unpack
-
- # Parse the .mo file header, which consists of 5 32-bit words;
- # the byte order is detected from the magic number.
- magic = unpack('<I', buf[:4])[0] # Are we big endian or little endian?
- if magic == LE_MAGIC:
- version, msgcount, origidx, transidx = unpack('<4I', buf[4:20])
- ii = '<II'
- elif magic == BE_MAGIC:
- version, msgcount, origidx, transidx = unpack('>4I', buf[4:20])
- ii = '>II'
- else:
- raise IOError(0, 'Bad magic number', filename)
-
- # Now put all messages from the .mo file buffer into the catalog
- # dictionary
- for i in range_type(0, msgcount):
- mlen, moff = unpack(ii, buf[origidx:origidx + 8])
- mend = moff + mlen
- tlen, toff = unpack(ii, buf[transidx:transidx + 8])
- tend = toff + tlen
- if mend < buflen and tend < buflen:
- msg = buf[moff:mend]
- tmsg = buf[toff:tend]
- else:
- raise IOError(0, 'File is corrupt', filename)
-
- # See if we're looking at GNU .mo conventions for metadata
- if mlen == 0:
- # Catalog description
- lastkey = key = None
- for item in tmsg.splitlines():
- item = item.strip()
- if not item:
- continue
- if b':' in item:
- key, value = item.split(b':', 1)
- lastkey = key = key.strip().lower()
- headers[key] = value.strip()
- elif lastkey:
- headers[lastkey] += b'\n' + item
-
- if b'\x04' in msg: # context
- ctxt, msg = msg.split(b'\x04')
- else:
- ctxt = None
-
- if b'\x00' in msg: # plural forms
- msg = msg.split(b'\x00')
- tmsg = tmsg.split(b'\x00')
- if catalog.charset:
- msg = [x.decode(catalog.charset) for x in msg]
- tmsg = [x.decode(catalog.charset) for x in tmsg]
- else:
- if catalog.charset:
- msg = msg.decode(catalog.charset)
- tmsg = tmsg.decode(catalog.charset)
- catalog[msg] = Message(msg, tmsg, context=ctxt)
-
- # advance to next entry in the seek tables
- origidx += 8
- transidx += 8
-
- catalog.mime_headers = headers.items()
- return catalog
-
-
-def write_mo(fileobj, catalog, use_fuzzy=False):
- """Write a catalog to the specified file-like object using the GNU MO file
- format.
-
- >>> import sys
- >>> from babel.messages import Catalog
- >>> from gettext import GNUTranslations
- >>> from babel._compat import BytesIO
-
- >>> catalog = Catalog(locale='en_US')
- >>> catalog.add('foo', 'Voh')
- <Message ...>
- >>> catalog.add((u'bar', u'baz'), (u'Bahr', u'Batz'))
- <Message ...>
- >>> catalog.add('fuz', 'Futz', flags=['fuzzy'])
- <Message ...>
- >>> catalog.add('Fizz', '')
- <Message ...>
- >>> catalog.add(('Fuzz', 'Fuzzes'), ('', ''))
- <Message ...>
- >>> buf = BytesIO()
-
- >>> write_mo(buf, catalog)
- >>> x = buf.seek(0)
- >>> translations = GNUTranslations(fp=buf)
- >>> if sys.version_info[0] >= 3:
- ... translations.ugettext = translations.gettext
- ... translations.ungettext = translations.ngettext
- >>> translations.ugettext('foo')
- u'Voh'
- >>> translations.ungettext('bar', 'baz', 1)
- u'Bahr'
- >>> translations.ungettext('bar', 'baz', 2)
- u'Batz'
- >>> translations.ugettext('fuz')
- u'fuz'
- >>> translations.ugettext('Fizz')
- u'Fizz'
- >>> translations.ugettext('Fuzz')
- u'Fuzz'
- >>> translations.ugettext('Fuzzes')
- u'Fuzzes'
-
- :param fileobj: the file-like object to write to
- :param catalog: the `Catalog` instance
- :param use_fuzzy: whether translations marked as "fuzzy" should be included
- in the output
- """
- messages = list(catalog)
- if not use_fuzzy:
- messages[1:] = [m for m in messages[1:] if not m.fuzzy]
- messages.sort()
-
- ids = strs = b''
- offsets = []
-
- for message in messages:
- # For each string, we need size and file offset. Each string is NUL
- # terminated; the NUL does not count toward the size.
- if message.pluralizable:
- msgid = b'\x00'.join([
- msgid.encode(catalog.charset) for msgid in message.id
- ])
- msgstrs = []
- for idx, string in enumerate(message.string):
- if not string:
- msgstrs.append(message.id[min(int(idx), 1)])
- else:
- msgstrs.append(string)
- msgstr = b'\x00'.join([
- msgstr.encode(catalog.charset) for msgstr in msgstrs
- ])
- else:
- msgid = message.id.encode(catalog.charset)
- if not message.string:
- msgstr = message.id.encode(catalog.charset)
- else:
- msgstr = message.string.encode(catalog.charset)
- if message.context:
- msgid = b'\x04'.join([message.context.encode(catalog.charset),
- msgid])
- offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
- ids += msgid + b'\x00'
- strs += msgstr + b'\x00'
-
- # The header is 7 32-bit unsigned integers. We don't use hash tables, so
- # the keys start right after the index tables.
- keystart = 7 * 4 + 16 * len(messages)
- valuestart = keystart + len(ids)
-
- # The string table first has the list of keys, then the list of values.
- # Each entry has first the size of the string, then the file offset.
- koffsets = []
- voffsets = []
- for o1, l1, o2, l2 in offsets:
- koffsets += [l1, o1 + keystart]
- voffsets += [l2, o2 + valuestart]
- offsets = koffsets + voffsets
-
- fileobj.write(struct.pack('Iiiiiii',
- LE_MAGIC, # magic
- 0, # version
- len(messages), # number of entries
- 7 * 4, # start of key index
- 7 * 4 + len(messages) * 8, # start of value index
- 0, 0 # size and offset of hash table
- ) + array_tobytes(array.array("i", offsets)) + ids + strs)
diff --git a/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/plurals.py b/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/plurals.py
deleted file mode 100644
index cc7b79e..0000000
--- a/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/plurals.py
+++ /dev/null
@@ -1,252 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- babel.messages.plurals
- ~~~~~~~~~~~~~~~~~~~~~~
-
- Plural form definitions.
-
- :copyright: (c) 2013 by the Babel Team.
- :license: BSD, see LICENSE for more details.
-"""
-
-from babel.core import default_locale, Locale
-from operator import itemgetter
-
-
-# XXX: remove this file, duplication with babel.plural
-
-
-LC_CTYPE = default_locale('LC_CTYPE')
-
-
-PLURALS = {
- # Afar
- # 'aa': (),
- # Abkhazian
- # 'ab': (),
- # Avestan
- # 'ae': (),
- # Afrikaans - From Pootle's PO's
- 'af': (2, '(n != 1)'),
- # Akan
- # 'ak': (),
- # Amharic
- # 'am': (),
- # Aragonese
- # 'an': (),
- # Arabic - From Pootle's PO's
- 'ar': (6, '(n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n>=3 && n<=10 ? 3 : n>=11 && n<=99 ? 4 : 5)'),
- # Assamese
- # 'as': (),
- # Avaric
- # 'av': (),
- # Aymara
- # 'ay': (),
- # Azerbaijani
- # 'az': (),
- # Bashkir
- # 'ba': (),
- # Belarusian
- # 'be': (),
- # Bulgarian - From Pootle's PO's
- 'bg': (2, '(n != 1)'),
- # Bihari
- # 'bh': (),
- # Bislama
- # 'bi': (),
- # Bambara
- # 'bm': (),
- # Bengali - From Pootle's PO's
- 'bn': (2, '(n != 1)'),
- # Tibetan - as discussed in private with Andrew West
- 'bo': (1, '0'),
- # Breton
- # 'br': (),
- # Bosnian
- # 'bs': (),
- # Catalan - From Pootle's PO's
- 'ca': (2, '(n != 1)'),
- # Chechen
- # 'ce': (),
- # Chamorro
- # 'ch': (),
- # Corsican
- # 'co': (),
- # Cree
- # 'cr': (),
- # Czech
- 'cs': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
- # Church Slavic
- # 'cu': (),
- # Chuvash
- 'cv': (1, '0'),
- # Welsh
- 'cy': (5, '(n==1 ? 1 : n==2 ? 2 : n==3 ? 3 : n==6 ? 4 : 0)'),
- # Danish
- 'da': (2, '(n != 1)'),
- # German
- 'de': (2, '(n != 1)'),
- # Divehi
- # 'dv': (),
- # Dzongkha
- 'dz': (1, '0'),
- # Greek
- 'el': (2, '(n != 1)'),
- # English
- 'en': (2, '(n != 1)'),
- # Esperanto
- 'eo': (2, '(n != 1)'),
- # Spanish
- 'es': (2, '(n != 1)'),
- # Estonian
- 'et': (2, '(n != 1)'),
- # Basque - From Pootle's PO's
- 'eu': (2, '(n != 1)'),
- # Persian - From Pootle's PO's
- 'fa': (1, '0'),
- # Finnish
- 'fi': (2, '(n != 1)'),
- # French
- 'fr': (2, '(n > 1)'),
- # Friulian - From Pootle's PO's
- 'fur': (2, '(n > 1)'),
- # Irish
- 'ga': (3, '(n==1 ? 0 : n==2 ? 1 : 2)'),
- # Galician - From Pootle's PO's
- 'gl': (2, '(n != 1)'),
- # Hausa - From Pootle's PO's
- 'ha': (2, '(n != 1)'),
- # Hebrew
- 'he': (2, '(n != 1)'),
- # Hindi - From Pootle's PO's
- 'hi': (2, '(n != 1)'),
- # Croatian
- 'hr': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
- # Hungarian
- 'hu': (1, '0'),
- # Armenian - From Pootle's PO's
- 'hy': (1, '0'),
- # Icelandic - From Pootle's PO's
- 'is': (2, '(n != 1)'),
- # Italian
- 'it': (2, '(n != 1)'),
- # Japanese
- 'ja': (1, '0'),
- # Georgian - From Pootle's PO's
- 'ka': (1, '0'),
- # Kongo - From Pootle's PO's
- 'kg': (2, '(n != 1)'),
- # Khmer - From Pootle's PO's
- 'km': (1, '0'),
- # Korean
- 'ko': (1, '0'),
- # Kurdish - From Pootle's PO's
- 'ku': (2, '(n != 1)'),
- # Lao - Another member of the Tai language family, like Thai.
- 'lo': (1, '0'),
- # Lithuanian
- 'lt': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2)'),
- # Latvian
- 'lv': (3, '(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2)'),
- # Maltese - From Pootle's PO's
- 'mt': (4, '(n==1 ? 0 : n==0 || ( n%100>1 && n%100<11) ? 1 : (n%100>10 && n%100<20 ) ? 2 : 3)'),
- # Norwegian Bokmål
- 'nb': (2, '(n != 1)'),
- # Dutch
- 'nl': (2, '(n != 1)'),
- # Norwegian Nynorsk
- 'nn': (2, '(n != 1)'),
- # Norwegian
- 'no': (2, '(n != 1)'),
- # Punjabi - From Pootle's PO's
- 'pa': (2, '(n != 1)'),
- # Polish
- 'pl': (3, '(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
- # Portuguese
- 'pt': (2, '(n != 1)'),
- # Brazilian
- 'pt_BR': (2, '(n > 1)'),
- # Romanian - From Pootle's PO's
- 'ro': (3, '(n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2)'),
- # Russian
- 'ru': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
- # Slovak
- 'sk': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
- # Slovenian
- 'sl': (4, '(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3)'),
- # Serbian - From Pootle's PO's
- 'sr': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
- # Southern Sotho - From Pootle's PO's
- 'st': (2, '(n != 1)'),
- # Swedish
- 'sv': (2, '(n != 1)'),
- # Thai
- 'th': (1, '0'),
- # Turkish
- 'tr': (1, '0'),
- # Ukrainian
- 'uk': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
- # Venda - From Pootle's PO's
- 've': (2, '(n != 1)'),
- # Vietnamese - From Pootle's PO's
- 'vi': (1, '0'),
- # Xhosa - From Pootle's PO's
- 'xh': (2, '(n != 1)'),
- # Chinese - From Pootle's PO's (modified)
- 'zh': (1, '0'),
-}
-
-
-DEFAULT_PLURAL = (2, '(n != 1)')
-
-
-class _PluralTuple(tuple):
- """A tuple with plural information."""
-
- __slots__ = ()
- num_plurals = property(itemgetter(0), doc="""
- The number of plurals used by the locale.""")
- plural_expr = property(itemgetter(1), doc="""
- The plural expression used by the locale.""")
- plural_forms = property(lambda x: 'nplurals=%s; plural=%s;' % x, doc="""
- The plural forms string used by the catalog or locale.""")
-
- def __str__(self):
- return self.plural_forms
-
-
-def get_plural(locale=LC_CTYPE):
- """A tuple with the information catalogs need to perform proper
- pluralization. The first item of the tuple is the number of plural
- forms, the second the plural expression.
-
- >>> get_plural(locale='en')
- (2, '(n != 1)')
- >>> get_plural(locale='ga')
- (3, '(n==1 ? 0 : n==2 ? 1 : 2)')
-
- The object returned is a special tuple with additional members:
-
- >>> tup = get_plural("ja")
- >>> tup.num_plurals
- 1
- >>> tup.plural_expr
- '0'
- >>> tup.plural_forms
- 'nplurals=1; plural=0;'
-
- Converting the tuple into a string yields the plural forms string for
- a gettext catalog:
-
- >>> str(tup)
- 'nplurals=1; plural=0;'
- """
- locale = Locale.parse(locale)
- try:
- tup = PLURALS[str(locale)]
- except KeyError:
- try:
- tup = PLURALS[locale.language]
- except KeyError:
- tup = DEFAULT_PLURAL
- return _PluralTuple(tup)
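get_plural() tries the full locale identifier first, then the bare language code, and finally falls back to DEFAULT_PLURAL; the returned _PluralTuple also exposes the ready-made plural forms header value. A short sketch of that lookup order, assuming Babel 2.3.4 is installed:

    from babel.messages.plurals import DEFAULT_PLURAL, get_plural

    print(repr(get_plural('pt_BR')))      # (2, '(n > 1)')  - exact locale match
    print(repr(get_plural('pt_PT')))      # (2, '(n != 1)') - falls back to language 'pt'
    print(get_plural('ja').plural_forms)  # nplurals=1; plural=0;

    # Used when neither the locale nor its language has an entry.
    print(repr(DEFAULT_PLURAL))           # (2, '(n != 1)')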
diff --git a/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/pofile.py b/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/pofile.py
deleted file mode 100644
index a775ec0..0000000
--- a/jython-tosca-parser/src/main/resources/Lib/site-packages/babel-2.3.4-py2.7.egg/babel/messages/pofile.py
+++ /dev/null
@@ -1,507 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- babel.messages.pofile
- ~~~~~~~~~~~~~~~~~~~~~
-
- Reading and writing of files in the ``gettext`` PO (portable object)
- format.
-
- :copyright: (c) 2013 by the Babel Team.
- :license: BSD, see LICENSE for more details.
-"""
-
-from __future__ import print_function
-import os
-import re
-
-from babel.messages.catalog import Catalog, Message
-from babel.util import wraptext
-from babel._compat import text_type
-
-
-def unescape(string):
- r"""Reverse `escape` the given string.
-
- >>> print(unescape('"Say:\\n \\"hello, world!\\"\\n"'))
- Say:
- "hello, world!"
- <BLANKLINE>
-
- :param string: the string to unescape
- """
- def replace_escapes(match):
- m = match.group(1)
- if m == 'n':
- return '\n'
- elif m == 't':
- return '\t'
- elif m == 'r':
- return '\r'
- # m is \ or "
- return m
- return re.compile(r'\\([\\trn"])').sub(replace_escapes, string[1:-1])
-
-
-def denormalize(string):
- r"""Reverse the normalization done by the `normalize` function.
-
- >>> print(denormalize(r'''""
- ... "Say:\n"
- ... " \"hello, world!\"\n"'''))
- Say:
- "hello, world!"
- <BLANKLINE>
-
- >>> print(denormalize(r'''""
- ... "Say:\n"
- ... " \"Lorem ipsum dolor sit "
- ... "amet, consectetur adipisicing"
- ... " elit, \"\n"'''))
- Say:
- "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
- <BLANKLINE>
-
- :param string: the string to denormalize
- """
- if '\n' in string:
- escaped_lines = string.splitlines()
- if string.startswith('""'):
- escaped_lines = escaped_lines[1:]
- lines = map(unescape, escaped_lines)
- return ''.join(lines)
- else:
- return unescape(string)
-
-
-def read_po(fileobj, locale=None, domain=None, ignore_obsolete=False, charset=None):
- """Read messages from a ``gettext`` PO (portable object) file from the given
- file-like object and return a `Catalog`.
-
- >>> from datetime import datetime
- >>> from babel._compat import StringIO
- >>> buf = StringIO('''
- ... #: main.py:1
- ... #, fuzzy, python-format
- ... msgid "foo %(name)s"
- ... msgstr "quux %(name)s"
- ...
- ... # A user comment
- ... #. An auto comment
- ... #: main.py:3
- ... msgid "bar"
- ... msgid_plural "baz"
- ... msgstr[0] "bar"
- ... msgstr[1] "baaz"
- ... ''')
- >>> catalog = read_po(buf)
- >>> catalog.revision_date = datetime(2007, 4, 1)
-
- >>> for message in catalog:
- ... if message.id:
- ... print((message.id, message.string))
- ... print(' ', (message.locations, sorted(list(message.flags))))
- ... print(' ', (message.user_comments, message.auto_comments))
- (u'foo %(name)s', u'quux %(name)s')
- ([(u'main.py', 1)], [u'fuzzy', u'python-format'])
- ([], [])
- ((u'bar', u'baz'), (u'bar', u'baaz'))
- ([(u'main.py', 3)], [])
- ([u'A user comment'], [u'An auto comment'])
-
- .. versionadded:: 1.0
- Added support for explicit charset argument.
-
- :param fileobj: the file-like object to read the PO file from
- :param locale: the locale identifier or `Locale` object, or `None`
- if the catalog is not bound to a locale (which basically
- means it's a template)
- :param domain: the message domain
- :param ignore_obsolete: whether to ignore obsolete messages in the input
- :param charset: the character set of the catalog.
- """
- catalog = Catalog(locale=locale, domain=domain, charset=charset)
-
- counter = [0]
- offset = [0]
- messages = []
- translations = []
- locations = []
- flags = []
- user_comments = []
- auto_comments = []
- obsolete = [False]
- context = []
- in_msgid = [False]
- in_msgstr = [False]
- in_msgctxt = [False]
-
- def _add_message():
- translations.sort()
- if len(messages) > 1:
- msgid = tuple([denormalize(m) for m in messages])
- else:
- msgid = denormalize(messages[0])
- if isinstance(msgid, (list, tuple)):
- string = []
- for idx in range(catalog.num_plurals):
- try:
- string.append(translations[idx])
- except IndexError:
- string.append((idx, ''))
- string = tuple([denormalize(t[1]) for t in string])
- else:
- string = denormalize(translations[0][1])
- if context:
- msgctxt = denormalize('\n'.join(context))
- else:
- msgctxt = None
- message = Message(msgid, string, list(locations), set(flags),
- auto_comments, user_comments, lineno=offset[0] + 1,
- context=msgctxt)
- if obsolete[0]:
- if not ignore_obsolete:
- catalog.obsolete[msgid] = message
- else:
- catalog[msgid] = message
- del messages[:]
- del translations[:]
- del context[:]
- del locations[:]
- del flags[:]
- del auto_comments[:]
- del user_comments[:]
- obsolete[0] = False
- counter[0] += 1
-
- def _process_message_line(lineno, line):
- if line.startswith('msgid_plural'):
- in_msgid[0] = True
- msg = line[12:].lstrip()
- messages.append(msg)
- elif line.startswith('msgid'):
- in_msgid[0] = True
- offset[0] = lineno
- txt = line[5:].lstrip()
- if messages:
- _add_message()
- messages.append(txt)
- elif line.startswith('msgstr'):
- in_msgid[0] = False
- in_msgstr[0] = True
- msg = line[6:].lstrip()
- if msg.startswith('['):
- idx, msg = msg[1:].split(']', 1)
- translations.append([int(idx), msg.lstrip()])
- else:
- translations.append([0, msg])
- elif line.startswith('msgctxt'):
- if messages:
- _add_message()
- in_msgid[0] = in_msgstr[0] = False
- context.append(line[7:].lstrip())
- elif line.startswith('"'):
- if in_msgid[0]:
- messages[-1] += u'\n' + line.rstrip()
- elif in_msgstr[0]:
- translations[-1][1] += u'\n' + line.rstrip()
- elif in_msgctxt[0]:
- context.append(line.rstrip())
-
- for lineno, line in enumerate(fileobj.readlines()):
- line = line.strip()
- if not isinstance(line, text_type):
- line = line.decode(catalog.charset)
- if line.startswith('#'):
- in_msgid[0] = in_msgstr[0] = False
- if messages and translations:
- _add_message()
- if line[1:].startswith(':'):
- for location in line[2:].lstrip().split():
- pos = location.rfind(':')
- if pos >= 0:
- try:
- lineno = int(location[pos + 1:])
- except ValueError:
- continue
- locations.append((location[:pos], lineno))
- else:
- locations.append((location, None))
- elif line[1:].startswith(','):
- for flag in line[2:].lstrip().split(','):
- flags.append(flag.strip())
- elif line[1:].startswith('~'):
- obsolete[0] = True
- _process_message_line(lineno, line[2:].lstrip())
- elif line[1:].startswith('.'):
- # These are called auto-comments
- comment = line[2:].strip()
- if comment: # Just check that we're not adding empty comments
- auto_comments.append(comment)
- else:
- # These are called user comments
- user_comments.append(line[1:].strip())
- else:
- _process_message_line(lineno, line)
-
- if messages:
- _add_message()
-
- # No actual messages found, but there was some info in comments, from which
- # we'll construct an empty header message
- elif not counter[0] and (flags or user_comments or auto_comments):
- messages.append(u'')
- translations.append([0, u''])
- _add_message()
-
- return catalog
-
-
-WORD_SEP = re.compile('('
- r'\s+|' # any whitespace
- r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
- r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w)' # em-dash
- ')')
-
-
-def escape(string):
- r"""Escape the given string so that it can be included in double-quoted
- strings in ``PO`` files.
-
- >>> escape('''Say:
- ... "hello, world!"
- ... ''')
- '"Say:\\n \\"hello, world!\\"\\n"'
-
- :param string: the string to escape
- """
- return '"%s"' % string.replace('\\', '\\\\') \
- .replace('\t', '\\t') \
- .replace('\r', '\\r') \
- .replace('\n', '\\n') \
- .replace('\"', '\\"')
-
-
-def normalize(string, prefix='', width=76):
- r"""Convert a string into a format that is appropriate for .po files.
-
- >>> print(normalize('''Say:
- ... "hello, world!"
- ... ''', width=None))
- ""
- "Say:\n"
- " \"hello, world!\"\n"
-
- >>> print(normalize('''Say:
- ... "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
- ... ''', width=32))
- ""
- "Say:\n"
- " \"Lorem ipsum dolor sit "
- "amet, consectetur adipisicing"
- " elit, \"\n"
-
- :param string: the string to normalize
- :param prefix: a string that should be prepended to every line
- :param width: the maximum line width; use `None`, 0, or a negative number
- to completely disable line wrapping
- """
- if width and width > 0:
- prefixlen = len(prefix)
- lines = []
- for line in string.splitlines(True):
- if len(escape(line)) + prefixlen > width:
- chunks = WORD_SEP.split(line)
- chunks.reverse()
- while chunks:
- buf = []
- size = 2
- while chunks:
- l = len(escape(chunks[-1])) - 2 + prefixlen
- if size + l < width:
- buf.append(chunks.pop())
- size += l
- else:
- if not buf:
- # handle long chunks by putting them on a
- # separate line
- buf.append(chunks.pop())
- break
- lines.append(u''.join(buf))
- else:
- lines.append(line)
- else:
- lines = string.splitlines(True)
-
- if len(lines) <= 1:
- return escape(string)
-
- # Remove empty trailing line
- if lines and not lines[-1]:
- del lines[-1]
- lines[-1] += '\n'
- return u'""\n' + u'\n'.join([(prefix + escape(line)) for line in lines])
-
-
-def write_po(fileobj, catalog, width=76, no_location=False, omit_header=False,
-             sort_output=False, sort_by_file=False, ignore_obsolete=False,
-             include_previous=False):
-    r"""Write a ``gettext`` PO (portable object) template file for a given
-    message catalog to the provided file-like object.
-
-    >>> catalog = Catalog()
-    >>> catalog.add(u'foo %(name)s', locations=[('main.py', 1)],
-    ...             flags=('fuzzy',))
-    <Message...>
-    >>> catalog.add((u'bar', u'baz'), locations=[('main.py', 3)])
-    <Message...>
-    >>> from babel._compat import BytesIO
-    >>> buf = BytesIO()
-    >>> write_po(buf, catalog, omit_header=True)
-    >>> print(buf.getvalue().decode("utf8"))
-    #: main.py:1
-    #, fuzzy, python-format
-    msgid "foo %(name)s"
-    msgstr ""
-    <BLANKLINE>
-    #: main.py:3
-    msgid "bar"
-    msgid_plural "baz"
-    msgstr[0] ""
-    msgstr[1] ""
-    <BLANKLINE>
-    <BLANKLINE>
-
-    :param fileobj: the file-like object to write to
-    :param catalog: the `Catalog` instance
-    :param width: the maximum line width for the generated output; use `None`,
-                  0, or a negative number to completely disable line wrapping
-    :param no_location: do not emit a location comment for every message
-    :param omit_header: do not include the ``msgid ""`` entry at the top of the
-                        output
-    :param sort_output: whether to sort the messages in the output by msgid
-    :param sort_by_file: whether to sort the messages in the output by their
-                         locations
-    :param ignore_obsolete: whether to ignore obsolete messages and not include
-                            them in the output; by default they are included as
-                            comments
-    :param include_previous: include the old msgid as a comment when
-                             updating the catalog
-    """
-    def _normalize(key, prefix=''):
-        return normalize(key, prefix=prefix, width=width)
-
-    def _write(text):
-        if isinstance(text, text_type):
-            text = text.encode(catalog.charset, 'backslashreplace')
-        fileobj.write(text)
-
-    def _write_comment(comment, prefix=''):
-        # xgettext always wraps comments even if --no-wrap is passed;
-        # provide the same behaviour
-        if width and width > 0:
-            _width = width
-        else:
-            _width = 76
-        for line in wraptext(comment, _width):
-            _write('#%s %s\n' % (prefix, line.strip()))
-
-    def _write_message(message, prefix=''):
-        if isinstance(message.id, (list, tuple)):
-            if message.context:
-                _write('%smsgctxt %s\n' % (prefix,
-                                           _normalize(message.context, prefix)))
-            _write('%smsgid %s\n' % (prefix, _normalize(message.id[0], prefix)))
-            _write('%smsgid_plural %s\n' % (
-                prefix, _normalize(message.id[1], prefix)
-            ))
-
-            for idx in range(catalog.num_plurals):
-                try:
-                    string = message.string[idx]
-                except IndexError:
-                    string = ''
-                _write('%smsgstr[%d] %s\n' % (
-                    prefix, idx, _normalize(string, prefix)
-                ))
-        else:
-            if message.context:
-                _write('%smsgctxt %s\n' % (prefix,
-                                           _normalize(message.context, prefix)))
-            _write('%smsgid %s\n' % (prefix, _normalize(message.id, prefix)))
-            _write('%smsgstr %s\n' % (
-                prefix, _normalize(message.string or '', prefix)
-            ))
-
-    sort_by = None
-    if sort_output:
-        sort_by = "message"
-    elif sort_by_file:
-        sort_by = "location"
-
-    for message in _sort_messages(catalog, sort_by=sort_by):
-        if not message.id:  # This is the header "message"
-            if omit_header:
-                continue
-            comment_header = catalog.header_comment
-            if width and width > 0:
-                lines = []
-                for line in comment_header.splitlines():
-                    lines += wraptext(line, width=width,
-                                      subsequent_indent='# ')
-                comment_header = u'\n'.join(lines)
-            _write(comment_header + u'\n')
-
-        for comment in message.user_comments:
-            _write_comment(comment)
-        for comment in message.auto_comments:
-            _write_comment(comment, prefix='.')
-
-        if not no_location:
-            locs = []
-            for filename, lineno in sorted(message.locations):
-                if lineno:
-                    locs.append(u'%s:%d' % (filename.replace(os.sep, '/'), lineno))
-                else:
-                    locs.append(u'%s' % filename.replace(os.sep, '/'))
-            _write_comment(' '.join(locs), prefix=':')
-        if message.flags:
-            _write('#%s\n' % ', '.join([''] + sorted(message.flags)))
-
-        if message.previous_id and include_previous:
-            _write_comment('msgid %s' % _normalize(message.previous_id[0]),
-                           prefix='|')
-            if len(message.previous_id) > 1:
-                _write_comment('msgid_plural %s' % _normalize(
-                    message.previous_id[1]
-                ), prefix='|')
-
-        _write_message(message)
-        _write('\n')
-
-    if not ignore_obsolete:
-        for message in _sort_messages(
-            catalog.obsolete.values(),
-            sort_by=sort_by
-        ):
-            for comment in message.user_comments:
-                _write_comment(comment)
-            _write_message(message, prefix='#~ ')
-            _write('\n')
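A minimal end-to-end sketch of the writer above, assuming the Catalog.add API used in the doctest; expected output is shown as comments. It also illustrates why the flags line is built with ', '.join([''] + sorted(message.flags)): the leading empty element produces the '#, ' prefix.

from io import BytesIO
from babel.messages.catalog import Catalog
from babel.messages.pofile import write_po

catalog = Catalog()
catalog.add(u'zebra', locations=[('b.py', 2)])
catalog.add(u'apple', locations=[('a.py', 1)], flags=('fuzzy',))

buf = BytesIO()
write_po(buf, catalog, omit_header=True, sort_output=True)
print(buf.getvalue().decode('utf-8'))
# expected output (msgids sorted, one blank line after each entry):
# #: a.py:1
# #, fuzzy
# msgid "apple"
# msgstr ""
#
# #: b.py:2
# msgid "zebra"
# msgstr ""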
-
-
-def _sort_messages(messages, sort_by):
-    """
-    Sort the given message iterable by the given criterion.
-
-    Always returns a list.
-
-    :param messages: An iterable of Messages.
-    :param sort_by: The sort criterion; one of `message` or `location`.
-    :return: list[Message]
-    """
-    messages = list(messages)
-    if sort_by == "message":
-        messages.sort()
-    elif sort_by == "location":
-        messages.sort(key=lambda m: m.locations)
-    return messages
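A small illustrative sketch of the two sort modes, using this module-private helper directly; expected values are shown as comments. Note that iterating a Catalog also yields the header message, whose empty id and empty location list sort first under both criteria.

from babel.messages.catalog import Catalog
from babel.messages.pofile import _sort_messages

catalog = Catalog()
catalog.add(u'zebra', locations=[('a.py', 10)])
catalog.add(u'apple', locations=[('z.py', 1)])

[m.id for m in _sort_messages(catalog, sort_by="message")]
# expected: [u'', u'apple', u'zebra']
[m.id for m in _sort_messages(catalog, sort_by="location")]
# expected: [u'', u'zebra', u'apple']   ('a.py', 10) sorts before ('z.py', 1)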