"""The API for interacting with code review data."""
from __future__ import with_statement
import base64, datetime, operator, os
import messages
from rutil import fromlocal
from mercurial import cmdutil, error, hg, patch, util
from mercurial.node import hex
from mercurial import ui as _ui
from mercurial import demandimport
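# json must be exempted from Mercurial's lazy importer so that the try/except
# fallback below actually sees an ImportError when json is missing.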
demandimport.ignore.append('json')
try:
from os.path import relpath
except ImportError: # python < 2.6
from os.path import curdir, abspath, sep, commonprefix, pardir, join
def relpath(path, start=curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
try:
import json
except ImportError:
import simplejson as json
DEFAULT_DATASTORE_DIRNAME = os.path.join('.hg', 'review')
class PreexistingDatastore(Exception):
"""Raised when trying to initialize a datastore when one seems to exist."""
def __init__(self, committed):
super(PreexistingDatastore, self).__init__()
self.committed = committed
class UninitializedDatastore(Exception):
"""Raised when trying to access a datastore that does not exist.
The committed attribute will be True if someone else has already
initialized code review for the target (i.e. the .hgreview file is present
in the target repository), or False otherwise.
"""
def __init__(self, committed):
super(UninitializedDatastore, self).__init__()
self.committed = committed
class DatastoreRequiresRemotePath(Exception):
"""Raised when initializing a fresh datastore without a remote path."""
pass
class RelativeRemotePath(Exception):
"""Raised when trying to use a relative remote path (for now)."""
pass
class SignoffExists(Exception):
"""Raised when trying to signoff twice."""
pass
class FileNotInChangeset(Exception):
"""Raised when trying to add a comment on a file not in the changeset."""
def __init__(self, filename):
super(FileNotInChangeset, self).__init__()
self.filename = filename
class AmbiguousIdentifier(Exception):
"""Raised when trying to specify an item with an identifier which matches more than one item."""
pass
class UnknownIdentifier(Exception):
"""Raised when trying to specify an item with an identifier which does not match any items."""
pass
class WrongEditItemType(Exception):
"""Raised when calling edit_comment with a signoff, or vice versa."""
pass
def _split_path_dammit(p):
"""Take a file path (from the current platform) and split it. Really.
os.path doesn't seem to have an easy way to say "Split this path into a
list of pieces."
>>> _split_path_dammit('')
[]
>>> _split_path_dammit('one')
['one']
>>> _split_path_dammit('one/two/three')
['one', 'two', 'three']
>>> _split_path_dammit('one/two/three/')
['one', 'two', 'three']
>>> _split_path_dammit('one/two/three.py')
['one', 'two', 'three.py']
"""
def _spd(p):
p, i = os.path.split(p)
while i or p:
yield i
p, i = os.path.split(p)
return filter(None, list(_spd(p)))[::-1]
def _parse_hgrf(hgrf):
"""Parse the .hgreview file and return the data inside.
The .hgreview file will be pulled from the tip revision of the given
repository. If it is not committed it will not be found!
"""
data = {}
hgrd = hgrf.data().split('\n')
lines = [line for line in hgrd if line.strip()]
for line in lines:
label, _, path = [i.strip() for i in line.partition('=')]
if label == 'remote':
data['rpath'] = path
return data
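# Illustrative sketch (not executed): a committed .hgreview file is a simple
# "label = value" text file, and _parse_hgrf picks out the remote path. The
# URL below is made up; the filectx is assumed to come from a head of the
# target repository, as ReviewDatastore.__init__ does.
#
#     # .hgreview contents:
#     #     remote = http://example.com/project-review
#     head = repo[repo.heads()[0]]
#     _parse_hgrf(head['.hgreview'])   # -> {'rpath': 'http://example.com/project-review'}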
def _commitfunc(ui, repo, message, match, opts):
"""A function used by the guts of Mercurial.
Mercurial needs a "commit function" parameter when using cmdutil.commit.
This is a simple function for *only* that purpose.
"""
return repo.commit(message, opts.get('user'), opts.get('date'), match)
def _parse_data(data):
"""Parse the data (string) of a stored _ReviewObject and return a dict."""
result = {}
for k, v in json.loads(data).iteritems():
result[k.encode('UTF-8')] = v
result['node'] = result['node'].encode('UTF-8')
result['style'] = result['style'].encode('UTF-8')
if 'lines' in result:
result['lines'] = map(int, result['lines'])
return result
def _datetime_from_hgdate(hgdate):
"""Return a datetime.datetime for the given Mecurial-style date tuple.
It will have NO timezone information -- the date and time are what a clock
next to the current computer would have read at the instant represented
by the Mercurial-style date!
"""
    # A positive difference means the comment's timezone is further west than
    # this machine's, so the local clock here read a later time at that instant.
    offset = hgdate[1] - util.makedate()[1]
    later = offset > 0
    offset = datetime.timedelta(seconds=abs(offset))
conversion_format = '%Y-%m-%d %H:%M:%S'
bare = util.datestr(hgdate, format=conversion_format)
bare_datetime = datetime.datetime.strptime(bare, conversion_format)
if later:
return bare_datetime + offset
else:
return bare_datetime - offset
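# Illustrative sketch (not executed): hgdate tuples are (seconds since the
# epoch, seconds west of UTC). The values below are made up.
#
#     hgdate = (1300000000, 18000)      # recorded in a UTC-5 timezone
#     _datetime_from_hgdate(hgdate)     # -> naive datetime in this machine's local time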
def _flatten_filter(i):
return filter(None, reduce(operator.add, i, []))
def sanitize_path(p, repo=None):
"""Sanitize a (platform-specific) path.
If no repository is given, the path's separators will be replaced with
forward slashes (the form Mercurial uses internally).
If a repository is given, the result will be relative to the root of the
repository. This is useful for turning relative paths into normalized
paths that can be used to look up files from a changectx.
This function is idempotent. If you sanitize a path multiple times
against the same repository the result will not change.
"""
if repo:
p = relpath(os.path.realpath(p), start=repo.root)
return '/'.join(_split_path_dammit(p))
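# Illustrative sketch (not executed): typical uses of sanitize_path. The paths
# are made up; the second call assumes the working directory is repo.root.
#
#     sanitize_path('src\\module.py')           # on Windows -> 'src/module.py'
#     sanitize_path('./src/module.py', repo)    # -> 'src/module.py', relative to repo.root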
class ReviewDatastore(object):
"""The code review data for a particular repository."""
def __init__(self, ui, repo, lpath=None, rpath=None, create=False,
clone_message=False):
"""Initialize a ReviewDatastore for a Mercurial repository.
To get a ReviewDatastore for a repository that has already been
initialized for code reviewing:
review_data = ReviewDatastore(ui, repo)
To set up a repository to support code review (which will either create
a new repo if this has not been done by someone, or clone down the data
repo if it has):
review_data = ReviewDatastore(ui, repo, create=True)
If you want to specify your own path to the code review repository for
this repo, pass the FULL path to the repository as the lpath parameter.
Error handling is a bit tricky at the moment. I need to refactor
and/or document this.
"""
self.ui = ui
self.target = repo
self.lpath = lpath or os.path.join(self.target.root, DEFAULT_DATASTORE_DIRNAME)
hgrd = None
for head in (repo[h] for h in repo.heads()):
if '.hgreview' in head:
hgrd = _parse_hgrf(head['.hgreview'])
break
if not create:
if not hgrd:
raise UninitializedDatastore(False)
self.rpath = hgrd['rpath']
try:
self.repo = hg.repository(_ui.ui(), self.lpath)
except error.RepoError:
raise UninitializedDatastore(True)
return
if hgrd:
self.rpath = hgrd['rpath']
if self.rpath.startswith('.'):
raise RelativeRemotePath
try:
hg.repository(ui, self.lpath)
except error.RepoError:
if clone_message:
ui.write(messages.INIT_CLONE_MESSAGE % self.rpath)
hg.clone(hg.remoteui(self.ui, {}), self.rpath, self.lpath)
else:
raise PreexistingDatastore(True)
elif os.path.exists(os.path.join(self.target.root, '.hgreview')):
raise PreexistingDatastore(False)
else:
if not rpath:
raise DatastoreRequiresRemotePath
elif rpath.startswith('.'):
raise RelativeRemotePath
else:
self.rpath = rpath
with open(os.path.join(self.target.root, '.hgreview'), 'w') as hgrf:
hgrf.write('remote = %s\n' % self.rpath)
self.target[None].add(['.hgreview'])
self.repo = hg.repository(ui, self.lpath, create)
def __getitem__(self, rev):
"""Return a ReviewChangeset for the given revision."""
node = hex(self.target[str(rev)].node())
return ReviewChangeset(self.ui, self.repo, self.target, node)
def reviewed_changesets(self):
"""Return a list of all the ReviewChangesets in the data store."""
hashes = []
for fname in os.listdir(self.repo.root):
if os.path.isdir(os.path.join(self.repo.root, fname)):
try:
self.target[fname]
hashes.append(self[fname])
except error.RepoLookupError:
pass
return hashes
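    # Illustrative sketch (not executed): walking every reviewed changeset in a
    # datastore. rd is assumed to be an already-constructed ReviewDatastore.
    #
    #     for rcset in rd.reviewed_changesets():
    #         print rcset.node, len(rcset.comments), len(rcset.signoffs)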
def get_items(self, identifier):
"""Return the comments and signoffs which match the given identifier.
WARNING: This is going to be slow. Send patches.
"""
rcsets = self.reviewed_changesets()
comments = _flatten_filter(rcset.comments for rcset in rcsets)
signoffs = _flatten_filter(rcset.signoffs for rcset in rcsets)
return [i for i in comments + signoffs if i.identifier.startswith(identifier)]
def remove_item(self, identifier):
"""Remove a comment or signoff from this changeset."""
items = self.get_items(identifier)
if len(items) == 0:
raise UnknownIdentifier
elif len(items) > 1:
raise AmbiguousIdentifier
else:
items[0]._delete(self.ui, self.repo)
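    # Illustrative sketch (not executed): identifiers are sha1 hex names, so any
    # unique prefix works. The 'c0ffee' prefix is made up.
    #
    #     rd.get_items('c0ffee')    # every comment/signoff whose identifier starts with it
    #     rd.remove_item('c0ffee')  # raises UnknownIdentifier/AmbiguousIdentifier if it matches none/several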
def edit_comment(self, identifier, message=None, ufilename=None, filename=None,
lines=None, style=None):
olds = self.get_items(identifier)
if len(olds) == 0:
raise UnknownIdentifier
elif len(olds) > 1:
raise AmbiguousIdentifier
old = olds[0]
if old.itemtype != 'comment':
raise WrongEditItemType()
filename = filename if filename is not None else old.filename
if filename and filename not in self.target[old.node].files():
raise FileNotInChangeset(filename)
old.hgdate = util.makedate()
old.ufilename = ufilename if ufilename is not None else fromlocal(filename)
old.filename = filename
old.lines = lines if lines is not None else old.lines
old.message = message if message is not None else old.message
old.style = style if style is not None else old.style
old._rename(self.ui, self.repo, old.identifier)
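    # Illustrative sketch (not executed): editing an existing comment. Only the
    # fields passed are changed; the identifier prefix and message are made up.
    #
    #     rd.edit_comment('c0ffee', message='Never mind, this looks fine.')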
def edit_signoff(self, identifier, message=None, opinion=None, style=None):
olds = self.get_items(identifier)
if len(olds) == 0:
raise UnknownIdentifier
elif len(olds) > 1:
raise AmbiguousIdentifier
old = olds[0]
if old.itemtype != 'signoff':
raise WrongEditItemType()
old.hgdate = util.makedate()
old.opinion = opinion if opinion is not None else old.opinion
old.message = message if message is not None else old.message
old.style = style if style is not None else old.style
old._rename(self.ui, self.repo, old.identifier)
class ReviewChangeset(object):
"""The review data about one changeset in the target repository.
Individual changesets can be retrieved from a ReviewDatastore.
Each ReviewChangeset stores a list of ReviewComment objects and a list
of ReviewSignoff objects:
rcset = rd['tip']
rcset.comments
rcset.signoffs
Comments and signoffs should be added to a changeset by using the
add_comment and add_signoff methods:
rcset = rd['tip']
rcset.add_comment(...)
rcset.add_signoff(...)
    Diffs for files modified in a changeset can be retrieved with the diffs
and full_diffs methods. See the docs of those methods for more info.
"""
def _load_comment_file(self, filename):
data = _parse_data(self.repo['tip'][filename].data())
data['hgdate'] = util.parsedate(data['hgdate'])
data['identifier'] = _split_path_dammit(filename)[-1]
data['ufilename'] = data['file'][0]
data['filename'] = base64.b64decode(data['file'][1]) if data['file'] else ''
return ReviewComment(**data)
def _load_signoff_file(self, filename):
data = _parse_data(self.repo['tip'][filename].data())
data['hgdate'] = util.parsedate(data['hgdate'])
data['identifier'] = _split_path_dammit(filename)[-1]
return ReviewSignoff(**data)
def __init__(self, ui, repo, target, node):
"""Initialize a ReviewChangeset.
You shouldn't need to create these directly -- use a ReviewDatastore
object to get them:
review_data = ReviewDatastore(ui, repo)
tip_review_data = review_data['tip']
"""
self.repo = repo
self.target = target
self.ui = ui
self.node = node
if '%s/.exists' % self.node in self.repo['tip']:
_match = lambda p: lambda fn: fn.startswith(p)
relevant = filter(_match(node), self.repo['tip'])
commentfns = filter(_match('%s/comments' % node), relevant)
signofffns = filter(_match('%s/signoffs' % node), relevant)
self.comments = [self._load_comment_file(fn) for fn in commentfns]
self.comments.sort(key=operator.attrgetter('local_datetime'))
self.signoffs = [self._load_signoff_file(fn) for fn in signofffns]
self.signoffs.sort(key=operator.attrgetter('local_datetime'))
else:
self.comments = []
self.signoffs = []
path = os.path.join(self.repo.root, self.node)
os.mkdir(path)
with open(os.path.join(path, '.exists'), 'w') as e:
pass
cmdutil.commit(ui, self.repo, _commitfunc,
[os.path.join(path, '.exists')],
{ 'message': 'Initialize review data for changeset %s' % self.node,
'addremove': True, })
def signoffs_for_user(self, username):
return filter(lambda s: s.author == username, self.signoffs)
def signoffs_for_current_user(self):
return self.signoffs_for_user(self.ui.username())
def add_signoff(self, message, opinion='', style=''):
"""Add (and commit) a signoff for the given revision.
The opinion argument should be 'yes', 'no', or ''.
If a signoff from the user already exists a SignoffExists exception
will be raised.
"""
existing = self.signoffs_for_current_user()
if existing:
raise SignoffExists
signoff = ReviewSignoff(self.ui.username(), util.makedate(),
self.node, opinion, message, style)
signoff._commit(self.ui, self.repo)
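    # Illustrative sketch (not executed): signing off on the tip changeset. A
    # second signoff by the same user raises SignoffExists.
    #
    #     rcset = rd['tip']
    #     rcset.add_signoff('Looks good to me.', opinion='yes')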
def add_comment(self, message, ufilename=u'', filename='', lines=[], style=''):
"""Add (and commit) a comment for the given file and lines.
The filename should be normalized to the format Mercurial expects,
that is: relative to the root of the repository and using forward
slashes as the separator. Paths can be converted with the
sanitize_path function in this module.
If the comment is on one or more lines, a filename *must* be given.
Line numbers should be passed as a list, even if there is only one.
See the full_diffs function for how to refer to line numbers.
"""
if filename and filename not in self.target[self.node].files():
raise FileNotInChangeset(filename)
if filename and not ufilename:
ufilename = fromlocal(filename)
comment = ReviewComment(self.ui.username(), util.makedate(),
self.node, ufilename, filename, map(int, lines), message, style)
comment._commit(self.ui, self.repo)
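    # Illustrative sketch (not executed): commenting on two lines of a file in
    # the changeset. The filename is made up; the line numbers index into the
    # full diff returned by full_diffs below.
    #
    #     fname = sanitize_path('src/module.py', repo)
    #     rcset.add_comment('This looks wrong.', filename=fname, lines=[12, 13])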
    def full_diffs(self, filenames=None, opts=None):
"""Return full diffs of the given files (or all files).
If the filenames argument is not used, diffs for every file in the
changeset will be returned.
The diffs are returned as a dictionary in the form:
{ 'filename': 'string of the diff' }
        All headers are stripped, so an entire diff looks like this:
unchanged line
unchanged line
-removed line
-removed line
+added line
unchanged line
-removed line
-removed line
unchanged line
unchanged line
When adding a comment, the line number given should be the line
number from this diff (starting at 0). To comment on the first two
removed lines in the above example you would pass [2, 3].
"""
target_files = self.target[self.node].files()
if not filenames:
filenames = target_files
else:
filenames = filter(lambda f: self.has_diff(f), filenames)
        # Copy so the caller's dict is never mutated, then force full context.
        opts = dict(opts) if opts else {}
        opts['unified'] = '100000'
node2 = self.node
node1 = self.target[node2].parents()[0].node()
diffs = {}
for filename in filenames:
m = cmdutil.matchfiles(self.target, [filename])
diff_opts = patch.diffopts(self.ui, opts)
diff_opts.git = True
d = patch.diff(self.target, node1, node2, match=m, opts=diff_opts)
# patch.diff will give us back a generator with two items
# the first is the diff --git header, which we don't care about
d.next()
# the second is the diff's contents, which is what we want,
# minus the header
try:
diffs[filename] = '\n'.join(d.next().splitlines()[3:])
except StopIteration:
# This is an add, remove, or something else without a diff body.
diffs[filename] = None
return diffs
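    # Illustrative sketch (not executed): printing one file's full diff with the
    # line numbers that add_comment expects. The filename is made up.
    #
    #     body = rcset.full_diffs(['src/module.py'])['src/module.py']
    #     if body is not None:                  # None for adds/removes with no body
    #         for n, line in enumerate(body.splitlines()):
    #             print n, line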
def diffs(self, filenames=None, context=5):
"""Return a mapping of diff lines for the given files (or all).
If the filenames argument is not used, diffs for every file in the
changeset will be returned.
The diffs are returned in a dictionary of the form:
{
'filename': {
# the line number of the last line of the FULL diff
'max': 90,
# A sorted list of tuples of (line_number, line_content)
'content': [
(10, ' context line'),
(11, ' context line'),
(12, '-removed line'),
(13, '+added line'),
(14, ' context line'),
(15, ' context line'),
(39, ' context line'),
(40, ' context line'),
(41, '-removed line'),
(42, '+added line'),
(43, ' context line'),
(44, ' context line'),
],
},
}
There's a lot of structure there, but it will provide everything you
need to display contextualized diffs.
"""
ds = self.full_diffs(filenames, {})
def _filter_diff(d):
for n, line in enumerate(d):
start = n - context if n > context else 0
end = n + context + 1
                if any(l[0] in '+-' for l in d[start:end]):
yield (n, line)
for filename, content in ds.iteritems():
if content:
content = content.splitlines()
ds[filename] = {
'max': len(content) - 1,
'content': list(_filter_diff(content)),
}
else:
ds[filename] = {'max': None, 'content': []}
return ds
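    # Illustrative sketch (not executed): rendering the contextualized diffs for
    # every file in the changeset.
    #
    #     for fname, diff in rcset.diffs(context=3).iteritems():
    #         print fname, '(last line: %s)' % diff['max']
    #         for number, line in diff['content']:
    #             print '%5s %s' % (number, line)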
def annotated_diff(self, filename, context=5):
"""Return a generator that yields annotated lines of a diff.
The first item yielded will be a simple integer of the last line
number of the diff. This is ugly but useful when creating monospaced
line-number-prefixed output.
Each line yielded will be of the form:
{
# If 'skipped' is not None, this line is a "skip" line, which
# represents a group of lines that were skipped due to context.
'skipped': 23,
# The line number of this line, or None for skip lines.
'number': 585,
# The actual content of this line, or None for skip lines.
'content': '+added line',
# Any comments that apply to this line.
# If the line is a skip line, this will be any comments that apply
# to any line in the skipped group.
'comments': [ReviewComment(), ReviewComment()],
}
"""
diffs = self.diffs([filename], context).values()
if not diffs:
return
diff = diffs[0]
max_line, content = diff['max'], diff['content']
line_level_comments = self.line_level_comments(filename)
previous_n = -1
if content:
yield content[-1][0]
else:
yield 0
for n, line in content:
if n - 1 > previous_n:
yield {
'skipped': (n - previous_n) - (previous_n == -1 and 1 or 0),
'number': None, 'content': None,
'comments': filter(
lambda c: max(c.lines) in range(previous_n + 1, n),
line_level_comments
),
}
yield {
'skipped': None,
'number': n, 'content': line,
'comments': filter(
lambda c: max(c.lines) == n, line_level_comments
)
}
previous_n = n
if previous_n < max_line:
yield {
'skipped': max_line - previous_n,
'number': None, 'content': None,
'comments': filter(
lambda c: max(c.lines) in range(previous_n + 1, max_line),
line_level_comments
),
}
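    # Illustrative sketch (not executed): consuming annotated_diff. The first
    # item yielded is the highest line number; the rest are dicts. The filename
    # is made up.
    #
    #     lines = rcset.annotated_diff('src/module.py', context=3)
    #     width = len(str(lines.next()))
    #     for line in lines:
    #         if line['skipped'] is not None:
    #             print '... %d lines skipped ...' % line['skipped']
    #         else:
    #             print str(line['number']).rjust(width), line['content']
    #             for comment in line['comments']:
    #                 print ' ' * width, '>', comment.message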
def has_diff(self, filename):
"""Return whether the given filename has a diff in this revision."""
return filename in self.files()
def files(self):
"""Return the list of files in the revision for this ReviewChangeset."""
return self.target[self.node].files()
def unicode_files(self):
"""Returns a tuple of files in the revision for this ReviewChangeset.
Each element is a pair of strings, the first is a unicode string of the
filename, the second is a bytestring of the filename (which Mercurial
will want).
"""
return [(fromlocal(f), f) for f in self.target[self.node].files()]
def review_level_comments(self):
"""Comments on this changeset which aren't on a particular file."""
return filter(lambda c: not c.filename, self.comments)
def file_level_comments(self, filename=None):
"""Comments on this changeset that are on a file, but not a line.
If a file is given only comments for that file will be returned.
"""
if filename:
return filter(
lambda c: filename == c.filename and not c.lines, self.comments
)
else:
return filter(
                lambda c: c.filename and not c.lines, self.comments
)
def line_level_comments(self, filename=None):
"""Comments on this changeset that are on a line of file.
If a file is given only comments for that file will be returned.
"""
if filename:
return filter(
lambda c: filename == c.filename and c.lines, self.comments
)
else:
return filter(
                lambda c: c.filename and c.lines, self.comments
)
class _ReviewObject(object):
"""A base object for some kind of review data (a signoff or comment)."""
def __init__(self, container, commit_message, delete_message, rename_message):
self.container = container
self.commit_message = commit_message
self.delete_message = delete_message
self.rename_message = rename_message
def _commit(self, ui, repo):
"""Write and commit this object to the given repo."""
path = os.path.join(repo.root, self.node, self.container)
if not os.path.exists(path):
os.mkdir(path)
data = self._render_data()
filename = util.sha1(data).hexdigest()
objectpath = os.path.join(path, filename)
with open(objectpath, 'w') as objectfile:
objectfile.write(data)
cmdutil.commit(ui, repo, _commitfunc, [objectpath],
{ 'message': self.commit_message % self.node, 'addremove': True, })
def _delete(self, ui, repo):
"""Delete and commit this object in the given repo."""
data = self._render_data()
filename = util.sha1(data).hexdigest()
objectpath = os.path.join(repo.root, self.node, self.container, filename)
os.remove(objectpath)
cmdutil.commit(ui, repo, _commitfunc, [objectpath],
{ 'message': self.delete_message % self.node, 'addremove': True, })
def _rename(self, ui, repo, identifier):
"""Commit this object in the given repo and mark it as a rename of identifier."""
data = self._render_data()
newidentifier = util.sha1(data).hexdigest()
newpath = os.path.join(repo.root, self.node, self.container, newidentifier)
oldpath = os.path.join(repo.root, self.node, self.container, identifier)
if oldpath == newpath:
# Nothing has changed. This is probably from a "touch" edit made
# within the same second as the previous modification time.
return
wlock = repo.wlock(False)
try:
cmdutil.copy(ui, repo, [oldpath, newpath], {'force': True}, rename=True)
finally:
wlock.release()
with open(newpath, 'w') as objectfile:
objectfile.write(data)
cmdutil.commit(ui, repo, _commitfunc, [oldpath, newpath],
{ 'message': self.rename_message % self.node })
self.identifier = newidentifier
@property
def local_datetime(self):
return _datetime_from_hgdate(self.hgdate)
class ReviewComment(_ReviewObject):
"""A single review comment.
A list of comments can be retrieved from a ReviewChangeset.
The following pieces of information are stored for comments:
comment = rcset.comments[0]
comment.author U
comment.hgdate
comment.node
comment.ufilename U
comment.filename
comment.lines
comment.local_datetime
comment.message U
comment.style
comment.identifier
comment.itemtype
    Each item is a string, except for lines, hgdate, and local_datetime.
    Fields marked with a U above are unicode strings.
lines is a list of ints.
hgdate is a tuple of (seconds from the epoch, seconds offset from UTC),
which is the format Mercurial itself uses internally.
local_datetime is a datetime object representing what a clock on the wall
next to the current computer would have read at the instant the comment
was added.
ufilename is a unicode string representing the filename.
filename is a byte string representing the filename.
"""
def __init__(self, author, hgdate, node, ufilename, filename, lines,
message, style='', identifier=None, **extra):
"""Initialize a ReviewComment.
You shouldn't need to create these directly -- use a ReviewChangeset
to add comments and retrieve existing ones:
review_data = ReviewDatastore(ui, repo)
tip_review = review_data['tip']
tip_review.add_comment(...)
tip_comments = tip_review.comments
"""
super(ReviewComment, self).__init__(container='comments',
commit_message=messages.COMMIT_COMMENT,
delete_message=messages.DELETE_COMMENT,
rename_message=messages.RENAME_COMMENT)
self.author = author
self.hgdate = hgdate
self.node = node
self.ufilename = ufilename
self.filename = filename
self.lines = lines
self.message = message
self.style = style
self.identifier = identifier
self.itemtype = 'comment'
def _render_data(self):
"""Render the data of this comment into a string for writing to disk.
You probably don't need to call this directly, the add_comment method
of a ReviewChangeset will handle it for you.
"""
return json.dumps({ 'author': self.author, 'node': self.node,
'hgdate': util.datestr(self.hgdate),
'file': [self.ufilename, base64.b64encode(self.filename)],
'lines': self.lines,
'style': self.style, 'message': self.message
}, indent=4, sort_keys=True)
def __str__(self):
"""Stringify this comment for easy printing (for debugging)."""
return '\n'.join(map(str, [
self.author,
self.hgdate,
self.node,
self.filename,
self.lines,
self.style,
self.message,
'\n',
]))
class ReviewSignoff(_ReviewObject):
"""A single review signoff.
A list of signoffs can be retrieved from a ReviewChangeset.
The following pieces of information are stored for signoffs:
        signoff = rcset.signoffs[0]
signoff.author
signoff.hgdate
signoff.node
signoff.opinion
signoff.local_datetime
signoff.message
signoff.style
signoff.identifier
signoff.itemtype
Each item is a string, except for hgdate and local_datetime.
hgdate is a tuple of (seconds from the epoch, seconds offset from UTC),
which is the format Mercurial itself uses internally.
local_datetime is a datetime object representing what a clock on the wall
next to the current computer would have read at the instant the signoff
was added.
"""
def __init__(self, author, hgdate, node, opinion, message,
style='', identifier=None, **extra):
"""Initialize a ReviewSignoff.
You shouldn't need to create these directly -- use a ReviewChangeset
to add signoffs and retrieve existing ones:
review_data = ReviewDatastore(ui, repo)
tip_review = review_data['tip']
tip_review.add_signoff(...)
tip_signoffs = tip_review.signoffs
"""
super(ReviewSignoff, self).__init__(container='signoffs',
commit_message=messages.COMMIT_SIGNOFF,
delete_message=messages.DELETE_SIGNOFF,
rename_message=messages.RENAME_SIGNOFF)
self.author = author
self.hgdate = hgdate
self.node = node
self.opinion = opinion
self.message = message
self.style = style
self.identifier = identifier
self.itemtype = 'signoff'
def _render_data(self):
"""Render the data of this signoff into a string for writing to disk.
You probably don't need to call this directly, the add_signoff method
of a ReviewChangeset will handle it for you.
"""
return json.dumps({ 'author': self.author, 'node': self.node,
'hgdate': util.datestr(self.hgdate),
'opinion': self.opinion, 'style': self.style,
'message': self.message,
}, indent=4, sort_keys=True)