Mirror of https://github.com/gnustep/libs-base.git (synced 2025-04-23 00:41:02 +00:00)
tentative/partial improvement of 'precision' of c-strings (%s format)
git-svn-id: svn+ssh://svn.gna.org/svn/gnustep/libs/base/trunk@35625 72102866-910b-0410-8b05-ffd578937521
parent f1fb0822c9
commit 1d7830df31

2 changed files with 33 additions and 21 deletions
ChangeLog

@@ -1,3 +1,12 @@
+2012-10-03 Richard Frith-Macdonald <rfm@gnu.org>
+
+	* Source/GSFormat.m: for C-string formatting, avoid reading to the
+	nul terminator ... limit to a number of bytes equal to the precision
+	(if any) given in the format. This may be the wrong thing to do when
+	handling multibyte encodings (where a natural expectation is to have
+	precision mean number of characters), but seems legitimate going by
+	current xopen printf documentation.
+
 2012-10-03 Wolfgang Lux <wolfgang.lux@gmail.com>
 
 	* SSL/GSSSLHandle.m (-sslSetOptions): Apply string name change
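As background for the entry above: a minimal standalone C sketch (not part of
this commit) of the XOpen/POSIX printf rule it cites. For %s, the precision
limits the number of bytes read and written, and the argument need not be
nul-terminated when a precision is given; for multibyte (UTF-8) text the
limit therefore falls on bytes, not characters.

#include <stdio.h>

int main(void)
{
  char buf[4] = { 'a', 'b', 'c', 'd' };   /* deliberately not nul-terminated */

  printf("[%.3s]\n", buf);       /* "[abc]" - at most 3 bytes are read      */
  printf("[%.2s]\n", "hello");   /* "[he]"  - precision caps the output     */
  printf("[%.10s]\n", "hi");     /* "[hi]"  - still stops at the terminator */

  /* With UTF-8 input a byte-counted precision can split a character:
   * "\xc3\xa9\xc3\xa8" is "éè"; "%.3s" emits the 2 bytes of "é" plus one
   * stray lead byte of "è".  This is the ambiguity the ChangeLog entry and
   * the commit title ("tentative/partial") acknowledge.
   */
  printf("[%.3s]\n", "\xc3\xa9\xc3\xa8");
  return 0;
}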
Source/GSFormat.m

@@ -1758,37 +1758,40 @@ NSDictionary *locale)
           byteEncoding = GSPrivateIsByteEncoding(enc);
         }
 
-      len = strlen(str);        // Number of bytes to convert.
-      blen = len;               // Size of unichar output buffer.
-
-      if (prec != -1)
+      if (-1 == prec)
+        {
+          len = strlen(str);    // Number of bytes to convert.
+          blen = len;           // Size of unichar output buffer.
+        }
+      else
         {
-          if (prec < len)
-            {
-              /* We don't neeed an output buffer bigger than the
-               * precision specifies.
-               */
-              blen = prec;
-            }
           if (byteEncoding == YES)
             {
               /* Where the external encoding is one byte per character,
                * we know we don't need to convert more bytes than the
                * precision required for output.
                */
-              if (prec < len)
-                {
-                  len = prec;
+              len = 0;
+              while (len < prec && str[len] != 0)
+                {
+                  len++;
                 }
+              blen = len;
             }
-          else if (prec * 4 < len)
+          else
             {
-              /* We assume no multibyte encoding is going to use more
-               * than the maximum four bytes used by utf-8 for any
-               * unicode code point. So we do not need to convert
-               * more than four times the precision.
-               */
-              len = prec * 4;
+              /* FIXME ... it looks like modern standards mean that
+               * the number of *bytes* in an input string may not
+               * exceed the precision ... but that's unintuitive for
+               * input strings with multibyte characters, so we need
+               * to check and emulate OSX behavior.
+               */
+              len = 0;
+              while (len < prec && str[len] != 0)
+                {
+                  len++;
+                }
+              blen = len;
             }
         }
 
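The two new branches above share one idea; here is a minimal sketch of it as a
standalone function (the name bytes_to_convert is hypothetical, not from
GSFormat.m): count input bytes up to the precision or up to the nul terminator,
whichever comes first, so a long string is never scanned to its end.

#include <stddef.h>

static size_t
bytes_to_convert(const char *str, size_t prec)
{
  size_t len = 0;

  /* Stop at 'prec' bytes or at the terminator, whichever comes first. */
  while (len < prec && str[len] != 0)
    {
      len++;
    }
  return len;
}

/* e.g. bytes_to_convert("hello", 3) == 3, bytes_to_convert("hi", 10) == 2;
 * the result equals strlen() only when the string is shorter than prec.
 */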
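The FIXME added above notes the tension between byte-counted and
character-counted precision for multibyte input. Purely as an illustration of
the alternative reading (not a claim about what OSX does, and not code from
GSFormat.m; the helper name is invented), limiting by characters in UTF-8 would
look roughly like this, treating every byte that is not a 10xxxxxx continuation
byte as the start of a character:

#include <stddef.h>

static size_t
utf8_bytes_for_chars(const char *str, size_t prec)
{
  size_t nchars = 0;
  size_t len = 0;

  while (str[len] != 0)
    {
      if (((unsigned char)str[len] & 0xC0) != 0x80)
        {
          /* A lead byte starts a new character. */
          if (nchars == prec)
            {
              break;
            }
          nchars++;
        }
      len++;
    }
  return len;   /* bytes making up the first 'prec' characters */
}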