Re: [Maria-developers] c79c6f78205: Added typedef decimal_digits_t (uint16) for number of decimals
Hi, Michael!

On Sep 08, Michael Widenius wrote:
revision-id: c79c6f78205 (mariadb-10.5.2-268-gc79c6f78205)
parent(s): bf8bd057344
author: Michael Widenius <michael.widenius@gmail.com>
committer: Michael Widenius <michael.widenius@gmail.com>
timestamp: 2020-09-02 20:58:33 +0300
message:
Added typedef decimal_digits_t (uint16) for number of decimals
For fields and Items, uint8 should be good enough. After discussions with Alexander Barkov we chose uint16 (for now), as some format functions may accept more than 256 digits.
The reason for this patch was to make the storage of decimal digits similar. Before this patch, decimals was stored/used as uint8, int and uint.
Changed most decimal variables and functions to use the new typedef.
diff --git a/include/decimal.h b/include/decimal.h
index cab18f99348..f713409ede1 100644
--- a/include/decimal.h
+++ b/include/decimal.h
@@ -53,10 +53,11 @@ int decimal2double(const decimal_t *from, double *to);
 int double2decimal(double from, decimal_t *to);
 int decimal_actual_fraction(const decimal_t *from);
 int decimal2bin(const decimal_t *from, uchar *to, int precision, int scale);
-int bin2decimal(const uchar *from, decimal_t *to, int precision, int scale);
+int bin2decimal(const uchar *from, decimal_t *to, int precision,
+                decimal_digits_t scale);
I don't understand. Is it the type for precision (max number of digits in a decimal number) or for scale (max number of digits after the dot)? If the former, why did you not change the type of the precision argument? If the latter, why is it not called decimal_scale_t? Personally, I think the former, using decimal_digits_t for both precision and scale, should be fine.
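For illustration, what I mean is roughly this; just a sketch of the suggested signatures (uchar and decimal_t stand in for the real definitions in the server headers), not the committed code:

  typedef unsigned char  uchar;       /* as in the server headers */
  typedef unsigned short uint16;
  typedef uint16 decimal_digits_t;
  struct decimal_t;                   /* defined in include/decimal.h */

  /* one typedef for both precision and scale */
  int decimal2bin(const decimal_t *from, uchar *to,
                  decimal_digits_t precision, decimal_digits_t scale);
  int bin2decimal(const uchar *from, decimal_t *to,
                  decimal_digits_t precision, decimal_digits_t scale);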
-int decimal_size(int precision, int scale);
-int decimal_bin_size(int precision, int scale);
+int decimal_size(int precision, decimal_digits_t scale);
+int decimal_bin_size(int precision, decimal_digits_t scale);
 int decimal_result_size(decimal_t *from1, decimal_t *from2, char op,
                         int param);
diff --git a/include/m_ctype.h b/include/m_ctype.h
index 59ac7814aee..34821f76474 100644
--- a/include/m_ctype.h
+++ b/include/m_ctype.h
@@ -79,6 +79,7 @@ typedef const struct my_collation_handler_st MY_COLLATION_HANDLER;
 typedef const struct unicase_info_st MY_UNICASE_INFO;
 typedef const struct uni_ctype_st MY_UNI_CTYPE;
 typedef const struct my_uni_idx_st MY_UNI_IDX;
+typedef uint16 decimal_digits_t;
A bit strange to see it here and not in decimal.h above.
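i.e. I'd expect something like this (a sketch, assuming nothing in decimal.h forces the typedef to live elsewhere):

  /* include/decimal.h: keep the typedef next to the API that uses it */
  typedef uint16 decimal_digits_t;

  int bin2decimal(const uchar *from, decimal_t *to, int precision,
                  decimal_digits_t scale);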
 typedef struct unicase_info_char_st
 {

diff --git a/plugin/type_inet/sql_type_inet.h b/plugin/type_inet/sql_type_inet.h
index 8c42431ccaa..df74b91172a 100644
--- a/plugin/type_inet/sql_type_inet.h
+++ b/plugin/type_inet/sql_type_inet.h
@@ -402,19 +402,19 @@ class Type_handler_inet6: public Type_handler
   bool can_return_time() const override { return false; }
   bool convert_to_binary_using_val_native() const override { return true; }
-  uint Item_time_precision(THD *thd, Item *item) const override
+  decimal_digits_t Item_time_precision(THD *thd, Item *item) const override
Here you return the precision using the decimal_digits_t type, so it seems that it should apply to both precision and scale.
   { return 0; }
-  uint Item_datetime_precision(THD *thd, Item *item) const override
+  decimal_digits_t Item_datetime_precision(THD *thd, Item *item) const override
   { return 0; }
-  uint Item_decimal_scale(const Item *item) const override
+  decimal_digits_t Item_decimal_scale(const Item *item) const override
   { return 0; }
-  uint Item_decimal_precision(const Item *item) const override
+  decimal_digits_t Item_decimal_precision(const Item *item) const override
   {
     /*
       This will be needed if we ever allow cast from INET6 to DECIMAL.

diff --git a/sql/field.cc b/sql/field.cc
index f50ddec1c80..a5d00dd6fd5 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -3242,10 +3242,11 @@ Field *Field_decimal::make_new_field(MEM_ROOT *root, TABLE *new_table,
 ** Field_new_decimal
 ****************************************************************************/
-static uint get_decimal_precision(uint len, uint8 dec, bool unsigned_val)
+static decimal_digits_t get_decimal_precision(uint len, decimal_digits_t dec,
+                                              bool unsigned_val)
 {
   uint precision= my_decimal_length_to_precision(len, dec, unsigned_val);
shouldn't my_decimal_length_to_precision() return a decimal_digits_t result?
-  return MY_MIN(precision, DECIMAL_MAX_PRECISION);
+  return (decimal_digits_t) MY_MIN(precision, DECIMAL_MAX_PRECISION);
 }
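Something like this sketch is what I have in mind (hypothetical, not the patched code; MY_MIN and DECIMAL_MAX_PRECISION stand in for the definitions in the server headers):

  typedef unsigned short decimal_digits_t;
  #define MY_MIN(a, b) ((a) < (b) ? (a) : (b))
  #define DECIMAL_MAX_PRECISION 38

  /* if the helper returned the typedef itself ... */
  decimal_digits_t my_decimal_length_to_precision(unsigned len,
                                                  decimal_digits_t dec,
                                                  bool unsigned_val);

  static decimal_digits_t get_decimal_precision(unsigned len,
                                                decimal_digits_t dec,
                                                bool unsigned_val)
  {
    decimal_digits_t precision=
        my_decimal_length_to_precision(len, dec, unsigned_val);
    /* MY_MIN still yields int after integral promotion, but the value
       is known to fit, so this is the only narrowing spot left */
    return (decimal_digits_t) MY_MIN(precision, DECIMAL_MAX_PRECISION);
  }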
 Field_new_decimal::Field_new_decimal(uchar *ptr_arg,

diff --git a/sql/item.h b/sql/item.h
index 4cdee637415..948bb85f253 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -1644,14 +1644,14 @@ class Item: public Value_source,
     return type_handler()->Item_decimal_precision(this);
   }
   /* Returns the number of integer part digits only */
-  inline int decimal_int_part() const
-  { return my_decimal_int_part(decimal_precision(), decimals); }
+  inline decimal_digits_t decimal_int_part() const
+  { return (decimal_digits_t) my_decimal_int_part(decimal_precision(), decimals); }
again, shouldn't my_decimal_int_part() return decimal_digits_t?
   /*
     Returns the number of fractional digits only.
     NOT_FIXED_DEC is replaced to the maximum possible number of fractional
     digits, taking into account the data type.
   */
-  uint decimal_scale() const
+  decimal_digits_t decimal_scale() const
   {
     return type_handler()->Item_decimal_scale(this);
   }
@@ -4856,7 +4856,7 @@ class Item_temporal_literal :public Item_literal
     Item_literal(thd)
   {
     collation= DTCollation_numeric();
-    decimals= dec_arg;
+    decimals= (decimal_digits_t) dec_arg;
shouldn't dec_arg be decimal_digits_t?
     cached_time= *ltime;
   }
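i.e. with the parameter itself typed as decimal_digits_t, the assignment becomes same-type and needs no cast; a sketch with an abbreviated parameter list, not the actual constructor:

  Item_temporal_literal(THD *thd, const MYSQL_TIME *ltime,
                        decimal_digits_t dec_arg)
   :Item_literal(thd)
  {
    collation= DTCollation_numeric();
    decimals= dec_arg;            /* no cast needed */
    cached_time= *ltime;
  }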
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 8a75d2c9946..805647d2a04 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -2713,7 +2713,7 @@ my_decimal *Item_func_round::decimal_op(my_decimal *decimal_value)
     dec= INT_MIN;
   if (!(null_value= (value.is_null() || args[1]->null_value ||
-                     value.round_to(decimal_value, (uint) dec,
+                     value.round_to(decimal_value, (int) dec,
You don't need to cast uint16 to int explicitly
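uint16 converts to int implicitly under the usual arithmetic conversions, since every uint16 value fits in an int. A minimal standalone illustration (round_to_scale() is a made-up stand-in for round_to()):

  #include <cstdint>

  static int round_to_scale(int scale) { return scale; }  /* hypothetical */

  int main()
  {
    uint16_t dec= 30000;
    /* passes without any cast and without changing the value */
    return round_to_scale(dec) == 30000 ? 0 : 1;
  }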
                                    truncate ? TRUNCATE : HALF_UP) > 1)))
     return decimal_value;
   return 0;

diff --git a/sql/item_func.h b/sql/item_func.h
index 5b4acdce1c6..d5aa1477ff3 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -1369,10 +1369,10 @@ class Item_decimal_typecast :public Item_func
 {
   my_decimal decimal_value;
 public:
-  Item_decimal_typecast(THD *thd, Item *a, uint len, uint dec)
+  Item_decimal_typecast(THD *thd, Item *a, uint len, decimal_digits_t dec)
   :Item_func(thd, a)
   {
-    decimals= (uint8) dec;
+    decimals= (decimal_digits_t) dec;
you don't need a cast here
     collation= DTCollation_numeric();
     fix_char_length(my_decimal_precision_to_length_no_truncation(len, dec,
                                                                  unsigned_flag));

diff --git a/sql/my_decimal.cc b/sql/my_decimal.cc
index edf3e82de40..ac86ff71b64 100644
--- a/sql/my_decimal.cc
+++ b/sql/my_decimal.cc
@@ -198,7 +198,8 @@ str_set_decimal(uint mask, const my_decimal *val,
     E_DEC_OVERFLOW
 */
-int my_decimal::to_binary(uchar *bin, int prec, int scale, uint mask) const
+int my_decimal::to_binary(uchar *bin, int prec, decimal_digits_t scale,
again, shouldn't prec be decimal_digits_t too?
+                          uint mask) const
 {
   int err1= E_DEC_OK, err2;
   my_decimal rounded;
@@ -329,7 +330,7 @@ my_decimal *date2my_decimal(const MYSQL_TIME *ltime, my_decimal *dec)
 }
-void my_decimal_trim(ulonglong *precision, uint *scale)
+void my_decimal_trim(ulonglong *precision, decimal_digits_t *scale)
and here?
 {
   if (!(*precision) && !(*scale))
   {

diff --git a/sql/my_decimal.h b/sql/my_decimal.h
index 2db08bf01e3..9f7232f94b4 100644
--- a/sql/my_decimal.h
+++ b/sql/my_decimal.h
@@ -50,7 +50,7 @@ typedef struct st_mysql_time MYSQL_TIME;
 #define DECIMAL_MAX_FIELD_SIZE DECIMAL_MAX_PRECISION
-inline uint my_decimal_size(uint precision, uint scale)
+inline uint my_decimal_size(uint precision, decimal_digits_t scale)
and here? (and in many places below, that I'll stop commenting on)
 {
   /*
     Always allocate more space to allow library to put decimal point

diff --git a/sql/sql_type.cc b/sql/sql_type.cc
index 9e0f9b013c0..6569ea1a415 100644
--- a/sql/sql_type.cc
+++ b/sql/sql_type.cc
@@ -1197,11 +1198,12 @@ Type_numeric_attributes::aggregate_numeric_attributes_decimal(Item **item,
                                                               uint nitems,
                                                               bool unsigned_arg)
 {
-  int max_int_part= find_max_decimal_int_part(item, nitems);
+  decimal_digits_t max_int_part= find_max_decimal_int_part(item, nitems);
   decimals= find_max_decimals(item, nitems);
-  int precision= MY_MIN(max_int_part + decimals, DECIMAL_MAX_PRECISION);
+  decimal_digits_t precision= (decimal_digits_t)
why do you need a cast here?
+    MY_MIN(max_int_part + decimals, DECIMAL_MAX_PRECISION);
   max_length= my_decimal_precision_to_length_no_truncation(precision,
-                                                           (uint8) decimals,
+                                                           decimals,
                                                            unsigned_flag);
 }
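My guess is that max_int_part + decimals promotes both uint16 operands to int, so MY_MIN yields an int; but the implicit narrowing back to decimal_digits_t compiles just as well, so the cast only documents it. A standalone illustration, with MY_MIN written out as the usual ternary:

  #include <cstdint>

  #define MY_MIN(a, b) ((a) < (b) ? (a) : (b))
  #define DECIMAL_MAX_PRECISION 38
  typedef uint16_t decimal_digits_t;

  int main()
  {
    decimal_digits_t max_int_part= 20, decimals= 30;
    /* uint16 + uint16 is int, so MY_MIN(...) has type int; dropping
       the cast below would compile too, just less explicitly */
    decimal_digits_t precision=
        (decimal_digits_t) MY_MIN(max_int_part + decimals,
                                  DECIMAL_MAX_PRECISION);
    return precision == 38 ? 0 : 1;
  }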
@@ -6910,20 +6912,20 @@ bool Type_handler_string_result::
/***************************************************************************/
-uint Type_handler::Item_time_precision(THD *thd, Item *item) const
+decimal_digits_t Type_handler::Item_time_precision(THD *thd, Item *item) const
 {
   return MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS);
note, no cast here (good)
}
-uint Type_handler::Item_datetime_precision(THD *thd, Item *item) const
+decimal_digits_t Type_handler::Item_datetime_precision(THD *thd, Item *item) const
 {
   return MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS);
and not here
}
-uint Type_handler_string_result::Item_temporal_precision(THD *thd, Item *item,
-                                                         bool is_time) const
+decimal_digits_t Type_handler_string_result::
+Item_temporal_precision(THD *thd, Item *item, bool is_time) const
 {
   StringBuffer<64> buf;
   String *tmp;
@@ -6939,34 +6941,34 @@ uint Type_handler_string_result::Item_temporal_precision(THD *thd, Item *item,
       Datetime(thd, &status, tmp->ptr(), tmp->length(), tmp->charset(),
                Datetime::Options(TIME_FUZZY_DATES, TIME_FRAC_TRUNCATE)).
         is_valid_datetime()))
-    return MY_MIN(status.precision, TIME_SECOND_PART_DIGITS);
-  return MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS);
+    return (decimal_digits_t) MY_MIN(status.precision, TIME_SECOND_PART_DIGITS);
+  return (decimal_digits_t) MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS);
and here again. why? (and below too, I'll stop commenting on casts now)
}
diff --git a/sql/sql_type.h b/sql/sql_type.h
index a6d85b5bb47..7b1a4f730db 100644
--- a/sql/sql_type.h
+++ b/sql/sql_type.h
@@ -2955,9 +2955,10 @@ class Type_numeric_attributes
 class Type_temporal_attributes: public Type_numeric_attributes
 {
 public:
-  Type_temporal_attributes(uint int_part_length, uint dec, bool unsigned_arg)
+  Type_temporal_attributes(uint32 int_part_length, decimal_digits_t dec, bool unsigned_arg)
   :Type_numeric_attributes(int_part_length + (dec ? 1 : 0),
-                           MY_MIN(dec, TIME_SECOND_PART_DIGITS),
+                           MY_MIN(dec,
+                                  (decimal_digits_t) TIME_SECOND_PART_DIGITS),
this should not be necessary
                            unsigned_arg)
   {
     max_length+= decimals;
Regards,
Sergei
VP of MariaDB Server Engineering and security@mariadb.org