4470 overly aggressive D integer narrowing breaks 32-bit ustack helpers


The code in question is dt_cg_typecast(), the D compiler's cast/promotion code generator in libdtrace's dt_cg.c (lines 459-499 at the time of this report):

/*
 * Generate code for a typecast or for argument promotion from the type of the
 * actual to the type of the formal.  We need to generate code for casts when
 * a scalar type is being narrowed or changing signed-ness.  We first shift the
 * desired bits high (losing excess bits if narrowing) and then shift them down
 * using logical shift (unsigned result) or arithmetic shift (signed result).
 */
static void
dt_cg_typecast(const dt_node_t *src, const dt_node_t *dst,
    dt_irlist_t *dlp, dt_regset_t *drp)
{
	size_t srcsize = dt_node_type_size(src);
	size_t dstsize = dt_node_type_size(dst);

	dif_instr_t instr;
	int rg;

	if (!dt_node_is_scalar(dst))
		return; /* not a scalar */
	if (dstsize == srcsize &&
	    ((src->dn_flags ^ dst->dn_flags) & DT_NF_SIGNED) == 0)
		return; /* not narrowing or changing signed-ness */
	if (dstsize > srcsize && (src->dn_flags & DT_NF_SIGNED) == 0)
		return; /* nothing to do in this case */

	rg = dt_regset_alloc(drp);

	if (dstsize > srcsize) {
		int n = sizeof (uint64_t) * NBBY - srcsize * NBBY;
		int s = (dstsize - srcsize) * NBBY;

		dt_cg_setx(dlp, rg, n);

		instr = DIF_INSTR_FMT(DIF_OP_SLL, src->dn_reg, rg, dst->dn_reg);
		dt_irlist_append(dlp, dt_cg_node_alloc(DT_LBL_NONE, instr));

		if ((dst->dn_flags & DT_NF_SIGNED) || n == s) {
			instr = DIF_INSTR_FMT(DIF_OP_SRA,
			    dst->dn_reg, rg, dst->dn_reg);
			dt_irlist_append(dlp,
			    dt_cg_node_alloc(DT_LBL_NONE, instr));
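
To make the shift trick described in the block comment concrete, here is a small user-space C sketch of what the emitted SLL followed by SRA (signed destination) or SRL (unsigned destination) computes for a narrowing cast. The narrow() helper, the sample value, and the printed cases are invented for illustration and are not part of libdtrace.

/*
 * Sketch only: models the shift-based narrowing that dt_cg_typecast() emits.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint64_t
narrow(uint64_t value, size_t dstsize, int dst_signed)
{
	/* Number of high bits to discard when keeping dstsize bytes. */
	int n = (int)(sizeof (uint64_t) - dstsize) * 8;

	if (n == 0)
		return (value);		/* full-width destination: no-op */

	if (dst_signed) {
		/*
		 * SLL then SRA: the kept low bits are sign-extended.  DIF's
		 * SRA is defined to be arithmetic; the C version relies on
		 * the usual arithmetic-shift behavior for signed types.
		 */
		return ((uint64_t)(((int64_t)(value << n)) >> n));
	}

	/* SLL then SRL: the kept low bits are zero-extended. */
	return ((value << n) >> n);
}

int
main(void)
{
	uint64_t v = 0x12345678abcd1234ULL;

	printf("(int16_t)  -> %llx\n", (unsigned long long)narrow(v, 2, 1));
	printf("(int8_t)   -> %llx\n", (unsigned long long)narrow(v, 1, 1));
	printf("(uint32_t) -> %llx\n", (unsigned long long)narrow(v, 4, 0));
	printf("(int32_t)  -> %llx\n", (unsigned long long)narrow(v, 4, 1));

	return (0);
}

Run against this sample value the four cases print 1234, 34, abcd1234 and ffffffffabcd1234: only the signed 4-byte cast sign-extends, because bit 31 of the kept value happens to be set. The widening branch of dt_cg_typecast() is the mirror image of this, and as the early returns above show, it only has work to do when the source is signed.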



