
List:       gcc-fortran
Subject:    Fwd: (Re)allocation of allocatable arrays on assignment - F2003
From:       Paul Richard Thomas <paul.richard.thomas () gmail ! com>
Date:       2010-10-23 9:22:56
Message-ID: AANLkTi=bk46xTEUQhCJLMuHw5aumPbBLk62SCLTZU1h- () mail ! gmail ! com

Cc: fortran@gcc.gnu.org


Dear Dominique and Tobias,

I thought I should update you on the present state of the allocate on
assignment patch.  It now passes all the testcases and addresses the
remarks that you both raised previously.  At the end of the testcase,
you will see that
there is something untoward with scalar references.  I think that I am
picking up the wrong dimensions from the loop_info.  I'll get that
sorted this weekend and then I'll move on to allocatable scalars,
including character(:).
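
As a reminder of what is being implemented, here is a minimal,
self-contained sketch of the F2003 (re)allocation-on-assignment
behaviour (the program name is just for illustration; the character(:)
case is the follow-up work mentioned above, not part of this patch):

program f2003_assign_sketch
  implicit none
  integer, allocatable :: a(:)
  character(:), allocatable :: s
  ! Assignment to an unallocated, or differently sized, allocatable
  ! (re)allocates the lhs with the shape, bounds and deferred type
  ! parameters of the rhs.
  a = [1, 2, 3]      ! a is allocated with size 3 and lbound 1
  a = [a, 4]         ! a is reallocated with size 4
  s = "hello"        ! s is allocated with len(s) == 5
  print *, size(a), len(s)
end program f2003_assign_sketch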

Cheers

Paul

On Sat, Oct 9, 2010 at 3:40 PM, Dominique Dhumieres <dominiq@lps.ens.fr> wrote:
>
> Reduced test case for nf.f90
>
> program NF
> implicit none
> integer,parameter :: dpkind=kind(1.0D0)
> integer :: nx=9 , ny=10 , nz=9
> real(dpkind),allocatable,dimension(:) :: ad,g
> integer :: nxyz , nxy
>
> nxy = nx*ny ; nxyz = nxy*nz
> allocate(ad(nxyz),g(nxyz))
> ad=111.001d0
> call GetGI3D(1,nxyz)
> deallocate(ad,g)
> contains
>
> subroutine GetGI3D(i1,i2)
> integer :: i1 , i2
> integer :: i
> g = ad
> print *, g(i1), g(i2), ad(i2), maxval(abs(ad-g))
> end subroutine GetGI3D
>
> end
>
> result without patch
>   111.00100000000000        111.00100000000000        111.00100000000000        0.0000000000000000
>
> with patch
>   111.00100000000000        0.0000000000000000        111.00100000000000        111.00100000000000
>
> i.e., the last element of 'ad' is not assigned to 'g',
> probably the signature of what Tobias reported in
> http://gcc.gnu.org/ml/fortran/2010-10/msg00112.html
>
> Dominique



--
The knack of flying is learning how to throw yourself at the ground and miss.
       --Hitchhikers Guide to the Galaxy



["check2310.diff" (text/x-diff)]

Index: gcc/fortran/trans-array.c
===================================================================
*** gcc/fortran/trans-array.c	(revision 164755)
--- gcc/fortran/trans-array.c	(working copy)
*************** gfc_trans_array_constructor (gfc_loopinf
*** 1830,1835 ****
--- 1830,1836 ----
    tree offsetvar;
    tree desc;
    tree type;
+   tree tmp;
    bool dynamic;
    bool old_first_len, old_typespec_chararray_ctor;
    tree old_first_len_val;
*************** gfc_trans_array_constructor (gfc_loopinf
*** 1942,1947 ****
--- 1943,1951 ----
  	}
      }
  
+   if (TREE_CODE (loop->to[0]) == VAR_DECL)
+     dynamic = true;
+ 
    gfc_trans_create_temp_array (&loop->pre, &loop->post, loop, &ss->data.info,
  			       type, NULL_TREE, dynamic, true, false, where);
  
*************** gfc_trans_array_constructor (gfc_loopinf
*** 1956,1967 ****
    /* If the array grows dynamically, the upper bound of the loop variable
       is determined by the array's final upper bound.  */
    if (dynamic)
!     loop->to[0] = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[0]);
  
    if (TREE_USED (offsetvar))
      pushdecl (offsetvar);
    else
      gcc_assert (INTEGER_CST_P (offset));
  #if 0
    /* Disable bound checking for now because it's probably broken.  */
    if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
--- 1960,1982 ----
    /* If the array grows dynamically, the upper bound of the loop variable
       is determined by the array's final upper bound.  */
    if (dynamic)
!     {
!       tmp = fold_build2_loc (input_location, MINUS_EXPR,
! 			     gfc_array_index_type,
! 			     offsetvar, gfc_index_one_node);
!       tmp = gfc_evaluate_now (tmp, &loop->pre);
!       gfc_conv_descriptor_ubound_set (&loop->pre, desc, gfc_rank_cst[0], tmp);
!       if (loop->to[0] && TREE_CODE (loop->to[0]) == VAR_DECL)
! 	gfc_add_modify (&loop->pre, loop->to[0], tmp);
!       else
! 	loop->to[0] = tmp;
!     }
  
    if (TREE_USED (offsetvar))
      pushdecl (offsetvar);
    else
      gcc_assert (INTEGER_CST_P (offset));
+ 
  #if 0
    /* Disable bound checking for now because it's probably broken.  */
    if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
*************** gfc_conv_ss_descriptor (stmtblock_t * bl
*** 2174,2179 ****
--- 2189,2199 ----
  
        tmp = gfc_conv_array_offset (se.expr);
        ss->data.info.offset = gfc_evaluate_now (tmp, block);
+ 
+       /* Make absolutely sure that the saved_offset is indeed saved
+ 	 so that the variable is still accessible after the loops
+ 	 are translated.  */
+       ss->data.info.saved_offset = ss->data.info.offset;
      }
  }
  
*************** gfc_copy_only_alloc_comp (gfc_symbol * d
*** 6449,6454 ****
--- 6469,6818 ----
  }
  
  
+ /* Returns the value of LBOUND for an expression.  This could be broken out
+    from gfc_conv_intrinsic_bound but this seemed to be simpler.  This is
+    called by gfc_alloc_allocatable_for_assignment.  */
+ static tree
+ get_std_lbound (gfc_expr *expr, tree desc, int dim, bool assumed_size)
+ {
+   tree lbound;
+   tree ubound;
+   tree stride;
+   tree cond, cond1, cond3, cond4;
+   tree tmp;
+   if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)))
+     {
+       tmp = gfc_rank_cst[dim];
+       lbound = gfc_conv_descriptor_lbound_get (desc, tmp);
+       ubound = gfc_conv_descriptor_ubound_get (desc, tmp);
+       stride = gfc_conv_descriptor_stride_get (desc, tmp);
+       cond1 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ 			       ubound, lbound);
+       cond3 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
+ 			       stride, gfc_index_zero_node);
+       cond3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ 			       boolean_type_node, cond3, cond1);
+       cond4 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
+ 			       stride, gfc_index_zero_node);
+       if (assumed_size)
+ 	cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ 				tmp, build_int_cst (gfc_array_index_type,
+ 						    expr->rank - 1));
+       else
+ 	cond = boolean_false_node;
+ 
+       cond1 = fold_build2_loc (input_location, TRUTH_OR_EXPR,
+ 			       boolean_type_node, cond3, cond4);
+       cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
+ 			      boolean_type_node, cond, cond1);
+ 
+       return fold_build3_loc (input_location, COND_EXPR,
+ 			      gfc_array_index_type, cond,
+ 			      lbound, gfc_index_one_node);
+     }
+   else if (expr->expr_type == EXPR_VARIABLE)
+     {
+       tmp = TREE_TYPE (expr->symtree->n.sym->backend_decl);
+       return GFC_TYPE_ARRAY_LBOUND(tmp, dim);
+     }
+ 
+   return gfc_index_one_node;
+ }
+ 
+ /* Allocate the lhs of an assignment to an allocatable array, otherwise
+    reallocate it.  */
+ 
+ void
+ gfc_alloc_allocatable_for_assignment (gfc_loopinfo *loop,
+ 				      gfc_expr *expr1,
+ 				      gfc_expr *expr2)
+ {
+   stmtblock_t realloc_block;
+   stmtblock_t alloc_block;
+   stmtblock_t fblock;
+   gfc_ss *rss;
+   gfc_ss *lss;
+   tree realloc_expr;
+   tree alloc_expr;
+   tree size1;
+   tree size2;
+   tree array1;
+   tree cond;
+   tree tmp;
+   tree tmp2;
+   tree lbound;
+   tree ubound;
+   tree desc;
+   tree desc2;
+   tree offset;
+   tree jump_label;
+   tree lbd;
+   int n, dim;
+   gfc_array_spec * as;
+ 
+   if (!expr1->symtree->n.sym->attr.allocatable
+ 	|| (expr1->ref && expr1->ref->type == REF_ARRAY
+ 	      && expr1->ref->u.ar.type != AR_FULL)
+ 	|| !expr2->rank)
+     return;
+ 
+   /* Find the ss for the lhs.  */
+   lss = loop->ss;
+   for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
+     if (lss->expr == expr1)
+       break;
+ 
+   if (lss == gfc_ss_terminator)
+     return;
+ 
+   /* Find the ss for the rhs.  */
+   rss = loop->ss;
+   for (; rss && rss != gfc_ss_terminator; rss = rss->loop_chain)
+     if (rss->expr == expr2)
+       break;
+ 
+   if (rss == gfc_ss_terminator)
+     return;
+ 
+   gfc_start_block (&fblock);
+ 
+   /* Get the lhs descriptor, data pointer and array size.  */
+   desc = lss->data.info.descriptor;
+   gcc_assert (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)));
+   array1 = gfc_conv_descriptor_data_get (desc);
+   size1 = gfc_conv_descriptor_size (desc, expr1->rank);
+ 
+  /* Get the rhs size.  Fix both sizes.  */
+   desc2 = rss->data.info.descriptor;
+   if (desc2 && GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc2)))
+     size2 = gfc_conv_descriptor_size (desc2, expr1->rank);
+   else
+     {
+       size2 = gfc_index_one_node;
+       for (n = 0; n < expr2->rank; n++)
+ 	{
+ 	  dim = rss->data.info.dim[n];
+ 	  tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ 				 gfc_array_index_type,
+ 				 loop->to[dim], loop->from[dim]);
+ 	  tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ 				 gfc_array_index_type,
+ 				 tmp, gfc_index_one_node);
+ 	  size2 = fold_build2_loc (input_location, MULT_EXPR,
+ 			 	   gfc_array_index_type,
+ 				   tmp, size2);
+ 	}
+     }
+   size1 = gfc_evaluate_now (size1, &fblock);
+   size2 = gfc_evaluate_now (size2, &fblock);
+ 
+   /* If the lhs is allocated and the lhs and rhs are equal length, jump
+      past the realloc/malloc.  This allows F95 compliant expressions
+      to escape allocation on assignment.  */
+   jump_label = gfc_build_label_decl (NULL_TREE);
+   tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ 			 array1, build_int_cst (TREE_TYPE (array1), 0));
+   tmp2 = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ 			  size1, size2);
+   cond = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+ 			  boolean_type_node, tmp, tmp2);
+   tmp = build3_v (COND_EXPR, cond,
+ 		  build1_v (GOTO_EXPR, jump_label),
+ 		  build_empty_stmt (input_location));
+ 
+   gfc_add_expr_to_block (&fblock, tmp);
+ 
+   /* Now modify the lhs descriptor and the associated scalarizer
+      variables.
+      7.4.1.3: If variable is or becomes an unallocated allocatable
+      variable, then it is allocated with each deferred type parameter
+      equal to the corresponding type parameters of expr, with the
+      shape of expr, and with each lower bound equal to the
+      corresponding element of LBOUND(expr).  */
+   size1 = gfc_index_one_node;
+   offset = gfc_index_zero_node;
+   as = gfc_get_full_arrayspec_from_expr (expr2);
+   if (desc2 && GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc2)))
+     {
+       for (n = 0; n < expr2->rank; n++)
+ 	{
+ 	  tmp = gfc_rank_cst[n];
+ 	  lbound = gfc_conv_descriptor_lbound_get (desc2, tmp);
+ 	  ubound = gfc_conv_descriptor_ubound_get (desc2, tmp);
+ 	  tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ 				 gfc_array_index_type, ubound, lbound);
+ 	  tmp = gfc_evaluate_now (tmp, &fblock);
+ 
+ 	  if (as)
+ 	    {
+ 	      lbd = get_std_lbound (expr2, desc2, n,
+ 				    as->type == AS_ASSUMED_SIZE);
+ 	      ubound = fold_build2_loc (input_location,
+ 					MINUS_EXPR,
+ 					gfc_array_index_type,
+ 					ubound, lbound);
+ 	      ubound = fold_build2_loc (input_location,
+ 					PLUS_EXPR,
+ 					gfc_array_index_type,
+ 					ubound, lbd);
+ 	      lbound = lbd;
+ 	    }
+ 
+ 	  dim = lss->data.info.dim[n];
+ 	  gfc_conv_descriptor_lbound_set (&fblock, desc,
+ 					  gfc_rank_cst[n], lbound);
+ 	  gfc_conv_descriptor_ubound_set (&fblock, desc,
+ 					  gfc_rank_cst[n], ubound);
+ 
+ 	  /* Reset the loop to the rhs size.  */
+ 	  tmp2 = fold_build2_loc (input_location, PLUS_EXPR,
+ 				  gfc_array_index_type,
+ 				  tmp, loop->from[dim]);
+ 	  if (loop->to[dim]
+ 		&& TREE_CODE (loop->to[dim]) == VAR_DECL)
+ 	    gfc_add_modify (&fblock, loop->to[dim], tmp2);
+ 	  else
+ 	    loop->to[dim] = gfc_evaluate_now (loop->to[dim],
+ 					      &fblock);
+ 	  tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ 				 gfc_array_index_type,
+ 				 tmp, gfc_index_one_node);
+ 	  tmp2 = fold_build2_loc (input_location, MULT_EXPR,
+ 				  gfc_array_index_type,
+ 				  lbound, size1);
+ 	  offset = fold_build2_loc (input_location, MINUS_EXPR,
+ 				    gfc_array_index_type,
+ 				    offset, tmp2);
+ 	  size1 = fold_build2_loc (input_location, MULT_EXPR,
+ 			 	   gfc_array_index_type,
+ 				   tmp, size1);
+ 	}
+     }
+   else
+     {
+       for (n = 0; n < expr2->rank; n++)
+ 	{
+ 	  dim = rss->data.info.dim[n];
+ 	  tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ 				 gfc_array_index_type,
+ 				 loop->to[dim], loop->from[dim]);
+ 	  tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ 				 gfc_array_index_type,
+ 				 tmp, gfc_index_one_node);
+ 	  lbound = gfc_index_one_node;
+ 	  ubound = tmp;
+ 	  if (as)
+ 	    {
+ 	      lbd = get_std_lbound (expr2, desc2, n,
+ 				    as->type == AS_ASSUMED_SIZE);
+ 	      ubound = fold_build2_loc (input_location,
+ 					MINUS_EXPR,
+ 					gfc_array_index_type,
+ 					ubound, lbound);
+ 	      ubound = fold_build2_loc (input_location,
+ 					PLUS_EXPR,
+ 					gfc_array_index_type,
+ 					ubound, lbd);
+ 	      lbound = lbd;
+ 	    }
+ 
+ 	  gfc_conv_descriptor_lbound_set (&fblock, desc,
+ 					  gfc_rank_cst[n],
+ 					  lbound);
+ 	  gfc_conv_descriptor_ubound_set (&fblock, desc,
+ 					  gfc_rank_cst[n],
+ 					  ubound);
+ 	  lbound = gfc_conv_descriptor_lbound_get (desc,
+ 						   gfc_rank_cst[n]);
+ 	  tmp2 = fold_build2_loc (input_location, MULT_EXPR,
+ 				  gfc_array_index_type,
+ 				  lbound, size1);
+ 	  offset = fold_build2_loc (input_location, MINUS_EXPR,
+ 				    gfc_array_index_type,
+ 				    offset, tmp2);
+ 	  size1 = fold_build2_loc (input_location, MULT_EXPR,
+ 			 	   gfc_array_index_type,
+ 				   tmp, size1);
+ 	}
+     }
+ 
+   /* Set the lhs descriptor and scalarizer offsets.  For rank > 1,
+      the array offset is saved and the info.offset is used for a
+      running offset.  Use the saved_offset instead.  */
+   tmp = gfc_conv_descriptor_offset (desc);
+   gfc_add_modify (&fblock, tmp, offset);
+   if (lss->data.info.saved_offset
+ 	&& TREE_CODE (lss->data.info.saved_offset) == VAR_DECL)
+       gfc_add_modify (&fblock, lss->data.info.saved_offset, tmp);
+ 
+   /* Now set the deltas for the lhs.  */
+   for (n = 0; n < expr2->rank; n++)
+     {
+       tmp = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
+       dim = lss->data.info.dim[n];
+       tmp = fold_build2_loc (input_location, MINUS_EXPR,
+ 			     gfc_array_index_type, tmp,
+ 			     loop->from[dim]);
+       if (lss->data.info.delta[dim]
+ 	    && TREE_CODE (lss->data.info.delta[dim]) == VAR_DECL)
+ 	gfc_add_modify (&fblock, lss->data.info.delta[dim], tmp);
+     }
+ 
+   /* Get the new lhs size in bytes.  */
+   tmp = TYPE_SIZE_UNIT (gfc_typenode_for_spec (&expr2->ts));
+   tmp = fold_convert (gfc_array_index_type, tmp);
+   size2 = fold_build2_loc (input_location, MULT_EXPR,
+ 			   gfc_array_index_type,
+ 			   tmp, size2);
+   size2 = fold_convert (size_type_node, size2);
+   size2 = gfc_evaluate_now (size2, &fblock);
+ 
+   /* Realloc expression.  Note that the scalarizer uses desc.data
+      in the array reference - (*desc.data)[<element>]. */
+   gfc_init_block (&realloc_block);
+   tmp = build_call_expr_loc (input_location,
+ 			     built_in_decls[BUILT_IN_REALLOC], 2,
+ 			     fold_convert (pvoid_type_node, array1),
+ 			     size2);
+   gfc_conv_descriptor_data_set (&realloc_block,
+ 				desc, tmp);
+   realloc_expr = gfc_finish_block (&realloc_block);
+ 
+   /* Malloc expression.  */
+   gfc_init_block (&alloc_block);
+   tmp = build_call_expr_loc (input_location,
+ 			     built_in_decls[BUILT_IN_MALLOC], 1,
+ 			     size2);
+   gfc_conv_descriptor_data_set (&alloc_block,
+ 				desc, tmp);
+   tmp = gfc_conv_descriptor_dtype (desc);
+   gfc_add_modify (&alloc_block, tmp, gfc_get_dtype (TREE_TYPE (desc)));
+   alloc_expr = gfc_finish_block (&alloc_block);
+ 
+   /* Malloc if not allocated; realloc otherwise.  */
+   tmp = build_int_cst (TREE_TYPE (array1), 0);
+   cond = fold_build2_loc (input_location, EQ_EXPR,
+ 			  boolean_type_node,
+ 			  array1, tmp);
+   tmp = build3_v (COND_EXPR, cond, alloc_expr, realloc_expr);
+   gfc_add_expr_to_block (&fblock, tmp);
+ 
+   if (lss->data.info.data
+ 	&& TREE_CODE (lss->data.info.data) == VAR_DECL)
+     {
+       tmp = gfc_conv_descriptor_data_get (desc);
+       gfc_add_modify (&fblock, lss->data.info.data, tmp);
+     }
+ 
+   /* Add the exit label.  */
+   tmp = build1_v (LABEL_EXPR, jump_label);
+   gfc_add_expr_to_block (&fblock, tmp);
+ 
+   tmp = gfc_finish_block (&fblock);
+   gfc_add_expr_to_block (&loop->pre, tmp);
+ }
+ 
+ 
  /* NULLIFY an allocatable/pointer array on function entry, free it on exit.
     Do likewise, recursively if necessary, with the allocatable components of
     derived types.  */
Index: gcc/fortran/trans-expr.c
===================================================================
*** gcc/fortran/trans-expr.c	(revision 164755)
--- gcc/fortran/trans-expr.c	(working copy)
*************** gfc_trans_assignment_1 (gfc_expr * expr1
*** 5744,5749 ****
--- 5744,5752 ----
  	  gfc_add_expr_to_block (&body, tmp);
  	}
  
+       /* Allocate or reallocate lhs of allocatable array.  */
+       gfc_alloc_allocatable_for_assignment (&loop, expr1, expr2);
+ 
        /* Generate the copying loops.  */
        gfc_trans_scalarizing_loops (&loop, &body);
  
Index: gcc/fortran/trans-array.h
===================================================================
*** gcc/fortran/trans-array.h	(revision 164755)
--- gcc/fortran/trans-array.h	(working copy)
*************** tree gfc_copy_alloc_comp (gfc_symbol *, 
*** 57,62 ****
--- 57,64 ----
  
  tree gfc_copy_only_alloc_comp (gfc_symbol *, tree, tree, int);
  
+ void gfc_alloc_allocatable_for_assignment (gfc_loopinfo*, gfc_expr*, gfc_expr*);
+ 
  /* Add initialization for deferred arrays.  */
  void gfc_trans_deferred_array (gfc_symbol *, gfc_wrapped_block *);
  /* Generate an initializer for a static pointer or allocatable array.  */
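
At the Fortran level, the runtime decision that the new
gfc_alloc_allocatable_for_assignment code emits ahead of the copying
loops can be paraphrased roughly as below.  This is only a source-level
sketch of the intended semantics (the program and its names are
illustrative); it is not the generated code and it omits the
lower-bound handling required by F2003 7.4.1.3.

program realloc_decision_sketch
  implicit none
  integer, allocatable :: lhs(:)
  integer :: rhs(4) = [11, 12, 13, 14]

  ! (Re)allocate only when the lhs is unallocated or its size differs
  ! from that of the rhs; when the sizes already match, jump straight
  ! to the copy - the F95-compliant fast path.
  if (.not. allocated (lhs)) then
     allocate (lhs(size (rhs)))          ! "malloc" branch
  else if (size (lhs) /= size (rhs)) then
     deallocate (lhs)                    ! "realloc" branch, done by hand here
     allocate (lhs(size (rhs)))
  end if

  lhs = rhs                              ! sizes now match; plain copy
  print *, lhs
end program realloc_decision_sketch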

["test-alloc.f90" (text/x-fortran)]

! { dg-do run }
! Tests the patch that implements F2003 automatic allocation and
! reallocation of allocatable arrays on assignment.
!
! TODO:
! 1] Discuss on list whether this should be the default. < DONE
! 2] Understand why the branch to allocation is not working in the
!    case that an array is in an undefined state - see below for
!    the assignment to b. < DONE
! 3] Check all the modifications to trans-array.c very carefully.
! 4] Insert an overall condition to bypass the new code when lhs
!    and rhs have the same length and are known at runtime.
! 5] Insert a runtime condition to jump past the code when the
!    lhs and rhs lengths are equal.
! 6] Tidy up the code.
!
  integer(4), allocatable :: a(:), b(:), c(:,:)
  integer(4) :: j
  integer(4) :: src(2:5) = [11,12,13,14]
  integer(4) :: mat(2:3,5:6)
  character(4), allocatable :: chr1(:)
  character(4) :: chr2(2) = ["abcd", "wxyz"]

  allocate(a(1))
  mat = reshape (src, [2,2])

  a = [4,3,2,1]
  if (size(a, 1) .ne. 4) call abort
  if (any (a .ne. [4,3,2,1])) call abort

  a = [((42 - i), i = 1, 10)]  ! Implicit reallocation.
  if (size(a, 1) .ne. 10) call abort
  if (any (a .ne. [((42 - i), i = 1, 10)])) call abort

  b = a
  if (size(b, 1) .ne. 10) call abort
  if (any (b .ne. a)) call abort

  a = [4,3,2,1]
  if (size(a, 1) .ne. 4) call abort
  if (any (a .ne. [4,3,2,1])) call abort

  a = b
  if (size(a, 1) .ne. 10) call abort
  if (any (a .ne. [((42 - i), i = 1, 10)])) call abort

  j = 20
  a = [(i, i = 1, j)]
  if (size(a, 1) .ne. j) call abort
  if (any (a .ne. [(i, i = 1, j)])) call abort

  a = foo (15)
  if (size(a, 1) .ne. 15) call abort
  if (any (a .ne. [((i + 15), i = 1, 15)])) call abort

  a = src
  if (lbound(a, 1) .ne. 2) call abort
  if (ubound(a, 1) .ne. 5) call abort
  if (any (a .ne. [11,12,13,14])) call abort

  k = 7
  a = b(k:8)
  if (lbound(a, 1) .ne. 7) call abort
  if (ubound(a, 1) .ne. 8) call abort
  if (any (a .ne. [34,33])) call abort

  c = mat
  if (any (lbound (c) .ne. lbound (mat))) call abort
  if (any (ubound (c) .ne. ubound (mat))) call abort
  if (any (c .ne. mat)) call abort

  chr1 = chr2
  print *, chr1

  c = mat(3:,:)
!  b = c(3, :)    ! This causes an ICE - elemental scalars are being botched
!  print *, b
contains
  function foo (n) result(res)
    integer(4), allocatable, dimension(:) :: res
    integer(4) :: n
    allocate (res(n))
    res = [((i + 15), i = 1, n)]
  end function foo
end

